/// Portable SIMD dispatch trait. A value implementing `Simd` witnesses a CPU
/// capability [`Level`] and exposes vector types and lanewise operations at
/// 128-, 256-, and 512-bit widths.
pub trait Simd:
Sized
+ Clone
+ Copy
+ Send
+ Sync
+ Seal
+ 'static {
type f32s: SimdFloat<f32, Self, Block = f32x4<Self>> + SimdCvtFloat<Self::u32s> + SimdCvtFloat<Self::i32s>;
type u8s: SimdInt<u8, Self, Block = u8x16<Self>>;
type i8s: SimdInt<i8, Self, Block = i8x16<Self>>;
type u16s: SimdInt<u16, Self, Block = u16x8<Self>>;
type i16s: SimdInt<i16, Self, Block = i16x8<Self>>;
type u32s: SimdInt<u32, Self, Block = u32x4<Self>> + SimdCvtTruncate<Self::f32s>;
type i32s: SimdInt<i32, Self, Block = i32x4<Self>> + SimdCvtTruncate<Self::f32s>;
type mask8s: SimdMask<i8, Self, Block = mask8x16<Self>>;
type mask16s: SimdMask<i16, Self, Block = mask16x8<Self>>;
type mask32s: SimdMask<i32, Self, Block = mask32x4<Self>>;
// Required methods
/// Returns the CPU capability [`Level`] backing this implementation.
fn level(self) -> Level;
/// Calls the given function with this level's CPU features enabled.
///
/// For performance, the provided function should be `#[inline(always)]`.
fn vectorize<F: FnOnce() -> R, R>(self, f: F) -> R;
fn splat_f32x4(self, val: f32) -> f32x4<Self>;
fn abs_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
fn neg_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
fn sqrt_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
fn add_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn sub_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn mul_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn div_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn copysign_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn simd_eq_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
fn simd_lt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
fn simd_le_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
fn simd_ge_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
fn simd_gt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
fn zip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn zip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn unzip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn unzip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn max_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn max_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn min_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn min_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
fn madd_f32x4(
self,
a: f32x4<Self>,
b: f32x4<Self>,
c: f32x4<Self>,
) -> f32x4<Self>;
fn msub_f32x4(
self,
a: f32x4<Self>,
b: f32x4<Self>,
c: f32x4<Self>,
) -> f32x4<Self>;
fn floor_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
fn fract_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
fn trunc_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
fn select_f32x4(
self,
a: mask32x4<Self>,
b: f32x4<Self>,
c: f32x4<Self>,
) -> f32x4<Self>;
fn combine_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x8<Self>;
fn reinterpret_f64_f32x4(self, a: f32x4<Self>) -> f64x2<Self>;
fn reinterpret_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>;
fn reinterpret_u8_f32x4(self, a: f32x4<Self>) -> u8x16<Self>;
fn reinterpret_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>;
fn cvt_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>;
fn cvt_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>;
fn splat_i8x16(self, val: i8) -> i8x16<Self>;
fn not_i8x16(self, a: i8x16<Self>) -> i8x16<Self>;
fn add_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn sub_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn mul_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn and_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn or_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn xor_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn shr_i8x16(self, a: i8x16<Self>, shift: u32) -> i8x16<Self>;
fn simd_eq_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
fn simd_lt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
fn simd_le_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
fn simd_ge_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
fn simd_gt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
fn zip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn zip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn unzip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn unzip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn select_i8x16(
self,
a: mask8x16<Self>,
b: i8x16<Self>,
c: i8x16<Self>,
) -> i8x16<Self>;
fn min_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn max_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
fn combine_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x32<Self>;
fn reinterpret_u8_i8x16(self, a: i8x16<Self>) -> u8x16<Self>;
fn reinterpret_u32_i8x16(self, a: i8x16<Self>) -> u32x4<Self>;
fn splat_u8x16(self, val: u8) -> u8x16<Self>;
fn not_u8x16(self, a: u8x16<Self>) -> u8x16<Self>;
fn add_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn sub_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn mul_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn and_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn or_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn xor_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn shr_u8x16(self, a: u8x16<Self>, shift: u32) -> u8x16<Self>;
fn simd_eq_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
fn simd_lt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
fn simd_le_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
fn simd_ge_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
fn simd_gt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
fn zip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn zip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn unzip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn unzip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn select_u8x16(
self,
a: mask8x16<Self>,
b: u8x16<Self>,
c: u8x16<Self>,
) -> u8x16<Self>;
fn min_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn max_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
fn combine_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x32<Self>;
fn widen_u8x16(self, a: u8x16<Self>) -> u16x16<Self>;
fn reinterpret_u32_u8x16(self, a: u8x16<Self>) -> u32x4<Self>;
fn splat_mask8x16(self, val: i8) -> mask8x16<Self>;
fn not_mask8x16(self, a: mask8x16<Self>) -> mask8x16<Self>;
fn and_mask8x16(
self,
a: mask8x16<Self>,
b: mask8x16<Self>,
) -> mask8x16<Self>;
fn or_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
fn xor_mask8x16(
self,
a: mask8x16<Self>,
b: mask8x16<Self>,
) -> mask8x16<Self>;
fn select_mask8x16(
self,
a: mask8x16<Self>,
b: mask8x16<Self>,
c: mask8x16<Self>,
) -> mask8x16<Self>;
fn simd_eq_mask8x16(
self,
a: mask8x16<Self>,
b: mask8x16<Self>,
) -> mask8x16<Self>;
fn combine_mask8x16(
self,
a: mask8x16<Self>,
b: mask8x16<Self>,
) -> mask8x32<Self>;
fn splat_i16x8(self, val: i16) -> i16x8<Self>;
fn not_i16x8(self, a: i16x8<Self>) -> i16x8<Self>;
fn add_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn sub_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn mul_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn and_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn or_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn xor_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn shr_i16x8(self, a: i16x8<Self>, shift: u32) -> i16x8<Self>;
fn simd_eq_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
fn simd_lt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
fn simd_le_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
fn simd_ge_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
fn simd_gt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
fn zip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn zip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn unzip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn unzip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn select_i16x8(
self,
a: mask16x8<Self>,
b: i16x8<Self>,
c: i16x8<Self>,
) -> i16x8<Self>;
fn min_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn max_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
fn combine_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x16<Self>;
fn reinterpret_u8_i16x8(self, a: i16x8<Self>) -> u8x16<Self>;
fn reinterpret_u32_i16x8(self, a: i16x8<Self>) -> u32x4<Self>;
fn splat_u16x8(self, val: u16) -> u16x8<Self>;
fn not_u16x8(self, a: u16x8<Self>) -> u16x8<Self>;
fn add_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn sub_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn mul_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn and_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn or_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn xor_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn shr_u16x8(self, a: u16x8<Self>, shift: u32) -> u16x8<Self>;
fn simd_eq_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
fn simd_lt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
fn simd_le_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
fn simd_ge_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
fn simd_gt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
fn zip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn zip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn unzip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn unzip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn select_u16x8(
self,
a: mask16x8<Self>,
b: u16x8<Self>,
c: u16x8<Self>,
) -> u16x8<Self>;
fn min_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn max_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
fn combine_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x16<Self>;
fn reinterpret_u8_u16x8(self, a: u16x8<Self>) -> u8x16<Self>;
fn reinterpret_u32_u16x8(self, a: u16x8<Self>) -> u32x4<Self>;
fn splat_mask16x8(self, val: i16) -> mask16x8<Self>;
fn not_mask16x8(self, a: mask16x8<Self>) -> mask16x8<Self>;
fn and_mask16x8(
self,
a: mask16x8<Self>,
b: mask16x8<Self>,
) -> mask16x8<Self>;
fn or_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
fn xor_mask16x8(
self,
a: mask16x8<Self>,
b: mask16x8<Self>,
) -> mask16x8<Self>;
fn select_mask16x8(
self,
a: mask16x8<Self>,
b: mask16x8<Self>,
c: mask16x8<Self>,
) -> mask16x8<Self>;
fn simd_eq_mask16x8(
self,
a: mask16x8<Self>,
b: mask16x8<Self>,
) -> mask16x8<Self>;
fn combine_mask16x8(
self,
a: mask16x8<Self>,
b: mask16x8<Self>,
) -> mask16x16<Self>;
fn splat_i32x4(self, val: i32) -> i32x4<Self>;
fn not_i32x4(self, a: i32x4<Self>) -> i32x4<Self>;
fn add_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn sub_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn mul_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn and_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn or_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn xor_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn shr_i32x4(self, a: i32x4<Self>, shift: u32) -> i32x4<Self>;
fn simd_eq_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
fn simd_lt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
fn simd_le_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
fn simd_ge_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
fn simd_gt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
fn zip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn zip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn unzip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn unzip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn select_i32x4(
self,
a: mask32x4<Self>,
b: i32x4<Self>,
c: i32x4<Self>,
) -> i32x4<Self>;
fn min_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn max_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
fn combine_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x8<Self>;
fn reinterpret_u8_i32x4(self, a: i32x4<Self>) -> u8x16<Self>;
fn reinterpret_u32_i32x4(self, a: i32x4<Self>) -> u32x4<Self>;
fn cvt_f32_i32x4(self, a: i32x4<Self>) -> f32x4<Self>;
fn splat_u32x4(self, val: u32) -> u32x4<Self>;
fn not_u32x4(self, a: u32x4<Self>) -> u32x4<Self>;
fn add_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn sub_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn mul_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn and_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn or_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn xor_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn shr_u32x4(self, a: u32x4<Self>, shift: u32) -> u32x4<Self>;
fn simd_eq_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
fn simd_lt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
fn simd_le_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
fn simd_ge_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
fn simd_gt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
fn zip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn zip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn unzip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn unzip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn select_u32x4(
self,
a: mask32x4<Self>,
b: u32x4<Self>,
c: u32x4<Self>,
) -> u32x4<Self>;
fn min_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn max_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
fn combine_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x8<Self>;
fn reinterpret_u8_u32x4(self, a: u32x4<Self>) -> u8x16<Self>;
fn cvt_f32_u32x4(self, a: u32x4<Self>) -> f32x4<Self>;
fn splat_mask32x4(self, val: i32) -> mask32x4<Self>;
fn not_mask32x4(self, a: mask32x4<Self>) -> mask32x4<Self>;
fn and_mask32x4(
self,
a: mask32x4<Self>,
b: mask32x4<Self>,
) -> mask32x4<Self>;
fn or_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
fn xor_mask32x4(
self,
a: mask32x4<Self>,
b: mask32x4<Self>,
) -> mask32x4<Self>;
fn select_mask32x4(
self,
a: mask32x4<Self>,
b: mask32x4<Self>,
c: mask32x4<Self>,
) -> mask32x4<Self>;
fn simd_eq_mask32x4(
self,
a: mask32x4<Self>,
b: mask32x4<Self>,
) -> mask32x4<Self>;
fn combine_mask32x4(
self,
a: mask32x4<Self>,
b: mask32x4<Self>,
) -> mask32x8<Self>;
fn splat_f64x2(self, val: f64) -> f64x2<Self>;
fn abs_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
fn neg_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
fn sqrt_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
fn add_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn sub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn mul_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn div_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn copysign_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn simd_eq_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
fn simd_lt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
fn simd_le_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
fn simd_ge_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
fn simd_gt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
fn zip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn zip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn unzip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn unzip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn max_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn max_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn min_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn min_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
fn madd_f64x2(
self,
a: f64x2<Self>,
b: f64x2<Self>,
c: f64x2<Self>,
) -> f64x2<Self>;
fn msub_f64x2(
self,
a: f64x2<Self>,
b: f64x2<Self>,
c: f64x2<Self>,
) -> f64x2<Self>;
fn floor_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
fn fract_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
fn trunc_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
fn select_f64x2(
self,
a: mask64x2<Self>,
b: f64x2<Self>,
c: f64x2<Self>,
) -> f64x2<Self>;
fn combine_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x4<Self>;
fn reinterpret_f32_f64x2(self, a: f64x2<Self>) -> f32x4<Self>;
fn splat_mask64x2(self, val: i64) -> mask64x2<Self>;
fn not_mask64x2(self, a: mask64x2<Self>) -> mask64x2<Self>;
fn and_mask64x2(
self,
a: mask64x2<Self>,
b: mask64x2<Self>,
) -> mask64x2<Self>;
fn or_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
fn xor_mask64x2(
self,
a: mask64x2<Self>,
b: mask64x2<Self>,
) -> mask64x2<Self>;
fn select_mask64x2(
self,
a: mask64x2<Self>,
b: mask64x2<Self>,
c: mask64x2<Self>,
) -> mask64x2<Self>;
fn simd_eq_mask64x2(
self,
a: mask64x2<Self>,
b: mask64x2<Self>,
) -> mask64x2<Self>;
fn combine_mask64x2(
self,
a: mask64x2<Self>,
b: mask64x2<Self>,
) -> mask64x4<Self>;
fn splat_f32x8(self, val: f32) -> f32x8<Self>;
fn abs_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
fn neg_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
fn sqrt_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
fn add_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn sub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn mul_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn div_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn copysign_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn simd_eq_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
fn simd_lt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
fn simd_le_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
fn simd_ge_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
fn simd_gt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
fn zip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn zip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn unzip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn unzip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn max_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn max_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn min_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn min_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
fn madd_f32x8(
self,
a: f32x8<Self>,
b: f32x8<Self>,
c: f32x8<Self>,
) -> f32x8<Self>;
fn msub_f32x8(
self,
a: f32x8<Self>,
b: f32x8<Self>,
c: f32x8<Self>,
) -> f32x8<Self>;
fn floor_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
fn fract_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
fn trunc_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
fn select_f32x8(
self,
a: mask32x8<Self>,
b: f32x8<Self>,
c: f32x8<Self>,
) -> f32x8<Self>;
fn combine_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x16<Self>;
fn split_f32x8(self, a: f32x8<Self>) -> (f32x4<Self>, f32x4<Self>);
fn reinterpret_f64_f32x8(self, a: f32x8<Self>) -> f64x4<Self>;
fn reinterpret_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>;
fn reinterpret_u8_f32x8(self, a: f32x8<Self>) -> u8x32<Self>;
fn reinterpret_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>;
fn cvt_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>;
fn cvt_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>;
fn splat_i8x32(self, val: i8) -> i8x32<Self>;
fn not_i8x32(self, a: i8x32<Self>) -> i8x32<Self>;
fn add_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn sub_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn mul_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn and_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn or_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn xor_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn shr_i8x32(self, a: i8x32<Self>, shift: u32) -> i8x32<Self>;
fn simd_eq_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
fn simd_lt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
fn simd_le_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
fn simd_ge_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
fn simd_gt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
fn zip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn zip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn unzip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn unzip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn select_i8x32(
self,
a: mask8x32<Self>,
b: i8x32<Self>,
c: i8x32<Self>,
) -> i8x32<Self>;
fn min_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn max_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
fn combine_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x64<Self>;
fn split_i8x32(self, a: i8x32<Self>) -> (i8x16<Self>, i8x16<Self>);
fn reinterpret_u8_i8x32(self, a: i8x32<Self>) -> u8x32<Self>;
fn reinterpret_u32_i8x32(self, a: i8x32<Self>) -> u32x8<Self>;
fn splat_u8x32(self, val: u8) -> u8x32<Self>;
fn not_u8x32(self, a: u8x32<Self>) -> u8x32<Self>;
fn add_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn sub_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn mul_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn and_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn or_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn xor_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn shr_u8x32(self, a: u8x32<Self>, shift: u32) -> u8x32<Self>;
fn simd_eq_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
fn simd_lt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
fn simd_le_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
fn simd_ge_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
fn simd_gt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
fn zip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn zip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn unzip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn unzip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn select_u8x32(
self,
a: mask8x32<Self>,
b: u8x32<Self>,
c: u8x32<Self>,
) -> u8x32<Self>;
fn min_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn max_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
fn combine_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x64<Self>;
fn split_u8x32(self, a: u8x32<Self>) -> (u8x16<Self>, u8x16<Self>);
fn widen_u8x32(self, a: u8x32<Self>) -> u16x32<Self>;
fn reinterpret_u32_u8x32(self, a: u8x32<Self>) -> u32x8<Self>;
fn splat_mask8x32(self, val: i8) -> mask8x32<Self>;
fn not_mask8x32(self, a: mask8x32<Self>) -> mask8x32<Self>;
fn and_mask8x32(
self,
a: mask8x32<Self>,
b: mask8x32<Self>,
) -> mask8x32<Self>;
fn or_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
fn xor_mask8x32(
self,
a: mask8x32<Self>,
b: mask8x32<Self>,
) -> mask8x32<Self>;
fn select_mask8x32(
self,
a: mask8x32<Self>,
b: mask8x32<Self>,
c: mask8x32<Self>,
) -> mask8x32<Self>;
fn simd_eq_mask8x32(
self,
a: mask8x32<Self>,
b: mask8x32<Self>,
) -> mask8x32<Self>;
fn combine_mask8x32(
self,
a: mask8x32<Self>,
b: mask8x32<Self>,
) -> mask8x64<Self>;
fn split_mask8x32(
self,
a: mask8x32<Self>,
) -> (mask8x16<Self>, mask8x16<Self>);
fn splat_i16x16(self, val: i16) -> i16x16<Self>;
fn not_i16x16(self, a: i16x16<Self>) -> i16x16<Self>;
fn add_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn sub_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn mul_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn and_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn or_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn xor_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn shr_i16x16(self, a: i16x16<Self>, shift: u32) -> i16x16<Self>;
fn simd_eq_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
fn simd_lt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
fn simd_le_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
fn simd_ge_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
fn simd_gt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
fn zip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn zip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn unzip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn unzip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn select_i16x16(
self,
a: mask16x16<Self>,
b: i16x16<Self>,
c: i16x16<Self>,
) -> i16x16<Self>;
fn min_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn max_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
fn combine_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x32<Self>;
fn split_i16x16(self, a: i16x16<Self>) -> (i16x8<Self>, i16x8<Self>);
fn reinterpret_u8_i16x16(self, a: i16x16<Self>) -> u8x32<Self>;
fn reinterpret_u32_i16x16(self, a: i16x16<Self>) -> u32x8<Self>;
fn splat_u16x16(self, val: u16) -> u16x16<Self>;
fn not_u16x16(self, a: u16x16<Self>) -> u16x16<Self>;
fn add_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn sub_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn mul_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn and_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn or_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn xor_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn shr_u16x16(self, a: u16x16<Self>, shift: u32) -> u16x16<Self>;
fn simd_eq_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
fn simd_lt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
fn simd_le_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
fn simd_ge_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
fn simd_gt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
fn zip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn zip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn unzip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn unzip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn select_u16x16(
self,
a: mask16x16<Self>,
b: u16x16<Self>,
c: u16x16<Self>,
) -> u16x16<Self>;
fn min_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn max_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
fn combine_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x32<Self>;
fn split_u16x16(self, a: u16x16<Self>) -> (u16x8<Self>, u16x8<Self>);
fn narrow_u16x16(self, a: u16x16<Self>) -> u8x16<Self>;
fn reinterpret_u8_u16x16(self, a: u16x16<Self>) -> u8x32<Self>;
fn reinterpret_u32_u16x16(self, a: u16x16<Self>) -> u32x8<Self>;
fn splat_mask16x16(self, val: i16) -> mask16x16<Self>;
fn not_mask16x16(self, a: mask16x16<Self>) -> mask16x16<Self>;
fn and_mask16x16(
self,
a: mask16x16<Self>,
b: mask16x16<Self>,
) -> mask16x16<Self>;
fn or_mask16x16(
self,
a: mask16x16<Self>,
b: mask16x16<Self>,
) -> mask16x16<Self>;
fn xor_mask16x16(
self,
a: mask16x16<Self>,
b: mask16x16<Self>,
) -> mask16x16<Self>;
fn select_mask16x16(
self,
a: mask16x16<Self>,
b: mask16x16<Self>,
c: mask16x16<Self>,
) -> mask16x16<Self>;
fn simd_eq_mask16x16(
self,
a: mask16x16<Self>,
b: mask16x16<Self>,
) -> mask16x16<Self>;
fn combine_mask16x16(
self,
a: mask16x16<Self>,
b: mask16x16<Self>,
) -> mask16x32<Self>;
fn split_mask16x16(
self,
a: mask16x16<Self>,
) -> (mask16x8<Self>, mask16x8<Self>);
fn splat_i32x8(self, val: i32) -> i32x8<Self>;
fn not_i32x8(self, a: i32x8<Self>) -> i32x8<Self>;
fn add_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn sub_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn mul_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn and_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn or_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn xor_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn shr_i32x8(self, a: i32x8<Self>, shift: u32) -> i32x8<Self>;
fn simd_eq_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
fn simd_lt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
fn simd_le_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
fn simd_ge_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
fn simd_gt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
fn zip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn zip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn unzip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn unzip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn select_i32x8(
self,
a: mask32x8<Self>,
b: i32x8<Self>,
c: i32x8<Self>,
) -> i32x8<Self>;
fn min_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn max_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
fn combine_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x16<Self>;
fn split_i32x8(self, a: i32x8<Self>) -> (i32x4<Self>, i32x4<Self>);
fn reinterpret_u8_i32x8(self, a: i32x8<Self>) -> u8x32<Self>;
fn reinterpret_u32_i32x8(self, a: i32x8<Self>) -> u32x8<Self>;
fn cvt_f32_i32x8(self, a: i32x8<Self>) -> f32x8<Self>;
fn splat_u32x8(self, val: u32) -> u32x8<Self>;
fn not_u32x8(self, a: u32x8<Self>) -> u32x8<Self>;
fn add_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn sub_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn mul_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn and_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn or_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn xor_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn shr_u32x8(self, a: u32x8<Self>, shift: u32) -> u32x8<Self>;
fn simd_eq_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
fn simd_lt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
fn simd_le_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
fn simd_ge_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
fn simd_gt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
fn zip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn zip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn unzip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn unzip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn select_u32x8(
self,
a: mask32x8<Self>,
b: u32x8<Self>,
c: u32x8<Self>,
) -> u32x8<Self>;
fn min_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn max_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
fn combine_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x16<Self>;
fn split_u32x8(self, a: u32x8<Self>) -> (u32x4<Self>, u32x4<Self>);
fn reinterpret_u8_u32x8(self, a: u32x8<Self>) -> u8x32<Self>;
fn cvt_f32_u32x8(self, a: u32x8<Self>) -> f32x8<Self>;
fn splat_mask32x8(self, val: i32) -> mask32x8<Self>;
fn not_mask32x8(self, a: mask32x8<Self>) -> mask32x8<Self>;
fn and_mask32x8(
self,
a: mask32x8<Self>,
b: mask32x8<Self>,
) -> mask32x8<Self>;
fn or_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
fn xor_mask32x8(
self,
a: mask32x8<Self>,
b: mask32x8<Self>,
) -> mask32x8<Self>;
fn select_mask32x8(
self,
a: mask32x8<Self>,
b: mask32x8<Self>,
c: mask32x8<Self>,
) -> mask32x8<Self>;
fn simd_eq_mask32x8(
self,
a: mask32x8<Self>,
b: mask32x8<Self>,
) -> mask32x8<Self>;
fn combine_mask32x8(
self,
a: mask32x8<Self>,
b: mask32x8<Self>,
) -> mask32x16<Self>;
fn split_mask32x8(
self,
a: mask32x8<Self>,
) -> (mask32x4<Self>, mask32x4<Self>);
fn splat_f64x4(self, val: f64) -> f64x4<Self>;
fn abs_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
fn neg_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
fn sqrt_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
fn add_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn sub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn mul_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn div_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn copysign_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn simd_eq_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
fn simd_lt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
fn simd_le_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
fn simd_ge_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
fn simd_gt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
fn zip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn zip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn unzip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn unzip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn max_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn max_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn min_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn min_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
fn madd_f64x4(
self,
a: f64x4<Self>,
b: f64x4<Self>,
c: f64x4<Self>,
) -> f64x4<Self>;
fn msub_f64x4(
self,
a: f64x4<Self>,
b: f64x4<Self>,
c: f64x4<Self>,
) -> f64x4<Self>;
fn floor_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
fn fract_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
fn trunc_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
fn select_f64x4(
self,
a: mask64x4<Self>,
b: f64x4<Self>,
c: f64x4<Self>,
) -> f64x4<Self>;
fn combine_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x8<Self>;
fn split_f64x4(self, a: f64x4<Self>) -> (f64x2<Self>, f64x2<Self>);
fn reinterpret_f32_f64x4(self, a: f64x4<Self>) -> f32x8<Self>;
fn splat_mask64x4(self, val: i64) -> mask64x4<Self>;
fn not_mask64x4(self, a: mask64x4<Self>) -> mask64x4<Self>;
fn and_mask64x4(
self,
a: mask64x4<Self>,
b: mask64x4<Self>,
) -> mask64x4<Self>;
fn or_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
fn xor_mask64x4(
self,
a: mask64x4<Self>,
b: mask64x4<Self>,
) -> mask64x4<Self>;
fn select_mask64x4(
self,
a: mask64x4<Self>,
b: mask64x4<Self>,
c: mask64x4<Self>,
) -> mask64x4<Self>;
fn simd_eq_mask64x4(
self,
a: mask64x4<Self>,
b: mask64x4<Self>,
) -> mask64x4<Self>;
fn combine_mask64x4(
self,
a: mask64x4<Self>,
b: mask64x4<Self>,
) -> mask64x8<Self>;
fn split_mask64x4(
self,
a: mask64x4<Self>,
) -> (mask64x2<Self>, mask64x2<Self>);
fn splat_f32x16(self, val: f32) -> f32x16<Self>;
fn abs_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
fn neg_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
fn sqrt_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
fn add_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn sub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn mul_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn div_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn copysign_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn simd_eq_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
fn simd_lt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
fn simd_le_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
fn simd_ge_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
fn simd_gt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
fn zip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn zip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn unzip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn unzip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn max_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn max_precise_f32x16(
self,
a: f32x16<Self>,
b: f32x16<Self>,
) -> f32x16<Self>;
fn min_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
fn min_precise_f32x16(
self,
a: f32x16<Self>,
b: f32x16<Self>,
) -> f32x16<Self>;
fn madd_f32x16(
self,
a: f32x16<Self>,
b: f32x16<Self>,
c: f32x16<Self>,
) -> f32x16<Self>;
fn msub_f32x16(
self,
a: f32x16<Self>,
b: f32x16<Self>,
c: f32x16<Self>,
) -> f32x16<Self>;
fn floor_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
fn fract_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
fn trunc_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
fn select_f32x16(
self,
a: mask32x16<Self>,
b: f32x16<Self>,
c: f32x16<Self>,
) -> f32x16<Self>;
fn split_f32x16(self, a: f32x16<Self>) -> (f32x8<Self>, f32x8<Self>);
fn reinterpret_f64_f32x16(self, a: f32x16<Self>) -> f64x8<Self>;
fn reinterpret_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>;
fn load_interleaved_128_f32x16(self, src: &[f32; 16]) -> f32x16<Self>;
fn store_interleaved_128_f32x16(self, a: f32x16<Self>, dest: &mut [f32; 16]);
fn reinterpret_u8_f32x16(self, a: f32x16<Self>) -> u8x64<Self>;
fn reinterpret_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>;
fn cvt_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>;
fn cvt_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>;
fn splat_i8x64(self, val: i8) -> i8x64<Self>;
fn not_i8x64(self, a: i8x64<Self>) -> i8x64<Self>;
fn add_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn sub_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn mul_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn and_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn or_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn xor_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn shr_i8x64(self, a: i8x64<Self>, shift: u32) -> i8x64<Self>;
fn simd_eq_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
fn simd_lt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
fn simd_le_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
fn simd_ge_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
fn simd_gt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
fn zip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn zip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn unzip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn unzip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn select_i8x64(
self,
a: mask8x64<Self>,
b: i8x64<Self>,
c: i8x64<Self>,
) -> i8x64<Self>;
fn min_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn max_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
fn split_i8x64(self, a: i8x64<Self>) -> (i8x32<Self>, i8x32<Self>);
fn reinterpret_u8_i8x64(self, a: i8x64<Self>) -> u8x64<Self>;
fn reinterpret_u32_i8x64(self, a: i8x64<Self>) -> u32x16<Self>;
fn splat_u8x64(self, val: u8) -> u8x64<Self>;
fn not_u8x64(self, a: u8x64<Self>) -> u8x64<Self>;
fn add_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn sub_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn mul_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn and_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn or_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn xor_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn shr_u8x64(self, a: u8x64<Self>, shift: u32) -> u8x64<Self>;
fn simd_eq_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
fn simd_lt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
fn simd_le_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
fn simd_ge_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
fn simd_gt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
fn zip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn zip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn unzip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn unzip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn select_u8x64(
self,
a: mask8x64<Self>,
b: u8x64<Self>,
c: u8x64<Self>,
) -> u8x64<Self>;
fn min_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn max_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
fn split_u8x64(self, a: u8x64<Self>) -> (u8x32<Self>, u8x32<Self>);
fn load_interleaved_128_u8x64(self, src: &[u8; 64]) -> u8x64<Self>;
fn store_interleaved_128_u8x64(self, a: u8x64<Self>, dest: &mut [u8; 64]);
fn reinterpret_u32_u8x64(self, a: u8x64<Self>) -> u32x16<Self>;
fn splat_mask8x64(self, val: i8) -> mask8x64<Self>;
fn not_mask8x64(self, a: mask8x64<Self>) -> mask8x64<Self>;
fn and_mask8x64(
self,
a: mask8x64<Self>,
b: mask8x64<Self>,
) -> mask8x64<Self>;
fn or_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
fn xor_mask8x64(
self,
a: mask8x64<Self>,
b: mask8x64<Self>,
) -> mask8x64<Self>;
fn select_mask8x64(
self,
a: mask8x64<Self>,
b: mask8x64<Self>,
c: mask8x64<Self>,
) -> mask8x64<Self>;
fn simd_eq_mask8x64(
self,
a: mask8x64<Self>,
b: mask8x64<Self>,
) -> mask8x64<Self>;
fn split_mask8x64(
self,
a: mask8x64<Self>,
) -> (mask8x32<Self>, mask8x32<Self>);
fn splat_i16x32(self, val: i16) -> i16x32<Self>;
fn not_i16x32(self, a: i16x32<Self>) -> i16x32<Self>;
fn add_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn sub_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn mul_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn and_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn or_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn xor_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn shr_i16x32(self, a: i16x32<Self>, shift: u32) -> i16x32<Self>;
fn simd_eq_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
fn simd_lt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
fn simd_le_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
fn simd_ge_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
fn simd_gt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
fn zip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn zip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn unzip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn unzip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn select_i16x32(
self,
a: mask16x32<Self>,
b: i16x32<Self>,
c: i16x32<Self>,
) -> i16x32<Self>;
fn min_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn max_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
fn split_i16x32(self, a: i16x32<Self>) -> (i16x16<Self>, i16x16<Self>);
fn reinterpret_u8_i16x32(self, a: i16x32<Self>) -> u8x64<Self>;
fn reinterpret_u32_i16x32(self, a: i16x32<Self>) -> u32x16<Self>;
fn splat_u16x32(self, val: u16) -> u16x32<Self>;
fn not_u16x32(self, a: u16x32<Self>) -> u16x32<Self>;
fn add_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn sub_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn mul_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn and_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn or_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn xor_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn shr_u16x32(self, a: u16x32<Self>, shift: u32) -> u16x32<Self>;
fn simd_eq_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
fn simd_lt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
fn simd_le_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
fn simd_ge_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
fn simd_gt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
fn zip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn zip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn unzip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn unzip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn select_u16x32(
self,
a: mask16x32<Self>,
b: u16x32<Self>,
c: u16x32<Self>,
) -> u16x32<Self>;
fn min_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn max_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
fn split_u16x32(self, a: u16x32<Self>) -> (u16x16<Self>, u16x16<Self>);
fn load_interleaved_128_u16x32(self, src: &[u16; 32]) -> u16x32<Self>;
fn store_interleaved_128_u16x32(self, a: u16x32<Self>, dest: &mut [u16; 32]);
fn narrow_u16x32(self, a: u16x32<Self>) -> u8x32<Self>;
fn reinterpret_u8_u16x32(self, a: u16x32<Self>) -> u8x64<Self>;
fn reinterpret_u32_u16x32(self, a: u16x32<Self>) -> u32x16<Self>;
fn splat_mask16x32(self, val: i16) -> mask16x32<Self>;
fn not_mask16x32(self, a: mask16x32<Self>) -> mask16x32<Self>;
fn and_mask16x32(
self,
a: mask16x32<Self>,
b: mask16x32<Self>,
) -> mask16x32<Self>;
fn or_mask16x32(
self,
a: mask16x32<Self>,
b: mask16x32<Self>,
) -> mask16x32<Self>;
fn xor_mask16x32(
self,
a: mask16x32<Self>,
b: mask16x32<Self>,
) -> mask16x32<Self>;
fn select_mask16x32(
self,
a: mask16x32<Self>,
b: mask16x32<Self>,
c: mask16x32<Self>,
) -> mask16x32<Self>;
fn simd_eq_mask16x32(
self,
a: mask16x32<Self>,
b: mask16x32<Self>,
) -> mask16x32<Self>;
fn split_mask16x32(
self,
a: mask16x32<Self>,
) -> (mask16x16<Self>, mask16x16<Self>);
fn splat_i32x16(self, val: i32) -> i32x16<Self>;
fn not_i32x16(self, a: i32x16<Self>) -> i32x16<Self>;
fn add_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn sub_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn mul_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn and_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn or_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn xor_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn shr_i32x16(self, a: i32x16<Self>, shift: u32) -> i32x16<Self>;
fn simd_eq_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
fn simd_lt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
fn simd_le_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
fn simd_ge_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
fn simd_gt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
fn zip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn zip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn unzip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn unzip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn select_i32x16(
self,
a: mask32x16<Self>,
b: i32x16<Self>,
c: i32x16<Self>,
) -> i32x16<Self>;
fn min_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn max_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
fn split_i32x16(self, a: i32x16<Self>) -> (i32x8<Self>, i32x8<Self>);
fn reinterpret_u8_i32x16(self, a: i32x16<Self>) -> u8x64<Self>;
fn reinterpret_u32_i32x16(self, a: i32x16<Self>) -> u32x16<Self>;
fn cvt_f32_i32x16(self, a: i32x16<Self>) -> f32x16<Self>;
fn splat_u32x16(self, val: u32) -> u32x16<Self>;
fn not_u32x16(self, a: u32x16<Self>) -> u32x16<Self>;
fn add_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn sub_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn mul_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn and_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn or_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn xor_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn shr_u32x16(self, a: u32x16<Self>, shift: u32) -> u32x16<Self>;
fn simd_eq_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
fn simd_lt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
fn simd_le_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
fn simd_ge_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
fn simd_gt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
fn zip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn zip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn unzip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn unzip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn select_u32x16(
self,
a: mask32x16<Self>,
b: u32x16<Self>,
c: u32x16<Self>,
) -> u32x16<Self>;
fn min_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn max_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
fn split_u32x16(self, a: u32x16<Self>) -> (u32x8<Self>, u32x8<Self>);
fn load_interleaved_128_u32x16(self, src: &[u32; 16]) -> u32x16<Self>;
fn store_interleaved_128_u32x16(self, a: u32x16<Self>, dest: &mut [u32; 16]);
fn reinterpret_u8_u32x16(self, a: u32x16<Self>) -> u8x64<Self>;
fn cvt_f32_u32x16(self, a: u32x16<Self>) -> f32x16<Self>;
fn splat_mask32x16(self, val: i32) -> mask32x16<Self>;
fn not_mask32x16(self, a: mask32x16<Self>) -> mask32x16<Self>;
fn and_mask32x16(
self,
a: mask32x16<Self>,
b: mask32x16<Self>,
) -> mask32x16<Self>;
fn or_mask32x16(
self,
a: mask32x16<Self>,
b: mask32x16<Self>,
) -> mask32x16<Self>;
fn xor_mask32x16(
self,
a: mask32x16<Self>,
b: mask32x16<Self>,
) -> mask32x16<Self>;
fn select_mask32x16(
self,
a: mask32x16<Self>,
b: mask32x16<Self>,
c: mask32x16<Self>,
) -> mask32x16<Self>;
fn simd_eq_mask32x16(
self,
a: mask32x16<Self>,
b: mask32x16<Self>,
) -> mask32x16<Self>;
fn split_mask32x16(
self,
a: mask32x16<Self>,
) -> (mask32x8<Self>, mask32x8<Self>);
fn splat_f64x8(self, val: f64) -> f64x8<Self>;
fn abs_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
fn neg_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
fn sqrt_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
fn add_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn sub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn mul_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn div_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn copysign_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn simd_eq_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
fn simd_lt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
fn simd_le_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
fn simd_ge_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
fn simd_gt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
fn zip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn zip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn unzip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn unzip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn max_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn max_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn min_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn min_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
fn madd_f64x8(
self,
a: f64x8<Self>,
b: f64x8<Self>,
c: f64x8<Self>,
) -> f64x8<Self>;
fn msub_f64x8(
self,
a: f64x8<Self>,
b: f64x8<Self>,
c: f64x8<Self>,
) -> f64x8<Self>;
fn floor_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
fn fract_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
fn trunc_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
fn select_f64x8(
self,
a: mask64x8<Self>,
b: f64x8<Self>,
c: f64x8<Self>,
) -> f64x8<Self>;
fn split_f64x8(self, a: f64x8<Self>) -> (f64x4<Self>, f64x4<Self>);
fn reinterpret_f32_f64x8(self, a: f64x8<Self>) -> f32x16<Self>;
fn splat_mask64x8(self, val: i64) -> mask64x8<Self>;
fn not_mask64x8(self, a: mask64x8<Self>) -> mask64x8<Self>;
fn and_mask64x8(
self,
a: mask64x8<Self>,
b: mask64x8<Self>,
) -> mask64x8<Self>;
fn or_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
fn xor_mask64x8(
self,
a: mask64x8<Self>,
b: mask64x8<Self>,
) -> mask64x8<Self>;
fn select_mask64x8(
self,
a: mask64x8<Self>,
b: mask64x8<Self>,
c: mask64x8<Self>,
) -> mask64x8<Self>;
fn simd_eq_mask64x8(
self,
a: mask64x8<Self>,
b: mask64x8<Self>,
) -> mask64x8<Self>;
fn split_mask64x8(
self,
a: mask64x8<Self>,
) -> (mask64x4<Self>, mask64x4<Self>);
}
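
The listing above is pure surface area; a short sketch shows how it is meant to be driven. The helpers below are illustrative only: they assume `Simd` and the `f32x4` vector type are imported from this crate, and use nothing beyond methods declared on the trait.

#[inline(always)]
fn clamp4<S: Simd>(simd: S, x: f32x4<S>, lo: f32, hi: f32) -> f32x4<S> {
    // Broadcast the scalar bounds to all four lanes.
    let lo = simd.splat_f32x4(lo);
    let hi = simd.splat_f32x4(hi);
    // Clamp lanewise; the non-`precise` min/max are the cheapest variants.
    simd.min_f32x4(simd.max_f32x4(x, lo), hi)
}

// Entry point: `vectorize` runs the closure with the level's CPU features
// enabled, so the #[inline(always)] kernel above can inline into the
// feature-gated frame, per the guidance on `vectorize`.
fn run<S: Simd>(simd: S) -> f32x4<S> {
    simd.vectorize(|| clamp4(simd, simd.splat_f32x4(3.5), 0.0, 1.0))
}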
fn simd_eq_mask8x16( self, a: mask8x16<Self>, b: mask8x16<Self>, ) -> mask8x16<Self>
fn combine_mask8x16( self, a: mask8x16<Self>, b: mask8x16<Self>, ) -> mask8x32<Self>
fn splat_i16x8(self, val: i16) -> i16x8<Self>
fn not_i16x8(self, a: i16x8<Self>) -> i16x8<Self>
fn add_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn sub_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn mul_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn and_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn or_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn xor_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn shr_i16x8(self, a: i16x8<Self>, shift: u32) -> i16x8<Self>
fn simd_eq_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_lt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_le_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_ge_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_gt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn zip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn zip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn unzip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn unzip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn select_i16x8( self, a: mask16x8<Self>, b: i16x8<Self>, c: i16x8<Self>, ) -> i16x8<Self>
fn min_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn max_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn combine_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x16<Self>
fn reinterpret_u8_i16x8(self, a: i16x8<Self>) -> u8x16<Self>
fn reinterpret_u32_i16x8(self, a: i16x8<Self>) -> u32x4<Self>
fn splat_u16x8(self, val: u16) -> u16x8<Self>
fn not_u16x8(self, a: u16x8<Self>) -> u16x8<Self>
fn add_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn sub_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn mul_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn and_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn or_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn xor_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn shr_u16x8(self, a: u16x8<Self>, shift: u32) -> u16x8<Self>
fn simd_eq_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_lt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_le_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_ge_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_gt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn zip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn zip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn unzip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn unzip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn select_u16x8( self, a: mask16x8<Self>, b: u16x8<Self>, c: u16x8<Self>, ) -> u16x8<Self>
fn min_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn max_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn combine_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x16<Self>
fn reinterpret_u8_u16x8(self, a: u16x8<Self>) -> u8x16<Self>
fn reinterpret_u32_u16x8(self, a: u16x8<Self>) -> u32x4<Self>
fn splat_mask16x8(self, val: i16) -> mask16x8<Self>
fn not_mask16x8(self, a: mask16x8<Self>) -> mask16x8<Self>
fn and_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>
fn or_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>
fn xor_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>
fn select_mask16x8( self, a: mask16x8<Self>, b: mask16x8<Self>, c: mask16x8<Self>, ) -> mask16x8<Self>
fn simd_eq_mask16x8( self, a: mask16x8<Self>, b: mask16x8<Self>, ) -> mask16x8<Self>
fn combine_mask16x8( self, a: mask16x8<Self>, b: mask16x8<Self>, ) -> mask16x16<Self>
fn splat_i32x4(self, val: i32) -> i32x4<Self>
fn not_i32x4(self, a: i32x4<Self>) -> i32x4<Self>
fn add_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn sub_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn mul_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn and_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn or_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn xor_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn shr_i32x4(self, a: i32x4<Self>, shift: u32) -> i32x4<Self>
fn simd_eq_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_lt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_le_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_ge_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_gt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn zip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn zip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn unzip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn unzip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn select_i32x4( self, a: mask32x4<Self>, b: i32x4<Self>, c: i32x4<Self>, ) -> i32x4<Self>
fn min_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn max_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn combine_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x8<Self>
fn reinterpret_u8_i32x4(self, a: i32x4<Self>) -> u8x16<Self>
fn reinterpret_u32_i32x4(self, a: i32x4<Self>) -> u32x4<Self>
fn cvt_f32_i32x4(self, a: i32x4<Self>) -> f32x4<Self>
fn splat_u32x4(self, val: u32) -> u32x4<Self>
fn not_u32x4(self, a: u32x4<Self>) -> u32x4<Self>
fn add_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn sub_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn mul_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn and_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn or_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn xor_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn shr_u32x4(self, a: u32x4<Self>, shift: u32) -> u32x4<Self>
fn simd_eq_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_lt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_le_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_ge_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_gt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn zip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn zip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn unzip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn unzip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn select_u32x4( self, a: mask32x4<Self>, b: u32x4<Self>, c: u32x4<Self>, ) -> u32x4<Self>
fn min_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn max_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn combine_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x8<Self>
fn reinterpret_u8_u32x4(self, a: u32x4<Self>) -> u8x16<Self>
fn cvt_f32_u32x4(self, a: u32x4<Self>) -> f32x4<Self>
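A minimal sketch contrasting the two naming families above: reinterpret_* preserves the raw bits while cvt_* converts values (the function name is illustrative).

fn bits_vs_values<S: Simd>(simd: S, v: f32x4<S>) -> (u32x4<S>, u32x4<S>) {
    let bits = simd.reinterpret_u32_f32x4(v); // raw IEEE-754 bit patterns
    let ints = simd.cvt_u32_f32x4(v);         // numeric conversion
    (bits, ints)
}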
fn splat_mask32x4(self, val: i32) -> mask32x4<Self>
fn not_mask32x4(self, a: mask32x4<Self>) -> mask32x4<Self>
fn and_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>
fn or_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>
fn xor_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>
fn select_mask32x4( self, a: mask32x4<Self>, b: mask32x4<Self>, c: mask32x4<Self>, ) -> mask32x4<Self>
fn simd_eq_mask32x4( self, a: mask32x4<Self>, b: mask32x4<Self>, ) -> mask32x4<Self>
fn combine_mask32x4( self, a: mask32x4<Self>, b: mask32x4<Self>, ) -> mask32x8<Self>
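Mask operations let lane-wise conditions be combined before selecting. A minimal sketch (names illustrative) testing lo <= v <= hi per lane:

fn in_range_f32x4<S: Simd>(
    simd: S,
    v: f32x4<S>,
    lo: f32x4<S>,
    hi: f32x4<S>,
) -> mask32x4<S> {
    let above = simd.simd_ge_f32x4(v, lo);
    let below = simd.simd_le_f32x4(v, hi);
    simd.and_mask32x4(above, below) // set only where both conditions hold
}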
fn splat_f64x2(self, val: f64) -> f64x2<Self>
fn abs_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn neg_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn sqrt_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn add_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn sub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn mul_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn div_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn copysign_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn simd_eq_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_lt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_le_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_ge_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_gt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn zip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn zip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn unzip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn unzip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn max_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn max_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn min_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn min_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn madd_f64x2( self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>, ) -> f64x2<Self>
fn msub_f64x2( self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>, ) -> f64x2<Self>
fn floor_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn fract_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn trunc_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn select_f64x2( self, a: mask64x2<Self>, b: f64x2<Self>, c: f64x2<Self>, ) -> f64x2<Self>
fn combine_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x4<Self>
fn reinterpret_f32_f64x2(self, a: f64x2<Self>) -> f32x4<Self>
fn splat_mask64x2(self, val: i64) -> mask64x2<Self>
fn not_mask64x2(self, a: mask64x2<Self>) -> mask64x2<Self>
fn and_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>
fn or_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>
fn xor_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>
fn select_mask64x2( self, a: mask64x2<Self>, b: mask64x2<Self>, c: mask64x2<Self>, ) -> mask64x2<Self>
fn simd_eq_mask64x2( self, a: mask64x2<Self>, b: mask64x2<Self>, ) -> mask64x2<Self>
fn combine_mask64x2( self, a: mask64x2<Self>, b: mask64x2<Self>, ) -> mask64x4<Self>
fn splat_f32x8(self, val: f32) -> f32x8<Self>
fn abs_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn neg_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn sqrt_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn add_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn sub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn mul_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn div_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn copysign_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn simd_eq_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_lt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_le_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_ge_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_gt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn zip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn zip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn unzip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn unzip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn max_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn max_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn min_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn min_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn madd_f32x8( self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>, ) -> f32x8<Self>
fn msub_f32x8( self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>, ) -> f32x8<Self>
fn floor_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn fract_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn trunc_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn select_f32x8( self, a: mask32x8<Self>, b: f32x8<Self>, c: f32x8<Self>, ) -> f32x8<Self>
fn combine_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x16<Self>
fn split_f32x8(self, a: f32x8<Self>) -> (f32x4<Self>, f32x4<Self>)
fn reinterpret_f64_f32x8(self, a: f32x8<Self>) -> f64x4<Self>
fn reinterpret_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>
fn reinterpret_u8_f32x8(self, a: f32x8<Self>) -> u8x32<Self>
fn reinterpret_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>
fn cvt_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>
fn cvt_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>
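The combine_*/split_* pairs move between widths. A minimal sketch (names and the scale factor are illustrative): process two 4-lane vectors as one 8-lane vector, then split the result back into halves.

fn scale_pair<S: Simd>(simd: S, a: f32x4<S>, b: f32x4<S>) -> (f32x4<S>, f32x4<S>) {
    let wide = simd.combine_f32x4(a, b);   // (f32x4, f32x4) -> f32x8
    let k = simd.splat_f32x8(2.0);
    let scaled = simd.mul_f32x8(wide, k);
    simd.split_f32x8(scaled)               // f32x8 -> (f32x4, f32x4)
}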
fn splat_i8x32(self, val: i8) -> i8x32<Self>
fn not_i8x32(self, a: i8x32<Self>) -> i8x32<Self>
fn add_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn sub_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn mul_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn and_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn or_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn xor_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn shr_i8x32(self, a: i8x32<Self>, shift: u32) -> i8x32<Self>
fn simd_eq_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_lt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_le_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_ge_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_gt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn zip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn zip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn unzip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn unzip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn select_i8x32( self, a: mask8x32<Self>, b: i8x32<Self>, c: i8x32<Self>, ) -> i8x32<Self>
fn min_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn max_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn combine_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x64<Self>
fn split_i8x32(self, a: i8x32<Self>) -> (i8x16<Self>, i8x16<Self>)
fn reinterpret_u8_i8x32(self, a: i8x32<Self>) -> u8x32<Self>
fn reinterpret_u32_i8x32(self, a: i8x32<Self>) -> u32x8<Self>
fn splat_u8x32(self, val: u8) -> u8x32<Self>
fn not_u8x32(self, a: u8x32<Self>) -> u8x32<Self>
fn add_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn sub_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn mul_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn and_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn or_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn xor_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn shr_u8x32(self, a: u8x32<Self>, shift: u32) -> u8x32<Self>
fn simd_eq_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_lt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_le_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_ge_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_gt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn zip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn zip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn unzip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn unzip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn select_u8x32( self, a: mask8x32<Self>, b: u8x32<Self>, c: u8x32<Self>, ) -> u8x32<Self>
fn min_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn max_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn combine_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x64<Self>
fn split_u8x32(self, a: u8x32<Self>) -> (u8x16<Self>, u8x16<Self>)
fn widen_u8x32(self, a: u8x32<Self>) -> u16x32<Self>
fn reinterpret_u32_u8x32(self, a: u8x32<Self>) -> u32x8<Self>
fn splat_mask8x32(self, val: i8) -> mask8x32<Self>
fn not_mask8x32(self, a: mask8x32<Self>) -> mask8x32<Self>
fn and_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>
fn or_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>
fn xor_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>
fn select_mask8x32( self, a: mask8x32<Self>, b: mask8x32<Self>, c: mask8x32<Self>, ) -> mask8x32<Self>
fn simd_eq_mask8x32( self, a: mask8x32<Self>, b: mask8x32<Self>, ) -> mask8x32<Self>
fn combine_mask8x32( self, a: mask8x32<Self>, b: mask8x32<Self>, ) -> mask8x64<Self>
fn split_mask8x32(self, a: mask8x32<Self>) -> (mask8x16<Self>, mask8x16<Self>)
fn splat_i16x16(self, val: i16) -> i16x16<Self>
fn not_i16x16(self, a: i16x16<Self>) -> i16x16<Self>
fn add_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn sub_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn mul_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn and_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn or_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn xor_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn shr_i16x16(self, a: i16x16<Self>, shift: u32) -> i16x16<Self>
fn simd_eq_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_lt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_le_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_ge_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_gt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn zip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn zip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn unzip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn unzip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn select_i16x16( self, a: mask16x16<Self>, b: i16x16<Self>, c: i16x16<Self>, ) -> i16x16<Self>
fn min_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn max_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn combine_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x32<Self>
fn split_i16x16(self, a: i16x16<Self>) -> (i16x8<Self>, i16x8<Self>)
fn reinterpret_u8_i16x16(self, a: i16x16<Self>) -> u8x32<Self>
fn reinterpret_u32_i16x16(self, a: i16x16<Self>) -> u32x8<Self>
fn splat_u16x16(self, val: u16) -> u16x16<Self>
fn not_u16x16(self, a: u16x16<Self>) -> u16x16<Self>
fn add_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn sub_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn mul_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn and_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn or_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn xor_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn shr_u16x16(self, a: u16x16<Self>, shift: u32) -> u16x16<Self>
fn simd_eq_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_lt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_le_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_ge_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_gt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn zip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn zip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn unzip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn unzip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn select_u16x16( self, a: mask16x16<Self>, b: u16x16<Self>, c: u16x16<Self>, ) -> u16x16<Self>
fn min_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn max_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn combine_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x32<Self>
fn split_u16x16(self, a: u16x16<Self>) -> (u16x8<Self>, u16x8<Self>)
fn narrow_u16x16(self, a: u16x16<Self>) -> u8x16<Self>
fn reinterpret_u8_u16x16(self, a: u16x16<Self>) -> u8x32<Self>
fn reinterpret_u32_u16x16(self, a: u16x16<Self>) -> u32x8<Self>
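The widen/narrow pair supports overflow-free byte arithmetic. A minimal sketch (name illustrative; narrow_u16x16 is assumed to keep the low byte of each lane, which is lossless here since each averaged value fits in eight bits): a per-lane average of two byte vectors.

fn average_u8x16<S: Simd>(simd: S, a: u8x16<S>, b: u8x16<S>) -> u8x16<S> {
    let sum = simd.add_u16x16(simd.widen_u8x16(a), simd.widen_u8x16(b));
    let avg = simd.shr_u16x16(sum, 1); // halve each 16-bit lane
    simd.narrow_u16x16(avg)            // back down to u8x16
}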
fn splat_mask16x16(self, val: i16) -> mask16x16<Self>
fn not_mask16x16(self, a: mask16x16<Self>) -> mask16x16<Self>
fn and_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x16<Self>
fn or_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>
fn xor_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x16<Self>
fn select_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, c: mask16x16<Self>, ) -> mask16x16<Self>
fn simd_eq_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x16<Self>
fn combine_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x32<Self>
fn split_mask16x16(self, a: mask16x16<Self>) -> (mask16x8<Self>, mask16x8<Self>)
fn splat_i32x8(self, val: i32) -> i32x8<Self>
fn not_i32x8(self, a: i32x8<Self>) -> i32x8<Self>
fn add_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn sub_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn mul_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn and_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn or_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn xor_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn shr_i32x8(self, a: i32x8<Self>, shift: u32) -> i32x8<Self>
fn simd_eq_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_lt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_le_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_ge_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_gt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn zip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn zip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn unzip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn unzip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn select_i32x8( self, a: mask32x8<Self>, b: i32x8<Self>, c: i32x8<Self>, ) -> i32x8<Self>
fn min_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn max_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn combine_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x16<Self>
fn split_i32x8(self, a: i32x8<Self>) -> (i32x4<Self>, i32x4<Self>)
fn reinterpret_u8_i32x8(self, a: i32x8<Self>) -> u8x32<Self>
fn reinterpret_u32_i32x8(self, a: i32x8<Self>) -> u32x8<Self>
fn cvt_f32_i32x8(self, a: i32x8<Self>) -> f32x8<Self>
fn splat_u32x8(self, val: u32) -> u32x8<Self>
fn not_u32x8(self, a: u32x8<Self>) -> u32x8<Self>
fn add_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn sub_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn mul_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn and_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn or_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn xor_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn shr_u32x8(self, a: u32x8<Self>, shift: u32) -> u32x8<Self>
fn simd_eq_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_lt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_le_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_ge_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_gt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn zip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn zip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn unzip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn unzip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn select_u32x8( self, a: mask32x8<Self>, b: u32x8<Self>, c: u32x8<Self>, ) -> u32x8<Self>
fn min_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn max_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn combine_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x16<Self>
fn split_u32x8(self, a: u32x8<Self>) -> (u32x4<Self>, u32x4<Self>)
fn reinterpret_u8_u32x8(self, a: u32x8<Self>) -> u8x32<Self>
fn cvt_f32_u32x8(self, a: u32x8<Self>) -> f32x8<Self>
fn splat_mask32x8(self, val: i32) -> mask32x8<Self>
fn not_mask32x8(self, a: mask32x8<Self>) -> mask32x8<Self>
fn and_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>
fn or_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>
fn xor_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>
fn select_mask32x8( self, a: mask32x8<Self>, b: mask32x8<Self>, c: mask32x8<Self>, ) -> mask32x8<Self>
fn simd_eq_mask32x8( self, a: mask32x8<Self>, b: mask32x8<Self>, ) -> mask32x8<Self>
fn combine_mask32x8( self, a: mask32x8<Self>, b: mask32x8<Self>, ) -> mask32x16<Self>
fn split_mask32x8(self, a: mask32x8<Self>) -> (mask32x4<Self>, mask32x4<Self>)
fn splat_f64x4(self, val: f64) -> f64x4<Self>
fn abs_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn neg_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn sqrt_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn add_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn sub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn mul_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn div_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn copysign_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn simd_eq_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_lt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_le_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_ge_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_gt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn zip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn zip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn unzip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn unzip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn max_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn max_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn min_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn min_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn madd_f64x4( self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>, ) -> f64x4<Self>
fn msub_f64x4( self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>, ) -> f64x4<Self>
fn floor_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn fract_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn trunc_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn select_f64x4( self, a: mask64x4<Self>, b: f64x4<Self>, c: f64x4<Self>, ) -> f64x4<Self>
fn combine_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x8<Self>
fn split_f64x4(self, a: f64x4<Self>) -> (f64x2<Self>, f64x2<Self>)
fn reinterpret_f32_f64x4(self, a: f64x4<Self>) -> f32x8<Self>
fn splat_mask64x4(self, val: i64) -> mask64x4<Self>
fn not_mask64x4(self, a: mask64x4<Self>) -> mask64x4<Self>
fn and_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>
fn or_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>
fn xor_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>
fn select_mask64x4( self, a: mask64x4<Self>, b: mask64x4<Self>, c: mask64x4<Self>, ) -> mask64x4<Self>
fn simd_eq_mask64x4( self, a: mask64x4<Self>, b: mask64x4<Self>, ) -> mask64x4<Self>
fn combine_mask64x4( self, a: mask64x4<Self>, b: mask64x4<Self>, ) -> mask64x8<Self>
fn split_mask64x4(self, a: mask64x4<Self>) -> (mask64x2<Self>, mask64x2<Self>)
fn splat_f32x16(self, val: f32) -> f32x16<Self>
fn abs_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn neg_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn sqrt_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn add_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn sub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn mul_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn div_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn copysign_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn simd_eq_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_lt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_le_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_ge_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_gt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn zip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn zip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn unzip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn unzip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn max_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn max_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn min_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn min_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn madd_f32x16( self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>, ) -> f32x16<Self>
fn msub_f32x16( self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>, ) -> f32x16<Self>
fn floor_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn fract_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn trunc_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn select_f32x16( self, a: mask32x16<Self>, b: f32x16<Self>, c: f32x16<Self>, ) -> f32x16<Self>
fn split_f32x16(self, a: f32x16<Self>) -> (f32x8<Self>, f32x8<Self>)
fn reinterpret_f64_f32x16(self, a: f32x16<Self>) -> f64x8<Self>
fn reinterpret_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>
fn load_interleaved_128_f32x16(self, src: &[f32; 16]) -> f32x16<Self>
fn store_interleaved_128_f32x16(self, a: f32x16<Self>, dest: &mut [f32; 16])
fn reinterpret_u8_f32x16(self, a: f32x16<Self>) -> u8x64<Self>
fn reinterpret_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>
fn cvt_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>
fn cvt_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>
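A minimal sketch of the interleaved load/store pair (name illustrative): because the same implementation-defined 128-bit-block lane order is used for both the load and the store, any lane-uniform operation in between, such as scaling, is order-agnostic.

fn scale_in_place<S: Simd>(simd: S, buf: &mut [f32; 16], k: f32) {
    let v = simd.load_interleaved_128_f32x16(buf);
    let scaled = simd.mul_f32x16(v, simd.splat_f32x16(k));
    simd.store_interleaved_128_f32x16(scaled, buf); // same lane order back out
}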
fn splat_i8x64(self, val: i8) -> i8x64<Self>
fn not_i8x64(self, a: i8x64<Self>) -> i8x64<Self>
fn add_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn sub_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn mul_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn and_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn or_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn xor_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn shr_i8x64(self, a: i8x64<Self>, shift: u32) -> i8x64<Self>
fn simd_eq_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_lt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_le_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_ge_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_gt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn zip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn zip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn unzip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn unzip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn select_i8x64( self, a: mask8x64<Self>, b: i8x64<Self>, c: i8x64<Self>, ) -> i8x64<Self>
fn min_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn max_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn split_i8x64(self, a: i8x64<Self>) -> (i8x32<Self>, i8x32<Self>)
fn reinterpret_u8_i8x64(self, a: i8x64<Self>) -> u8x64<Self>
fn reinterpret_u32_i8x64(self, a: i8x64<Self>) -> u32x16<Self>
fn splat_u8x64(self, val: u8) -> u8x64<Self>
fn not_u8x64(self, a: u8x64<Self>) -> u8x64<Self>
fn add_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn sub_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn mul_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn and_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn or_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn xor_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn shr_u8x64(self, a: u8x64<Self>, shift: u32) -> u8x64<Self>
fn simd_eq_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_lt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_le_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_ge_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_gt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn zip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn zip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn unzip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn unzip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn select_u8x64( self, a: mask8x64<Self>, b: u8x64<Self>, c: u8x64<Self>, ) -> u8x64<Self>
fn min_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn max_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn split_u8x64(self, a: u8x64<Self>) -> (u8x32<Self>, u8x32<Self>)
fn load_interleaved_128_u8x64(self, src: &[u8; 64]) -> u8x64<Self>
fn store_interleaved_128_u8x64(self, a: u8x64<Self>, dest: &mut [u8; 64])
fn reinterpret_u32_u8x64(self, a: u8x64<Self>) -> u32x16<Self>
fn splat_mask8x64(self, val: i8) -> mask8x64<Self>
fn not_mask8x64(self, a: mask8x64<Self>) -> mask8x64<Self>
fn and_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>
fn or_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>
fn xor_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>
fn select_mask8x64( self, a: mask8x64<Self>, b: mask8x64<Self>, c: mask8x64<Self>, ) -> mask8x64<Self>
fn simd_eq_mask8x64( self, a: mask8x64<Self>, b: mask8x64<Self>, ) -> mask8x64<Self>
fn split_mask8x64(self, a: mask8x64<Self>) -> (mask8x32<Self>, mask8x32<Self>)
fn splat_i16x32(self, val: i16) -> i16x32<Self>
fn not_i16x32(self, a: i16x32<Self>) -> i16x32<Self>
fn add_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn sub_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn mul_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn and_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn or_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn xor_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn shr_i16x32(self, a: i16x32<Self>, shift: u32) -> i16x32<Self>
fn simd_eq_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_lt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_le_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_ge_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_gt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn zip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn zip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn unzip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn unzip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn select_i16x32( self, a: mask16x32<Self>, b: i16x32<Self>, c: i16x32<Self>, ) -> i16x32<Self>
fn min_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn max_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn split_i16x32(self, a: i16x32<Self>) -> (i16x16<Self>, i16x16<Self>)
fn reinterpret_u8_i16x32(self, a: i16x32<Self>) -> u8x64<Self>
fn reinterpret_u32_i16x32(self, a: i16x32<Self>) -> u32x16<Self>
fn splat_u16x32(self, val: u16) -> u16x32<Self>
fn not_u16x32(self, a: u16x32<Self>) -> u16x32<Self>
fn add_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn sub_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn mul_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn and_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn or_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn xor_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn shr_u16x32(self, a: u16x32<Self>, shift: u32) -> u16x32<Self>
fn simd_eq_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_lt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_le_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_ge_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_gt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn zip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn zip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn unzip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn unzip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn select_u16x32( self, a: mask16x32<Self>, b: u16x32<Self>, c: u16x32<Self>, ) -> u16x32<Self>
fn min_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn max_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn split_u16x32(self, a: u16x32<Self>) -> (u16x16<Self>, u16x16<Self>)
fn load_interleaved_128_u16x32(self, src: &[u16; 32]) -> u16x32<Self>
fn store_interleaved_128_u16x32(self, a: u16x32<Self>, dest: &mut [u16; 32])
fn narrow_u16x32(self, a: u16x32<Self>) -> u8x32<Self>
fn reinterpret_u8_u16x32(self, a: u16x32<Self>) -> u8x64<Self>
fn reinterpret_u32_u16x32(self, a: u16x32<Self>) -> u32x16<Self>
fn splat_mask16x32(self, val: i16) -> mask16x32<Self>
fn not_mask16x32(self, a: mask16x32<Self>) -> mask16x32<Self>
fn and_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, ) -> mask16x32<Self>
fn or_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>
fn xor_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, ) -> mask16x32<Self>
fn select_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, c: mask16x32<Self>, ) -> mask16x32<Self>
fn simd_eq_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, ) -> mask16x32<Self>
fn split_mask16x32( self, a: mask16x32<Self>, ) -> (mask16x16<Self>, mask16x16<Self>)
fn splat_i32x16(self, val: i32) -> i32x16<Self>
fn not_i32x16(self, a: i32x16<Self>) -> i32x16<Self>
fn add_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn sub_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn mul_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn and_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn or_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn xor_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn shr_i32x16(self, a: i32x16<Self>, shift: u32) -> i32x16<Self>
fn simd_eq_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_lt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_le_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_ge_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_gt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn zip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn zip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn unzip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn unzip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn select_i32x16( self, a: mask32x16<Self>, b: i32x16<Self>, c: i32x16<Self>, ) -> i32x16<Self>
fn min_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn max_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn split_i32x16(self, a: i32x16<Self>) -> (i32x8<Self>, i32x8<Self>)
fn reinterpret_u8_i32x16(self, a: i32x16<Self>) -> u8x64<Self>
fn reinterpret_u32_i32x16(self, a: i32x16<Self>) -> u32x16<Self>
fn cvt_f32_i32x16(self, a: i32x16<Self>) -> f32x16<Self>
fn splat_u32x16(self, val: u32) -> u32x16<Self>
fn not_u32x16(self, a: u32x16<Self>) -> u32x16<Self>
fn add_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn sub_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn mul_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn and_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn or_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn xor_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn shr_u32x16(self, a: u32x16<Self>, shift: u32) -> u32x16<Self>
fn simd_eq_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_lt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_le_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_ge_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_gt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn zip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn zip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn unzip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn unzip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn select_u32x16( self, a: mask32x16<Self>, b: u32x16<Self>, c: u32x16<Self>, ) -> u32x16<Self>
fn min_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn max_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn split_u32x16(self, a: u32x16<Self>) -> (u32x8<Self>, u32x8<Self>)
fn load_interleaved_128_u32x16(self, src: &[u32; 16]) -> u32x16<Self>
fn store_interleaved_128_u32x16(self, a: u32x16<Self>, dest: &mut [u32; 16])
fn reinterpret_u8_u32x16(self, a: u32x16<Self>) -> u8x64<Self>
fn cvt_f32_u32x16(self, a: u32x16<Self>) -> f32x16<Self>
fn splat_mask32x16(self, val: i32) -> mask32x16<Self>
fn not_mask32x16(self, a: mask32x16<Self>) -> mask32x16<Self>
fn and_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, ) -> mask32x16<Self>
fn or_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>
fn xor_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, ) -> mask32x16<Self>
fn select_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, c: mask32x16<Self>, ) -> mask32x16<Self>
fn simd_eq_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, ) -> mask32x16<Self>
fn split_mask32x16(self, a: mask32x16<Self>) -> (mask32x8<Self>, mask32x8<Self>)
fn splat_f64x8(self, val: f64) -> f64x8<Self>
fn abs_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn neg_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn sqrt_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn add_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn sub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn mul_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn div_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn copysign_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn simd_eq_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_lt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_le_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_ge_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_gt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn zip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn zip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn unzip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn unzip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn max_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn max_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn min_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn min_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn madd_f64x8( self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>, ) -> f64x8<Self>
fn msub_f64x8( self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>, ) -> f64x8<Self>
fn floor_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn fract_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn trunc_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn select_f64x8( self, a: mask64x8<Self>, b: f64x8<Self>, c: f64x8<Self>, ) -> f64x8<Self>
fn split_f64x8(self, a: f64x8<Self>) -> (f64x4<Self>, f64x4<Self>)
fn reinterpret_f32_f64x8(self, a: f64x8<Self>) -> f32x16<Self>
fn splat_mask64x8(self, val: i64) -> mask64x8<Self>
fn not_mask64x8(self, a: mask64x8<Self>) -> mask64x8<Self>
fn and_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>
fn or_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>
fn xor_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>
fn select_mask64x8( self, a: mask64x8<Self>, b: mask64x8<Self>, c: mask64x8<Self>, ) -> mask64x8<Self>
fn simd_eq_mask64x8( self, a: mask64x8<Self>, b: mask64x8<Self>, ) -> mask64x8<Self>
fn split_mask64x8(self, a: mask64x8<Self>) -> (mask64x4<Self>, mask64x4<Self>)
Dyn Compatibility
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.
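In practice this means a trait object such as Box<dyn Simd> will not compile; instead, code is written generically over a concrete level and monomorphized, as in this minimal sketch (name illustrative):

fn kernel<S: Simd>(simd: S) -> f32x4<S> {
    simd.splat_f32x4(1.0) // compiles to the level's native splat
}
// let v: Box<dyn Simd> = ...; // would not compile: `Simd` is not dyn compatible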