pub struct Fallback {
pub fallback: Fallback,
}
The SIMD token for the “fallback” level.
Fields
fallback: Fallback
Implementations
Trait Implementations
impl Simd for Fallback
type f32s = f32x4<Fallback>
type u8s = u8x16<Fallback>
type i8s = i8x16<Fallback>
type u16s = u16x8<Fallback>
type i16s = i16x8<Fallback>
type u32s = u32x4<Fallback>
type i32s = i32x4<Fallback>
type mask8s = mask8x16<Fallback>
type mask16s = mask16x8<Fallback>
type mask32s = mask32x4<Fallback>
fn level(self) -> Level
fn vectorize<F: FnOnce() -> R, R>(self, f: F) -> R
Calls the given function with the corresponding CPU features enabled.
fn splat_f32x4(self, val: f32) -> f32x4<Self>
fn abs_f32x4(self, a: f32x4<Self>) -> f32x4<Self>
fn neg_f32x4(self, a: f32x4<Self>) -> f32x4<Self>
fn sqrt_f32x4(self, a: f32x4<Self>) -> f32x4<Self>
fn add_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn sub_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn mul_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn div_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn copysign_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn simd_eq_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>
fn simd_lt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>
fn simd_le_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>
fn simd_ge_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>
fn simd_gt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>
fn zip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn zip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn unzip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn unzip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn max_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn max_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn min_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn min_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>
fn madd_f32x4( self, a: f32x4<Self>, b: f32x4<Self>, c: f32x4<Self>, ) -> f32x4<Self>
fn msub_f32x4( self, a: f32x4<Self>, b: f32x4<Self>, c: f32x4<Self>, ) -> f32x4<Self>
fn floor_f32x4(self, a: f32x4<Self>) -> f32x4<Self>
fn fract_f32x4(self, a: f32x4<Self>) -> f32x4<Self>
fn trunc_f32x4(self, a: f32x4<Self>) -> f32x4<Self>
fn select_f32x4( self, a: mask32x4<Self>, b: f32x4<Self>, c: f32x4<Self>, ) -> f32x4<Self>
fn combine_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x8<Self>
fn reinterpret_f64_f32x4(self, a: f32x4<Self>) -> f64x2<Self>
fn reinterpret_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>
fn reinterpret_u8_f32x4(self, a: f32x4<Self>) -> u8x16<Self>
fn reinterpret_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>
fn cvt_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>
fn cvt_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>
fn splat_i8x16(self, val: i8) -> i8x16<Self>
fn not_i8x16(self, a: i8x16<Self>) -> i8x16<Self>
fn add_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn sub_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn mul_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn and_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn or_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn xor_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn shr_i8x16(self, a: i8x16<Self>, shift: u32) -> i8x16<Self>
fn simd_eq_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>
fn simd_lt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>
fn simd_le_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>
fn simd_ge_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>
fn simd_gt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>
fn zip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn zip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn unzip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn unzip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn select_i8x16( self, a: mask8x16<Self>, b: i8x16<Self>, c: i8x16<Self>, ) -> i8x16<Self>
fn min_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn max_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>
fn combine_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x32<Self>
fn reinterpret_u8_i8x16(self, a: i8x16<Self>) -> u8x16<Self>
fn reinterpret_u32_i8x16(self, a: i8x16<Self>) -> u32x4<Self>
fn splat_u8x16(self, val: u8) -> u8x16<Self>
fn not_u8x16(self, a: u8x16<Self>) -> u8x16<Self>
fn add_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn sub_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn mul_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn and_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn or_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn xor_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn shr_u8x16(self, a: u8x16<Self>, shift: u32) -> u8x16<Self>
fn simd_eq_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>
fn simd_lt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>
fn simd_le_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>
fn simd_ge_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>
fn simd_gt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>
fn zip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn zip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn unzip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn unzip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn select_u8x16( self, a: mask8x16<Self>, b: u8x16<Self>, c: u8x16<Self>, ) -> u8x16<Self>
fn min_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn max_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>
fn combine_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x32<Self>
fn widen_u8x16(self, a: u8x16<Self>) -> u16x16<Self>
fn reinterpret_u32_u8x16(self, a: u8x16<Self>) -> u32x4<Self>
fn splat_mask8x16(self, val: i8) -> mask8x16<Self>
fn not_mask8x16(self, a: mask8x16<Self>) -> mask8x16<Self>
fn and_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>
fn or_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>
fn xor_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>
fn select_mask8x16( self, a: mask8x16<Self>, b: mask8x16<Self>, c: mask8x16<Self>, ) -> mask8x16<Self>
fn simd_eq_mask8x16( self, a: mask8x16<Self>, b: mask8x16<Self>, ) -> mask8x16<Self>
fn combine_mask8x16( self, a: mask8x16<Self>, b: mask8x16<Self>, ) -> mask8x32<Self>
fn splat_i16x8(self, val: i16) -> i16x8<Self>
fn not_i16x8(self, a: i16x8<Self>) -> i16x8<Self>
fn add_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn sub_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn mul_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn and_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn or_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn xor_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn shr_i16x8(self, a: i16x8<Self>, shift: u32) -> i16x8<Self>
fn simd_eq_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_lt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_le_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_ge_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn simd_gt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>
fn zip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn zip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn unzip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn unzip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn select_i16x8( self, a: mask16x8<Self>, b: i16x8<Self>, c: i16x8<Self>, ) -> i16x8<Self>
fn min_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn max_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>
fn combine_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x16<Self>
fn reinterpret_u8_i16x8(self, a: i16x8<Self>) -> u8x16<Self>
fn reinterpret_u32_i16x8(self, a: i16x8<Self>) -> u32x4<Self>
fn splat_u16x8(self, val: u16) -> u16x8<Self>
fn not_u16x8(self, a: u16x8<Self>) -> u16x8<Self>
fn add_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn sub_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn mul_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn and_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn or_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn xor_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn shr_u16x8(self, a: u16x8<Self>, shift: u32) -> u16x8<Self>
fn simd_eq_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_lt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_le_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_ge_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn simd_gt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>
fn zip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn zip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn unzip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn unzip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn select_u16x8( self, a: mask16x8<Self>, b: u16x8<Self>, c: u16x8<Self>, ) -> u16x8<Self>
fn min_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn max_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>
fn combine_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x16<Self>
fn reinterpret_u8_u16x8(self, a: u16x8<Self>) -> u8x16<Self>
fn reinterpret_u32_u16x8(self, a: u16x8<Self>) -> u32x4<Self>
fn splat_mask16x8(self, val: i16) -> mask16x8<Self>
fn not_mask16x8(self, a: mask16x8<Self>) -> mask16x8<Self>
fn and_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>
fn or_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>
fn xor_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>
fn select_mask16x8( self, a: mask16x8<Self>, b: mask16x8<Self>, c: mask16x8<Self>, ) -> mask16x8<Self>
fn simd_eq_mask16x8( self, a: mask16x8<Self>, b: mask16x8<Self>, ) -> mask16x8<Self>
fn combine_mask16x8( self, a: mask16x8<Self>, b: mask16x8<Self>, ) -> mask16x16<Self>
fn splat_i32x4(self, val: i32) -> i32x4<Self>
fn not_i32x4(self, a: i32x4<Self>) -> i32x4<Self>
fn add_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn sub_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn mul_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn and_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn or_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn xor_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn shr_i32x4(self, a: i32x4<Self>, shift: u32) -> i32x4<Self>
fn simd_eq_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_lt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_le_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_ge_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn simd_gt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>
fn zip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn zip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn unzip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn unzip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn select_i32x4( self, a: mask32x4<Self>, b: i32x4<Self>, c: i32x4<Self>, ) -> i32x4<Self>
fn min_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn max_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>
fn combine_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x8<Self>
fn reinterpret_u8_i32x4(self, a: i32x4<Self>) -> u8x16<Self>
fn reinterpret_u32_i32x4(self, a: i32x4<Self>) -> u32x4<Self>
fn cvt_f32_i32x4(self, a: i32x4<Self>) -> f32x4<Self>
fn splat_u32x4(self, val: u32) -> u32x4<Self>
fn not_u32x4(self, a: u32x4<Self>) -> u32x4<Self>
fn add_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn sub_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn mul_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn and_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn or_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn xor_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn shr_u32x4(self, a: u32x4<Self>, shift: u32) -> u32x4<Self>
fn simd_eq_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_lt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_le_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_ge_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn simd_gt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>
fn zip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn zip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn unzip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn unzip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn select_u32x4( self, a: mask32x4<Self>, b: u32x4<Self>, c: u32x4<Self>, ) -> u32x4<Self>
fn min_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn max_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>
fn combine_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x8<Self>
fn reinterpret_u8_u32x4(self, a: u32x4<Self>) -> u8x16<Self>
fn cvt_f32_u32x4(self, a: u32x4<Self>) -> f32x4<Self>
fn splat_mask32x4(self, val: i32) -> mask32x4<Self>
fn not_mask32x4(self, a: mask32x4<Self>) -> mask32x4<Self>
fn and_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>
fn or_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>
fn xor_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>
fn select_mask32x4( self, a: mask32x4<Self>, b: mask32x4<Self>, c: mask32x4<Self>, ) -> mask32x4<Self>
fn simd_eq_mask32x4( self, a: mask32x4<Self>, b: mask32x4<Self>, ) -> mask32x4<Self>
fn combine_mask32x4( self, a: mask32x4<Self>, b: mask32x4<Self>, ) -> mask32x8<Self>
fn splat_f64x2(self, val: f64) -> f64x2<Self>
fn abs_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn neg_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn sqrt_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn add_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn sub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn mul_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn div_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn copysign_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn simd_eq_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_lt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_le_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_ge_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn simd_gt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>
fn zip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn zip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn unzip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn unzip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn max_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn max_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn min_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn min_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>
fn madd_f64x2( self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>, ) -> f64x2<Self>
fn msub_f64x2( self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>, ) -> f64x2<Self>
fn floor_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn fract_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn trunc_f64x2(self, a: f64x2<Self>) -> f64x2<Self>
fn select_f64x2( self, a: mask64x2<Self>, b: f64x2<Self>, c: f64x2<Self>, ) -> f64x2<Self>
fn combine_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x4<Self>
fn reinterpret_f32_f64x2(self, a: f64x2<Self>) -> f32x4<Self>
fn splat_mask64x2(self, val: i64) -> mask64x2<Self>
fn not_mask64x2(self, a: mask64x2<Self>) -> mask64x2<Self>
fn and_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>
fn or_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>
fn xor_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>
fn select_mask64x2( self, a: mask64x2<Self>, b: mask64x2<Self>, c: mask64x2<Self>, ) -> mask64x2<Self>
fn simd_eq_mask64x2( self, a: mask64x2<Self>, b: mask64x2<Self>, ) -> mask64x2<Self>
fn combine_mask64x2( self, a: mask64x2<Self>, b: mask64x2<Self>, ) -> mask64x4<Self>
fn splat_f32x8(self, a: f32) -> f32x8<Self>
fn abs_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn neg_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn sqrt_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn add_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn sub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn mul_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn div_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn copysign_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn simd_eq_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_lt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_le_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_ge_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn simd_gt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>
fn zip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn zip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn unzip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn unzip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn max_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn max_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn min_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn min_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>
fn madd_f32x8( self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>, ) -> f32x8<Self>
fn msub_f32x8( self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>, ) -> f32x8<Self>
fn floor_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn fract_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn trunc_f32x8(self, a: f32x8<Self>) -> f32x8<Self>
fn select_f32x8( self, a: mask32x8<Self>, b: f32x8<Self>, c: f32x8<Self>, ) -> f32x8<Self>
fn combine_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x16<Self>
fn split_f32x8(self, a: f32x8<Self>) -> (f32x4<Self>, f32x4<Self>)
fn reinterpret_f64_f32x8(self, a: f32x8<Self>) -> f64x4<Self>
fn reinterpret_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>
fn reinterpret_u8_f32x8(self, a: f32x8<Self>) -> u8x32<Self>
fn reinterpret_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>
fn cvt_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>
fn cvt_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>
fn splat_i8x32(self, a: i8) -> i8x32<Self>
fn not_i8x32(self, a: i8x32<Self>) -> i8x32<Self>
fn add_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn sub_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn mul_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn and_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn or_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn xor_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn shr_i8x32(self, a: i8x32<Self>, b: u32) -> i8x32<Self>
fn simd_eq_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_lt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_le_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_ge_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn simd_gt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>
fn zip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn zip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn unzip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn unzip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn select_i8x32( self, a: mask8x32<Self>, b: i8x32<Self>, c: i8x32<Self>, ) -> i8x32<Self>
fn min_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn max_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>
fn combine_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x64<Self>
fn split_i8x32(self, a: i8x32<Self>) -> (i8x16<Self>, i8x16<Self>)
fn reinterpret_u8_i8x32(self, a: i8x32<Self>) -> u8x32<Self>
fn reinterpret_u32_i8x32(self, a: i8x32<Self>) -> u32x8<Self>
fn splat_u8x32(self, a: u8) -> u8x32<Self>
fn not_u8x32(self, a: u8x32<Self>) -> u8x32<Self>
fn add_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn sub_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn mul_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn and_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn or_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn xor_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn shr_u8x32(self, a: u8x32<Self>, b: u32) -> u8x32<Self>
fn simd_eq_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_lt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_le_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_ge_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn simd_gt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>
fn zip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn zip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn unzip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn unzip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn select_u8x32( self, a: mask8x32<Self>, b: u8x32<Self>, c: u8x32<Self>, ) -> u8x32<Self>
fn min_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn max_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>
fn combine_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x64<Self>
fn split_u8x32(self, a: u8x32<Self>) -> (u8x16<Self>, u8x16<Self>)
fn widen_u8x32(self, a: u8x32<Self>) -> u16x32<Self>
fn reinterpret_u32_u8x32(self, a: u8x32<Self>) -> u32x8<Self>
fn splat_mask8x32(self, a: i8) -> mask8x32<Self>
fn not_mask8x32(self, a: mask8x32<Self>) -> mask8x32<Self>
fn and_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>
fn or_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>
fn xor_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>
fn select_mask8x32( self, a: mask8x32<Self>, b: mask8x32<Self>, c: mask8x32<Self>, ) -> mask8x32<Self>
fn simd_eq_mask8x32( self, a: mask8x32<Self>, b: mask8x32<Self>, ) -> mask8x32<Self>
fn combine_mask8x32( self, a: mask8x32<Self>, b: mask8x32<Self>, ) -> mask8x64<Self>
fn split_mask8x32(self, a: mask8x32<Self>) -> (mask8x16<Self>, mask8x16<Self>)
fn splat_i16x16(self, a: i16) -> i16x16<Self>
fn not_i16x16(self, a: i16x16<Self>) -> i16x16<Self>
fn add_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn sub_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn mul_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn and_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn or_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn xor_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn shr_i16x16(self, a: i16x16<Self>, b: u32) -> i16x16<Self>
fn simd_eq_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_lt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_le_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_ge_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn simd_gt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>
fn zip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn zip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn unzip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn unzip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn select_i16x16( self, a: mask16x16<Self>, b: i16x16<Self>, c: i16x16<Self>, ) -> i16x16<Self>
fn min_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn max_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>
fn combine_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x32<Self>
fn split_i16x16(self, a: i16x16<Self>) -> (i16x8<Self>, i16x8<Self>)
fn reinterpret_u8_i16x16(self, a: i16x16<Self>) -> u8x32<Self>
fn reinterpret_u32_i16x16(self, a: i16x16<Self>) -> u32x8<Self>
fn splat_u16x16(self, a: u16) -> u16x16<Self>
fn not_u16x16(self, a: u16x16<Self>) -> u16x16<Self>
fn add_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn sub_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn mul_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn and_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn or_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn xor_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn shr_u16x16(self, a: u16x16<Self>, b: u32) -> u16x16<Self>
fn simd_eq_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_lt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_le_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_ge_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn simd_gt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>
fn zip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn zip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn unzip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn unzip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn select_u16x16( self, a: mask16x16<Self>, b: u16x16<Self>, c: u16x16<Self>, ) -> u16x16<Self>
fn min_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn max_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>
fn combine_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x32<Self>
fn split_u16x16(self, a: u16x16<Self>) -> (u16x8<Self>, u16x8<Self>)
fn narrow_u16x16(self, a: u16x16<Self>) -> u8x16<Self>
fn reinterpret_u8_u16x16(self, a: u16x16<Self>) -> u8x32<Self>
fn reinterpret_u32_u16x16(self, a: u16x16<Self>) -> u32x8<Self>
fn splat_mask16x16(self, a: i16) -> mask16x16<Self>
fn not_mask16x16(self, a: mask16x16<Self>) -> mask16x16<Self>
fn and_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x16<Self>
fn or_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>
fn xor_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x16<Self>
fn select_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, c: mask16x16<Self>, ) -> mask16x16<Self>
fn simd_eq_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x16<Self>
fn combine_mask16x16( self, a: mask16x16<Self>, b: mask16x16<Self>, ) -> mask16x32<Self>
fn split_mask16x16(self, a: mask16x16<Self>) -> (mask16x8<Self>, mask16x8<Self>)
fn splat_i32x8(self, a: i32) -> i32x8<Self>
fn not_i32x8(self, a: i32x8<Self>) -> i32x8<Self>
fn add_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn sub_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn mul_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn and_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn or_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn xor_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn shr_i32x8(self, a: i32x8<Self>, b: u32) -> i32x8<Self>
fn simd_eq_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_lt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_le_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_ge_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn simd_gt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>
fn zip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn zip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn unzip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn unzip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn select_i32x8( self, a: mask32x8<Self>, b: i32x8<Self>, c: i32x8<Self>, ) -> i32x8<Self>
fn min_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn max_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>
fn combine_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x16<Self>
fn split_i32x8(self, a: i32x8<Self>) -> (i32x4<Self>, i32x4<Self>)
fn reinterpret_u8_i32x8(self, a: i32x8<Self>) -> u8x32<Self>
fn reinterpret_u32_i32x8(self, a: i32x8<Self>) -> u32x8<Self>
fn cvt_f32_i32x8(self, a: i32x8<Self>) -> f32x8<Self>
fn splat_u32x8(self, a: u32) -> u32x8<Self>
fn not_u32x8(self, a: u32x8<Self>) -> u32x8<Self>
fn add_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn sub_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn mul_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn and_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn or_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn xor_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn shr_u32x8(self, a: u32x8<Self>, b: u32) -> u32x8<Self>
fn simd_eq_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_lt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_le_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_ge_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn simd_gt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>
fn zip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn zip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn unzip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn unzip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn select_u32x8( self, a: mask32x8<Self>, b: u32x8<Self>, c: u32x8<Self>, ) -> u32x8<Self>
fn min_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn max_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>
fn combine_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x16<Self>
fn split_u32x8(self, a: u32x8<Self>) -> (u32x4<Self>, u32x4<Self>)
fn reinterpret_u8_u32x8(self, a: u32x8<Self>) -> u8x32<Self>
fn cvt_f32_u32x8(self, a: u32x8<Self>) -> f32x8<Self>
fn splat_mask32x8(self, a: i32) -> mask32x8<Self>
fn not_mask32x8(self, a: mask32x8<Self>) -> mask32x8<Self>
fn and_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>
fn or_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>
fn xor_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>
fn select_mask32x8( self, a: mask32x8<Self>, b: mask32x8<Self>, c: mask32x8<Self>, ) -> mask32x8<Self>
fn simd_eq_mask32x8( self, a: mask32x8<Self>, b: mask32x8<Self>, ) -> mask32x8<Self>
fn combine_mask32x8( self, a: mask32x8<Self>, b: mask32x8<Self>, ) -> mask32x16<Self>
fn split_mask32x8(self, a: mask32x8<Self>) -> (mask32x4<Self>, mask32x4<Self>)
fn splat_f64x4(self, a: f64) -> f64x4<Self>
fn abs_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn neg_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn sqrt_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn add_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn sub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn mul_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn div_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn copysign_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn simd_eq_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_lt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_le_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_ge_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn simd_gt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>
fn zip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn zip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn unzip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn unzip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn max_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn max_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn min_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn min_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>
fn madd_f64x4( self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>, ) -> f64x4<Self>
fn msub_f64x4( self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>, ) -> f64x4<Self>
fn floor_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn fract_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn trunc_f64x4(self, a: f64x4<Self>) -> f64x4<Self>
fn select_f64x4( self, a: mask64x4<Self>, b: f64x4<Self>, c: f64x4<Self>, ) -> f64x4<Self>
fn combine_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x8<Self>
fn split_f64x4(self, a: f64x4<Self>) -> (f64x2<Self>, f64x2<Self>)
fn reinterpret_f32_f64x4(self, a: f64x4<Self>) -> f32x8<Self>
fn splat_mask64x4(self, a: i64) -> mask64x4<Self>
fn not_mask64x4(self, a: mask64x4<Self>) -> mask64x4<Self>
fn and_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>
fn or_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>
fn xor_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>
fn select_mask64x4( self, a: mask64x4<Self>, b: mask64x4<Self>, c: mask64x4<Self>, ) -> mask64x4<Self>
fn simd_eq_mask64x4( self, a: mask64x4<Self>, b: mask64x4<Self>, ) -> mask64x4<Self>
fn combine_mask64x4( self, a: mask64x4<Self>, b: mask64x4<Self>, ) -> mask64x8<Self>
fn split_mask64x4(self, a: mask64x4<Self>) -> (mask64x2<Self>, mask64x2<Self>)
fn splat_f32x16(self, a: f32) -> f32x16<Self>
fn abs_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn neg_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn sqrt_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn add_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn sub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn mul_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn div_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn copysign_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn simd_eq_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_lt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_le_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_ge_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn simd_gt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>
fn zip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn zip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn unzip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn unzip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn max_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn max_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn min_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn min_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>
fn madd_f32x16( self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>, ) -> f32x16<Self>
fn msub_f32x16( self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>, ) -> f32x16<Self>
fn floor_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn fract_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn trunc_f32x16(self, a: f32x16<Self>) -> f32x16<Self>
fn select_f32x16( self, a: mask32x16<Self>, b: f32x16<Self>, c: f32x16<Self>, ) -> f32x16<Self>
fn split_f32x16(self, a: f32x16<Self>) -> (f32x8<Self>, f32x8<Self>)
fn reinterpret_f64_f32x16(self, a: f32x16<Self>) -> f64x8<Self>
fn reinterpret_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>
fn load_interleaved_128_f32x16(self, src: &[f32; 16]) -> f32x16<Self>
fn store_interleaved_128_f32x16(self, a: f32x16<Self>, dest: &mut [f32; 16])
fn reinterpret_u8_f32x16(self, a: f32x16<Self>) -> u8x64<Self>
fn reinterpret_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>
fn cvt_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>
fn cvt_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>
fn splat_i8x64(self, a: i8) -> i8x64<Self>
fn not_i8x64(self, a: i8x64<Self>) -> i8x64<Self>
fn add_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn sub_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn mul_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn and_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn or_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn xor_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn shr_i8x64(self, a: i8x64<Self>, b: u32) -> i8x64<Self>
fn simd_eq_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_lt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_le_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_ge_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn simd_gt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>
fn zip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn zip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn unzip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn unzip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn select_i8x64( self, a: mask8x64<Self>, b: i8x64<Self>, c: i8x64<Self>, ) -> i8x64<Self>
fn min_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn max_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>
fn split_i8x64(self, a: i8x64<Self>) -> (i8x32<Self>, i8x32<Self>)
fn reinterpret_u8_i8x64(self, a: i8x64<Self>) -> u8x64<Self>
fn reinterpret_u32_i8x64(self, a: i8x64<Self>) -> u32x16<Self>
fn splat_u8x64(self, a: u8) -> u8x64<Self>
fn not_u8x64(self, a: u8x64<Self>) -> u8x64<Self>
fn add_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn sub_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn mul_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn and_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn or_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn xor_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn shr_u8x64(self, a: u8x64<Self>, b: u32) -> u8x64<Self>
fn simd_eq_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_lt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_le_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_ge_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn simd_gt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>
fn zip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn zip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn unzip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn unzip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn select_u8x64( self, a: mask8x64<Self>, b: u8x64<Self>, c: u8x64<Self>, ) -> u8x64<Self>
fn min_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn max_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>
fn split_u8x64(self, a: u8x64<Self>) -> (u8x32<Self>, u8x32<Self>)
fn load_interleaved_128_u8x64(self, src: &[u8; 64]) -> u8x64<Self>
fn store_interleaved_128_u8x64(self, a: u8x64<Self>, dest: &mut [u8; 64])
fn reinterpret_u32_u8x64(self, a: u8x64<Self>) -> u32x16<Self>
fn splat_mask8x64(self, a: i8) -> mask8x64<Self>
fn not_mask8x64(self, a: mask8x64<Self>) -> mask8x64<Self>
fn and_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>
fn or_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>
fn xor_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>
fn select_mask8x64( self, a: mask8x64<Self>, b: mask8x64<Self>, c: mask8x64<Self>, ) -> mask8x64<Self>
fn simd_eq_mask8x64( self, a: mask8x64<Self>, b: mask8x64<Self>, ) -> mask8x64<Self>
fn split_mask8x64(self, a: mask8x64<Self>) -> (mask8x32<Self>, mask8x32<Self>)
fn splat_i16x32(self, a: i16) -> i16x32<Self>
fn not_i16x32(self, a: i16x32<Self>) -> i16x32<Self>
fn add_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn sub_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn mul_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn and_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn or_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn xor_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn shr_i16x32(self, a: i16x32<Self>, b: u32) -> i16x32<Self>
fn simd_eq_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_lt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_le_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_ge_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn simd_gt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>
fn zip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn zip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn unzip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn unzip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn select_i16x32( self, a: mask16x32<Self>, b: i16x32<Self>, c: i16x32<Self>, ) -> i16x32<Self>
fn min_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn max_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>
fn split_i16x32(self, a: i16x32<Self>) -> (i16x16<Self>, i16x16<Self>)
fn reinterpret_u8_i16x32(self, a: i16x32<Self>) -> u8x64<Self>
fn reinterpret_u32_i16x32(self, a: i16x32<Self>) -> u32x16<Self>
fn splat_u16x32(self, a: u16) -> u16x32<Self>
fn not_u16x32(self, a: u16x32<Self>) -> u16x32<Self>
fn add_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn sub_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn mul_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn and_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn or_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn xor_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn shr_u16x32(self, a: u16x32<Self>, b: u32) -> u16x32<Self>
fn simd_eq_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_lt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_le_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_ge_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn simd_gt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>
fn zip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn zip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn unzip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn unzip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn select_u16x32( self, a: mask16x32<Self>, b: u16x32<Self>, c: u16x32<Self>, ) -> u16x32<Self>
fn min_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn max_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>
fn split_u16x32(self, a: u16x32<Self>) -> (u16x16<Self>, u16x16<Self>)
fn load_interleaved_128_u16x32(self, src: &[u16; 32]) -> u16x32<Self>
fn store_interleaved_128_u16x32(self, a: u16x32<Self>, dest: &mut [u16; 32])
fn narrow_u16x32(self, a: u16x32<Self>) -> u8x32<Self>
fn reinterpret_u8_u16x32(self, a: u16x32<Self>) -> u8x64<Self>
fn reinterpret_u32_u16x32(self, a: u16x32<Self>) -> u32x16<Self>
fn splat_mask16x32(self, a: i16) -> mask16x32<Self>
fn not_mask16x32(self, a: mask16x32<Self>) -> mask16x32<Self>
fn and_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, ) -> mask16x32<Self>
fn or_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>
fn xor_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, ) -> mask16x32<Self>
fn select_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, c: mask16x32<Self>, ) -> mask16x32<Self>
fn simd_eq_mask16x32( self, a: mask16x32<Self>, b: mask16x32<Self>, ) -> mask16x32<Self>
fn split_mask16x32( self, a: mask16x32<Self>, ) -> (mask16x16<Self>, mask16x16<Self>)
fn splat_i32x16(self, a: i32) -> i32x16<Self>
fn not_i32x16(self, a: i32x16<Self>) -> i32x16<Self>
fn add_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn sub_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn mul_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn and_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn or_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn xor_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn shr_i32x16(self, a: i32x16<Self>, b: u32) -> i32x16<Self>
fn simd_eq_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_lt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_le_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_ge_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn simd_gt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>
fn zip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn zip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn unzip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn unzip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn select_i32x16( self, a: mask32x16<Self>, b: i32x16<Self>, c: i32x16<Self>, ) -> i32x16<Self>
fn min_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn max_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>
fn split_i32x16(self, a: i32x16<Self>) -> (i32x8<Self>, i32x8<Self>)
fn reinterpret_u8_i32x16(self, a: i32x16<Self>) -> u8x64<Self>
fn reinterpret_u32_i32x16(self, a: i32x16<Self>) -> u32x16<Self>
fn cvt_f32_i32x16(self, a: i32x16<Self>) -> f32x16<Self>
fn splat_u32x16(self, a: u32) -> u32x16<Self>
fn not_u32x16(self, a: u32x16<Self>) -> u32x16<Self>
fn add_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn sub_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn mul_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn and_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn or_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn xor_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn shr_u32x16(self, a: u32x16<Self>, b: u32) -> u32x16<Self>
fn simd_eq_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_lt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_le_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_ge_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn simd_gt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>
fn zip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn zip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn unzip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn unzip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn select_u32x16( self, a: mask32x16<Self>, b: u32x16<Self>, c: u32x16<Self>, ) -> u32x16<Self>
fn min_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn max_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>
fn split_u32x16(self, a: u32x16<Self>) -> (u32x8<Self>, u32x8<Self>)
fn load_interleaved_128_u32x16(self, src: &[u32; 16]) -> u32x16<Self>
fn store_interleaved_128_u32x16(self, a: u32x16<Self>, dest: &mut [u32; 16])
fn reinterpret_u8_u32x16(self, a: u32x16<Self>) -> u8x64<Self>
fn cvt_f32_u32x16(self, a: u32x16<Self>) -> f32x16<Self>
fn splat_mask32x16(self, a: i32) -> mask32x16<Self>
fn not_mask32x16(self, a: mask32x16<Self>) -> mask32x16<Self>
fn and_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, ) -> mask32x16<Self>
fn or_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>
fn xor_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, ) -> mask32x16<Self>
fn select_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, c: mask32x16<Self>, ) -> mask32x16<Self>
fn simd_eq_mask32x16( self, a: mask32x16<Self>, b: mask32x16<Self>, ) -> mask32x16<Self>
fn split_mask32x16(self, a: mask32x16<Self>) -> (mask32x8<Self>, mask32x8<Self>)
fn splat_f64x8(self, a: f64) -> f64x8<Self>
fn abs_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn neg_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn sqrt_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn add_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn sub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn mul_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn div_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn copysign_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn simd_eq_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_lt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_le_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_ge_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn simd_gt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>
fn zip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn zip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn unzip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn unzip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn max_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn max_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn min_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn min_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>
fn madd_f64x8( self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>, ) -> f64x8<Self>
fn msub_f64x8( self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>, ) -> f64x8<Self>
fn floor_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn fract_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn trunc_f64x8(self, a: f64x8<Self>) -> f64x8<Self>
fn select_f64x8( self, a: mask64x8<Self>, b: f64x8<Self>, c: f64x8<Self>, ) -> f64x8<Self>
fn split_f64x8(self, a: f64x8<Self>) -> (f64x4<Self>, f64x4<Self>)
fn reinterpret_f32_f64x8(self, a: f64x8<Self>) -> f32x16<Self>
fn splat_mask64x8(self, a: i64) -> mask64x8<Self>
fn not_mask64x8(self, a: mask64x8<Self>) -> mask64x8<Self>
fn and_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>
fn or_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>
fn xor_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>
fn select_mask64x8( self, a: mask64x8<Self>, b: mask64x8<Self>, c: mask64x8<Self>, ) -> mask64x8<Self>
fn simd_eq_mask64x8( self, a: mask64x8<Self>, b: mask64x8<Self>, ) -> mask64x8<Self>
fn split_mask64x8(self, a: mask64x8<Self>) -> (mask64x4<Self>, mask64x4<Self>)
impl Copy for Fallback
impl Seal for Fallback
Auto Trait Implementations§
impl Freeze for Fallback
impl RefUnwindSafe for Fallback
impl Send for Fallback
impl Sync for Fallback
impl Unpin for Fallback
impl UnwindSafe for Fallback
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more