fearless_simd/generated/
simd_trait.rs

// Copyright 2025 the Fearless_SIMD Authors
// SPDX-License-Identifier: Apache-2.0 OR MIT

// This file is autogenerated by fearless_simd_gen

use crate::{Level, SimdCvtFloat, SimdCvtTruncate, SimdElement, SimdInto, seal::Seal};
use crate::{
    f32x4, f32x8, f32x16, f64x2, f64x4, f64x8, i8x16, i8x32, i8x64, i16x8, i16x16, i16x32, i32x4,
    i32x8, i32x16, mask8x16, mask8x32, mask8x64, mask16x8, mask16x16, mask16x32, mask32x4,
    mask32x8, mask32x16, mask64x2, mask64x4, mask64x8, u8x16, u8x32, u8x64, u16x8, u16x16, u16x32,
    u32x4, u32x8, u32x16,
};
#[doc = r" Core SIMD operations, implemented for each supported CPU feature level."]
pub trait Simd: Sized + Clone + Copy + Send + Sync + Seal + 'static {
    type f32s: SimdFloat<f32, Self, Block = f32x4<Self>>
        + SimdCvtFloat<Self::u32s>
        + SimdCvtFloat<Self::i32s>;
    type u8s: SimdInt<u8, Self, Block = u8x16<Self>>;
    type i8s: SimdInt<i8, Self, Block = i8x16<Self>>;
    type u16s: SimdInt<u16, Self, Block = u16x8<Self>>;
    type i16s: SimdInt<i16, Self, Block = i16x8<Self>>;
    type u32s: SimdInt<u32, Self, Block = u32x4<Self>> + SimdCvtTruncate<Self::f32s>;
    type i32s: SimdInt<i32, Self, Block = i32x4<Self>> + SimdCvtTruncate<Self::f32s>;
    type mask8s: SimdMask<i8, Self, Block = mask8x16<Self>>;
    type mask16s: SimdMask<i16, Self, Block = mask16x8<Self>>;
    type mask32s: SimdMask<i32, Self, Block = mask32x4<Self>>;
    fn level(self) -> Level;
    #[doc = r" Call the given function with this level's CPU features enabled."]
    #[doc = r""]
    #[doc = r" For performance, the provided function should be `#[inline(always)]`."]
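    #[doc = r""]
    #[doc = r" A minimal usage sketch (illustrative, not generator output); the kernel"]
    #[doc = r" name and arguments below are hypothetical:"]
    #[doc = r""]
    #[doc = r" ```ignore"]
    #[doc = r" fn add_kernel<S: Simd>(simd: S, a: f32x4<S>, b: f32x4<S>) -> f32x4<S> {"]
    #[doc = r"     // The closure body is compiled with the level's target features."]
    #[doc = r"     simd.vectorize(|| simd.add_f32x4(a, b))"]
    #[doc = r" }"]
    #[doc = r" ```"]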
    fn vectorize<F: FnOnce() -> R, R>(self, f: F) -> R;
    fn splat_f32x4(self, val: f32) -> f32x4<Self>;
    fn abs_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn neg_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn sqrt_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn add_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn sub_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn mul_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn div_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn copysign_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn simd_eq_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_lt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_le_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_ge_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_gt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn zip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn zip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn unzip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn unzip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn max_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn max_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn min_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn min_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn madd_f32x4(self, a: f32x4<Self>, b: f32x4<Self>, c: f32x4<Self>) -> f32x4<Self>;
    fn msub_f32x4(self, a: f32x4<Self>, b: f32x4<Self>, c: f32x4<Self>) -> f32x4<Self>;
    fn floor_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn fract_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn trunc_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn select_f32x4(self, a: mask32x4<Self>, b: f32x4<Self>, c: f32x4<Self>) -> f32x4<Self>;
    fn combine_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x8<Self>;
    fn reinterpret_f64_f32x4(self, a: f32x4<Self>) -> f64x2<Self>;
    fn reinterpret_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>;
    fn reinterpret_u8_f32x4(self, a: f32x4<Self>) -> u8x16<Self>;
    fn reinterpret_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>;
    fn cvt_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>;
    fn cvt_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>;
    fn splat_i8x16(self, val: i8) -> i8x16<Self>;
    fn not_i8x16(self, a: i8x16<Self>) -> i8x16<Self>;
    fn add_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn sub_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn mul_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn and_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn or_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn xor_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn shr_i8x16(self, a: i8x16<Self>, shift: u32) -> i8x16<Self>;
    fn simd_eq_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_lt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_le_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_ge_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_gt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn zip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn zip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn unzip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn unzip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn select_i8x16(self, a: mask8x16<Self>, b: i8x16<Self>, c: i8x16<Self>) -> i8x16<Self>;
    fn min_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn max_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn combine_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x32<Self>;
    fn reinterpret_u8_i8x16(self, a: i8x16<Self>) -> u8x16<Self>;
    fn reinterpret_u32_i8x16(self, a: i8x16<Self>) -> u32x4<Self>;
    fn splat_u8x16(self, val: u8) -> u8x16<Self>;
    fn not_u8x16(self, a: u8x16<Self>) -> u8x16<Self>;
    fn add_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn sub_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn mul_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn and_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn or_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn xor_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn shr_u8x16(self, a: u8x16<Self>, shift: u32) -> u8x16<Self>;
    fn simd_eq_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_lt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_le_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_ge_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_gt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn zip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn zip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn unzip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn unzip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn select_u8x16(self, a: mask8x16<Self>, b: u8x16<Self>, c: u8x16<Self>) -> u8x16<Self>;
    fn min_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn max_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn combine_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x32<Self>;
    fn widen_u8x16(self, a: u8x16<Self>) -> u16x16<Self>;
    fn reinterpret_u32_u8x16(self, a: u8x16<Self>) -> u32x4<Self>;
    fn splat_mask8x16(self, val: i8) -> mask8x16<Self>;
    fn not_mask8x16(self, a: mask8x16<Self>) -> mask8x16<Self>;
    fn and_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn or_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn xor_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn select_mask8x16(
        self,
        a: mask8x16<Self>,
        b: mask8x16<Self>,
        c: mask8x16<Self>,
    ) -> mask8x16<Self>;
    fn simd_eq_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn combine_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x32<Self>;
    fn splat_i16x8(self, val: i16) -> i16x8<Self>;
    fn not_i16x8(self, a: i16x8<Self>) -> i16x8<Self>;
    fn add_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn sub_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn mul_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn and_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn or_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn xor_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn shr_i16x8(self, a: i16x8<Self>, shift: u32) -> i16x8<Self>;
    fn simd_eq_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_lt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_le_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_ge_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_gt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn zip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn zip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn unzip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn unzip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn select_i16x8(self, a: mask16x8<Self>, b: i16x8<Self>, c: i16x8<Self>) -> i16x8<Self>;
    fn min_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn max_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn combine_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x16<Self>;
    fn reinterpret_u8_i16x8(self, a: i16x8<Self>) -> u8x16<Self>;
    fn reinterpret_u32_i16x8(self, a: i16x8<Self>) -> u32x4<Self>;
    fn splat_u16x8(self, val: u16) -> u16x8<Self>;
    fn not_u16x8(self, a: u16x8<Self>) -> u16x8<Self>;
    fn add_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn sub_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn mul_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn and_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn or_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn xor_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn shr_u16x8(self, a: u16x8<Self>, shift: u32) -> u16x8<Self>;
    fn simd_eq_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_lt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_le_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_ge_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_gt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn zip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn zip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn unzip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn unzip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn select_u16x8(self, a: mask16x8<Self>, b: u16x8<Self>, c: u16x8<Self>) -> u16x8<Self>;
    fn min_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn max_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn combine_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x16<Self>;
    fn reinterpret_u8_u16x8(self, a: u16x8<Self>) -> u8x16<Self>;
    fn reinterpret_u32_u16x8(self, a: u16x8<Self>) -> u32x4<Self>;
    fn splat_mask16x8(self, val: i16) -> mask16x8<Self>;
    fn not_mask16x8(self, a: mask16x8<Self>) -> mask16x8<Self>;
    fn and_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn or_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn xor_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn select_mask16x8(
        self,
        a: mask16x8<Self>,
        b: mask16x8<Self>,
        c: mask16x8<Self>,
    ) -> mask16x8<Self>;
    fn simd_eq_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn combine_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x16<Self>;
    fn splat_i32x4(self, val: i32) -> i32x4<Self>;
    fn not_i32x4(self, a: i32x4<Self>) -> i32x4<Self>;
    fn add_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn sub_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn mul_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn and_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn or_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn xor_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn shr_i32x4(self, a: i32x4<Self>, shift: u32) -> i32x4<Self>;
    fn simd_eq_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_lt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_le_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_ge_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_gt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn zip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn zip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn unzip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn unzip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn select_i32x4(self, a: mask32x4<Self>, b: i32x4<Self>, c: i32x4<Self>) -> i32x4<Self>;
    fn min_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn max_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn combine_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x8<Self>;
    fn reinterpret_u8_i32x4(self, a: i32x4<Self>) -> u8x16<Self>;
    fn reinterpret_u32_i32x4(self, a: i32x4<Self>) -> u32x4<Self>;
    fn cvt_f32_i32x4(self, a: i32x4<Self>) -> f32x4<Self>;
    fn splat_u32x4(self, val: u32) -> u32x4<Self>;
    fn not_u32x4(self, a: u32x4<Self>) -> u32x4<Self>;
    fn add_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn sub_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn mul_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn and_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn or_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn xor_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn shr_u32x4(self, a: u32x4<Self>, shift: u32) -> u32x4<Self>;
    fn simd_eq_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_lt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_le_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_ge_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_gt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn zip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn zip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn unzip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn unzip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn select_u32x4(self, a: mask32x4<Self>, b: u32x4<Self>, c: u32x4<Self>) -> u32x4<Self>;
    fn min_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn max_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn combine_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x8<Self>;
    fn reinterpret_u8_u32x4(self, a: u32x4<Self>) -> u8x16<Self>;
    fn cvt_f32_u32x4(self, a: u32x4<Self>) -> f32x4<Self>;
    fn splat_mask32x4(self, val: i32) -> mask32x4<Self>;
    fn not_mask32x4(self, a: mask32x4<Self>) -> mask32x4<Self>;
    fn and_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn or_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn xor_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn select_mask32x4(
        self,
        a: mask32x4<Self>,
        b: mask32x4<Self>,
        c: mask32x4<Self>,
    ) -> mask32x4<Self>;
    fn simd_eq_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn combine_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x8<Self>;
    fn splat_f64x2(self, val: f64) -> f64x2<Self>;
    fn abs_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn neg_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn sqrt_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn add_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn sub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn mul_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn div_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn copysign_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn simd_eq_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_lt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_le_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_ge_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_gt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn zip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn zip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn unzip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn unzip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn max_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn max_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn min_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn min_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn madd_f64x2(self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>) -> f64x2<Self>;
    fn msub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>) -> f64x2<Self>;
    fn floor_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn fract_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn trunc_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn select_f64x2(self, a: mask64x2<Self>, b: f64x2<Self>, c: f64x2<Self>) -> f64x2<Self>;
    fn combine_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x4<Self>;
    fn reinterpret_f32_f64x2(self, a: f64x2<Self>) -> f32x4<Self>;
    fn splat_mask64x2(self, val: i64) -> mask64x2<Self>;
    fn not_mask64x2(self, a: mask64x2<Self>) -> mask64x2<Self>;
    fn and_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn or_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn xor_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn select_mask64x2(
        self,
        a: mask64x2<Self>,
        b: mask64x2<Self>,
        c: mask64x2<Self>,
    ) -> mask64x2<Self>;
    fn simd_eq_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn combine_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x4<Self>;
    fn splat_f32x8(self, val: f32) -> f32x8<Self>;
    fn abs_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn neg_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn sqrt_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn add_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn sub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn mul_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn div_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn copysign_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn simd_eq_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_lt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_le_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_ge_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_gt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn zip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn zip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn unzip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn unzip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn max_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn max_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn min_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn min_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn madd_f32x8(self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>) -> f32x8<Self>;
    fn msub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>) -> f32x8<Self>;
    fn floor_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn fract_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn trunc_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn select_f32x8(self, a: mask32x8<Self>, b: f32x8<Self>, c: f32x8<Self>) -> f32x8<Self>;
    fn combine_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x16<Self>;
    fn split_f32x8(self, a: f32x8<Self>) -> (f32x4<Self>, f32x4<Self>);
    fn reinterpret_f64_f32x8(self, a: f32x8<Self>) -> f64x4<Self>;
    fn reinterpret_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>;
    fn reinterpret_u8_f32x8(self, a: f32x8<Self>) -> u8x32<Self>;
    fn reinterpret_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>;
    fn cvt_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>;
    fn cvt_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>;
    fn splat_i8x32(self, val: i8) -> i8x32<Self>;
    fn not_i8x32(self, a: i8x32<Self>) -> i8x32<Self>;
    fn add_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn sub_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn mul_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn and_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn or_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn xor_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn shr_i8x32(self, a: i8x32<Self>, shift: u32) -> i8x32<Self>;
    fn simd_eq_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_lt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_le_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_ge_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_gt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn zip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn zip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn unzip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn unzip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn select_i8x32(self, a: mask8x32<Self>, b: i8x32<Self>, c: i8x32<Self>) -> i8x32<Self>;
    fn min_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn max_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn combine_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x64<Self>;
    fn split_i8x32(self, a: i8x32<Self>) -> (i8x16<Self>, i8x16<Self>);
    fn reinterpret_u8_i8x32(self, a: i8x32<Self>) -> u8x32<Self>;
    fn reinterpret_u32_i8x32(self, a: i8x32<Self>) -> u32x8<Self>;
    fn splat_u8x32(self, val: u8) -> u8x32<Self>;
    fn not_u8x32(self, a: u8x32<Self>) -> u8x32<Self>;
    fn add_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn sub_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn mul_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn and_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn or_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn xor_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn shr_u8x32(self, a: u8x32<Self>, shift: u32) -> u8x32<Self>;
    fn simd_eq_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_lt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_le_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_ge_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_gt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn zip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn zip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn unzip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn unzip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn select_u8x32(self, a: mask8x32<Self>, b: u8x32<Self>, c: u8x32<Self>) -> u8x32<Self>;
    fn min_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn max_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn combine_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x64<Self>;
    fn split_u8x32(self, a: u8x32<Self>) -> (u8x16<Self>, u8x16<Self>);
    fn widen_u8x32(self, a: u8x32<Self>) -> u16x32<Self>;
    fn reinterpret_u32_u8x32(self, a: u8x32<Self>) -> u32x8<Self>;
    fn splat_mask8x32(self, val: i8) -> mask8x32<Self>;
    fn not_mask8x32(self, a: mask8x32<Self>) -> mask8x32<Self>;
    fn and_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn or_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn xor_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn select_mask8x32(
        self,
        a: mask8x32<Self>,
        b: mask8x32<Self>,
        c: mask8x32<Self>,
    ) -> mask8x32<Self>;
    fn simd_eq_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn combine_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x64<Self>;
    fn split_mask8x32(self, a: mask8x32<Self>) -> (mask8x16<Self>, mask8x16<Self>);
    fn splat_i16x16(self, val: i16) -> i16x16<Self>;
    fn not_i16x16(self, a: i16x16<Self>) -> i16x16<Self>;
    fn add_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn sub_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn mul_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn and_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn or_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn xor_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn shr_i16x16(self, a: i16x16<Self>, shift: u32) -> i16x16<Self>;
    fn simd_eq_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_lt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_le_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_ge_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_gt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn zip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn zip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn unzip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn unzip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn select_i16x16(self, a: mask16x16<Self>, b: i16x16<Self>, c: i16x16<Self>) -> i16x16<Self>;
    fn min_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn max_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn combine_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x32<Self>;
    fn split_i16x16(self, a: i16x16<Self>) -> (i16x8<Self>, i16x8<Self>);
    fn reinterpret_u8_i16x16(self, a: i16x16<Self>) -> u8x32<Self>;
    fn reinterpret_u32_i16x16(self, a: i16x16<Self>) -> u32x8<Self>;
    fn splat_u16x16(self, val: u16) -> u16x16<Self>;
    fn not_u16x16(self, a: u16x16<Self>) -> u16x16<Self>;
    fn add_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn sub_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn mul_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn and_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn or_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn xor_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn shr_u16x16(self, a: u16x16<Self>, shift: u32) -> u16x16<Self>;
    fn simd_eq_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_lt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_le_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_ge_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_gt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn zip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn zip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn unzip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn unzip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn select_u16x16(self, a: mask16x16<Self>, b: u16x16<Self>, c: u16x16<Self>) -> u16x16<Self>;
    fn min_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn max_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn combine_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x32<Self>;
    fn split_u16x16(self, a: u16x16<Self>) -> (u16x8<Self>, u16x8<Self>);
    fn narrow_u16x16(self, a: u16x16<Self>) -> u8x16<Self>;
    fn reinterpret_u8_u16x16(self, a: u16x16<Self>) -> u8x32<Self>;
    fn reinterpret_u32_u16x16(self, a: u16x16<Self>) -> u32x8<Self>;
    fn splat_mask16x16(self, val: i16) -> mask16x16<Self>;
    fn not_mask16x16(self, a: mask16x16<Self>) -> mask16x16<Self>;
    fn and_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn or_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn xor_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn select_mask16x16(
        self,
        a: mask16x16<Self>,
        b: mask16x16<Self>,
        c: mask16x16<Self>,
    ) -> mask16x16<Self>;
    fn simd_eq_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn combine_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x32<Self>;
    fn split_mask16x16(self, a: mask16x16<Self>) -> (mask16x8<Self>, mask16x8<Self>);
    fn splat_i32x8(self, val: i32) -> i32x8<Self>;
    fn not_i32x8(self, a: i32x8<Self>) -> i32x8<Self>;
    fn add_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn sub_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn mul_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn and_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn or_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn xor_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn shr_i32x8(self, a: i32x8<Self>, shift: u32) -> i32x8<Self>;
    fn simd_eq_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_lt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_le_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_ge_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_gt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn zip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn zip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn unzip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn unzip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn select_i32x8(self, a: mask32x8<Self>, b: i32x8<Self>, c: i32x8<Self>) -> i32x8<Self>;
    fn min_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn max_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn combine_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x16<Self>;
    fn split_i32x8(self, a: i32x8<Self>) -> (i32x4<Self>, i32x4<Self>);
    fn reinterpret_u8_i32x8(self, a: i32x8<Self>) -> u8x32<Self>;
    fn reinterpret_u32_i32x8(self, a: i32x8<Self>) -> u32x8<Self>;
    fn cvt_f32_i32x8(self, a: i32x8<Self>) -> f32x8<Self>;
    fn splat_u32x8(self, val: u32) -> u32x8<Self>;
    fn not_u32x8(self, a: u32x8<Self>) -> u32x8<Self>;
    fn add_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn sub_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn mul_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn and_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn or_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn xor_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn shr_u32x8(self, a: u32x8<Self>, shift: u32) -> u32x8<Self>;
    fn simd_eq_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_lt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_le_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_ge_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_gt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn zip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn zip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn unzip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn unzip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn select_u32x8(self, a: mask32x8<Self>, b: u32x8<Self>, c: u32x8<Self>) -> u32x8<Self>;
    fn min_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn max_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn combine_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x16<Self>;
    fn split_u32x8(self, a: u32x8<Self>) -> (u32x4<Self>, u32x4<Self>);
    fn reinterpret_u8_u32x8(self, a: u32x8<Self>) -> u8x32<Self>;
    fn cvt_f32_u32x8(self, a: u32x8<Self>) -> f32x8<Self>;
    fn splat_mask32x8(self, val: i32) -> mask32x8<Self>;
    fn not_mask32x8(self, a: mask32x8<Self>) -> mask32x8<Self>;
    fn and_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn or_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn xor_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn select_mask32x8(
        self,
        a: mask32x8<Self>,
        b: mask32x8<Self>,
        c: mask32x8<Self>,
    ) -> mask32x8<Self>;
    fn simd_eq_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn combine_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x16<Self>;
    fn split_mask32x8(self, a: mask32x8<Self>) -> (mask32x4<Self>, mask32x4<Self>);
    fn splat_f64x4(self, val: f64) -> f64x4<Self>;
    fn abs_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn neg_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn sqrt_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn add_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn sub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn mul_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn div_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn copysign_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn simd_eq_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_lt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_le_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_ge_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_gt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn zip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn zip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn unzip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn unzip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn max_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn max_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn min_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn min_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn madd_f64x4(self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>) -> f64x4<Self>;
    fn msub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>) -> f64x4<Self>;
    fn floor_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn fract_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn trunc_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn select_f64x4(self, a: mask64x4<Self>, b: f64x4<Self>, c: f64x4<Self>) -> f64x4<Self>;
    fn combine_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x8<Self>;
    fn split_f64x4(self, a: f64x4<Self>) -> (f64x2<Self>, f64x2<Self>);
    fn reinterpret_f32_f64x4(self, a: f64x4<Self>) -> f32x8<Self>;
    fn splat_mask64x4(self, val: i64) -> mask64x4<Self>;
    fn not_mask64x4(self, a: mask64x4<Self>) -> mask64x4<Self>;
    fn and_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn or_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn xor_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn select_mask64x4(
        self,
        a: mask64x4<Self>,
        b: mask64x4<Self>,
        c: mask64x4<Self>,
    ) -> mask64x4<Self>;
    fn simd_eq_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn combine_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x8<Self>;
    fn split_mask64x4(self, a: mask64x4<Self>) -> (mask64x2<Self>, mask64x2<Self>);
    fn splat_f32x16(self, val: f32) -> f32x16<Self>;
    fn abs_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn neg_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn sqrt_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn add_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn sub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn mul_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn div_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn copysign_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn simd_eq_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_lt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_le_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_ge_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_gt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn zip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn zip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn unzip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn unzip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn max_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn max_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn min_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn min_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn madd_f32x16(self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>) -> f32x16<Self>;
    fn msub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>) -> f32x16<Self>;
    fn floor_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn fract_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn trunc_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn select_f32x16(self, a: mask32x16<Self>, b: f32x16<Self>, c: f32x16<Self>) -> f32x16<Self>;
    fn split_f32x16(self, a: f32x16<Self>) -> (f32x8<Self>, f32x8<Self>);
    fn reinterpret_f64_f32x16(self, a: f32x16<Self>) -> f64x8<Self>;
    fn reinterpret_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>;
    fn load_interleaved_128_f32x16(self, src: &[f32; 16]) -> f32x16<Self>;
    fn store_interleaved_128_f32x16(self, a: f32x16<Self>, dest: &mut [f32; 16]);
    fn reinterpret_u8_f32x16(self, a: f32x16<Self>) -> u8x64<Self>;
    fn reinterpret_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>;
    fn cvt_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>;
    fn cvt_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>;
    fn splat_i8x64(self, val: i8) -> i8x64<Self>;
    fn not_i8x64(self, a: i8x64<Self>) -> i8x64<Self>;
    fn add_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn sub_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn mul_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn and_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn or_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn xor_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn shr_i8x64(self, a: i8x64<Self>, shift: u32) -> i8x64<Self>;
    fn simd_eq_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_lt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_le_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_ge_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_gt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn zip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn zip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn unzip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn unzip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn select_i8x64(self, a: mask8x64<Self>, b: i8x64<Self>, c: i8x64<Self>) -> i8x64<Self>;
    fn min_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn max_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn split_i8x64(self, a: i8x64<Self>) -> (i8x32<Self>, i8x32<Self>);
    fn reinterpret_u8_i8x64(self, a: i8x64<Self>) -> u8x64<Self>;
    fn reinterpret_u32_i8x64(self, a: i8x64<Self>) -> u32x16<Self>;
    fn splat_u8x64(self, val: u8) -> u8x64<Self>;
    fn not_u8x64(self, a: u8x64<Self>) -> u8x64<Self>;
    fn add_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn sub_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn mul_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn and_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn or_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn xor_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn shr_u8x64(self, a: u8x64<Self>, shift: u32) -> u8x64<Self>;
    fn simd_eq_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_lt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_le_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_ge_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_gt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn zip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn zip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn unzip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn unzip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn select_u8x64(self, a: mask8x64<Self>, b: u8x64<Self>, c: u8x64<Self>) -> u8x64<Self>;
    fn min_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn max_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn split_u8x64(self, a: u8x64<Self>) -> (u8x32<Self>, u8x32<Self>);
    fn load_interleaved_128_u8x64(self, src: &[u8; 64]) -> u8x64<Self>;
    fn store_interleaved_128_u8x64(self, a: u8x64<Self>, dest: &mut [u8; 64]);
    fn reinterpret_u32_u8x64(self, a: u8x64<Self>) -> u32x16<Self>;
    fn splat_mask8x64(self, val: i8) -> mask8x64<Self>;
    fn not_mask8x64(self, a: mask8x64<Self>) -> mask8x64<Self>;
    fn and_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn or_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn xor_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn select_mask8x64(
        self,
        a: mask8x64<Self>,
        b: mask8x64<Self>,
        c: mask8x64<Self>,
    ) -> mask8x64<Self>;
    fn simd_eq_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn split_mask8x64(self, a: mask8x64<Self>) -> (mask8x32<Self>, mask8x32<Self>);
    fn splat_i16x32(self, val: i16) -> i16x32<Self>;
    fn not_i16x32(self, a: i16x32<Self>) -> i16x32<Self>;
    fn add_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn sub_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn mul_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn and_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn or_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn xor_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn shr_i16x32(self, a: i16x32<Self>, shift: u32) -> i16x32<Self>;
    fn simd_eq_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_lt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_le_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_ge_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_gt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn zip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn zip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn unzip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn unzip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn select_i16x32(self, a: mask16x32<Self>, b: i16x32<Self>, c: i16x32<Self>) -> i16x32<Self>;
    fn min_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn max_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn split_i16x32(self, a: i16x32<Self>) -> (i16x16<Self>, i16x16<Self>);
    fn reinterpret_u8_i16x32(self, a: i16x32<Self>) -> u8x64<Self>;
    fn reinterpret_u32_i16x32(self, a: i16x32<Self>) -> u32x16<Self>;
    fn splat_u16x32(self, val: u16) -> u16x32<Self>;
    fn not_u16x32(self, a: u16x32<Self>) -> u16x32<Self>;
    fn add_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn sub_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn mul_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn and_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn or_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn xor_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn shr_u16x32(self, a: u16x32<Self>, shift: u32) -> u16x32<Self>;
    fn simd_eq_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_lt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_le_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_ge_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_gt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn zip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn zip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn unzip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn unzip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn select_u16x32(self, a: mask16x32<Self>, b: u16x32<Self>, c: u16x32<Self>) -> u16x32<Self>;
    fn min_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn max_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn split_u16x32(self, a: u16x32<Self>) -> (u16x16<Self>, u16x16<Self>);
    fn load_interleaved_128_u16x32(self, src: &[u16; 32]) -> u16x32<Self>;
    fn store_interleaved_128_u16x32(self, a: u16x32<Self>, dest: &mut [u16; 32]);
    fn narrow_u16x32(self, a: u16x32<Self>) -> u8x32<Self>;
    fn reinterpret_u8_u16x32(self, a: u16x32<Self>) -> u8x64<Self>;
    fn reinterpret_u32_u16x32(self, a: u16x32<Self>) -> u32x16<Self>;
    fn splat_mask16x32(self, val: i16) -> mask16x32<Self>;
    fn not_mask16x32(self, a: mask16x32<Self>) -> mask16x32<Self>;
    fn and_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn or_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn xor_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn select_mask16x32(
        self,
        a: mask16x32<Self>,
        b: mask16x32<Self>,
        c: mask16x32<Self>,
    ) -> mask16x32<Self>;
    fn simd_eq_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn split_mask16x32(self, a: mask16x32<Self>) -> (mask16x16<Self>, mask16x16<Self>);
    fn splat_i32x16(self, val: i32) -> i32x16<Self>;
    fn not_i32x16(self, a: i32x16<Self>) -> i32x16<Self>;
    fn add_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn sub_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn mul_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn and_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn or_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn xor_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn shr_i32x16(self, a: i32x16<Self>, shift: u32) -> i32x16<Self>;
    fn simd_eq_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_lt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_le_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_ge_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_gt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn zip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn zip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn unzip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn unzip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn select_i32x16(self, a: mask32x16<Self>, b: i32x16<Self>, c: i32x16<Self>) -> i32x16<Self>;
    fn min_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn max_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn split_i32x16(self, a: i32x16<Self>) -> (i32x8<Self>, i32x8<Self>);
    fn reinterpret_u8_i32x16(self, a: i32x16<Self>) -> u8x64<Self>;
    fn reinterpret_u32_i32x16(self, a: i32x16<Self>) -> u32x16<Self>;
    fn cvt_f32_i32x16(self, a: i32x16<Self>) -> f32x16<Self>;
    fn splat_u32x16(self, val: u32) -> u32x16<Self>;
    fn not_u32x16(self, a: u32x16<Self>) -> u32x16<Self>;
    fn add_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn sub_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn mul_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn and_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn or_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn xor_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn shr_u32x16(self, a: u32x16<Self>, shift: u32) -> u32x16<Self>;
    fn simd_eq_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_lt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_le_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_ge_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_gt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn zip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn zip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn unzip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn unzip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn select_u32x16(self, a: mask32x16<Self>, b: u32x16<Self>, c: u32x16<Self>) -> u32x16<Self>;
    fn min_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn max_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn split_u32x16(self, a: u32x16<Self>) -> (u32x8<Self>, u32x8<Self>);
    fn load_interleaved_128_u32x16(self, src: &[u32; 16]) -> u32x16<Self>;
    fn store_interleaved_128_u32x16(self, a: u32x16<Self>, dest: &mut [u32; 16]);
781    fn reinterpret_u8_u32x16(self, a: u32x16<Self>) -> u8x64<Self>;
782    fn cvt_f32_u32x16(self, a: u32x16<Self>) -> f32x16<Self>;
783    fn splat_mask32x16(self, val: i32) -> mask32x16<Self>;
784    fn not_mask32x16(self, a: mask32x16<Self>) -> mask32x16<Self>;
785    fn and_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
786    fn or_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
787    fn xor_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
788    fn select_mask32x16(
789        self,
790        a: mask32x16<Self>,
791        b: mask32x16<Self>,
792        c: mask32x16<Self>,
793    ) -> mask32x16<Self>;
794    fn simd_eq_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
795    fn split_mask32x16(self, a: mask32x16<Self>) -> (mask32x8<Self>, mask32x8<Self>);
796    fn splat_f64x8(self, val: f64) -> f64x8<Self>;
797    fn abs_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
798    fn neg_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
799    fn sqrt_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
800    fn add_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
801    fn sub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
802    fn mul_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
803    fn div_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
804    fn copysign_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
805    fn simd_eq_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
806    fn simd_lt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
807    fn simd_le_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
808    fn simd_ge_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
809    fn simd_gt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
810    fn zip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
811    fn zip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
812    fn unzip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
813    fn unzip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
814    fn max_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
815    fn max_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
816    fn min_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
817    fn min_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
818    fn madd_f64x8(self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>) -> f64x8<Self>;
819    fn msub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>) -> f64x8<Self>;
820    fn floor_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
821    fn fract_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
822    fn trunc_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
823    fn select_f64x8(self, a: mask64x8<Self>, b: f64x8<Self>, c: f64x8<Self>) -> f64x8<Self>;
824    fn split_f64x8(self, a: f64x8<Self>) -> (f64x4<Self>, f64x4<Self>);
825    fn reinterpret_f32_f64x8(self, a: f64x8<Self>) -> f32x16<Self>;
826    fn splat_mask64x8(self, val: i64) -> mask64x8<Self>;
827    fn not_mask64x8(self, a: mask64x8<Self>) -> mask64x8<Self>;
828    fn and_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
829    fn or_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
830    fn xor_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
831    fn select_mask64x8(
832        self,
833        a: mask64x8<Self>,
834        b: mask64x8<Self>,
835        c: mask64x8<Self>,
836    ) -> mask64x8<Self>;
837    fn simd_eq_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
838    fn split_mask64x8(self, a: mask64x8<Self>) -> (mask64x4<Self>, mask64x4<Self>);
839}
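
// A minimal usage sketch (not generated code): `add_quads` is a hypothetical
// helper showing the intended calling pattern, i.e. enter `vectorize` once so
// the level's target features are enabled, then use the width-suffixed ops.
#[allow(dead_code)]
fn add_quads<S: Simd>(simd: S, a: f32x4<S>, b: f32x4<S>) -> f32x4<S> {
    // In real code the closure body should be `#[inline(always)]`, per the
    // `vectorize` documentation above.
    simd.vectorize(|| simd.add_f32x4(a, b))
}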
pub trait SimdBase<Element: SimdElement, S: Simd>:
    Copy + Sync + Send + 'static + crate::Bytes
{
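    #[doc = r" The number of elements (lanes) in the SIMD vector."]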
    const N: usize;
    #[doc = r" A SIMD vector mask with the same number of elements."]
    #[doc = r""]
    #[doc = r" Each mask element is represented as an integer which is"]
    #[doc = r" all-0 for `false` and all-1 for `true`. When we get deeper"]
    #[doc = r" into AVX-512, we will need to think about predication masks."]
    #[doc = r""]
    #[doc = r" One possibility to consider is that the `Simd` trait grows"]
    #[doc = r" `maskAxB` associated types."]
    type Mask: SimdMask<Element::Mask, S>;
    #[doc = r" A 128-bit SIMD vector of the same scalar type."]
    type Block: SimdBase<Element, S>;
    fn as_slice(&self) -> &[Element];
    fn as_mut_slice(&mut self) -> &mut [Element];
    #[doc = r" Create a SIMD vector from a slice."]
    #[doc = r""]
    #[doc = r" The slice must contain exactly `Self::N` elements."]
    fn from_slice(simd: S, slice: &[Element]) -> Self;
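    #[doc = r" Create a SIMD vector with every lane set to `val`."]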
    fn splat(simd: S, val: Element) -> Self;
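    #[doc = r" Create a SIMD vector by repeating a 128-bit block across all of"]
    #[doc = r" its blocks."]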
    fn block_splat(block: Self::Block) -> Self;
}
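
// A minimal sketch of the slice round-trip (not generated code): `first_lane`
// is a hypothetical helper; `data` must contain exactly
// `<S::f32s as SimdBase<f32, S>>::N` elements, per `from_slice`'s contract.
#[allow(dead_code)]
fn first_lane<S: Simd>(simd: S, data: &[f32]) -> f32 {
    let v = <S::f32s as SimdBase<f32, S>>::from_slice(simd, data);
    v.as_slice()[0]
}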
pub trait SimdFloat<Element: SimdElement, S: Simd>:
    SimdBase<Element, S>
    + core::ops::Neg<Output = Self>
    + core::ops::Add<Output = Self>
    + core::ops::Add<Element, Output = Self>
    + core::ops::Sub<Output = Self>
    + core::ops::Sub<Element, Output = Self>
    + core::ops::Mul<Output = Self>
    + core::ops::Mul<Element, Output = Self>
    + core::ops::Div<Output = Self>
    + core::ops::Div<Element, Output = Self>
{
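    #[doc = r" Convert each lane to an integer, truncating toward zero."]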
    #[inline(always)]
    fn to_int<T: SimdCvtTruncate<Self>>(self) -> T {
        T::truncate_from(self)
    }
    fn abs(self) -> Self;
    fn sqrt(self) -> Self;
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn max(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn min(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> Self;
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> Self;
    fn floor(self) -> Self;
    fn fract(self) -> Self;
    fn trunc(self) -> Self;
}
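
// A minimal sketch (not generated code): `madd_then_trunc` is a hypothetical
// helper, assuming vectors convert to themselves via `SimdInto`. `madd` folds
// a multiply and an add into one operation, and the `SimdCvtTruncate` bound
// on `Simd::i32s` lets `to_int` infer its target type from the return type.
#[allow(dead_code)]
fn madd_then_trunc<S: Simd>(x: S::f32s, y: S::f32s, z: S::f32s) -> S::i32s {
    x.madd(y, z).to_int()
}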
pub trait SimdInt<Element: SimdElement, S: Simd>:
    SimdBase<Element, S>
    + core::ops::Add<Output = Self>
    + core::ops::Add<Element, Output = Self>
    + core::ops::Sub<Output = Self>
    + core::ops::Sub<Element, Output = Self>
    + core::ops::Mul<Output = Self>
    + core::ops::Mul<Element, Output = Self>
    + core::ops::BitAnd<Output = Self>
    + core::ops::BitAnd<Element, Output = Self>
    + core::ops::BitOr<Output = Self>
    + core::ops::BitOr<Element, Output = Self>
    + core::ops::BitXor<Output = Self>
    + core::ops::BitXor<Element, Output = Self>
{
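    #[doc = r" Convert each integer lane to floating-point."]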
    #[inline(always)]
    fn to_float<T: SimdCvtFloat<Self>>(self) -> T {
        T::float_from(self)
    }
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn min(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn max(self, rhs: impl SimdInto<Self, S>) -> Self;
}
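
// A minimal sketch of lane-wise clamping via the `SimdInt` `min`/`max`
// methods (not generated code): `clamp_lanes` is a hypothetical helper,
// assuming vectors convert to themselves via `SimdInto`.
#[allow(dead_code)]
fn clamp_lanes<S: Simd>(v: S::u32s, lo: S::u32s, hi: S::u32s) -> S::u32s {
    // Raise each lane to at least `lo`, then cap it at `hi`.
    v.max(lo).min(hi)
}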
pub trait SimdMask<Element: SimdElement, S: Simd>:
    SimdBase<Element, S>
    + core::ops::Not<Output = Self>
    + core::ops::BitAnd<Output = Self>
    + core::ops::BitOr<Output = Self>
    + core::ops::BitXor<Output = Self>
{
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
}
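
// A minimal sketch of a compound predicate (not generated code): `in_range`
// is a hypothetical helper that combines two float comparisons with the
// `SimdMask` `BitAnd` operator.
#[allow(dead_code)]
fn in_range<S: Simd>(
    v: S::f32s,
    lo: S::f32s,
    hi: S::f32s,
) -> <S::f32s as SimdBase<f32, S>>::Mask {
    v.simd_ge(lo) & v.simd_le(hi)
}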