fearless_simd/generated/simd_trait.rs

// Copyright 2025 the Fearless_SIMD Authors
// SPDX-License-Identifier: Apache-2.0 OR MIT

// This file is autogenerated by fearless_simd_gen

use crate::{
    Bytes, Level, Select, SimdCvtFloat, SimdCvtTruncate, SimdElement, SimdFrom, SimdInto,
    seal::Seal,
};
use crate::{
    f32x4, f32x8, f32x16, f64x2, f64x4, f64x8, i8x16, i8x32, i8x64, i16x8, i16x16, i16x32, i32x4,
    i32x8, i32x16, mask8x16, mask8x32, mask8x64, mask16x8, mask16x16, mask16x32, mask32x4,
    mask32x8, mask32x16, mask64x2, mask64x4, mask64x8, u8x16, u8x32, u8x64, u16x8, u16x16, u16x32,
    u32x4, u32x8, u32x16,
};
#[doc = r" Operations available at every SIMD level."]
#[doc = r""]
#[doc = r" Implementors supply the associated vector and mask types natural to the"]
#[doc = r" target, plus the lane-wise operations on each fixed-width vector type."]
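#[doc = r""]
#[doc = r" A minimal usage sketch, generic over any implementation of this trait"]
#[doc = r" (illustrative only):"]
#[doc = r""]
#[doc = r" ```ignore"]
#[doc = r" fn double<S: Simd>(simd: S, x: f32x4<S>) -> f32x4<S> {"]
#[doc = r"     simd.add_f32x4(x, x)"]
#[doc = r" }"]
#[doc = r" ```"]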
pub trait Simd: Sized + Clone + Copy + Send + Sync + Seal + 'static {
    type f32s: SimdFloat<
            f32,
            Self,
            Block = f32x4<Self>,
            Mask = Self::mask32s,
            Bytes = <Self::u32s as Bytes>::Bytes,
        > + SimdCvtFloat<Self::u32s>
        + SimdCvtFloat<Self::i32s>;
    type u8s: SimdInt<u8, Self, Block = u8x16<Self>, Mask = Self::mask8s>;
    type i8s: SimdInt<
            i8,
            Self,
            Block = i8x16<Self>,
            Mask = Self::mask8s,
            Bytes = <Self::u8s as Bytes>::Bytes,
        > + core::ops::Neg<Output = Self::i8s>;
    type u16s: SimdInt<u16, Self, Block = u16x8<Self>, Mask = Self::mask16s>;
    type i16s: SimdInt<
            i16,
            Self,
            Block = i16x8<Self>,
            Mask = Self::mask16s,
            Bytes = <Self::u16s as Bytes>::Bytes,
        > + core::ops::Neg<Output = Self::i16s>;
    type u32s: SimdInt<u32, Self, Block = u32x4<Self>, Mask = Self::mask32s>
        + SimdCvtTruncate<Self::f32s>;
    type i32s: SimdInt<
            i32,
            Self,
            Block = i32x4<Self>,
            Mask = Self::mask32s,
            Bytes = <Self::u32s as Bytes>::Bytes,
        > + SimdCvtTruncate<Self::f32s>
        + core::ops::Neg<Output = Self::i32s>;
    type mask8s: SimdMask<i8, Self, Block = mask8x16<Self>, Bytes = <Self::u8s as Bytes>::Bytes>
        + Select<Self::u8s>
        + Select<Self::i8s>
        + Select<Self::mask8s>;
    type mask16s: SimdMask<i16, Self, Block = mask16x8<Self>, Bytes = <Self::u16s as Bytes>::Bytes>
        + Select<Self::u16s>
        + Select<Self::i16s>
        + Select<Self::mask16s>;
    type mask32s: SimdMask<i32, Self, Block = mask32x4<Self>, Bytes = <Self::u32s as Bytes>::Bytes>
        + Select<Self::f32s>
        + Select<Self::u32s>
        + Select<Self::i32s>
        + Select<Self::mask32s>;
    fn level(self) -> Level;
    #[doc = r" Call the given function with the level's CPU features enabled."]
    #[doc = r""]
    #[doc = r" For performance, the provided function should be `#[inline(always)]`."]
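    #[doc = r""]
    #[doc = r" A minimal sketch of the intended pattern (names are illustrative; an"]
    #[doc = r" inner function carries the attribute, since closures cannot):"]
    #[doc = r""]
    #[doc = r" ```ignore"]
    #[doc = r" fn run<S: Simd>(simd: S) -> f32x4<S> {"]
    #[doc = r"     #[inline(always)]"]
    #[doc = r"     fn kernel<S: Simd>(simd: S) -> f32x4<S> {"]
    #[doc = r"         simd.splat_f32x4(1.0)"]
    #[doc = r"     }"]
    #[doc = r"     simd.vectorize(|| kernel(simd))"]
    #[doc = r" }"]
    #[doc = r" ```"]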
    fn vectorize<F: FnOnce() -> R, R>(self, f: F) -> R;
    fn splat_f32x4(self, val: f32) -> f32x4<Self>;
    fn abs_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn neg_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn sqrt_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn add_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn sub_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn mul_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn div_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn copysign_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn simd_eq_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_lt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_le_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_ge_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn simd_gt_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> mask32x4<Self>;
    fn zip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn zip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn unzip_low_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn unzip_high_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
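    // Here and for the other widths below, the plain max/min may inherit the
    // platform's native NaN behavior, while the `_precise` variants are
    // expected to handle NaN consistently across targets (an assumption based
    // on the naming; the implementations define the exact semantics).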
    fn max_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn max_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn min_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
    fn min_precise_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x4<Self>;
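    // madd and msub are multiply-add and multiply-subtract; targets with FMA
    // support may fuse them into a single instruction.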
    fn madd_f32x4(self, a: f32x4<Self>, b: f32x4<Self>, c: f32x4<Self>) -> f32x4<Self>;
    fn msub_f32x4(self, a: f32x4<Self>, b: f32x4<Self>, c: f32x4<Self>) -> f32x4<Self>;
    fn floor_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn fract_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn trunc_f32x4(self, a: f32x4<Self>) -> f32x4<Self>;
    fn select_f32x4(self, a: mask32x4<Self>, b: f32x4<Self>, c: f32x4<Self>) -> f32x4<Self>;
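    // combine concatenates two vectors into one of twice the width; the
    // split_* methods on the wider types below are the inverse.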
    fn combine_f32x4(self, a: f32x4<Self>, b: f32x4<Self>) -> f32x8<Self>;
    fn reinterpret_f64_f32x4(self, a: f32x4<Self>) -> f64x2<Self>;
    fn reinterpret_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>;
    fn reinterpret_u8_f32x4(self, a: f32x4<Self>) -> u8x16<Self>;
    fn reinterpret_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>;
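    // The reinterpret_* methods are bitcasts that leave the underlying bytes
    // unchanged; the cvt_* methods perform numeric conversion.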
    fn cvt_u32_f32x4(self, a: f32x4<Self>) -> u32x4<Self>;
    fn cvt_i32_f32x4(self, a: f32x4<Self>) -> i32x4<Self>;
    fn splat_i8x16(self, val: i8) -> i8x16<Self>;
    fn not_i8x16(self, a: i8x16<Self>) -> i8x16<Self>;
    fn add_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn sub_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn mul_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn and_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn or_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn xor_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
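    // shr/shl shift every lane by the same `shift` amount; shrv shifts each
    // lane by the count held in the corresponding lane of `b`.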
    fn shr_i8x16(self, a: i8x16<Self>, shift: u32) -> i8x16<Self>;
    fn shrv_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn shl_i8x16(self, a: i8x16<Self>, shift: u32) -> i8x16<Self>;
    fn simd_eq_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_lt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_le_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_ge_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
    fn simd_gt_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> mask8x16<Self>;
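    // zip_low/zip_high interleave lanes from the two inputs, while
    // unzip_low/unzip_high de-interleave them (the conventional even/odd
    // lane shuffles).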
    fn zip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn zip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn unzip_low_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn unzip_high_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn select_i8x16(self, a: mask8x16<Self>, b: i8x16<Self>, c: i8x16<Self>) -> i8x16<Self>;
    fn min_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn max_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x16<Self>;
    fn combine_i8x16(self, a: i8x16<Self>, b: i8x16<Self>) -> i8x32<Self>;
    fn neg_i8x16(self, a: i8x16<Self>) -> i8x16<Self>;
    fn reinterpret_u8_i8x16(self, a: i8x16<Self>) -> u8x16<Self>;
    fn reinterpret_u32_i8x16(self, a: i8x16<Self>) -> u32x4<Self>;
    fn splat_u8x16(self, val: u8) -> u8x16<Self>;
    fn not_u8x16(self, a: u8x16<Self>) -> u8x16<Self>;
    fn add_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn sub_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn mul_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn and_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn or_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn xor_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn shr_u8x16(self, a: u8x16<Self>, shift: u32) -> u8x16<Self>;
    fn shrv_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn shl_u8x16(self, a: u8x16<Self>, shift: u32) -> u8x16<Self>;
    fn simd_eq_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_lt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_le_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_ge_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn simd_gt_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> mask8x16<Self>;
    fn zip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn zip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn unzip_low_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn unzip_high_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn select_u8x16(self, a: mask8x16<Self>, b: u8x16<Self>, c: u8x16<Self>) -> u8x16<Self>;
    fn min_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn max_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x16<Self>;
    fn combine_u8x16(self, a: u8x16<Self>, b: u8x16<Self>) -> u8x32<Self>;
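    // widen zero-extends each u8 lane to u16; narrow (on the 16-bit types
    // below) converts back down to u8.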
    fn widen_u8x16(self, a: u8x16<Self>) -> u16x16<Self>;
    fn reinterpret_u32_u8x16(self, a: u8x16<Self>) -> u32x4<Self>;
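    // Mask lanes follow the all-zeros/all-ones convention; splat_mask* takes a
    // signed value, with -1 producing an all-true mask (an assumption based on
    // the signed parameter type).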
    fn splat_mask8x16(self, val: i8) -> mask8x16<Self>;
    fn not_mask8x16(self, a: mask8x16<Self>) -> mask8x16<Self>;
    fn and_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn or_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn xor_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn select_mask8x16(
        self,
        a: mask8x16<Self>,
        b: mask8x16<Self>,
        c: mask8x16<Self>,
    ) -> mask8x16<Self>;
    fn simd_eq_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x16<Self>;
    fn combine_mask8x16(self, a: mask8x16<Self>, b: mask8x16<Self>) -> mask8x32<Self>;
    fn splat_i16x8(self, val: i16) -> i16x8<Self>;
    fn not_i16x8(self, a: i16x8<Self>) -> i16x8<Self>;
    fn add_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn sub_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn mul_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn and_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn or_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn xor_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn shr_i16x8(self, a: i16x8<Self>, shift: u32) -> i16x8<Self>;
    fn shrv_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn shl_i16x8(self, a: i16x8<Self>, shift: u32) -> i16x8<Self>;
    fn simd_eq_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_lt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_le_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_ge_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn simd_gt_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> mask16x8<Self>;
    fn zip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn zip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn unzip_low_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn unzip_high_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn select_i16x8(self, a: mask16x8<Self>, b: i16x8<Self>, c: i16x8<Self>) -> i16x8<Self>;
    fn min_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn max_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x8<Self>;
    fn combine_i16x8(self, a: i16x8<Self>, b: i16x8<Self>) -> i16x16<Self>;
    fn neg_i16x8(self, a: i16x8<Self>) -> i16x8<Self>;
    fn reinterpret_u8_i16x8(self, a: i16x8<Self>) -> u8x16<Self>;
    fn reinterpret_u32_i16x8(self, a: i16x8<Self>) -> u32x4<Self>;
    fn splat_u16x8(self, val: u16) -> u16x8<Self>;
    fn not_u16x8(self, a: u16x8<Self>) -> u16x8<Self>;
    fn add_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn sub_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn mul_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn and_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn or_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn xor_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn shr_u16x8(self, a: u16x8<Self>, shift: u32) -> u16x8<Self>;
    fn shrv_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn shl_u16x8(self, a: u16x8<Self>, shift: u32) -> u16x8<Self>;
    fn simd_eq_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_lt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_le_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_ge_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn simd_gt_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> mask16x8<Self>;
    fn zip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn zip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn unzip_low_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn unzip_high_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn select_u16x8(self, a: mask16x8<Self>, b: u16x8<Self>, c: u16x8<Self>) -> u16x8<Self>;
    fn min_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn max_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x8<Self>;
    fn combine_u16x8(self, a: u16x8<Self>, b: u16x8<Self>) -> u16x16<Self>;
    fn reinterpret_u8_u16x8(self, a: u16x8<Self>) -> u8x16<Self>;
    fn reinterpret_u32_u16x8(self, a: u16x8<Self>) -> u32x4<Self>;
    fn splat_mask16x8(self, val: i16) -> mask16x8<Self>;
    fn not_mask16x8(self, a: mask16x8<Self>) -> mask16x8<Self>;
    fn and_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn or_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn xor_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn select_mask16x8(
        self,
        a: mask16x8<Self>,
        b: mask16x8<Self>,
        c: mask16x8<Self>,
    ) -> mask16x8<Self>;
    fn simd_eq_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x8<Self>;
    fn combine_mask16x8(self, a: mask16x8<Self>, b: mask16x8<Self>) -> mask16x16<Self>;
    fn splat_i32x4(self, val: i32) -> i32x4<Self>;
    fn not_i32x4(self, a: i32x4<Self>) -> i32x4<Self>;
    fn add_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn sub_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn mul_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn and_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn or_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn xor_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn shr_i32x4(self, a: i32x4<Self>, shift: u32) -> i32x4<Self>;
    fn shrv_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn shl_i32x4(self, a: i32x4<Self>, shift: u32) -> i32x4<Self>;
    fn simd_eq_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_lt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_le_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_ge_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn simd_gt_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> mask32x4<Self>;
    fn zip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn zip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn unzip_low_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn unzip_high_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn select_i32x4(self, a: mask32x4<Self>, b: i32x4<Self>, c: i32x4<Self>) -> i32x4<Self>;
    fn min_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn max_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x4<Self>;
    fn combine_i32x4(self, a: i32x4<Self>, b: i32x4<Self>) -> i32x8<Self>;
    fn neg_i32x4(self, a: i32x4<Self>) -> i32x4<Self>;
    fn reinterpret_u8_i32x4(self, a: i32x4<Self>) -> u8x16<Self>;
    fn reinterpret_u32_i32x4(self, a: i32x4<Self>) -> u32x4<Self>;
    fn cvt_f32_i32x4(self, a: i32x4<Self>) -> f32x4<Self>;
    fn splat_u32x4(self, val: u32) -> u32x4<Self>;
    fn not_u32x4(self, a: u32x4<Self>) -> u32x4<Self>;
    fn add_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn sub_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn mul_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn and_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn or_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn xor_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn shr_u32x4(self, a: u32x4<Self>, shift: u32) -> u32x4<Self>;
    fn shrv_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn shl_u32x4(self, a: u32x4<Self>, shift: u32) -> u32x4<Self>;
    fn simd_eq_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_lt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_le_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_ge_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn simd_gt_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> mask32x4<Self>;
    fn zip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn zip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn unzip_low_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn unzip_high_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn select_u32x4(self, a: mask32x4<Self>, b: u32x4<Self>, c: u32x4<Self>) -> u32x4<Self>;
    fn min_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn max_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x4<Self>;
    fn combine_u32x4(self, a: u32x4<Self>, b: u32x4<Self>) -> u32x8<Self>;
    fn reinterpret_u8_u32x4(self, a: u32x4<Self>) -> u8x16<Self>;
    fn cvt_f32_u32x4(self, a: u32x4<Self>) -> f32x4<Self>;
    fn splat_mask32x4(self, val: i32) -> mask32x4<Self>;
    fn not_mask32x4(self, a: mask32x4<Self>) -> mask32x4<Self>;
    fn and_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn or_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn xor_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn select_mask32x4(
        self,
        a: mask32x4<Self>,
        b: mask32x4<Self>,
        c: mask32x4<Self>,
    ) -> mask32x4<Self>;
    fn simd_eq_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x4<Self>;
    fn combine_mask32x4(self, a: mask32x4<Self>, b: mask32x4<Self>) -> mask32x8<Self>;
    fn splat_f64x2(self, val: f64) -> f64x2<Self>;
    fn abs_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn neg_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn sqrt_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn add_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn sub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn mul_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn div_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn copysign_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn simd_eq_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_lt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_le_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_ge_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn simd_gt_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> mask64x2<Self>;
    fn zip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn zip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn unzip_low_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn unzip_high_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn max_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn max_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn min_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn min_precise_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x2<Self>;
    fn madd_f64x2(self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>) -> f64x2<Self>;
    fn msub_f64x2(self, a: f64x2<Self>, b: f64x2<Self>, c: f64x2<Self>) -> f64x2<Self>;
    fn floor_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn fract_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn trunc_f64x2(self, a: f64x2<Self>) -> f64x2<Self>;
    fn select_f64x2(self, a: mask64x2<Self>, b: f64x2<Self>, c: f64x2<Self>) -> f64x2<Self>;
    fn combine_f64x2(self, a: f64x2<Self>, b: f64x2<Self>) -> f64x4<Self>;
    fn reinterpret_f32_f64x2(self, a: f64x2<Self>) -> f32x4<Self>;
    fn splat_mask64x2(self, val: i64) -> mask64x2<Self>;
    fn not_mask64x2(self, a: mask64x2<Self>) -> mask64x2<Self>;
    fn and_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn or_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn xor_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn select_mask64x2(
        self,
        a: mask64x2<Self>,
        b: mask64x2<Self>,
        c: mask64x2<Self>,
    ) -> mask64x2<Self>;
    fn simd_eq_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x2<Self>;
    fn combine_mask64x2(self, a: mask64x2<Self>, b: mask64x2<Self>) -> mask64x4<Self>;
    fn splat_f32x8(self, val: f32) -> f32x8<Self>;
    fn abs_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn neg_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn sqrt_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn add_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn sub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn mul_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn div_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn copysign_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn simd_eq_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_lt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_le_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_ge_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn simd_gt_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> mask32x8<Self>;
    fn zip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn zip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn unzip_low_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn unzip_high_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn max_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn max_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn min_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn min_precise_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x8<Self>;
    fn madd_f32x8(self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>) -> f32x8<Self>;
    fn msub_f32x8(self, a: f32x8<Self>, b: f32x8<Self>, c: f32x8<Self>) -> f32x8<Self>;
    fn floor_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn fract_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn trunc_f32x8(self, a: f32x8<Self>) -> f32x8<Self>;
    fn select_f32x8(self, a: mask32x8<Self>, b: f32x8<Self>, c: f32x8<Self>) -> f32x8<Self>;
    fn combine_f32x8(self, a: f32x8<Self>, b: f32x8<Self>) -> f32x16<Self>;
    fn split_f32x8(self, a: f32x8<Self>) -> (f32x4<Self>, f32x4<Self>);
    fn reinterpret_f64_f32x8(self, a: f32x8<Self>) -> f64x4<Self>;
    fn reinterpret_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>;
    fn reinterpret_u8_f32x8(self, a: f32x8<Self>) -> u8x32<Self>;
    fn reinterpret_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>;
    fn cvt_u32_f32x8(self, a: f32x8<Self>) -> u32x8<Self>;
    fn cvt_i32_f32x8(self, a: f32x8<Self>) -> i32x8<Self>;
    fn splat_i8x32(self, val: i8) -> i8x32<Self>;
    fn not_i8x32(self, a: i8x32<Self>) -> i8x32<Self>;
    fn add_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn sub_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn mul_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn and_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn or_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn xor_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn shr_i8x32(self, a: i8x32<Self>, shift: u32) -> i8x32<Self>;
    fn shrv_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn shl_i8x32(self, a: i8x32<Self>, shift: u32) -> i8x32<Self>;
    fn simd_eq_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_lt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_le_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_ge_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn simd_gt_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> mask8x32<Self>;
    fn zip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn zip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn unzip_low_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn unzip_high_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn select_i8x32(self, a: mask8x32<Self>, b: i8x32<Self>, c: i8x32<Self>) -> i8x32<Self>;
    fn min_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn max_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x32<Self>;
    fn combine_i8x32(self, a: i8x32<Self>, b: i8x32<Self>) -> i8x64<Self>;
    fn split_i8x32(self, a: i8x32<Self>) -> (i8x16<Self>, i8x16<Self>);
    fn neg_i8x32(self, a: i8x32<Self>) -> i8x32<Self>;
    fn reinterpret_u8_i8x32(self, a: i8x32<Self>) -> u8x32<Self>;
    fn reinterpret_u32_i8x32(self, a: i8x32<Self>) -> u32x8<Self>;
    fn splat_u8x32(self, val: u8) -> u8x32<Self>;
    fn not_u8x32(self, a: u8x32<Self>) -> u8x32<Self>;
    fn add_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn sub_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn mul_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn and_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn or_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn xor_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn shr_u8x32(self, a: u8x32<Self>, shift: u32) -> u8x32<Self>;
    fn shrv_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn shl_u8x32(self, a: u8x32<Self>, shift: u32) -> u8x32<Self>;
    fn simd_eq_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_lt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_le_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_ge_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn simd_gt_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> mask8x32<Self>;
    fn zip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn zip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn unzip_low_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn unzip_high_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn select_u8x32(self, a: mask8x32<Self>, b: u8x32<Self>, c: u8x32<Self>) -> u8x32<Self>;
    fn min_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn max_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x32<Self>;
    fn combine_u8x32(self, a: u8x32<Self>, b: u8x32<Self>) -> u8x64<Self>;
    fn split_u8x32(self, a: u8x32<Self>) -> (u8x16<Self>, u8x16<Self>);
    fn widen_u8x32(self, a: u8x32<Self>) -> u16x32<Self>;
    fn reinterpret_u32_u8x32(self, a: u8x32<Self>) -> u32x8<Self>;
    fn splat_mask8x32(self, val: i8) -> mask8x32<Self>;
    fn not_mask8x32(self, a: mask8x32<Self>) -> mask8x32<Self>;
    fn and_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn or_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn xor_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn select_mask8x32(
        self,
        a: mask8x32<Self>,
        b: mask8x32<Self>,
        c: mask8x32<Self>,
    ) -> mask8x32<Self>;
    fn simd_eq_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x32<Self>;
    fn combine_mask8x32(self, a: mask8x32<Self>, b: mask8x32<Self>) -> mask8x64<Self>;
    fn split_mask8x32(self, a: mask8x32<Self>) -> (mask8x16<Self>, mask8x16<Self>);
    fn splat_i16x16(self, val: i16) -> i16x16<Self>;
    fn not_i16x16(self, a: i16x16<Self>) -> i16x16<Self>;
    fn add_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn sub_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn mul_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn and_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn or_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn xor_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn shr_i16x16(self, a: i16x16<Self>, shift: u32) -> i16x16<Self>;
    fn shrv_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn shl_i16x16(self, a: i16x16<Self>, shift: u32) -> i16x16<Self>;
    fn simd_eq_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_lt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_le_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_ge_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn simd_gt_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> mask16x16<Self>;
    fn zip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn zip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn unzip_low_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn unzip_high_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn select_i16x16(self, a: mask16x16<Self>, b: i16x16<Self>, c: i16x16<Self>) -> i16x16<Self>;
    fn min_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn max_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x16<Self>;
    fn combine_i16x16(self, a: i16x16<Self>, b: i16x16<Self>) -> i16x32<Self>;
    fn split_i16x16(self, a: i16x16<Self>) -> (i16x8<Self>, i16x8<Self>);
    fn neg_i16x16(self, a: i16x16<Self>) -> i16x16<Self>;
    fn reinterpret_u8_i16x16(self, a: i16x16<Self>) -> u8x32<Self>;
    fn reinterpret_u32_i16x16(self, a: i16x16<Self>) -> u32x8<Self>;
    fn splat_u16x16(self, val: u16) -> u16x16<Self>;
    fn not_u16x16(self, a: u16x16<Self>) -> u16x16<Self>;
    fn add_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn sub_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn mul_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn and_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn or_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn xor_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn shr_u16x16(self, a: u16x16<Self>, shift: u32) -> u16x16<Self>;
    fn shrv_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn shl_u16x16(self, a: u16x16<Self>, shift: u32) -> u16x16<Self>;
    fn simd_eq_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_lt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_le_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_ge_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn simd_gt_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> mask16x16<Self>;
    fn zip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn zip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn unzip_low_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn unzip_high_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn select_u16x16(self, a: mask16x16<Self>, b: u16x16<Self>, c: u16x16<Self>) -> u16x16<Self>;
    fn min_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn max_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x16<Self>;
    fn combine_u16x16(self, a: u16x16<Self>, b: u16x16<Self>) -> u16x32<Self>;
    fn split_u16x16(self, a: u16x16<Self>) -> (u16x8<Self>, u16x8<Self>);
    fn narrow_u16x16(self, a: u16x16<Self>) -> u8x16<Self>;
    fn reinterpret_u8_u16x16(self, a: u16x16<Self>) -> u8x32<Self>;
    fn reinterpret_u32_u16x16(self, a: u16x16<Self>) -> u32x8<Self>;
    fn splat_mask16x16(self, val: i16) -> mask16x16<Self>;
    fn not_mask16x16(self, a: mask16x16<Self>) -> mask16x16<Self>;
    fn and_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn or_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn xor_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn select_mask16x16(
        self,
        a: mask16x16<Self>,
        b: mask16x16<Self>,
        c: mask16x16<Self>,
    ) -> mask16x16<Self>;
    fn simd_eq_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x16<Self>;
    fn combine_mask16x16(self, a: mask16x16<Self>, b: mask16x16<Self>) -> mask16x32<Self>;
    fn split_mask16x16(self, a: mask16x16<Self>) -> (mask16x8<Self>, mask16x8<Self>);
    fn splat_i32x8(self, val: i32) -> i32x8<Self>;
    fn not_i32x8(self, a: i32x8<Self>) -> i32x8<Self>;
    fn add_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn sub_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn mul_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn and_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn or_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn xor_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn shr_i32x8(self, a: i32x8<Self>, shift: u32) -> i32x8<Self>;
    fn shrv_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn shl_i32x8(self, a: i32x8<Self>, shift: u32) -> i32x8<Self>;
    fn simd_eq_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_lt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_le_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_ge_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn simd_gt_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> mask32x8<Self>;
    fn zip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn zip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn unzip_low_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn unzip_high_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn select_i32x8(self, a: mask32x8<Self>, b: i32x8<Self>, c: i32x8<Self>) -> i32x8<Self>;
    fn min_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn max_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x8<Self>;
    fn combine_i32x8(self, a: i32x8<Self>, b: i32x8<Self>) -> i32x16<Self>;
    fn split_i32x8(self, a: i32x8<Self>) -> (i32x4<Self>, i32x4<Self>);
    fn neg_i32x8(self, a: i32x8<Self>) -> i32x8<Self>;
    fn reinterpret_u8_i32x8(self, a: i32x8<Self>) -> u8x32<Self>;
    fn reinterpret_u32_i32x8(self, a: i32x8<Self>) -> u32x8<Self>;
    fn cvt_f32_i32x8(self, a: i32x8<Self>) -> f32x8<Self>;
    fn splat_u32x8(self, val: u32) -> u32x8<Self>;
    fn not_u32x8(self, a: u32x8<Self>) -> u32x8<Self>;
    fn add_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn sub_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn mul_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn and_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn or_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn xor_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn shr_u32x8(self, a: u32x8<Self>, shift: u32) -> u32x8<Self>;
    fn shrv_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn shl_u32x8(self, a: u32x8<Self>, shift: u32) -> u32x8<Self>;
    fn simd_eq_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_lt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_le_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_ge_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn simd_gt_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> mask32x8<Self>;
    fn zip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn zip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn unzip_low_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn unzip_high_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn select_u32x8(self, a: mask32x8<Self>, b: u32x8<Self>, c: u32x8<Self>) -> u32x8<Self>;
    fn min_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn max_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x8<Self>;
    fn combine_u32x8(self, a: u32x8<Self>, b: u32x8<Self>) -> u32x16<Self>;
    fn split_u32x8(self, a: u32x8<Self>) -> (u32x4<Self>, u32x4<Self>);
    fn reinterpret_u8_u32x8(self, a: u32x8<Self>) -> u8x32<Self>;
    fn cvt_f32_u32x8(self, a: u32x8<Self>) -> f32x8<Self>;
    fn splat_mask32x8(self, val: i32) -> mask32x8<Self>;
    fn not_mask32x8(self, a: mask32x8<Self>) -> mask32x8<Self>;
    fn and_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn or_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn xor_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn select_mask32x8(
        self,
        a: mask32x8<Self>,
        b: mask32x8<Self>,
        c: mask32x8<Self>,
    ) -> mask32x8<Self>;
    fn simd_eq_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x8<Self>;
    fn combine_mask32x8(self, a: mask32x8<Self>, b: mask32x8<Self>) -> mask32x16<Self>;
    fn split_mask32x8(self, a: mask32x8<Self>) -> (mask32x4<Self>, mask32x4<Self>);
    fn splat_f64x4(self, val: f64) -> f64x4<Self>;
    fn abs_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn neg_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn sqrt_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn add_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn sub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn mul_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn div_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn copysign_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn simd_eq_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_lt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_le_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_ge_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn simd_gt_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> mask64x4<Self>;
    fn zip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn zip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn unzip_low_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn unzip_high_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn max_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn max_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn min_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn min_precise_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x4<Self>;
    fn madd_f64x4(self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>) -> f64x4<Self>;
    fn msub_f64x4(self, a: f64x4<Self>, b: f64x4<Self>, c: f64x4<Self>) -> f64x4<Self>;
    fn floor_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn fract_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn trunc_f64x4(self, a: f64x4<Self>) -> f64x4<Self>;
    fn select_f64x4(self, a: mask64x4<Self>, b: f64x4<Self>, c: f64x4<Self>) -> f64x4<Self>;
    fn combine_f64x4(self, a: f64x4<Self>, b: f64x4<Self>) -> f64x8<Self>;
    fn split_f64x4(self, a: f64x4<Self>) -> (f64x2<Self>, f64x2<Self>);
    fn reinterpret_f32_f64x4(self, a: f64x4<Self>) -> f32x8<Self>;
    fn splat_mask64x4(self, val: i64) -> mask64x4<Self>;
    fn not_mask64x4(self, a: mask64x4<Self>) -> mask64x4<Self>;
    fn and_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn or_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn xor_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn select_mask64x4(
        self,
        a: mask64x4<Self>,
        b: mask64x4<Self>,
        c: mask64x4<Self>,
    ) -> mask64x4<Self>;
    fn simd_eq_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x4<Self>;
    fn combine_mask64x4(self, a: mask64x4<Self>, b: mask64x4<Self>) -> mask64x8<Self>;
    fn split_mask64x4(self, a: mask64x4<Self>) -> (mask64x2<Self>, mask64x2<Self>);
    fn splat_f32x16(self, val: f32) -> f32x16<Self>;
    fn abs_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn neg_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn sqrt_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn add_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn sub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn mul_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn div_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn copysign_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn simd_eq_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_lt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_le_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_ge_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn simd_gt_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> mask32x16<Self>;
    fn zip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn zip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn unzip_low_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn unzip_high_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn max_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn max_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn min_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn min_precise_f32x16(self, a: f32x16<Self>, b: f32x16<Self>) -> f32x16<Self>;
    fn madd_f32x16(self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>) -> f32x16<Self>;
    fn msub_f32x16(self, a: f32x16<Self>, b: f32x16<Self>, c: f32x16<Self>) -> f32x16<Self>;
    fn floor_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn fract_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn trunc_f32x16(self, a: f32x16<Self>) -> f32x16<Self>;
    fn select_f32x16(self, a: mask32x16<Self>, b: f32x16<Self>, c: f32x16<Self>) -> f32x16<Self>;
    fn split_f32x16(self, a: f32x16<Self>) -> (f32x8<Self>, f32x8<Self>);
    fn reinterpret_f64_f32x16(self, a: f32x16<Self>) -> f64x8<Self>;
    fn reinterpret_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>;
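    // The *_interleaved_128 loads and stores are assumed to move data in
    // 128-bit blocks, interleaving or de-interleaving lanes across the blocks
    // (an assumption based on the `_128` suffix).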
    fn load_interleaved_128_f32x16(self, src: &[f32; 16]) -> f32x16<Self>;
    fn store_interleaved_128_f32x16(self, a: f32x16<Self>, dest: &mut [f32; 16]);
    fn reinterpret_u8_f32x16(self, a: f32x16<Self>) -> u8x64<Self>;
    fn reinterpret_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>;
    fn cvt_u32_f32x16(self, a: f32x16<Self>) -> u32x16<Self>;
    fn cvt_i32_f32x16(self, a: f32x16<Self>) -> i32x16<Self>;
    fn splat_i8x64(self, val: i8) -> i8x64<Self>;
    fn not_i8x64(self, a: i8x64<Self>) -> i8x64<Self>;
    fn add_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn sub_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn mul_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn and_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn or_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn xor_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn shr_i8x64(self, a: i8x64<Self>, shift: u32) -> i8x64<Self>;
    fn shrv_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn shl_i8x64(self, a: i8x64<Self>, shift: u32) -> i8x64<Self>;
    fn simd_eq_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_lt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_le_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_ge_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn simd_gt_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> mask8x64<Self>;
    fn zip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn zip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn unzip_low_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn unzip_high_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn select_i8x64(self, a: mask8x64<Self>, b: i8x64<Self>, c: i8x64<Self>) -> i8x64<Self>;
    fn min_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn max_i8x64(self, a: i8x64<Self>, b: i8x64<Self>) -> i8x64<Self>;
    fn split_i8x64(self, a: i8x64<Self>) -> (i8x32<Self>, i8x32<Self>);
    fn neg_i8x64(self, a: i8x64<Self>) -> i8x64<Self>;
    fn reinterpret_u8_i8x64(self, a: i8x64<Self>) -> u8x64<Self>;
    fn reinterpret_u32_i8x64(self, a: i8x64<Self>) -> u32x16<Self>;
    fn splat_u8x64(self, val: u8) -> u8x64<Self>;
    fn not_u8x64(self, a: u8x64<Self>) -> u8x64<Self>;
    fn add_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn sub_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn mul_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn and_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn or_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn xor_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn shr_u8x64(self, a: u8x64<Self>, shift: u32) -> u8x64<Self>;
    fn shrv_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn shl_u8x64(self, a: u8x64<Self>, shift: u32) -> u8x64<Self>;
    fn simd_eq_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_lt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_le_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_ge_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn simd_gt_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> mask8x64<Self>;
    fn zip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn zip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn unzip_low_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn unzip_high_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn select_u8x64(self, a: mask8x64<Self>, b: u8x64<Self>, c: u8x64<Self>) -> u8x64<Self>;
    fn min_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn max_u8x64(self, a: u8x64<Self>, b: u8x64<Self>) -> u8x64<Self>;
    fn split_u8x64(self, a: u8x64<Self>) -> (u8x32<Self>, u8x32<Self>);
    fn load_interleaved_128_u8x64(self, src: &[u8; 64]) -> u8x64<Self>;
    fn store_interleaved_128_u8x64(self, a: u8x64<Self>, dest: &mut [u8; 64]);
    fn reinterpret_u32_u8x64(self, a: u8x64<Self>) -> u32x16<Self>;
    fn splat_mask8x64(self, val: i8) -> mask8x64<Self>;
    fn not_mask8x64(self, a: mask8x64<Self>) -> mask8x64<Self>;
    fn and_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn or_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn xor_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn select_mask8x64(
        self,
        a: mask8x64<Self>,
        b: mask8x64<Self>,
        c: mask8x64<Self>,
    ) -> mask8x64<Self>;
    fn simd_eq_mask8x64(self, a: mask8x64<Self>, b: mask8x64<Self>) -> mask8x64<Self>;
    fn split_mask8x64(self, a: mask8x64<Self>) -> (mask8x32<Self>, mask8x32<Self>);
    fn splat_i16x32(self, val: i16) -> i16x32<Self>;
    fn not_i16x32(self, a: i16x32<Self>) -> i16x32<Self>;
    fn add_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn sub_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn mul_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn and_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn or_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn xor_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn shr_i16x32(self, a: i16x32<Self>, shift: u32) -> i16x32<Self>;
    fn shrv_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn shl_i16x32(self, a: i16x32<Self>, shift: u32) -> i16x32<Self>;
    fn simd_eq_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_lt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_le_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_ge_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn simd_gt_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> mask16x32<Self>;
    fn zip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn zip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn unzip_low_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn unzip_high_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn select_i16x32(self, a: mask16x32<Self>, b: i16x32<Self>, c: i16x32<Self>) -> i16x32<Self>;
    fn min_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn max_i16x32(self, a: i16x32<Self>, b: i16x32<Self>) -> i16x32<Self>;
    fn split_i16x32(self, a: i16x32<Self>) -> (i16x16<Self>, i16x16<Self>);
    fn neg_i16x32(self, a: i16x32<Self>) -> i16x32<Self>;
    fn reinterpret_u8_i16x32(self, a: i16x32<Self>) -> u8x64<Self>;
    fn reinterpret_u32_i16x32(self, a: i16x32<Self>) -> u32x16<Self>;
    fn splat_u16x32(self, val: u16) -> u16x32<Self>;
    fn not_u16x32(self, a: u16x32<Self>) -> u16x32<Self>;
    fn add_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn sub_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn mul_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn and_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn or_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn xor_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn shr_u16x32(self, a: u16x32<Self>, shift: u32) -> u16x32<Self>;
    fn shrv_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn shl_u16x32(self, a: u16x32<Self>, shift: u32) -> u16x32<Self>;
    fn simd_eq_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_lt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_le_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_ge_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn simd_gt_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> mask16x32<Self>;
    fn zip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn zip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn unzip_low_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn unzip_high_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn select_u16x32(self, a: mask16x32<Self>, b: u16x32<Self>, c: u16x32<Self>) -> u16x32<Self>;
    fn min_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn max_u16x32(self, a: u16x32<Self>, b: u16x32<Self>) -> u16x32<Self>;
    fn split_u16x32(self, a: u16x32<Self>) -> (u16x16<Self>, u16x16<Self>);
    fn load_interleaved_128_u16x32(self, src: &[u16; 32]) -> u16x32<Self>;
    fn store_interleaved_128_u16x32(self, a: u16x32<Self>, dest: &mut [u16; 32]);
    fn narrow_u16x32(self, a: u16x32<Self>) -> u8x32<Self>;
    fn reinterpret_u8_u16x32(self, a: u16x32<Self>) -> u8x64<Self>;
    fn reinterpret_u32_u16x32(self, a: u16x32<Self>) -> u32x16<Self>;
    fn splat_mask16x32(self, val: i16) -> mask16x32<Self>;
    fn not_mask16x32(self, a: mask16x32<Self>) -> mask16x32<Self>;
    fn and_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn or_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn xor_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn select_mask16x32(
        self,
        a: mask16x32<Self>,
        b: mask16x32<Self>,
        c: mask16x32<Self>,
    ) -> mask16x32<Self>;
    fn simd_eq_mask16x32(self, a: mask16x32<Self>, b: mask16x32<Self>) -> mask16x32<Self>;
    fn split_mask16x32(self, a: mask16x32<Self>) -> (mask16x16<Self>, mask16x16<Self>);
    fn splat_i32x16(self, val: i32) -> i32x16<Self>;
    fn not_i32x16(self, a: i32x16<Self>) -> i32x16<Self>;
    fn add_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn sub_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn mul_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn and_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn or_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn xor_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn shr_i32x16(self, a: i32x16<Self>, shift: u32) -> i32x16<Self>;
    fn shrv_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn shl_i32x16(self, a: i32x16<Self>, shift: u32) -> i32x16<Self>;
    fn simd_eq_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_lt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_le_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_ge_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn simd_gt_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> mask32x16<Self>;
    fn zip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn zip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn unzip_low_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn unzip_high_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn select_i32x16(self, a: mask32x16<Self>, b: i32x16<Self>, c: i32x16<Self>) -> i32x16<Self>;
    fn min_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn max_i32x16(self, a: i32x16<Self>, b: i32x16<Self>) -> i32x16<Self>;
    fn split_i32x16(self, a: i32x16<Self>) -> (i32x8<Self>, i32x8<Self>);
    fn neg_i32x16(self, a: i32x16<Self>) -> i32x16<Self>;
    fn reinterpret_u8_i32x16(self, a: i32x16<Self>) -> u8x64<Self>;
    fn reinterpret_u32_i32x16(self, a: i32x16<Self>) -> u32x16<Self>;
    fn cvt_f32_i32x16(self, a: i32x16<Self>) -> f32x16<Self>;
    fn splat_u32x16(self, val: u32) -> u32x16<Self>;
    fn not_u32x16(self, a: u32x16<Self>) -> u32x16<Self>;
    fn add_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn sub_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn mul_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn and_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn or_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn xor_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn shr_u32x16(self, a: u32x16<Self>, shift: u32) -> u32x16<Self>;
    fn shrv_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn shl_u32x16(self, a: u32x16<Self>, shift: u32) -> u32x16<Self>;
    fn simd_eq_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_lt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_le_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_ge_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn simd_gt_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> mask32x16<Self>;
    fn zip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn zip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn unzip_low_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn unzip_high_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn select_u32x16(self, a: mask32x16<Self>, b: u32x16<Self>, c: u32x16<Self>) -> u32x16<Self>;
    fn min_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn max_u32x16(self, a: u32x16<Self>, b: u32x16<Self>) -> u32x16<Self>;
    fn split_u32x16(self, a: u32x16<Self>) -> (u32x8<Self>, u32x8<Self>);
    fn load_interleaved_128_u32x16(self, src: &[u32; 16]) -> u32x16<Self>;
863    fn store_interleaved_128_u32x16(self, a: u32x16<Self>, dest: &mut [u32; 16usize]) -> ();
864    fn reinterpret_u8_u32x16(self, a: u32x16<Self>) -> u8x64<Self>;
865    fn cvt_f32_u32x16(self, a: u32x16<Self>) -> f32x16<Self>;
866    fn splat_mask32x16(self, val: i32) -> mask32x16<Self>;
867    fn not_mask32x16(self, a: mask32x16<Self>) -> mask32x16<Self>;
868    fn and_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
869    fn or_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
870    fn xor_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
871    fn select_mask32x16(
872        self,
873        a: mask32x16<Self>,
874        b: mask32x16<Self>,
875        c: mask32x16<Self>,
876    ) -> mask32x16<Self>;
877    fn simd_eq_mask32x16(self, a: mask32x16<Self>, b: mask32x16<Self>) -> mask32x16<Self>;
878    fn split_mask32x16(self, a: mask32x16<Self>) -> (mask32x8<Self>, mask32x8<Self>);
879    fn splat_f64x8(self, val: f64) -> f64x8<Self>;
880    fn abs_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
881    fn neg_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
882    fn sqrt_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
883    fn add_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
884    fn sub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
885    fn mul_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
886    fn div_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
887    fn copysign_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
888    fn simd_eq_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
889    fn simd_lt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
890    fn simd_le_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
891    fn simd_ge_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
892    fn simd_gt_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> mask64x8<Self>;
893    fn zip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
894    fn zip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
895    fn unzip_low_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
896    fn unzip_high_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
897    fn max_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
898    fn max_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
899    fn min_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
900    fn min_precise_f64x8(self, a: f64x8<Self>, b: f64x8<Self>) -> f64x8<Self>;
901    fn madd_f64x8(self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>) -> f64x8<Self>;
902    fn msub_f64x8(self, a: f64x8<Self>, b: f64x8<Self>, c: f64x8<Self>) -> f64x8<Self>;
903    fn floor_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
904    fn fract_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
905    fn trunc_f64x8(self, a: f64x8<Self>) -> f64x8<Self>;
906    fn select_f64x8(self, a: mask64x8<Self>, b: f64x8<Self>, c: f64x8<Self>) -> f64x8<Self>;
907    fn split_f64x8(self, a: f64x8<Self>) -> (f64x4<Self>, f64x4<Self>);
908    fn reinterpret_f32_f64x8(self, a: f64x8<Self>) -> f32x16<Self>;
909    fn splat_mask64x8(self, val: i64) -> mask64x8<Self>;
910    fn not_mask64x8(self, a: mask64x8<Self>) -> mask64x8<Self>;
911    fn and_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
912    fn or_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
913    fn xor_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
914    fn select_mask64x8(
915        self,
916        a: mask64x8<Self>,
917        b: mask64x8<Self>,
918        c: mask64x8<Self>,
919    ) -> mask64x8<Self>;
920    fn simd_eq_mask64x8(self, a: mask64x8<Self>, b: mask64x8<Self>) -> mask64x8<Self>;
921    fn split_mask64x8(self, a: mask64x8<Self>) -> (mask64x4<Self>, mask64x4<Self>);
922}
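// Illustrative sketch, not part of the generated API: a small kernel written
// directly against the `Simd` trait, using only methods declared above. The
// function name is invented for the example, and how a concrete `S` value is
// obtained (level detection and dispatch) is assumed to happen elsewhere.
#[allow(dead_code)]
fn clamp01_mul_add<S: Simd>(simd: S, a: f64x8<S>, b: f64x8<S>, c: f64x8<S>) -> f64x8<S> {
    // acc = a * b + c, spelled with the explicit per-width methods.
    let acc = simd.add_f64x8(simd.mul_f64x8(a, b), c);
    // Clamp to [0, 1] using the `*_precise` min/max variants.
    let lo = simd.splat_f64x8(0.0);
    let hi = simd.splat_f64x8(1.0);
    simd.max_precise_f64x8(simd.min_precise_f64x8(acc, hi), lo)
}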
pub trait SimdBase<Element: SimdElement, S: Simd>:
    Copy + Sync + Send + 'static + crate::Bytes + SimdFrom<Element, S>
{
    #[doc = r" The number of elements (lanes) in the vector."]
    const N: usize;
    #[doc = r" A SIMD vector mask with the same number of elements."]
    #[doc = r""]
    #[doc = r" The mask element is represented as an integer which is"]
    #[doc = r" all-0 for `false` and all-1 for `true`. When we get deep"]
    #[doc = r" into AVX-512, we need to think about predication masks."]
    #[doc = r""]
    #[doc = r" One possibility to consider is that the SIMD trait grows"]
    #[doc = r" `maskAxB` associated types."]
    type Mask: SimdMask<Element::Mask, S>;
    #[doc = r" A 128-bit SIMD vector of the same scalar type."]
    type Block: SimdBase<Element, S>;
    #[doc = r" Get the [`Simd`] implementation associated with this type."]
    fn witness(&self) -> S;
    #[doc = r" View the vector's elements as a slice."]
    fn as_slice(&self) -> &[Element];
    #[doc = r" View the vector's elements as a mutable slice."]
    fn as_mut_slice(&mut self) -> &mut [Element];
    #[doc = r" Create a SIMD vector from a slice."]
    #[doc = r""]
    #[doc = r" The slice must be the proper width, i.e. `Self::N` elements."]
    fn from_slice(simd: S, slice: &[Element]) -> Self;
    #[doc = r" Create a vector with all elements set to `val`."]
    fn splat(simd: S, val: Element) -> Self;
    #[doc = r" Create a vector by repeating a 128-bit block across it."]
    fn block_splat(block: Self::Block) -> Self;
}
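// Illustrative sketch, not part of the generated API: `SimdBase` alone is
// enough to move element data in and out of vectors. The function name and
// the extra `Add` bound are assumptions made for the example.
#[allow(dead_code)]
fn double_elements<Element, S, V>(simd: S, data: &[Element]) -> V
where
    Element: SimdElement,
    S: Simd,
    V: SimdBase<Element, S> + core::ops::Add<Output = V>,
{
    // `data` must be the proper width for `V` (see `from_slice` above).
    let v = V::from_slice(simd, data);
    v + v
}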
pub trait SimdFloat<Element: SimdElement, S: Simd>:
    SimdBase<Element, S>
    + core::ops::Neg<Output = Self>
    + core::ops::Add<Output = Self>
    + core::ops::AddAssign
    + core::ops::Add<Element, Output = Self>
    + core::ops::AddAssign<Element>
    + core::ops::Sub<Output = Self>
    + core::ops::SubAssign
    + core::ops::Sub<Element, Output = Self>
    + core::ops::SubAssign<Element>
    + core::ops::Mul<Output = Self>
    + core::ops::MulAssign
    + core::ops::Mul<Element, Output = Self>
    + core::ops::MulAssign<Element>
    + core::ops::Div<Output = Self>
    + core::ops::DivAssign
    + core::ops::Div<Element, Output = Self>
    + core::ops::DivAssign<Element>
{
    #[doc = r" Convert to an integer vector, truncating toward zero."]
    #[inline(always)]
    fn to_int<T: SimdCvtTruncate<Self>>(self) -> T {
        T::truncate_from(self)
    }
    fn abs(self) -> Self;
    fn sqrt(self) -> Self;
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn max(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn min(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> Self;
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> Self;
    fn floor(self) -> Self;
    fn fract(self) -> Self;
    fn trunc(self) -> Self;
}
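// Illustrative sketch, not part of the generated API: the element-typed
// operator bounds on `SimdFloat` let scalars mix directly with vectors, so a
// polynomial can be written in ordinary arithmetic. The function name and
// coefficients are arbitrary example values.
#[allow(dead_code)]
fn example_poly<S: Simd, F: SimdFloat<f32, S>>(x: F) -> F {
    // Evaluate 0.25 * x^2 - 0.5 * x + 1.0 by Horner's rule.
    (x * 0.25f32 - 0.5f32) * x + 1.0f32
}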
pub trait SimdInt<Element: SimdElement, S: Simd>:
    SimdBase<Element, S>
    + core::ops::Add<Output = Self>
    + core::ops::AddAssign
    + core::ops::Add<Element, Output = Self>
    + core::ops::AddAssign<Element>
    + core::ops::Sub<Output = Self>
    + core::ops::SubAssign
    + core::ops::Sub<Element, Output = Self>
    + core::ops::SubAssign<Element>
    + core::ops::Mul<Output = Self>
    + core::ops::MulAssign
    + core::ops::Mul<Element, Output = Self>
    + core::ops::MulAssign<Element>
    + core::ops::BitAnd<Output = Self>
    + core::ops::BitAndAssign
    + core::ops::BitAnd<Element, Output = Self>
    + core::ops::BitAndAssign<Element>
    + core::ops::BitOr<Output = Self>
    + core::ops::BitOrAssign
    + core::ops::BitOr<Element, Output = Self>
    + core::ops::BitOrAssign<Element>
    + core::ops::BitXor<Output = Self>
    + core::ops::BitXorAssign
    + core::ops::BitXor<Element, Output = Self>
    + core::ops::BitXorAssign<Element>
    + core::ops::Shl<u32, Output = Self>
    + core::ops::ShlAssign<u32>
    + core::ops::Shr<Output = Self>
    + core::ops::ShrAssign
    + core::ops::Shr<u32, Output = Self>
    + core::ops::ShrAssign<u32>
{
    #[doc = r" Convert the integer elements to floating point."]
    #[inline(always)]
    fn to_float<T: SimdCvtFloat<Self>>(self) -> T {
        T::float_from(self)
    }
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn min(self, rhs: impl SimdInto<Self, S>) -> Self;
    fn max(self, rhs: impl SimdInto<Self, S>) -> Self;
}
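// Illustrative sketch, not part of the generated API: the bitwise and shift
// bounds on `SimdInt` support the classic overflow-free floor average,
// (a & b) + ((a ^ b) >> 1), without widening. The function name is invented
// for the example.
#[allow(dead_code)]
fn average_floor<S: Simd, V: SimdInt<u16, S>>(a: V, b: V) -> V {
    // a + b == (a ^ b) + 2 * (a & b), so halving the xor term and adding the
    // carry term computes floor((a + b) / 2) without overflowing the lanes.
    (a & b) + ((a ^ b) >> 1u32)
}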
pub trait SimdMask<Element: SimdElement, S: Simd>:
    SimdBase<Element, S>
    + core::ops::Not<Output = Self>
    + core::ops::BitAnd<Output = Self>
    + core::ops::BitOr<Output = Self>
    + core::ops::BitXor<Output = Self>
{
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> Self::Mask;
}
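// Illustrative sketch, not part of the generated API: comparisons produce
// masks, masks combine (here via the concrete `and_mask64x8`), and a mask
// drives lane selection. This assumes `select_f64x8` takes lanes from its
// second argument where the mask is set; the function name is invented.
#[allow(dead_code)]
fn zero_outside_range<S: Simd>(simd: S, x: f64x8<S>, lo: f64x8<S>, hi: f64x8<S>) -> f64x8<S> {
    // Lanes where lo <= x <= hi keep their value; all others become 0.0.
    let inside = simd.and_mask64x8(simd.simd_ge_f64x8(x, lo), simd.simd_le_f64x8(x, hi));
    simd.select_f64x8(inside, x, simd.splat_f64x8(0.0))
}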