1use crate::{Bytes, Select, Simd, SimdCvtFloat, SimdCvtTruncate, SimdFrom, SimdInto};
/// 128-bit SIMD vector of four `f32` lanes.
///
/// `val` holds the lane values; `simd` is the capability token (`S: Simd`)
/// that every operation delegates to for the backend implementation.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct f32x4<S: Simd> {
    pub val: [f32; 4],
    pub simd: S,
}
13impl<S: Simd> SimdFrom<[f32; 4], S> for f32x4<S> {
14 #[inline(always)]
15 fn simd_from(val: [f32; 4], simd: S) -> Self {
16 Self {
17 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
18 simd,
19 }
20 }
21}
/// Extracts the lane array, discarding the `simd` token.
impl<S: Simd> From<f32x4<S>> for [f32; 4] {
    #[inline(always)]
    fn from(value: f32x4<S>) -> Self {
        value.val
    }
}
/// Lets an `f32x4` be used anywhere a `[f32; 4]` slice/array is expected.
impl<S: Simd> core::ops::Deref for f32x4<S> {
    type Target = [f32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for f32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all four lanes via the backend.
impl<S: Simd> SimdFrom<f32, S> for f32x4<S> {
    #[inline(always)]
    fn simd_from(value: f32, simd: S) -> Self {
        simd.splat_f32x4(value)
    }
}
/// Lane-wise select between `if_true` and `if_false`, driven by this mask.
impl<S: Simd> Select<f32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: f32x4<S>, if_false: f32x4<S>) -> f32x4<S> {
        self.simd.select_f32x4(self, if_true, if_false)
    }
}
/// Reinterprets the vector's storage as raw bytes and back.
impl<S: Simd> Bytes for f32x4<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f32; 4]` and `[u8; 16]` have the same size (16 bytes)
        // and `u8` has no invalid bit patterns, so the transmute is sound.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size plain-old-data transmute; every 32-bit pattern
        // is a valid `f32`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `f32x4`; every method delegates to the `Simd`
/// backend carried in `self.simd`. `rhs` arguments accept anything
/// convertible via `SimdInto` (e.g. a scalar, which is splatted).
impl<S: Simd> f32x4<S> {
    // --- unary ops ---
    #[inline(always)]
    pub fn abs(self) -> f32x4<S> {
        self.simd.abs_f32x4(self)
    }
    #[inline(always)]
    pub fn neg(self) -> f32x4<S> {
        self.simd.neg_f32x4(self)
    }
    #[inline(always)]
    pub fn sqrt(self) -> f32x4<S> {
        self.simd.sqrt_f32x4(self)
    }
    // --- arithmetic ---
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.add_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.sub_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.mul_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.div_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.copysign_f32x4(self, rhs.simd_into(self.simd))
    }
    // --- comparisons (return a lane mask) ---
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_f32x4(self, rhs.simd_into(self.simd))
    }
    // --- min/max (`_precise` variants presumably define NaN handling more
    // strictly — semantics live in the backend; confirm there) ---
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    // --- rounding ---
    #[inline(always)]
    pub fn floor(self) -> f32x4<S> {
        self.simd.floor_f32x4(self)
    }
    #[inline(always)]
    pub fn fract(self) -> f32x4<S> {
        self.simd.fract_f32x4(self)
    }
    #[inline(always)]
    pub fn trunc(self) -> f32x4<S> {
        self.simd.trunc_f32x4(self)
    }
    // --- widening / reinterpret / convert ---
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.combine_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn reinterpret_f64(self) -> f64x2<S> {
        self.simd.reinterpret_f64_f32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_i32(self) -> i32x4<S> {
        self.simd.reinterpret_i32_f32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_f32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_f32x4(self)
    }
    #[inline(always)]
    pub fn cvt_u32(self) -> u32x4<S> {
        self.simd.cvt_u32_f32x4(self)
    }
    #[inline(always)]
    pub fn cvt_i32(self) -> i32x4<S> {
        self.simd.cvt_i32_f32x4(self)
    }
}
/// Generic vector interface: lane count, mask type, and slice access.
impl<S: Simd> crate::SimdBase<f32, S> for f32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = f32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[f32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f32] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f32]) -> Self {
        // Panics if `slice.len() != 4` (`copy_from_slice` requires equal lengths).
        let mut val = [0.0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: f32) -> Self {
        simd.splat_f32x4(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is its own block, so splatting a block is the identity.
        block
    }
}
/// Float-specific trait surface; mirrors the inherent methods above and
/// delegates identically to the backend.
impl<S: Simd> crate::SimdFloat<f32, S> for f32x4<S> {
    #[inline(always)]
    fn abs(self) -> f32x4<S> {
        self.simd.abs_f32x4(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f32x4<S> {
        self.simd.sqrt_f32x4(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.copysign_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_f32x4(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes, as defined by the backend.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.zip_low_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.zip_high_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.unzip_low_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.unzip_high_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    // Fused multiply-add / multiply-subtract; operand order is defined by
    // the backend's `madd_f32x4` / `msub_f32x4`.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd
            .madd_f32x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd
            .msub_f32x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f32x4<S> {
        self.simd.floor_f32x4(self)
    }
    #[inline(always)]
    fn fract(self) -> f32x4<S> {
        self.simd.fract_f32x4(self)
    }
    #[inline(always)]
    fn trunc(self) -> f32x4<S> {
        self.simd.trunc_f32x4(self)
    }
}
299impl<S: Simd> SimdCvtFloat<u32x4<S>> for f32x4<S> {
300 fn float_from(x: u32x4<S>) -> Self {
301 x.simd.cvt_f32_u32x4(x)
302 }
303}
304impl<S: Simd> SimdCvtFloat<i32x4<S>> for f32x4<S> {
305 fn float_from(x: i32x4<S>) -> Self {
306 x.simd.cvt_f32_i32x4(x)
307 }
308}
/// 128-bit SIMD vector of sixteen `i8` lanes; `simd` is the backend token.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct i8x16<S: Simd> {
    pub val: [i8; 16],
    pub simd: S,
}
315impl<S: Simd> SimdFrom<[i8; 16], S> for i8x16<S> {
316 #[inline(always)]
317 fn simd_from(val: [i8; 16], simd: S) -> Self {
318 Self {
319 val: [
320 val[0usize],
321 val[1usize],
322 val[2usize],
323 val[3usize],
324 val[4usize],
325 val[5usize],
326 val[6usize],
327 val[7usize],
328 val[8usize],
329 val[9usize],
330 val[10usize],
331 val[11usize],
332 val[12usize],
333 val[13usize],
334 val[14usize],
335 val[15usize],
336 ],
337 simd,
338 }
339 }
340}
/// Extracts the lane array, discarding the `simd` token.
impl<S: Simd> From<i8x16<S>> for [i8; 16] {
    #[inline(always)]
    fn from(value: i8x16<S>) -> Self {
        value.val
    }
}
/// Lets an `i8x16` be used anywhere a `[i8; 16]` slice/array is expected.
impl<S: Simd> core::ops::Deref for i8x16<S> {
    type Target = [i8; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i8x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all sixteen lanes via the backend.
impl<S: Simd> SimdFrom<i8, S> for i8x16<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_i8x16(value)
    }
}
/// Lane-wise select between `if_true` and `if_false`, driven by this mask.
impl<S: Simd> Select<i8x16<S>> for mask8x16<S> {
    #[inline(always)]
    fn select(self, if_true: i8x16<S>, if_false: i8x16<S>) -> i8x16<S> {
        self.simd.select_i8x16(self, if_true, if_false)
    }
}
/// Reinterprets the vector's storage as raw bytes and back.
impl<S: Simd> Bytes for i8x16<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 16]` and `[u8; 16]` have identical size and layout,
        // and `u8` has no invalid bit patterns.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size integer transmute; every bit pattern is a valid `i8`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `i8x16`; every method delegates to the backend.
/// `rhs` arguments accept anything convertible via `SimdInto`.
impl<S: Simd> i8x16<S> {
    // --- bitwise / arithmetic ---
    #[inline(always)]
    pub fn not(self) -> i8x16<S> {
        self.simd.not_i8x16(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.add_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.sub_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.mul_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.and_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.or_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.xor_i8x16(self, rhs.simd_into(self.simd))
    }
    // Shift right by a uniform amount across all lanes.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i8x16<S> {
        self.simd.shr_i8x16(self, shift)
    }
    // --- comparisons (return a lane mask) ---
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.min_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.max_i8x16(self, rhs.simd_into(self.simd))
    }
    // --- widening / reinterpret ---
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.combine_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_i8x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_i8x16(self)
    }
}
/// Generic vector interface: lane count, mask type, and slice access.
impl<S: Simd> crate::SimdBase<i8, S> for i8x16<S> {
    const N: usize = 16;
    type Mask = mask8x16<S>;
    type Block = i8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        // Panics if `slice.len() != 16` (`copy_from_slice` requires equal lengths).
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_i8x16(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is its own block, so splatting a block is the identity.
        block
    }
}
/// Integer trait surface; mirrors the inherent methods and delegates
/// identically to the backend.
impl<S: Simd> crate::SimdInt<i8, S> for i8x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_i8x16(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes, as defined by the backend.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.zip_low_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.zip_high_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.unzip_low_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.unzip_high_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.min_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.max_i8x16(self, rhs.simd_into(self.simd))
    }
}
/// 128-bit SIMD vector of sixteen `u8` lanes; `simd` is the backend token.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct u8x16<S: Simd> {
    pub val: [u8; 16],
    pub simd: S,
}
546impl<S: Simd> SimdFrom<[u8; 16], S> for u8x16<S> {
547 #[inline(always)]
548 fn simd_from(val: [u8; 16], simd: S) -> Self {
549 Self {
550 val: [
551 val[0usize],
552 val[1usize],
553 val[2usize],
554 val[3usize],
555 val[4usize],
556 val[5usize],
557 val[6usize],
558 val[7usize],
559 val[8usize],
560 val[9usize],
561 val[10usize],
562 val[11usize],
563 val[12usize],
564 val[13usize],
565 val[14usize],
566 val[15usize],
567 ],
568 simd,
569 }
570 }
571}
/// Extracts the lane array, discarding the `simd` token.
impl<S: Simd> From<u8x16<S>> for [u8; 16] {
    #[inline(always)]
    fn from(value: u8x16<S>) -> Self {
        value.val
    }
}
/// Lets a `u8x16` be used anywhere a `[u8; 16]` slice/array is expected.
impl<S: Simd> core::ops::Deref for u8x16<S> {
    type Target = [u8; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for u8x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all sixteen lanes via the backend.
impl<S: Simd> SimdFrom<u8, S> for u8x16<S> {
    #[inline(always)]
    fn simd_from(value: u8, simd: S) -> Self {
        simd.splat_u8x16(value)
    }
}
/// Lane-wise select between `if_true` and `if_false`, driven by this mask.
impl<S: Simd> Select<u8x16<S>> for mask8x16<S> {
    #[inline(always)]
    fn select(self, if_true: u8x16<S>, if_false: u8x16<S>) -> u8x16<S> {
        self.simd.select_u8x16(self, if_true, if_false)
    }
}
603impl<S: Simd> Bytes for u8x16<S> {
604 type Bytes = u8x16<S>;
605 #[inline(always)]
606 fn to_bytes(self) -> Self::Bytes {
607 unsafe {
608 u8x16 {
609 val: core::mem::transmute(self.val),
610 simd: self.simd,
611 }
612 }
613 }
614 #[inline(always)]
615 fn from_bytes(value: Self::Bytes) -> Self {
616 unsafe {
617 Self {
618 val: core::mem::transmute(value.val),
619 simd: value.simd,
620 }
621 }
622 }
623}
/// Lane-wise operations on `u8x16`; every method delegates to the backend.
/// `rhs` arguments accept anything convertible via `SimdInto`.
impl<S: Simd> u8x16<S> {
    // --- bitwise / arithmetic ---
    #[inline(always)]
    pub fn not(self) -> u8x16<S> {
        self.simd.not_u8x16(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.add_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.sub_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.mul_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.and_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.or_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.xor_u8x16(self, rhs.simd_into(self.simd))
    }
    // Shift right by a uniform amount across all lanes.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u8x16<S> {
        self.simd.shr_u8x16(self, shift)
    }
    // --- comparisons (return a lane mask) ---
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.min_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.max_u8x16(self, rhs.simd_into(self.simd))
    }
    // --- widening / reinterpret ---
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.combine_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_u8x16(self)
    }
}
/// Generic vector interface: lane count, mask type, and slice access.
impl<S: Simd> crate::SimdBase<u8, S> for u8x16<S> {
    const N: usize = 16;
    type Mask = mask8x16<S>;
    type Block = u8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u8]) -> Self {
        // Panics if `slice.len() != 16` (`copy_from_slice` requires equal lengths).
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u8) -> Self {
        simd.splat_u8x16(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is its own block, so splatting a block is the identity.
        block
    }
}
/// Integer trait surface; mirrors the inherent methods and delegates
/// identically to the backend.
impl<S: Simd> crate::SimdInt<u8, S> for u8x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_u8x16(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes, as defined by the backend.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.zip_low_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.zip_high_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.unzip_low_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.unzip_high_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.min_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.max_u8x16(self, rhs.simd_into(self.simd))
    }
}
/// Lane mask for 8-bit x16 vectors, stored as sixteen `i8` lanes
/// (all-bits-set vs. zero by SIMD convention — semantics are defined by the
/// backend; confirm there). `simd` is the backend token.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask8x16<S: Simd> {
    pub val: [i8; 16],
    pub simd: S,
}
773impl<S: Simd> SimdFrom<[i8; 16], S> for mask8x16<S> {
774 #[inline(always)]
775 fn simd_from(val: [i8; 16], simd: S) -> Self {
776 Self {
777 val: [
778 val[0usize],
779 val[1usize],
780 val[2usize],
781 val[3usize],
782 val[4usize],
783 val[5usize],
784 val[6usize],
785 val[7usize],
786 val[8usize],
787 val[9usize],
788 val[10usize],
789 val[11usize],
790 val[12usize],
791 val[13usize],
792 val[14usize],
793 val[15usize],
794 ],
795 simd,
796 }
797 }
798}
/// Extracts the lane array, discarding the `simd` token.
impl<S: Simd> From<mask8x16<S>> for [i8; 16] {
    #[inline(always)]
    fn from(value: mask8x16<S>) -> Self {
        value.val
    }
}
/// Lets a `mask8x16` be used anywhere a `[i8; 16]` slice/array is expected.
impl<S: Simd> core::ops::Deref for mask8x16<S> {
    type Target = [i8; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask8x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all sixteen mask lanes via the backend.
impl<S: Simd> SimdFrom<i8, S> for mask8x16<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_mask8x16(value)
    }
}
/// Lane-wise select between two masks, driven by this mask.
impl<S: Simd> Select<mask8x16<S>> for mask8x16<S> {
    #[inline(always)]
    fn select(self, if_true: mask8x16<S>, if_false: mask8x16<S>) -> mask8x16<S> {
        self.simd.select_mask8x16(self, if_true, if_false)
    }
}
/// Reinterprets the mask's storage as raw bytes and back.
impl<S: Simd> Bytes for mask8x16<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 16]` and `[u8; 16]` have identical size and layout,
        // and `u8` has no invalid bit patterns.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size integer transmute; every bit pattern is a valid `i8`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise logical operations on `mask8x16`, delegating to the backend.
impl<S: Simd> mask8x16<S> {
    #[inline(always)]
    pub fn not(self) -> mask8x16<S> {
        self.simd.not_mask8x16(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.and_mask8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.or_mask8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.xor_mask8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_mask8x16(self, rhs.simd_into(self.simd))
    }
    // Concatenates two 16-lane masks into a 32-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.combine_mask8x16(self, rhs.simd_into(self.simd))
    }
}
/// Generic vector interface for the mask type (a mask is its own mask).
impl<S: Simd> crate::SimdBase<i8, S> for mask8x16<S> {
    const N: usize = 16;
    type Mask = mask8x16<S>;
    type Block = mask8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        // Panics if `slice.len() != 16` (`copy_from_slice` requires equal lengths).
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_mask8x16(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is its own block, so splatting a block is the identity.
        block
    }
}
/// Mask trait surface; delegates to the backend.
impl<S: Simd> crate::SimdMask<i8, S> for mask8x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_mask8x16(self, rhs.simd_into(self.simd))
    }
}
/// 128-bit SIMD vector of eight `i16` lanes; `simd` is the backend token.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct i16x8<S: Simd> {
    pub val: [i16; 8],
    pub simd: S,
}
916impl<S: Simd> SimdFrom<[i16; 8], S> for i16x8<S> {
917 #[inline(always)]
918 fn simd_from(val: [i16; 8], simd: S) -> Self {
919 Self {
920 val: [
921 val[0usize],
922 val[1usize],
923 val[2usize],
924 val[3usize],
925 val[4usize],
926 val[5usize],
927 val[6usize],
928 val[7usize],
929 ],
930 simd,
931 }
932 }
933}
/// Extracts the lane array, discarding the `simd` token.
impl<S: Simd> From<i16x8<S>> for [i16; 8] {
    #[inline(always)]
    fn from(value: i16x8<S>) -> Self {
        value.val
    }
}
/// Lets an `i16x8` be used anywhere a `[i16; 8]` slice/array is expected.
impl<S: Simd> core::ops::Deref for i16x8<S> {
    type Target = [i16; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i16x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all eight lanes via the backend.
impl<S: Simd> SimdFrom<i16, S> for i16x8<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_i16x8(value)
    }
}
/// Lane-wise select between `if_true` and `if_false`, driven by this mask.
impl<S: Simd> Select<i16x8<S>> for mask16x8<S> {
    #[inline(always)]
    fn select(self, if_true: i16x8<S>, if_false: i16x8<S>) -> i16x8<S> {
        self.simd.select_i16x8(self, if_true, if_false)
    }
}
/// Reinterprets the vector's storage as raw bytes and back.
impl<S: Simd> Bytes for i16x8<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 8]` and `[u8; 16]` have the same size (16 bytes)
        // and `u8` has no invalid bit patterns.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size integer transmute; every bit pattern is a valid `i16`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `i16x8`; every method delegates to the backend.
/// `rhs` arguments accept anything convertible via `SimdInto`.
impl<S: Simd> i16x8<S> {
    // --- bitwise / arithmetic ---
    #[inline(always)]
    pub fn not(self) -> i16x8<S> {
        self.simd.not_i16x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.add_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.sub_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.mul_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.and_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.or_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.xor_i16x8(self, rhs.simd_into(self.simd))
    }
    // Shift right by a uniform amount across all lanes.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i16x8<S> {
        self.simd.shr_i16x8(self, shift)
    }
    // --- comparisons (return a lane mask) ---
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.min_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.max_i16x8(self, rhs.simd_into(self.simd))
    }
    // --- widening / reinterpret ---
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
        self.simd.combine_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_i16x8(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_i16x8(self)
    }
}
/// Generic vector interface: lane count, mask type, and slice access.
impl<S: Simd> crate::SimdBase<i16, S> for i16x8<S> {
    const N: usize = 8;
    type Mask = mask16x8<S>;
    type Block = i16x8<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        // Panics if `slice.len() != 8` (`copy_from_slice` requires equal lengths).
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_i16x8(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is its own block, so splatting a block is the identity.
        block
    }
}
/// Integer trait surface; mirrors the inherent methods and delegates
/// identically to the backend.
impl<S: Simd> crate::SimdInt<i16, S> for i16x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_i16x8(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes, as defined by the backend.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.zip_low_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.zip_high_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.unzip_low_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.unzip_high_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.min_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.max_i16x8(self, rhs.simd_into(self.simd))
    }
}
/// A 128-bit SIMD vector of eight `u16` lanes, paired with the backend
/// capability token `S` used to dispatch all operations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct u16x8<S: Simd> {
    /// Lane values in memory order.
    pub val: [u16; 8],
    /// Zero-sized (or small) backend token; proves the target features are available.
    pub simd: S,
}
1139impl<S: Simd> SimdFrom<[u16; 8], S> for u16x8<S> {
1140 #[inline(always)]
1141 fn simd_from(val: [u16; 8], simd: S) -> Self {
1142 Self {
1143 val: [
1144 val[0usize],
1145 val[1usize],
1146 val[2usize],
1147 val[3usize],
1148 val[4usize],
1149 val[5usize],
1150 val[6usize],
1151 val[7usize],
1152 ],
1153 simd,
1154 }
1155 }
1156}
// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<u16x8<S>> for [u16; 8] {
    #[inline(always)]
    fn from(value: u16x8<S>) -> Self {
        value.val
    }
}
// Smart-pointer-style access: lets a u16x8 be used wherever `&[u16; 8]` works.
impl<S: Simd> core::ops::Deref for u16x8<S> {
    type Target = [u16; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of Deref: in-place lane access via `&mut [u16; 8]`.
impl<S: Simd> core::ops::DerefMut for u16x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// A scalar converts to a vector by splatting it into every lane.
impl<S: Simd> SimdFrom<u16, S> for u16x8<S> {
    #[inline(always)]
    fn simd_from(value: u16, simd: S) -> Self {
        simd.splat_u16x8(value)
    }
}
// Mask-driven lane select: picks each lane from `if_true` or `if_false`.
impl<S: Simd> Select<u16x8<S>> for mask16x8<S> {
    #[inline(always)]
    fn select(self, if_true: u16x8<S>, if_false: u16x8<S>) -> u16x8<S> {
        self.simd.select_u16x8(self, if_true, if_false)
    }
}
// Byte-level reinterpretation between u16x8 and its raw 16-byte form.
impl<S: Simd> Bytes for u16x8<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [u16; 8] and [u8; 16] have identical size (16 bytes) and
        // alignment requirements compatible with this transmute; every bit
        // pattern is valid for both integer array types.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Inherent lane-wise operations on u16x8. Every method forwards to the
// backend token `S`; `rhs` may be anything convertible via `SimdInto`
// (another vector or a scalar, which splats).
impl<S: Simd> u16x8<S> {
    #[inline(always)]
    pub fn not(self) -> u16x8<S> {
        self.simd.not_u16x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.add_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.sub_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.mul_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.and_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.or_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.xor_u16x8(self, rhs.simd_into(self.simd))
    }
    // Right shift of every lane by the same scalar amount.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u16x8<S> {
        self.simd.shr_u16x8(self, shift)
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.min_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.max_u16x8(self, rhs.simd_into(self.simd))
    }
    // Concatenates two vectors into a double-width u16x16.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
        self.simd.combine_u16x8(self, rhs.simd_into(self.simd))
    }
    // Bit reinterpretations (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_u16x8(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_u16x8(self)
    }
}
// Core vector plumbing for u16x8 (see SimdBase): lane count, mask/block
// types, and slice/splat constructors.
impl<S: Simd> crate::SimdBase<u16, S> for u16x8<S> {
    const N: usize = 8;
    type Mask = mask16x8<S>;
    type Block = u16x8<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u16] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u16]) -> Self {
        // Panics if `slice.len() != 8` (copy_from_slice requires equal lengths).
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u16) -> Self {
        simd.splat_u16x8(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is exactly one block wide, so block-splat is the identity.
        block
    }
}
// Integer lane operations for u16x8 (mask-producing comparisons, zip/unzip
// shuffles, min/max), all forwarded to the backend.
impl<S: Simd> crate::SimdInt<u16, S> for u16x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.zip_low_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.zip_high_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.unzip_low_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.unzip_high_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.min_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.max_u16x8(self, rhs.simd_into(self.simd))
    }
}
/// A mask for eight 16-bit lanes, stored as `i16` lane values.
/// NOTE(review): presumably 0 = false and -1 (all bits set) = true per lane,
/// as consumed by the backend's select ops — confirm against the backend.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask16x8<S: Simd> {
    /// Per-lane mask values.
    pub val: [i16; 8],
    /// Backend capability token.
    pub simd: S,
}
1362impl<S: Simd> SimdFrom<[i16; 8], S> for mask16x8<S> {
1363 #[inline(always)]
1364 fn simd_from(val: [i16; 8], simd: S) -> Self {
1365 Self {
1366 val: [
1367 val[0usize],
1368 val[1usize],
1369 val[2usize],
1370 val[3usize],
1371 val[4usize],
1372 val[5usize],
1373 val[6usize],
1374 val[7usize],
1375 ],
1376 simd,
1377 }
1378 }
1379}
// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<mask16x8<S>> for [i16; 8] {
    #[inline(always)]
    fn from(value: mask16x8<S>) -> Self {
        value.val
    }
}
// Lets a mask16x8 be used wherever `&[i16; 8]` works.
impl<S: Simd> core::ops::Deref for mask16x8<S> {
    type Target = [i16; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of Deref for in-place lane access.
impl<S: Simd> core::ops::DerefMut for mask16x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// A scalar converts to a mask by splatting it into every lane.
impl<S: Simd> SimdFrom<i16, S> for mask16x8<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_mask16x8(value)
    }
}
// Masks can also be selected between, lane-wise, by another mask.
impl<S: Simd> Select<mask16x8<S>> for mask16x8<S> {
    #[inline(always)]
    fn select(self, if_true: mask16x8<S>, if_false: mask16x8<S>) -> mask16x8<S> {
        self.simd.select_mask16x8(self, if_true, if_false)
    }
}
// Byte-level reinterpretation between mask16x8 and its raw 16-byte form.
impl<S: Simd> Bytes for mask16x8<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [i16; 8] and [u8; 16] are both 16 bytes; every bit pattern
        // is valid for both integer array types.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Inherent boolean/lane operations on mask16x8, all forwarded to the backend.
impl<S: Simd> mask16x8<S> {
    #[inline(always)]
    pub fn not(self) -> mask16x8<S> {
        self.simd.not_mask16x8(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.and_mask16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.or_mask16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.xor_mask16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_mask16x8(self, rhs.simd_into(self.simd))
    }
    // Concatenates two masks into a double-width mask16x16.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
        self.simd.combine_mask16x8(self, rhs.simd_into(self.simd))
    }
}
// Core vector plumbing for mask16x8; a mask is its own mask and block type.
impl<S: Simd> crate::SimdBase<i16, S> for mask16x8<S> {
    const N: usize = 8;
    type Mask = mask16x8<S>;
    type Block = mask16x8<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        // Panics if `slice.len() != 8` (copy_from_slice requires equal lengths).
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_mask16x8(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is exactly one block wide, so block-splat is the identity.
        block
    }
}
// Mask-trait entry point: lane-wise equality between two masks.
impl<S: Simd> crate::SimdMask<i16, S> for mask16x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_mask16x8(self, rhs.simd_into(self.simd))
    }
}
/// A 128-bit SIMD vector of four `i32` lanes, paired with the backend
/// capability token `S` used to dispatch all operations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct i32x4<S: Simd> {
    /// Lane values in memory order.
    pub val: [i32; 4],
    /// Backend capability token.
    pub simd: S,
}
1497impl<S: Simd> SimdFrom<[i32; 4], S> for i32x4<S> {
1498 #[inline(always)]
1499 fn simd_from(val: [i32; 4], simd: S) -> Self {
1500 Self {
1501 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
1502 simd,
1503 }
1504 }
1505}
// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<i32x4<S>> for [i32; 4] {
    #[inline(always)]
    fn from(value: i32x4<S>) -> Self {
        value.val
    }
}
// Lets an i32x4 be used wherever `&[i32; 4]` works.
impl<S: Simd> core::ops::Deref for i32x4<S> {
    type Target = [i32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of Deref for in-place lane access.
impl<S: Simd> core::ops::DerefMut for i32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// A scalar converts to a vector by splatting it into every lane.
impl<S: Simd> SimdFrom<i32, S> for i32x4<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_i32x4(value)
    }
}
// Mask-driven lane select: picks each lane from `if_true` or `if_false`.
impl<S: Simd> Select<i32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: i32x4<S>, if_false: i32x4<S>) -> i32x4<S> {
        self.simd.select_i32x4(self, if_true, if_false)
    }
}
// Byte-level reinterpretation between i32x4 and its raw 16-byte form.
impl<S: Simd> Bytes for i32x4<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [i32; 4] and [u8; 16] are both 16 bytes; every bit pattern
        // is valid for both integer array types.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Inherent lane-wise operations on i32x4. Every method forwards to the
// backend token `S`; `rhs` may be anything convertible via `SimdInto`
// (another vector or a scalar, which splats).
impl<S: Simd> i32x4<S> {
    #[inline(always)]
    pub fn not(self) -> i32x4<S> {
        self.simd.not_i32x4(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.add_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.sub_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.mul_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.and_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.or_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.xor_i32x4(self, rhs.simd_into(self.simd))
    }
    // Right shift of every lane by the same scalar amount.
    // NOTE(review): presumably arithmetic shift for this signed type — the
    // behavior is decided by the backend's shr_i32x4; confirm there.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i32x4<S> {
        self.simd.shr_i32x4(self, shift)
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.min_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.max_i32x4(self, rhs.simd_into(self.simd))
    }
    // Concatenates two vectors into a double-width i32x8.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.combine_i32x4(self, rhs.simd_into(self.simd))
    }
    // Bit reinterpretations (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_i32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_i32x4(self)
    }
    // Numeric conversion of each i32 lane to f32 (value-changing, unlike
    // the reinterpret_* methods above).
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x4<S> {
        self.simd.cvt_f32_i32x4(self)
    }
}
// Core vector plumbing for i32x4: lane count, mask/block types, and
// slice/splat constructors.
impl<S: Simd> crate::SimdBase<i32, S> for i32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = i32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        // Panics if `slice.len() != 4` (copy_from_slice requires equal lengths).
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_i32x4(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is exactly one block wide, so block-splat is the identity.
        block
    }
}
// Integer lane operations for i32x4 (mask-producing comparisons, zip/unzip
// shuffles, min/max), all forwarded to the backend.
impl<S: Simd> crate::SimdInt<i32, S> for i32x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.zip_low_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.zip_high_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.unzip_low_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.unzip_high_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.min_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.max_i32x4(self, rhs.simd_into(self.simd))
    }
}
1709impl<S: Simd> SimdCvtTruncate<f32x4<S>> for i32x4<S> {
1710 fn truncate_from(x: f32x4<S>) -> Self {
1711 x.simd.cvt_i32_f32x4(x)
1712 }
1713}
/// A 128-bit SIMD vector of four `u32` lanes, paired with the backend
/// capability token `S` used to dispatch all operations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct u32x4<S: Simd> {
    /// Lane values in memory order.
    pub val: [u32; 4],
    /// Backend capability token.
    pub simd: S,
}
1720impl<S: Simd> SimdFrom<[u32; 4], S> for u32x4<S> {
1721 #[inline(always)]
1722 fn simd_from(val: [u32; 4], simd: S) -> Self {
1723 Self {
1724 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
1725 simd,
1726 }
1727 }
1728}
// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<u32x4<S>> for [u32; 4] {
    #[inline(always)]
    fn from(value: u32x4<S>) -> Self {
        value.val
    }
}
// Lets a u32x4 be used wherever `&[u32; 4]` works.
impl<S: Simd> core::ops::Deref for u32x4<S> {
    type Target = [u32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of Deref for in-place lane access.
impl<S: Simd> core::ops::DerefMut for u32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// A scalar converts to a vector by splatting it into every lane.
impl<S: Simd> SimdFrom<u32, S> for u32x4<S> {
    #[inline(always)]
    fn simd_from(value: u32, simd: S) -> Self {
        simd.splat_u32x4(value)
    }
}
// Mask-driven lane select: picks each lane from `if_true` or `if_false`.
impl<S: Simd> Select<u32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: u32x4<S>, if_false: u32x4<S>) -> u32x4<S> {
        self.simd.select_u32x4(self, if_true, if_false)
    }
}
// Byte-level reinterpretation between u32x4 and its raw 16-byte form.
impl<S: Simd> Bytes for u32x4<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [u32; 4] and [u8; 16] are both 16 bytes; every bit pattern
        // is valid for both integer array types.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Inherent lane-wise operations on u32x4. Every method forwards to the
// backend token `S`; `rhs` may be anything convertible via `SimdInto`
// (another vector or a scalar, which splats).
impl<S: Simd> u32x4<S> {
    #[inline(always)]
    pub fn not(self) -> u32x4<S> {
        self.simd.not_u32x4(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.add_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.sub_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.mul_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.and_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.or_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.xor_u32x4(self, rhs.simd_into(self.simd))
    }
    // Right shift of every lane by the same scalar amount.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u32x4<S> {
        self.simd.shr_u32x4(self, shift)
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.min_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.max_u32x4(self, rhs.simd_into(self.simd))
    }
    // Concatenates two vectors into a double-width u32x8.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.combine_u32x4(self, rhs.simd_into(self.simd))
    }
    // Bit reinterpretation (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_u32x4(self)
    }
    // Numeric conversion of each u32 lane to f32 (value-changing).
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x4<S> {
        self.simd.cvt_f32_u32x4(self)
    }
}
// Core vector plumbing for u32x4: lane count, mask/block types, and
// slice/splat constructors.
impl<S: Simd> crate::SimdBase<u32, S> for u32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = u32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u32] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u32]) -> Self {
        // Panics if `slice.len() != 4` (copy_from_slice requires equal lengths).
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u32) -> Self {
        simd.splat_u32x4(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is exactly one block wide, so block-splat is the identity.
        block
    }
}
// Integer lane operations for u32x4 (mask-producing comparisons, zip/unzip
// shuffles, min/max), all forwarded to the backend.
impl<S: Simd> crate::SimdInt<u32, S> for u32x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.zip_low_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.zip_high_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.unzip_low_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.unzip_high_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.min_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.max_u32x4(self, rhs.simd_into(self.simd))
    }
}
1928impl<S: Simd> SimdCvtTruncate<f32x4<S>> for u32x4<S> {
1929 fn truncate_from(x: f32x4<S>) -> Self {
1930 x.simd.cvt_u32_f32x4(x)
1931 }
1932}
/// A mask for four 32-bit lanes, stored as `i32` lane values.
/// NOTE(review): presumably 0 = false and -1 (all bits set) = true per lane,
/// as consumed by the backend's select ops — confirm against the backend.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask32x4<S: Simd> {
    /// Per-lane mask values.
    pub val: [i32; 4],
    /// Backend capability token.
    pub simd: S,
}
1939impl<S: Simd> SimdFrom<[i32; 4], S> for mask32x4<S> {
1940 #[inline(always)]
1941 fn simd_from(val: [i32; 4], simd: S) -> Self {
1942 Self {
1943 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
1944 simd,
1945 }
1946 }
1947}
// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<mask32x4<S>> for [i32; 4] {
    #[inline(always)]
    fn from(value: mask32x4<S>) -> Self {
        value.val
    }
}
// Lets a mask32x4 be used wherever `&[i32; 4]` works.
impl<S: Simd> core::ops::Deref for mask32x4<S> {
    type Target = [i32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of Deref for in-place lane access.
impl<S: Simd> core::ops::DerefMut for mask32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// A scalar converts to a mask by splatting it into every lane.
impl<S: Simd> SimdFrom<i32, S> for mask32x4<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_mask32x4(value)
    }
}
// Masks can also be selected between, lane-wise, by another mask.
impl<S: Simd> Select<mask32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: mask32x4<S>, if_false: mask32x4<S>) -> mask32x4<S> {
        self.simd.select_mask32x4(self, if_true, if_false)
    }
}
// Byte-level reinterpretation between mask32x4 and its raw 16-byte form.
impl<S: Simd> Bytes for mask32x4<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [i32; 4] and [u8; 16] are both 16 bytes; every bit pattern
        // is valid for both integer array types.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Inherent boolean/lane operations on mask32x4, all forwarded to the backend.
impl<S: Simd> mask32x4<S> {
    #[inline(always)]
    pub fn not(self) -> mask32x4<S> {
        self.simd.not_mask32x4(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.and_mask32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.or_mask32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.xor_mask32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_mask32x4(self, rhs.simd_into(self.simd))
    }
    // Concatenates two masks into a double-width mask32x8.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.combine_mask32x4(self, rhs.simd_into(self.simd))
    }
}
// Core vector plumbing for mask32x4; a mask is its own mask and block type.
impl<S: Simd> crate::SimdBase<i32, S> for mask32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = mask32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        // Panics if `slice.len() != 4` (copy_from_slice requires equal lengths).
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_mask32x4(val)
    }
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        // This type is exactly one block wide, so block-splat is the identity.
        block
    }
}
// Mask-trait entry point: lane-wise equality between two masks.
impl<S: Simd> crate::SimdMask<i32, S> for mask32x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_mask32x4(self, rhs.simd_into(self.simd))
    }
}
/// A 128-bit SIMD vector of two `f64` lanes, paired with the backend
/// capability token `S` used to dispatch all operations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct f64x2<S: Simd> {
    /// Lane values in memory order.
    pub val: [f64; 2],
    /// Backend capability token.
    pub simd: S,
}
2065impl<S: Simd> SimdFrom<[f64; 2], S> for f64x2<S> {
2066 #[inline(always)]
2067 fn simd_from(val: [f64; 2], simd: S) -> Self {
2068 Self {
2069 val: [val[0usize], val[1usize]],
2070 simd,
2071 }
2072 }
2073}
// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<f64x2<S>> for [f64; 2] {
    #[inline(always)]
    fn from(value: f64x2<S>) -> Self {
        value.val
    }
}
// Lets an f64x2 be used wherever `&[f64; 2]` works.
impl<S: Simd> core::ops::Deref for f64x2<S> {
    type Target = [f64; 2];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of Deref for in-place lane access.
impl<S: Simd> core::ops::DerefMut for f64x2<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// A scalar converts to a vector by splatting it into every lane.
impl<S: Simd> SimdFrom<f64, S> for f64x2<S> {
    #[inline(always)]
    fn simd_from(value: f64, simd: S) -> Self {
        simd.splat_f64x2(value)
    }
}
// Mask-driven lane select: picks each lane from `if_true` or `if_false`.
impl<S: Simd> Select<f64x2<S>> for mask64x2<S> {
    #[inline(always)]
    fn select(self, if_true: f64x2<S>, if_false: f64x2<S>) -> f64x2<S> {
        self.simd.select_f64x2(self, if_true, if_false)
    }
}
// Byte-level reinterpretation between f64x2 and its raw 16-byte form.
impl<S: Simd> Bytes for f64x2<S> {
    type Bytes = u8x16<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [f64; 2] and [u8; 16] are both 16 bytes, and every bit
        // pattern is a valid f64 and a valid u8.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size/validity argument as `to_bytes`, in reverse.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `f64x2`; every method forwards to the backend dispatcher `S`.
/// `rhs` arguments accept anything convertible to `Self` via [`SimdInto`] (e.g. a scalar).
impl<S: Simd> f64x2<S> {
    /// Lane-wise absolute value.
    #[inline(always)]
    pub fn abs(self) -> f64x2<S> {
        self.simd.abs_f64x2(self)
    }
    /// Lane-wise negation.
    #[inline(always)]
    pub fn neg(self) -> f64x2<S> {
        self.simd.neg_f64x2(self)
    }
    /// Lane-wise square root.
    #[inline(always)]
    pub fn sqrt(self) -> f64x2<S> {
        self.simd.sqrt_f64x2(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.add_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.sub_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.mul_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise division.
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.div_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Copies the sign of each `rhs` lane onto the corresponding `self` lane.
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.copysign_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `==`, producing a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_eq_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<`, producing a mask.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_lt_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=`, producing a mask.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_le_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=`, producing a mask.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_ge_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>`, producing a mask.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_gt_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum. NaN handling is backend-defined; see `max_precise`.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.max_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum with stricter semantics than `max` — presumably
    /// well-defined NaN propagation; confirm against the backend implementations.
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.max_precise_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum. NaN handling is backend-defined; see `min_precise`.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.min_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum counterpart of `max_precise`.
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.min_precise_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise floor (round toward negative infinity).
    #[inline(always)]
    pub fn floor(self) -> f64x2<S> {
        self.simd.floor_f64x2(self)
    }
    /// Lane-wise fractional part, as defined by the backend `fract_f64x2`.
    #[inline(always)]
    pub fn fract(self) -> f64x2<S> {
        self.simd.fract_f64x2(self)
    }
    /// Lane-wise truncation toward zero.
    #[inline(always)]
    pub fn trunc(self) -> f64x2<S> {
        self.simd.trunc_f64x2(self)
    }
    /// Concatenates `self` and `rhs` into a double-width `f64x4`.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.combine_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Bit-for-bit reinterpretation of the vector as four `f32` lanes.
    #[inline(always)]
    pub fn reinterpret_f32(self) -> f32x4<S> {
        self.simd.reinterpret_f32_f64x2(self)
    }
}
/// Baseline vector interface for `f64x2`: slice access, construction, splatting.
impl<S: Simd> crate::SimdBase<f64, S> for f64x2<S> {
    /// Number of lanes.
    const N: usize = 2;
    type Mask = mask64x2<S>;
    /// `f64x2` is itself one 128-bit block.
    type Block = f64x2<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[f64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f64] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// # Panics
    /// Panics if `slice.len() != 2` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f64]) -> Self {
        let mut val = [0.0; 2];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    /// Broadcasts `val` to both lanes.
    #[inline(always)]
    fn splat(simd: S, val: f64) -> Self {
        simd.splat_f64x2(val)
    }
    /// A block already is the full vector, so this is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
/// Float trait surface for `f64x2`; forwards to the backend like the inherent
/// methods of the same names (see `impl f64x2` above for semantics).
impl<S: Simd> crate::SimdFloat<f64, S> for f64x2<S> {
    #[inline(always)]
    fn abs(self) -> f64x2<S> {
        self.simd.abs_f64x2(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f64x2<S> {
        self.simd.sqrt_f64x2(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.copysign_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_eq_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_lt_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_le_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_ge_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_gt_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Interleave with `rhs`; exact lane order is defined by the backend
    /// `zip_low_f64x2` — TODO(review) confirm which halves are taken.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.zip_low_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.zip_high_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.unzip_low_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.unzip_high_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.max_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.max_precise_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.min_f64x2(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd.min_precise_f64x2(self, rhs.simd_into(self.simd))
    }
    /// Multiply-add; which operands are multiplied vs. added is defined by the
    /// backend `madd_f64x2` — TODO(review) confirm operand roles before relying on them.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd
            .madd_f64x2(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    /// Multiply-subtract counterpart of `madd`.
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x2<S> {
        self.simd
            .msub_f64x2(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f64x2<S> {
        self.simd.floor_f64x2(self)
    }
    #[inline(always)]
    fn fract(self) -> f64x2<S> {
        self.simd.fract_f64x2(self)
    }
    #[inline(always)]
    fn trunc(self) -> f64x2<S> {
        self.simd.trunc_f64x2(self)
    }
}
/// A two-lane mask with 64-bit elements, stored as `i64` lanes.
/// NOTE(review): lane values other than all-zeros/all-ones are representable;
/// confirm producers only emit full-width masks.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask64x2<S: Simd> {
    /// Raw lane storage.
    pub val: [i64; 2],
    /// Backend dispatcher used to implement the operations.
    pub simd: S,
}
2337impl<S: Simd> SimdFrom<[i64; 2], S> for mask64x2<S> {
2338 #[inline(always)]
2339 fn simd_from(val: [i64; 2], simd: S) -> Self {
2340 Self {
2341 val: [val[0usize], val[1usize]],
2342 simd,
2343 }
2344 }
2345}
2346impl<S: Simd> From<mask64x2<S>> for [i64; 2] {
2347 #[inline(always)]
2348 fn from(value: mask64x2<S>) -> Self {
2349 value.val
2350 }
2351}
2352impl<S: Simd> core::ops::Deref for mask64x2<S> {
2353 type Target = [i64; 2];
2354 #[inline(always)]
2355 fn deref(&self) -> &Self::Target {
2356 &self.val
2357 }
2358}
2359impl<S: Simd> core::ops::DerefMut for mask64x2<S> {
2360 #[inline(always)]
2361 fn deref_mut(&mut self) -> &mut Self::Target {
2362 &mut self.val
2363 }
2364}
2365impl<S: Simd> SimdFrom<i64, S> for mask64x2<S> {
2366 #[inline(always)]
2367 fn simd_from(value: i64, simd: S) -> Self {
2368 simd.splat_mask64x2(value)
2369 }
2370}
2371impl<S: Simd> Select<mask64x2<S>> for mask64x2<S> {
2372 #[inline(always)]
2373 fn select(self, if_true: mask64x2<S>, if_false: mask64x2<S>) -> mask64x2<S> {
2374 self.simd.select_mask64x2(self, if_true, if_false)
2375 }
2376}
2377impl<S: Simd> Bytes for mask64x2<S> {
2378 type Bytes = u8x16<S>;
2379 #[inline(always)]
2380 fn to_bytes(self) -> Self::Bytes {
2381 unsafe {
2382 u8x16 {
2383 val: core::mem::transmute(self.val),
2384 simd: self.simd,
2385 }
2386 }
2387 }
2388 #[inline(always)]
2389 fn from_bytes(value: Self::Bytes) -> Self {
2390 unsafe {
2391 Self {
2392 val: core::mem::transmute(value.val),
2393 simd: value.simd,
2394 }
2395 }
2396 }
2397}
/// Boolean-style lane operations on `mask64x2`, forwarded to the backend `S`.
impl<S: Simd> mask64x2<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> mask64x2<S> {
        self.simd.not_mask64x2(self)
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.and_mask64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.or_mask64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.xor_mask64x2(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality, producing a new mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
        self.simd.simd_eq_mask64x2(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into a double-width `mask64x4`.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.combine_mask64x2(self, rhs.simd_into(self.simd))
    }
}
/// Baseline vector interface for `mask64x2`.
impl<S: Simd> crate::SimdBase<i64, S> for mask64x2<S> {
    /// Number of lanes.
    const N: usize = 2;
    type Mask = mask64x2<S>;
    /// `mask64x2` is itself one 128-bit block.
    type Block = mask64x2<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i64] {
        &mut self.val
    }
    /// Builds a mask from a slice.
    ///
    /// # Panics
    /// Panics if `slice.len() != 2` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i64]) -> Self {
        let mut val = [0; 2];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    /// Broadcasts `val` to both lanes.
    #[inline(always)]
    fn splat(simd: S, val: i64) -> Self {
        simd.splat_mask64x2(val)
    }
    /// A block already is the full vector, so this is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
2451impl<S: Simd> crate::SimdMask<i64, S> for mask64x2<S> {
2452 #[inline(always)]
2453 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2454 self.simd.simd_eq_mask64x2(self, rhs.simd_into(self.simd))
2455 }
2456}
/// An eight-lane `f32` vector (256 bits), 32-byte aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct f32x8<S: Simd> {
    /// Raw lane storage.
    pub val: [f32; 8],
    /// Backend dispatcher used to implement the operations.
    pub simd: S,
}
2463impl<S: Simd> SimdFrom<[f32; 8], S> for f32x8<S> {
2464 #[inline(always)]
2465 fn simd_from(val: [f32; 8], simd: S) -> Self {
2466 Self {
2467 val: [
2468 val[0usize],
2469 val[1usize],
2470 val[2usize],
2471 val[3usize],
2472 val[4usize],
2473 val[5usize],
2474 val[6usize],
2475 val[7usize],
2476 ],
2477 simd,
2478 }
2479 }
2480}
2481impl<S: Simd> From<f32x8<S>> for [f32; 8] {
2482 #[inline(always)]
2483 fn from(value: f32x8<S>) -> Self {
2484 value.val
2485 }
2486}
2487impl<S: Simd> core::ops::Deref for f32x8<S> {
2488 type Target = [f32; 8];
2489 #[inline(always)]
2490 fn deref(&self) -> &Self::Target {
2491 &self.val
2492 }
2493}
2494impl<S: Simd> core::ops::DerefMut for f32x8<S> {
2495 #[inline(always)]
2496 fn deref_mut(&mut self) -> &mut Self::Target {
2497 &mut self.val
2498 }
2499}
2500impl<S: Simd> SimdFrom<f32, S> for f32x8<S> {
2501 #[inline(always)]
2502 fn simd_from(value: f32, simd: S) -> Self {
2503 simd.splat_f32x8(value)
2504 }
2505}
2506impl<S: Simd> Select<f32x8<S>> for mask32x8<S> {
2507 #[inline(always)]
2508 fn select(self, if_true: f32x8<S>, if_false: f32x8<S>) -> f32x8<S> {
2509 self.simd.select_f32x8(self, if_true, if_false)
2510 }
2511}
2512impl<S: Simd> Bytes for f32x8<S> {
2513 type Bytes = u8x32<S>;
2514 #[inline(always)]
2515 fn to_bytes(self) -> Self::Bytes {
2516 unsafe {
2517 u8x32 {
2518 val: core::mem::transmute(self.val),
2519 simd: self.simd,
2520 }
2521 }
2522 }
2523 #[inline(always)]
2524 fn from_bytes(value: Self::Bytes) -> Self {
2525 unsafe {
2526 Self {
2527 val: core::mem::transmute(value.val),
2528 simd: value.simd,
2529 }
2530 }
2531 }
2532}
/// Lane-wise operations on `f32x8`; every method forwards to the backend dispatcher `S`.
/// `rhs` arguments accept anything convertible to `Self` via [`SimdInto`].
impl<S: Simd> f32x8<S> {
    /// Lane-wise absolute value.
    #[inline(always)]
    pub fn abs(self) -> f32x8<S> {
        self.simd.abs_f32x8(self)
    }
    /// Lane-wise negation.
    #[inline(always)]
    pub fn neg(self) -> f32x8<S> {
        self.simd.neg_f32x8(self)
    }
    /// Lane-wise square root.
    #[inline(always)]
    pub fn sqrt(self) -> f32x8<S> {
        self.simd.sqrt_f32x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.add_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.sub_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.mul_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.div_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Copies the sign of each `rhs` lane onto the corresponding `self` lane.
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.copysign_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `==`, producing a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum. NaN handling is backend-defined; see `max_precise`.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.max_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum with stricter semantics than `max` — presumably
    /// well-defined NaN propagation; confirm against the backend implementations.
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.max_precise_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum. NaN handling is backend-defined; see `min_precise`.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.min_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum counterpart of `max_precise`.
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.min_precise_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise floor.
    #[inline(always)]
    pub fn floor(self) -> f32x8<S> {
        self.simd.floor_f32x8(self)
    }
    /// Lane-wise fractional part, as defined by the backend `fract_f32x8`.
    #[inline(always)]
    pub fn fract(self) -> f32x8<S> {
        self.simd.fract_f32x8(self)
    }
    /// Lane-wise truncation toward zero.
    #[inline(always)]
    pub fn trunc(self) -> f32x8<S> {
        self.simd.trunc_f32x8(self)
    }
    /// Concatenates `self` and `rhs` into a double-width `f32x16`.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.combine_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Bit-for-bit reinterpretation as four `f64` lanes.
    #[inline(always)]
    pub fn reinterpret_f64(self) -> f64x4<S> {
        self.simd.reinterpret_f64_f32x8(self)
    }
    /// Bit-for-bit reinterpretation as eight `i32` lanes.
    #[inline(always)]
    pub fn reinterpret_i32(self) -> i32x8<S> {
        self.simd.reinterpret_i32_f32x8(self)
    }
    /// Bit-for-bit reinterpretation as 32 `u8` lanes.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x32<S> {
        self.simd.reinterpret_u8_f32x8(self)
    }
    /// Bit-for-bit reinterpretation as eight `u32` lanes.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x8<S> {
        self.simd.reinterpret_u32_f32x8(self)
    }
    /// Numeric conversion of each lane to `u32`; rounding and out-of-range
    /// behavior are backend-defined — TODO(review) confirm.
    #[inline(always)]
    pub fn cvt_u32(self) -> u32x8<S> {
        self.simd.cvt_u32_f32x8(self)
    }
    /// Numeric conversion of each lane to `i32`; rounding and out-of-range
    /// behavior are backend-defined — TODO(review) confirm.
    #[inline(always)]
    pub fn cvt_i32(self) -> i32x8<S> {
        self.simd.cvt_i32_f32x8(self)
    }
}
/// Baseline vector interface for `f32x8`.
impl<S: Simd> crate::SimdBase<f32, S> for f32x8<S> {
    /// Number of lanes.
    const N: usize = 8;
    type Mask = mask32x8<S>;
    /// The 128-bit building block; `f32x8` is two such blocks.
    type Block = f32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[f32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// # Panics
    /// Panics if `slice.len() != 8` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f32]) -> Self {
        let mut val = [0.0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    /// Broadcasts `val` to all lanes.
    #[inline(always)]
    fn splat(simd: S, val: f32) -> Self {
        simd.splat_f32x8(val)
    }
    /// Duplicates the 128-bit block into both halves of the vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Float trait surface for `f32x8`; forwards to the backend like the inherent
/// methods of the same names (see `impl f32x8` above for semantics).
impl<S: Simd> crate::SimdFloat<f32, S> for f32x8<S> {
    #[inline(always)]
    fn abs(self) -> f32x8<S> {
        self.simd.abs_f32x8(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f32x8<S> {
        self.simd.sqrt_f32x8(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.copysign_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Interleave with `rhs`; exact lane order is defined by the backend
    /// `zip_low_f32x8` — TODO(review) confirm which halves are taken.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.zip_low_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.zip_high_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.unzip_low_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.unzip_high_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.max_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.max_precise_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.min_f32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.min_precise_f32x8(self, rhs.simd_into(self.simd))
    }
    /// Multiply-add; which operands are multiplied vs. added is defined by the
    /// backend `madd_f32x8` — TODO(review) confirm operand roles before relying on them.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd
            .madd_f32x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    /// Multiply-subtract counterpart of `madd`.
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd
            .msub_f32x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f32x8<S> {
        self.simd.floor_f32x8(self)
    }
    #[inline(always)]
    fn fract(self) -> f32x8<S> {
        self.simd.fract_f32x8(self)
    }
    #[inline(always)]
    fn trunc(self) -> f32x8<S> {
        self.simd.trunc_f32x8(self)
    }
}
2758impl<S: Simd> SimdCvtFloat<u32x8<S>> for f32x8<S> {
2759 fn float_from(x: u32x8<S>) -> Self {
2760 x.simd.cvt_f32_u32x8(x)
2761 }
2762}
2763impl<S: Simd> SimdCvtFloat<i32x8<S>> for f32x8<S> {
2764 fn float_from(x: i32x8<S>) -> Self {
2765 x.simd.cvt_f32_i32x8(x)
2766 }
2767}
/// A 32-lane `i8` vector (256 bits), 32-byte aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct i8x32<S: Simd> {
    /// Raw lane storage.
    pub val: [i8; 32],
    /// Backend dispatcher used to implement the operations.
    pub simd: S,
}
2774impl<S: Simd> SimdFrom<[i8; 32], S> for i8x32<S> {
2775 #[inline(always)]
2776 fn simd_from(val: [i8; 32], simd: S) -> Self {
2777 Self {
2778 val: [
2779 val[0usize],
2780 val[1usize],
2781 val[2usize],
2782 val[3usize],
2783 val[4usize],
2784 val[5usize],
2785 val[6usize],
2786 val[7usize],
2787 val[8usize],
2788 val[9usize],
2789 val[10usize],
2790 val[11usize],
2791 val[12usize],
2792 val[13usize],
2793 val[14usize],
2794 val[15usize],
2795 val[16usize],
2796 val[17usize],
2797 val[18usize],
2798 val[19usize],
2799 val[20usize],
2800 val[21usize],
2801 val[22usize],
2802 val[23usize],
2803 val[24usize],
2804 val[25usize],
2805 val[26usize],
2806 val[27usize],
2807 val[28usize],
2808 val[29usize],
2809 val[30usize],
2810 val[31usize],
2811 ],
2812 simd,
2813 }
2814 }
2815}
2816impl<S: Simd> From<i8x32<S>> for [i8; 32] {
2817 #[inline(always)]
2818 fn from(value: i8x32<S>) -> Self {
2819 value.val
2820 }
2821}
2822impl<S: Simd> core::ops::Deref for i8x32<S> {
2823 type Target = [i8; 32];
2824 #[inline(always)]
2825 fn deref(&self) -> &Self::Target {
2826 &self.val
2827 }
2828}
2829impl<S: Simd> core::ops::DerefMut for i8x32<S> {
2830 #[inline(always)]
2831 fn deref_mut(&mut self) -> &mut Self::Target {
2832 &mut self.val
2833 }
2834}
2835impl<S: Simd> SimdFrom<i8, S> for i8x32<S> {
2836 #[inline(always)]
2837 fn simd_from(value: i8, simd: S) -> Self {
2838 simd.splat_i8x32(value)
2839 }
2840}
2841impl<S: Simd> Select<i8x32<S>> for mask8x32<S> {
2842 #[inline(always)]
2843 fn select(self, if_true: i8x32<S>, if_false: i8x32<S>) -> i8x32<S> {
2844 self.simd.select_i8x32(self, if_true, if_false)
2845 }
2846}
2847impl<S: Simd> Bytes for i8x32<S> {
2848 type Bytes = u8x32<S>;
2849 #[inline(always)]
2850 fn to_bytes(self) -> Self::Bytes {
2851 unsafe {
2852 u8x32 {
2853 val: core::mem::transmute(self.val),
2854 simd: self.simd,
2855 }
2856 }
2857 }
2858 #[inline(always)]
2859 fn from_bytes(value: Self::Bytes) -> Self {
2860 unsafe {
2861 Self {
2862 val: core::mem::transmute(value.val),
2863 simd: value.simd,
2864 }
2865 }
2866 }
2867}
/// Lane-wise operations on `i8x32`; every method forwards to the backend dispatcher `S`.
/// `rhs` arguments accept anything convertible to `Self` via [`SimdInto`].
impl<S: Simd> i8x32<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> i8x32<S> {
        self.simd.not_i8x32(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.add_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.sub_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.mul_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.and_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.or_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.xor_i8x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise right shift by `shift` bits. Whether the shift is arithmetic
    /// (sign-extending) for this signed element type is decided by the backend
    /// `shr_i8x32` — TODO(review) confirm.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i8x32<S> {
        self.simd.shr_i8x32(self, shift)
    }
    /// Lane-wise `==`, producing a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_eq_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_lt_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_le_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_ge_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_gt_i8x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.min_i8x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.max_i8x32(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into a double-width `i8x64`.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.combine_i8x32(self, rhs.simd_into(self.simd))
    }
    /// Bit-for-bit reinterpretation as 32 `u8` lanes.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x32<S> {
        self.simd.reinterpret_u8_i8x32(self)
    }
    /// Bit-for-bit reinterpretation as eight `u32` lanes.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x8<S> {
        self.simd.reinterpret_u32_i8x32(self)
    }
}
/// Baseline vector interface for `i8x32`.
impl<S: Simd> crate::SimdBase<i8, S> for i8x32<S> {
    /// Number of lanes.
    const N: usize = 32;
    type Mask = mask8x32<S>;
    /// The 128-bit building block; `i8x32` is two such blocks.
    type Block = i8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// # Panics
    /// Panics if `slice.len() != 32` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 32];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    /// Broadcasts `val` to all lanes.
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_i8x32(val)
    }
    /// Duplicates the 128-bit block into both halves of the vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Integer trait surface for `i8x32`; forwards to the backend like the inherent
/// methods of the same names (see `impl i8x32` above for semantics).
impl<S: Simd> crate::SimdInt<i8, S> for i8x32<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_eq_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_lt_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_le_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_ge_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_gt_i8x32(self, rhs.simd_into(self.simd))
    }
    /// Interleave with `rhs`; exact lane order is defined by the backend
    /// `zip_low_i8x32` — TODO(review) confirm which halves are taken.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.zip_low_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.zip_high_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.unzip_low_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.unzip_high_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.min_i8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.max_i8x32(self, rhs.simd_into(self.simd))
    }
}
/// A 32-lane `u8` vector (256 bits), 32-byte aligned. Also serves as the raw
/// byte representation (`Bytes::Bytes`) for the other 256-bit vector types.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct u8x32<S: Simd> {
    /// Raw lane storage.
    pub val: [u8; 32],
    /// Backend dispatcher used to implement the operations.
    pub simd: S,
}
3021impl<S: Simd> SimdFrom<[u8; 32], S> for u8x32<S> {
3022 #[inline(always)]
3023 fn simd_from(val: [u8; 32], simd: S) -> Self {
3024 Self {
3025 val: [
3026 val[0usize],
3027 val[1usize],
3028 val[2usize],
3029 val[3usize],
3030 val[4usize],
3031 val[5usize],
3032 val[6usize],
3033 val[7usize],
3034 val[8usize],
3035 val[9usize],
3036 val[10usize],
3037 val[11usize],
3038 val[12usize],
3039 val[13usize],
3040 val[14usize],
3041 val[15usize],
3042 val[16usize],
3043 val[17usize],
3044 val[18usize],
3045 val[19usize],
3046 val[20usize],
3047 val[21usize],
3048 val[22usize],
3049 val[23usize],
3050 val[24usize],
3051 val[25usize],
3052 val[26usize],
3053 val[27usize],
3054 val[28usize],
3055 val[29usize],
3056 val[30usize],
3057 val[31usize],
3058 ],
3059 simd,
3060 }
3061 }
3062}
3063impl<S: Simd> From<u8x32<S>> for [u8; 32] {
3064 #[inline(always)]
3065 fn from(value: u8x32<S>) -> Self {
3066 value.val
3067 }
3068}
3069impl<S: Simd> core::ops::Deref for u8x32<S> {
3070 type Target = [u8; 32];
3071 #[inline(always)]
3072 fn deref(&self) -> &Self::Target {
3073 &self.val
3074 }
3075}
3076impl<S: Simd> core::ops::DerefMut for u8x32<S> {
3077 #[inline(always)]
3078 fn deref_mut(&mut self) -> &mut Self::Target {
3079 &mut self.val
3080 }
3081}
3082impl<S: Simd> SimdFrom<u8, S> for u8x32<S> {
3083 #[inline(always)]
3084 fn simd_from(value: u8, simd: S) -> Self {
3085 simd.splat_u8x32(value)
3086 }
3087}
3088impl<S: Simd> Select<u8x32<S>> for mask8x32<S> {
3089 #[inline(always)]
3090 fn select(self, if_true: u8x32<S>, if_false: u8x32<S>) -> u8x32<S> {
3091 self.simd.select_u8x32(self, if_true, if_false)
3092 }
3093}
3094impl<S: Simd> Bytes for u8x32<S> {
3095 type Bytes = u8x32<S>;
3096 #[inline(always)]
3097 fn to_bytes(self) -> Self::Bytes {
3098 unsafe {
3099 u8x32 {
3100 val: core::mem::transmute(self.val),
3101 simd: self.simd,
3102 }
3103 }
3104 }
3105 #[inline(always)]
3106 fn from_bytes(value: Self::Bytes) -> Self {
3107 unsafe {
3108 Self {
3109 val: core::mem::transmute(value.val),
3110 simd: value.simd,
3111 }
3112 }
3113 }
3114}
/// Lane-wise operations on `u8x32`; every method forwards to the backend dispatcher `S`.
/// `rhs` arguments accept anything convertible to `Self` via [`SimdInto`].
impl<S: Simd> u8x32<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> u8x32<S> {
        self.simd.not_u8x32(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.add_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.sub_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.mul_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.and_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.or_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.xor_u8x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise right shift by `shift` bits (semantics defined by the backend
    /// `shr_u8x32`; presumably logical for an unsigned element — confirm).
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u8x32<S> {
        self.simd.shr_u8x32(self, shift)
    }
    /// Lane-wise `==`, producing a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_eq_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_lt_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_le_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_ge_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_gt_u8x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.min_u8x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.max_u8x32(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into a double-width `u8x64`.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.combine_u8x32(self, rhs.simd_into(self.simd))
    }
    /// Bit-for-bit reinterpretation as eight `u32` lanes.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x8<S> {
        self.simd.reinterpret_u32_u8x32(self)
    }
}
/// Baseline vector interface for `u8x32`.
impl<S: Simd> crate::SimdBase<u8, S> for u8x32<S> {
    /// Number of lanes.
    const N: usize = 32;
    type Mask = mask8x32<S>;
    /// The 128-bit building block; `u8x32` is two such blocks.
    type Block = u8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// # Panics
    /// Panics if `slice.len() != 32` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u8]) -> Self {
        let mut val = [0; 32];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    /// Broadcasts `val` to all lanes.
    #[inline(always)]
    fn splat(simd: S, val: u8) -> Self {
        simd.splat_u8x32(val)
    }
    /// Duplicates the 128-bit block into both halves of the vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Integer trait surface for `u8x32`; forwards to the backend like the inherent
/// methods of the same names (see `impl u8x32` above for semantics).
impl<S: Simd> crate::SimdInt<u8, S> for u8x32<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_eq_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_lt_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_le_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_ge_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_gt_u8x32(self, rhs.simd_into(self.simd))
    }
    /// Interleave with `rhs`; exact lane order is defined by the backend
    /// `zip_low_u8x32` — TODO(review) confirm which halves are taken.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.zip_low_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.zip_high_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.unzip_low_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.unzip_high_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.min_u8x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.max_u8x32(self, rhs.simd_into(self.simd))
    }
}
3258#[derive(Clone, Copy, Debug)]
3259#[repr(C, align(32))]
3260pub struct mask8x32<S: Simd> {
3261 pub val: [i8; 32],
3262 pub simd: S,
3263}
3264impl<S: Simd> SimdFrom<[i8; 32], S> for mask8x32<S> {
3265 #[inline(always)]
3266 fn simd_from(val: [i8; 32], simd: S) -> Self {
3267 Self {
3268 val: [
3269 val[0usize],
3270 val[1usize],
3271 val[2usize],
3272 val[3usize],
3273 val[4usize],
3274 val[5usize],
3275 val[6usize],
3276 val[7usize],
3277 val[8usize],
3278 val[9usize],
3279 val[10usize],
3280 val[11usize],
3281 val[12usize],
3282 val[13usize],
3283 val[14usize],
3284 val[15usize],
3285 val[16usize],
3286 val[17usize],
3287 val[18usize],
3288 val[19usize],
3289 val[20usize],
3290 val[21usize],
3291 val[22usize],
3292 val[23usize],
3293 val[24usize],
3294 val[25usize],
3295 val[26usize],
3296 val[27usize],
3297 val[28usize],
3298 val[29usize],
3299 val[30usize],
3300 val[31usize],
3301 ],
3302 simd,
3303 }
3304 }
3305}
3306impl<S: Simd> From<mask8x32<S>> for [i8; 32] {
3307 #[inline(always)]
3308 fn from(value: mask8x32<S>) -> Self {
3309 value.val
3310 }
3311}
3312impl<S: Simd> core::ops::Deref for mask8x32<S> {
3313 type Target = [i8; 32];
3314 #[inline(always)]
3315 fn deref(&self) -> &Self::Target {
3316 &self.val
3317 }
3318}
3319impl<S: Simd> core::ops::DerefMut for mask8x32<S> {
3320 #[inline(always)]
3321 fn deref_mut(&mut self) -> &mut Self::Target {
3322 &mut self.val
3323 }
3324}
3325impl<S: Simd> SimdFrom<i8, S> for mask8x32<S> {
3326 #[inline(always)]
3327 fn simd_from(value: i8, simd: S) -> Self {
3328 simd.splat_mask8x32(value)
3329 }
3330}
3331impl<S: Simd> Select<mask8x32<S>> for mask8x32<S> {
3332 #[inline(always)]
3333 fn select(self, if_true: mask8x32<S>, if_false: mask8x32<S>) -> mask8x32<S> {
3334 self.simd.select_mask8x32(self, if_true, if_false)
3335 }
3336}
3337impl<S: Simd> Bytes for mask8x32<S> {
3338 type Bytes = u8x32<S>;
3339 #[inline(always)]
3340 fn to_bytes(self) -> Self::Bytes {
3341 unsafe {
3342 u8x32 {
3343 val: core::mem::transmute(self.val),
3344 simd: self.simd,
3345 }
3346 }
3347 }
3348 #[inline(always)]
3349 fn from_bytes(value: Self::Bytes) -> Self {
3350 unsafe {
3351 Self {
3352 val: core::mem::transmute(value.val),
3353 simd: value.simd,
3354 }
3355 }
3356 }
3357}
3358impl<S: Simd> mask8x32<S> {
3359 #[inline(always)]
3360 pub fn not(self) -> mask8x32<S> {
3361 self.simd.not_mask8x32(self)
3362 }
3363 #[inline(always)]
3364 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3365 self.simd.and_mask8x32(self, rhs.simd_into(self.simd))
3366 }
3367 #[inline(always)]
3368 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3369 self.simd.or_mask8x32(self, rhs.simd_into(self.simd))
3370 }
3371 #[inline(always)]
3372 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3373 self.simd.xor_mask8x32(self, rhs.simd_into(self.simd))
3374 }
3375 #[inline(always)]
3376 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3377 self.simd.simd_eq_mask8x32(self, rhs.simd_into(self.simd))
3378 }
3379 #[inline(always)]
3380 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
3381 self.simd.combine_mask8x32(self, rhs.simd_into(self.simd))
3382 }
3383}
3384impl<S: Simd> crate::SimdBase<i8, S> for mask8x32<S> {
3385 const N: usize = 32;
3386 type Mask = mask8x32<S>;
3387 type Block = mask8x16<S>;
3388 #[inline(always)]
3389 fn as_slice(&self) -> &[i8] {
3390 &self.val
3391 }
3392 #[inline(always)]
3393 fn as_mut_slice(&mut self) -> &mut [i8] {
3394 &mut self.val
3395 }
3396 #[inline(always)]
3397 fn from_slice(simd: S, slice: &[i8]) -> Self {
3398 let mut val = [0; 32];
3399 val.copy_from_slice(slice);
3400 Self { val, simd }
3401 }
3402 #[inline(always)]
3403 fn splat(simd: S, val: i8) -> Self {
3404 simd.splat_mask8x32(val)
3405 }
3406 #[inline(always)]
3407 fn block_splat(block: Self::Block) -> Self {
3408 block.combine(block)
3409 }
3410}
3411impl<S: Simd> crate::SimdMask<i8, S> for mask8x32<S> {
3412 #[inline(always)]
3413 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3414 self.simd.simd_eq_mask8x32(self, rhs.simd_into(self.simd))
3415 }
3416}
3417#[derive(Clone, Copy, Debug)]
3418#[repr(C, align(32))]
3419pub struct i16x16<S: Simd> {
3420 pub val: [i16; 16],
3421 pub simd: S,
3422}
3423impl<S: Simd> SimdFrom<[i16; 16], S> for i16x16<S> {
3424 #[inline(always)]
3425 fn simd_from(val: [i16; 16], simd: S) -> Self {
3426 Self {
3427 val: [
3428 val[0usize],
3429 val[1usize],
3430 val[2usize],
3431 val[3usize],
3432 val[4usize],
3433 val[5usize],
3434 val[6usize],
3435 val[7usize],
3436 val[8usize],
3437 val[9usize],
3438 val[10usize],
3439 val[11usize],
3440 val[12usize],
3441 val[13usize],
3442 val[14usize],
3443 val[15usize],
3444 ],
3445 simd,
3446 }
3447 }
3448}
3449impl<S: Simd> From<i16x16<S>> for [i16; 16] {
3450 #[inline(always)]
3451 fn from(value: i16x16<S>) -> Self {
3452 value.val
3453 }
3454}
3455impl<S: Simd> core::ops::Deref for i16x16<S> {
3456 type Target = [i16; 16];
3457 #[inline(always)]
3458 fn deref(&self) -> &Self::Target {
3459 &self.val
3460 }
3461}
3462impl<S: Simd> core::ops::DerefMut for i16x16<S> {
3463 #[inline(always)]
3464 fn deref_mut(&mut self) -> &mut Self::Target {
3465 &mut self.val
3466 }
3467}
3468impl<S: Simd> SimdFrom<i16, S> for i16x16<S> {
3469 #[inline(always)]
3470 fn simd_from(value: i16, simd: S) -> Self {
3471 simd.splat_i16x16(value)
3472 }
3473}
3474impl<S: Simd> Select<i16x16<S>> for mask16x16<S> {
3475 #[inline(always)]
3476 fn select(self, if_true: i16x16<S>, if_false: i16x16<S>) -> i16x16<S> {
3477 self.simd.select_i16x16(self, if_true, if_false)
3478 }
3479}
3480impl<S: Simd> Bytes for i16x16<S> {
3481 type Bytes = u8x32<S>;
3482 #[inline(always)]
3483 fn to_bytes(self) -> Self::Bytes {
3484 unsafe {
3485 u8x32 {
3486 val: core::mem::transmute(self.val),
3487 simd: self.simd,
3488 }
3489 }
3490 }
3491 #[inline(always)]
3492 fn from_bytes(value: Self::Bytes) -> Self {
3493 unsafe {
3494 Self {
3495 val: core::mem::transmute(value.val),
3496 simd: value.simd,
3497 }
3498 }
3499 }
3500}
3501impl<S: Simd> i16x16<S> {
3502 #[inline(always)]
3503 pub fn not(self) -> i16x16<S> {
3504 self.simd.not_i16x16(self)
3505 }
3506 #[inline(always)]
3507 pub fn add(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3508 self.simd.add_i16x16(self, rhs.simd_into(self.simd))
3509 }
3510 #[inline(always)]
3511 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3512 self.simd.sub_i16x16(self, rhs.simd_into(self.simd))
3513 }
3514 #[inline(always)]
3515 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3516 self.simd.mul_i16x16(self, rhs.simd_into(self.simd))
3517 }
3518 #[inline(always)]
3519 pub fn and(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3520 self.simd.and_i16x16(self, rhs.simd_into(self.simd))
3521 }
3522 #[inline(always)]
3523 pub fn or(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3524 self.simd.or_i16x16(self, rhs.simd_into(self.simd))
3525 }
3526 #[inline(always)]
3527 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3528 self.simd.xor_i16x16(self, rhs.simd_into(self.simd))
3529 }
3530 #[inline(always)]
3531 pub fn shr(self, shift: u32) -> i16x16<S> {
3532 self.simd.shr_i16x16(self, shift)
3533 }
3534 #[inline(always)]
3535 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3536 self.simd.simd_eq_i16x16(self, rhs.simd_into(self.simd))
3537 }
3538 #[inline(always)]
3539 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3540 self.simd.simd_lt_i16x16(self, rhs.simd_into(self.simd))
3541 }
3542 #[inline(always)]
3543 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3544 self.simd.simd_le_i16x16(self, rhs.simd_into(self.simd))
3545 }
3546 #[inline(always)]
3547 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3548 self.simd.simd_ge_i16x16(self, rhs.simd_into(self.simd))
3549 }
3550 #[inline(always)]
3551 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3552 self.simd.simd_gt_i16x16(self, rhs.simd_into(self.simd))
3553 }
3554 #[inline(always)]
3555 pub fn min(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3556 self.simd.min_i16x16(self, rhs.simd_into(self.simd))
3557 }
3558 #[inline(always)]
3559 pub fn max(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3560 self.simd.max_i16x16(self, rhs.simd_into(self.simd))
3561 }
3562 #[inline(always)]
3563 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
3564 self.simd.combine_i16x16(self, rhs.simd_into(self.simd))
3565 }
3566 #[inline(always)]
3567 pub fn reinterpret_u8(self) -> u8x32<S> {
3568 self.simd.reinterpret_u8_i16x16(self)
3569 }
3570 #[inline(always)]
3571 pub fn reinterpret_u32(self) -> u32x8<S> {
3572 self.simd.reinterpret_u32_i16x16(self)
3573 }
3574}
3575impl<S: Simd> crate::SimdBase<i16, S> for i16x16<S> {
3576 const N: usize = 16;
3577 type Mask = mask16x16<S>;
3578 type Block = i16x8<S>;
3579 #[inline(always)]
3580 fn as_slice(&self) -> &[i16] {
3581 &self.val
3582 }
3583 #[inline(always)]
3584 fn as_mut_slice(&mut self) -> &mut [i16] {
3585 &mut self.val
3586 }
3587 #[inline(always)]
3588 fn from_slice(simd: S, slice: &[i16]) -> Self {
3589 let mut val = [0; 16];
3590 val.copy_from_slice(slice);
3591 Self { val, simd }
3592 }
3593 #[inline(always)]
3594 fn splat(simd: S, val: i16) -> Self {
3595 simd.splat_i16x16(val)
3596 }
3597 #[inline(always)]
3598 fn block_splat(block: Self::Block) -> Self {
3599 block.combine(block)
3600 }
3601}
3602impl<S: Simd> crate::SimdInt<i16, S> for i16x16<S> {
3603 #[inline(always)]
3604 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3605 self.simd.simd_eq_i16x16(self, rhs.simd_into(self.simd))
3606 }
3607 #[inline(always)]
3608 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3609 self.simd.simd_lt_i16x16(self, rhs.simd_into(self.simd))
3610 }
3611 #[inline(always)]
3612 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3613 self.simd.simd_le_i16x16(self, rhs.simd_into(self.simd))
3614 }
3615 #[inline(always)]
3616 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3617 self.simd.simd_ge_i16x16(self, rhs.simd_into(self.simd))
3618 }
3619 #[inline(always)]
3620 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3621 self.simd.simd_gt_i16x16(self, rhs.simd_into(self.simd))
3622 }
3623 #[inline(always)]
3624 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3625 self.simd.zip_low_i16x16(self, rhs.simd_into(self.simd))
3626 }
3627 #[inline(always)]
3628 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3629 self.simd.zip_high_i16x16(self, rhs.simd_into(self.simd))
3630 }
3631 #[inline(always)]
3632 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3633 self.simd.unzip_low_i16x16(self, rhs.simd_into(self.simd))
3634 }
3635 #[inline(always)]
3636 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3637 self.simd.unzip_high_i16x16(self, rhs.simd_into(self.simd))
3638 }
3639 #[inline(always)]
3640 fn min(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3641 self.simd.min_i16x16(self, rhs.simd_into(self.simd))
3642 }
3643 #[inline(always)]
3644 fn max(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3645 self.simd.max_i16x16(self, rhs.simd_into(self.simd))
3646 }
3647}
3648#[derive(Clone, Copy, Debug)]
3649#[repr(C, align(32))]
3650pub struct u16x16<S: Simd> {
3651 pub val: [u16; 16],
3652 pub simd: S,
3653}
3654impl<S: Simd> SimdFrom<[u16; 16], S> for u16x16<S> {
3655 #[inline(always)]
3656 fn simd_from(val: [u16; 16], simd: S) -> Self {
3657 Self {
3658 val: [
3659 val[0usize],
3660 val[1usize],
3661 val[2usize],
3662 val[3usize],
3663 val[4usize],
3664 val[5usize],
3665 val[6usize],
3666 val[7usize],
3667 val[8usize],
3668 val[9usize],
3669 val[10usize],
3670 val[11usize],
3671 val[12usize],
3672 val[13usize],
3673 val[14usize],
3674 val[15usize],
3675 ],
3676 simd,
3677 }
3678 }
3679}
3680impl<S: Simd> From<u16x16<S>> for [u16; 16] {
3681 #[inline(always)]
3682 fn from(value: u16x16<S>) -> Self {
3683 value.val
3684 }
3685}
3686impl<S: Simd> core::ops::Deref for u16x16<S> {
3687 type Target = [u16; 16];
3688 #[inline(always)]
3689 fn deref(&self) -> &Self::Target {
3690 &self.val
3691 }
3692}
3693impl<S: Simd> core::ops::DerefMut for u16x16<S> {
3694 #[inline(always)]
3695 fn deref_mut(&mut self) -> &mut Self::Target {
3696 &mut self.val
3697 }
3698}
3699impl<S: Simd> SimdFrom<u16, S> for u16x16<S> {
3700 #[inline(always)]
3701 fn simd_from(value: u16, simd: S) -> Self {
3702 simd.splat_u16x16(value)
3703 }
3704}
3705impl<S: Simd> Select<u16x16<S>> for mask16x16<S> {
3706 #[inline(always)]
3707 fn select(self, if_true: u16x16<S>, if_false: u16x16<S>) -> u16x16<S> {
3708 self.simd.select_u16x16(self, if_true, if_false)
3709 }
3710}
3711impl<S: Simd> Bytes for u16x16<S> {
3712 type Bytes = u8x32<S>;
3713 #[inline(always)]
3714 fn to_bytes(self) -> Self::Bytes {
3715 unsafe {
3716 u8x32 {
3717 val: core::mem::transmute(self.val),
3718 simd: self.simd,
3719 }
3720 }
3721 }
3722 #[inline(always)]
3723 fn from_bytes(value: Self::Bytes) -> Self {
3724 unsafe {
3725 Self {
3726 val: core::mem::transmute(value.val),
3727 simd: value.simd,
3728 }
3729 }
3730 }
3731}
3732impl<S: Simd> u16x16<S> {
3733 #[inline(always)]
3734 pub fn not(self) -> u16x16<S> {
3735 self.simd.not_u16x16(self)
3736 }
3737 #[inline(always)]
3738 pub fn add(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3739 self.simd.add_u16x16(self, rhs.simd_into(self.simd))
3740 }
3741 #[inline(always)]
3742 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3743 self.simd.sub_u16x16(self, rhs.simd_into(self.simd))
3744 }
3745 #[inline(always)]
3746 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3747 self.simd.mul_u16x16(self, rhs.simd_into(self.simd))
3748 }
3749 #[inline(always)]
3750 pub fn and(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3751 self.simd.and_u16x16(self, rhs.simd_into(self.simd))
3752 }
3753 #[inline(always)]
3754 pub fn or(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3755 self.simd.or_u16x16(self, rhs.simd_into(self.simd))
3756 }
3757 #[inline(always)]
3758 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3759 self.simd.xor_u16x16(self, rhs.simd_into(self.simd))
3760 }
3761 #[inline(always)]
3762 pub fn shr(self, shift: u32) -> u16x16<S> {
3763 self.simd.shr_u16x16(self, shift)
3764 }
3765 #[inline(always)]
3766 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3767 self.simd.simd_eq_u16x16(self, rhs.simd_into(self.simd))
3768 }
3769 #[inline(always)]
3770 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3771 self.simd.simd_lt_u16x16(self, rhs.simd_into(self.simd))
3772 }
3773 #[inline(always)]
3774 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3775 self.simd.simd_le_u16x16(self, rhs.simd_into(self.simd))
3776 }
3777 #[inline(always)]
3778 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3779 self.simd.simd_ge_u16x16(self, rhs.simd_into(self.simd))
3780 }
3781 #[inline(always)]
3782 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3783 self.simd.simd_gt_u16x16(self, rhs.simd_into(self.simd))
3784 }
3785 #[inline(always)]
3786 pub fn min(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3787 self.simd.min_u16x16(self, rhs.simd_into(self.simd))
3788 }
3789 #[inline(always)]
3790 pub fn max(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3791 self.simd.max_u16x16(self, rhs.simd_into(self.simd))
3792 }
3793 #[inline(always)]
3794 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
3795 self.simd.combine_u16x16(self, rhs.simd_into(self.simd))
3796 }
3797 #[inline(always)]
3798 pub fn reinterpret_u8(self) -> u8x32<S> {
3799 self.simd.reinterpret_u8_u16x16(self)
3800 }
3801 #[inline(always)]
3802 pub fn reinterpret_u32(self) -> u32x8<S> {
3803 self.simd.reinterpret_u32_u16x16(self)
3804 }
3805}
3806impl<S: Simd> crate::SimdBase<u16, S> for u16x16<S> {
3807 const N: usize = 16;
3808 type Mask = mask16x16<S>;
3809 type Block = u16x8<S>;
3810 #[inline(always)]
3811 fn as_slice(&self) -> &[u16] {
3812 &self.val
3813 }
3814 #[inline(always)]
3815 fn as_mut_slice(&mut self) -> &mut [u16] {
3816 &mut self.val
3817 }
3818 #[inline(always)]
3819 fn from_slice(simd: S, slice: &[u16]) -> Self {
3820 let mut val = [0; 16];
3821 val.copy_from_slice(slice);
3822 Self { val, simd }
3823 }
3824 #[inline(always)]
3825 fn splat(simd: S, val: u16) -> Self {
3826 simd.splat_u16x16(val)
3827 }
3828 #[inline(always)]
3829 fn block_splat(block: Self::Block) -> Self {
3830 block.combine(block)
3831 }
3832}
3833impl<S: Simd> crate::SimdInt<u16, S> for u16x16<S> {
3834 #[inline(always)]
3835 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3836 self.simd.simd_eq_u16x16(self, rhs.simd_into(self.simd))
3837 }
3838 #[inline(always)]
3839 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3840 self.simd.simd_lt_u16x16(self, rhs.simd_into(self.simd))
3841 }
3842 #[inline(always)]
3843 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3844 self.simd.simd_le_u16x16(self, rhs.simd_into(self.simd))
3845 }
3846 #[inline(always)]
3847 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3848 self.simd.simd_ge_u16x16(self, rhs.simd_into(self.simd))
3849 }
3850 #[inline(always)]
3851 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3852 self.simd.simd_gt_u16x16(self, rhs.simd_into(self.simd))
3853 }
3854 #[inline(always)]
3855 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3856 self.simd.zip_low_u16x16(self, rhs.simd_into(self.simd))
3857 }
3858 #[inline(always)]
3859 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3860 self.simd.zip_high_u16x16(self, rhs.simd_into(self.simd))
3861 }
3862 #[inline(always)]
3863 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3864 self.simd.unzip_low_u16x16(self, rhs.simd_into(self.simd))
3865 }
3866 #[inline(always)]
3867 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3868 self.simd.unzip_high_u16x16(self, rhs.simd_into(self.simd))
3869 }
3870 #[inline(always)]
3871 fn min(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3872 self.simd.min_u16x16(self, rhs.simd_into(self.simd))
3873 }
3874 #[inline(always)]
3875 fn max(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3876 self.simd.max_u16x16(self, rhs.simd_into(self.simd))
3877 }
3878}
3879#[derive(Clone, Copy, Debug)]
3880#[repr(C, align(32))]
3881pub struct mask16x16<S: Simd> {
3882 pub val: [i16; 16],
3883 pub simd: S,
3884}
3885impl<S: Simd> SimdFrom<[i16; 16], S> for mask16x16<S> {
3886 #[inline(always)]
3887 fn simd_from(val: [i16; 16], simd: S) -> Self {
3888 Self {
3889 val: [
3890 val[0usize],
3891 val[1usize],
3892 val[2usize],
3893 val[3usize],
3894 val[4usize],
3895 val[5usize],
3896 val[6usize],
3897 val[7usize],
3898 val[8usize],
3899 val[9usize],
3900 val[10usize],
3901 val[11usize],
3902 val[12usize],
3903 val[13usize],
3904 val[14usize],
3905 val[15usize],
3906 ],
3907 simd,
3908 }
3909 }
3910}
3911impl<S: Simd> From<mask16x16<S>> for [i16; 16] {
3912 #[inline(always)]
3913 fn from(value: mask16x16<S>) -> Self {
3914 value.val
3915 }
3916}
3917impl<S: Simd> core::ops::Deref for mask16x16<S> {
3918 type Target = [i16; 16];
3919 #[inline(always)]
3920 fn deref(&self) -> &Self::Target {
3921 &self.val
3922 }
3923}
3924impl<S: Simd> core::ops::DerefMut for mask16x16<S> {
3925 #[inline(always)]
3926 fn deref_mut(&mut self) -> &mut Self::Target {
3927 &mut self.val
3928 }
3929}
3930impl<S: Simd> SimdFrom<i16, S> for mask16x16<S> {
3931 #[inline(always)]
3932 fn simd_from(value: i16, simd: S) -> Self {
3933 simd.splat_mask16x16(value)
3934 }
3935}
3936impl<S: Simd> Select<mask16x16<S>> for mask16x16<S> {
3937 #[inline(always)]
3938 fn select(self, if_true: mask16x16<S>, if_false: mask16x16<S>) -> mask16x16<S> {
3939 self.simd.select_mask16x16(self, if_true, if_false)
3940 }
3941}
3942impl<S: Simd> Bytes for mask16x16<S> {
3943 type Bytes = u8x32<S>;
3944 #[inline(always)]
3945 fn to_bytes(self) -> Self::Bytes {
3946 unsafe {
3947 u8x32 {
3948 val: core::mem::transmute(self.val),
3949 simd: self.simd,
3950 }
3951 }
3952 }
3953 #[inline(always)]
3954 fn from_bytes(value: Self::Bytes) -> Self {
3955 unsafe {
3956 Self {
3957 val: core::mem::transmute(value.val),
3958 simd: value.simd,
3959 }
3960 }
3961 }
3962}
3963impl<S: Simd> mask16x16<S> {
3964 #[inline(always)]
3965 pub fn not(self) -> mask16x16<S> {
3966 self.simd.not_mask16x16(self)
3967 }
3968 #[inline(always)]
3969 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3970 self.simd.and_mask16x16(self, rhs.simd_into(self.simd))
3971 }
3972 #[inline(always)]
3973 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3974 self.simd.or_mask16x16(self, rhs.simd_into(self.simd))
3975 }
3976 #[inline(always)]
3977 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3978 self.simd.xor_mask16x16(self, rhs.simd_into(self.simd))
3979 }
3980 #[inline(always)]
3981 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3982 self.simd.simd_eq_mask16x16(self, rhs.simd_into(self.simd))
3983 }
3984 #[inline(always)]
3985 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
3986 self.simd.combine_mask16x16(self, rhs.simd_into(self.simd))
3987 }
3988}
3989impl<S: Simd> crate::SimdBase<i16, S> for mask16x16<S> {
3990 const N: usize = 16;
3991 type Mask = mask16x16<S>;
3992 type Block = mask16x8<S>;
3993 #[inline(always)]
3994 fn as_slice(&self) -> &[i16] {
3995 &self.val
3996 }
3997 #[inline(always)]
3998 fn as_mut_slice(&mut self) -> &mut [i16] {
3999 &mut self.val
4000 }
4001 #[inline(always)]
4002 fn from_slice(simd: S, slice: &[i16]) -> Self {
4003 let mut val = [0; 16];
4004 val.copy_from_slice(slice);
4005 Self { val, simd }
4006 }
4007 #[inline(always)]
4008 fn splat(simd: S, val: i16) -> Self {
4009 simd.splat_mask16x16(val)
4010 }
4011 #[inline(always)]
4012 fn block_splat(block: Self::Block) -> Self {
4013 block.combine(block)
4014 }
4015}
4016impl<S: Simd> crate::SimdMask<i16, S> for mask16x16<S> {
4017 #[inline(always)]
4018 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4019 self.simd.simd_eq_mask16x16(self, rhs.simd_into(self.simd))
4020 }
4021}
4022#[derive(Clone, Copy, Debug)]
4023#[repr(C, align(32))]
4024pub struct i32x8<S: Simd> {
4025 pub val: [i32; 8],
4026 pub simd: S,
4027}
4028impl<S: Simd> SimdFrom<[i32; 8], S> for i32x8<S> {
4029 #[inline(always)]
4030 fn simd_from(val: [i32; 8], simd: S) -> Self {
4031 Self {
4032 val: [
4033 val[0usize],
4034 val[1usize],
4035 val[2usize],
4036 val[3usize],
4037 val[4usize],
4038 val[5usize],
4039 val[6usize],
4040 val[7usize],
4041 ],
4042 simd,
4043 }
4044 }
4045}
4046impl<S: Simd> From<i32x8<S>> for [i32; 8] {
4047 #[inline(always)]
4048 fn from(value: i32x8<S>) -> Self {
4049 value.val
4050 }
4051}
4052impl<S: Simd> core::ops::Deref for i32x8<S> {
4053 type Target = [i32; 8];
4054 #[inline(always)]
4055 fn deref(&self) -> &Self::Target {
4056 &self.val
4057 }
4058}
4059impl<S: Simd> core::ops::DerefMut for i32x8<S> {
4060 #[inline(always)]
4061 fn deref_mut(&mut self) -> &mut Self::Target {
4062 &mut self.val
4063 }
4064}
4065impl<S: Simd> SimdFrom<i32, S> for i32x8<S> {
4066 #[inline(always)]
4067 fn simd_from(value: i32, simd: S) -> Self {
4068 simd.splat_i32x8(value)
4069 }
4070}
4071impl<S: Simd> Select<i32x8<S>> for mask32x8<S> {
4072 #[inline(always)]
4073 fn select(self, if_true: i32x8<S>, if_false: i32x8<S>) -> i32x8<S> {
4074 self.simd.select_i32x8(self, if_true, if_false)
4075 }
4076}
4077impl<S: Simd> Bytes for i32x8<S> {
4078 type Bytes = u8x32<S>;
4079 #[inline(always)]
4080 fn to_bytes(self) -> Self::Bytes {
4081 unsafe {
4082 u8x32 {
4083 val: core::mem::transmute(self.val),
4084 simd: self.simd,
4085 }
4086 }
4087 }
4088 #[inline(always)]
4089 fn from_bytes(value: Self::Bytes) -> Self {
4090 unsafe {
4091 Self {
4092 val: core::mem::transmute(value.val),
4093 simd: value.simd,
4094 }
4095 }
4096 }
4097}
4098impl<S: Simd> i32x8<S> {
4099 #[inline(always)]
4100 pub fn not(self) -> i32x8<S> {
4101 self.simd.not_i32x8(self)
4102 }
4103 #[inline(always)]
4104 pub fn add(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4105 self.simd.add_i32x8(self, rhs.simd_into(self.simd))
4106 }
4107 #[inline(always)]
4108 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4109 self.simd.sub_i32x8(self, rhs.simd_into(self.simd))
4110 }
4111 #[inline(always)]
4112 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4113 self.simd.mul_i32x8(self, rhs.simd_into(self.simd))
4114 }
4115 #[inline(always)]
4116 pub fn and(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4117 self.simd.and_i32x8(self, rhs.simd_into(self.simd))
4118 }
4119 #[inline(always)]
4120 pub fn or(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4121 self.simd.or_i32x8(self, rhs.simd_into(self.simd))
4122 }
4123 #[inline(always)]
4124 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4125 self.simd.xor_i32x8(self, rhs.simd_into(self.simd))
4126 }
4127 #[inline(always)]
4128 pub fn shr(self, shift: u32) -> i32x8<S> {
4129 self.simd.shr_i32x8(self, shift)
4130 }
4131 #[inline(always)]
4132 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4133 self.simd.simd_eq_i32x8(self, rhs.simd_into(self.simd))
4134 }
4135 #[inline(always)]
4136 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4137 self.simd.simd_lt_i32x8(self, rhs.simd_into(self.simd))
4138 }
4139 #[inline(always)]
4140 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4141 self.simd.simd_le_i32x8(self, rhs.simd_into(self.simd))
4142 }
4143 #[inline(always)]
4144 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4145 self.simd.simd_ge_i32x8(self, rhs.simd_into(self.simd))
4146 }
4147 #[inline(always)]
4148 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4149 self.simd.simd_gt_i32x8(self, rhs.simd_into(self.simd))
4150 }
4151 #[inline(always)]
4152 pub fn min(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4153 self.simd.min_i32x8(self, rhs.simd_into(self.simd))
4154 }
4155 #[inline(always)]
4156 pub fn max(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4157 self.simd.max_i32x8(self, rhs.simd_into(self.simd))
4158 }
4159 #[inline(always)]
4160 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
4161 self.simd.combine_i32x8(self, rhs.simd_into(self.simd))
4162 }
4163 #[inline(always)]
4164 pub fn reinterpret_u8(self) -> u8x32<S> {
4165 self.simd.reinterpret_u8_i32x8(self)
4166 }
4167 #[inline(always)]
4168 pub fn reinterpret_u32(self) -> u32x8<S> {
4169 self.simd.reinterpret_u32_i32x8(self)
4170 }
4171 #[inline(always)]
4172 pub fn cvt_f32(self) -> f32x8<S> {
4173 self.simd.cvt_f32_i32x8(self)
4174 }
4175}
4176impl<S: Simd> crate::SimdBase<i32, S> for i32x8<S> {
4177 const N: usize = 8;
4178 type Mask = mask32x8<S>;
4179 type Block = i32x4<S>;
4180 #[inline(always)]
4181 fn as_slice(&self) -> &[i32] {
4182 &self.val
4183 }
4184 #[inline(always)]
4185 fn as_mut_slice(&mut self) -> &mut [i32] {
4186 &mut self.val
4187 }
4188 #[inline(always)]
4189 fn from_slice(simd: S, slice: &[i32]) -> Self {
4190 let mut val = [0; 8];
4191 val.copy_from_slice(slice);
4192 Self { val, simd }
4193 }
4194 #[inline(always)]
4195 fn splat(simd: S, val: i32) -> Self {
4196 simd.splat_i32x8(val)
4197 }
4198 #[inline(always)]
4199 fn block_splat(block: Self::Block) -> Self {
4200 block.combine(block)
4201 }
4202}
4203impl<S: Simd> crate::SimdInt<i32, S> for i32x8<S> {
4204 #[inline(always)]
4205 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4206 self.simd.simd_eq_i32x8(self, rhs.simd_into(self.simd))
4207 }
4208 #[inline(always)]
4209 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4210 self.simd.simd_lt_i32x8(self, rhs.simd_into(self.simd))
4211 }
4212 #[inline(always)]
4213 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4214 self.simd.simd_le_i32x8(self, rhs.simd_into(self.simd))
4215 }
4216 #[inline(always)]
4217 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4218 self.simd.simd_ge_i32x8(self, rhs.simd_into(self.simd))
4219 }
4220 #[inline(always)]
4221 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
4222 self.simd.simd_gt_i32x8(self, rhs.simd_into(self.simd))
4223 }
4224 #[inline(always)]
4225 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4226 self.simd.zip_low_i32x8(self, rhs.simd_into(self.simd))
4227 }
4228 #[inline(always)]
4229 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4230 self.simd.zip_high_i32x8(self, rhs.simd_into(self.simd))
4231 }
4232 #[inline(always)]
4233 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4234 self.simd.unzip_low_i32x8(self, rhs.simd_into(self.simd))
4235 }
4236 #[inline(always)]
4237 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4238 self.simd.unzip_high_i32x8(self, rhs.simd_into(self.simd))
4239 }
4240 #[inline(always)]
4241 fn min(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4242 self.simd.min_i32x8(self, rhs.simd_into(self.simd))
4243 }
4244 #[inline(always)]
4245 fn max(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
4246 self.simd.max_i32x8(self, rhs.simd_into(self.simd))
4247 }
4248}
4249impl<S: Simd> SimdCvtTruncate<f32x8<S>> for i32x8<S> {
4250 fn truncate_from(x: f32x8<S>) -> Self {
4251 x.simd.cvt_i32_f32x8(x)
4252 }
4253}
4254#[derive(Clone, Copy, Debug)]
4255#[repr(C, align(32))]
4256pub struct u32x8<S: Simd> {
4257 pub val: [u32; 8],
4258 pub simd: S,
4259}
4260impl<S: Simd> SimdFrom<[u32; 8], S> for u32x8<S> {
4261 #[inline(always)]
4262 fn simd_from(val: [u32; 8], simd: S) -> Self {
4263 Self {
4264 val: [
4265 val[0usize],
4266 val[1usize],
4267 val[2usize],
4268 val[3usize],
4269 val[4usize],
4270 val[5usize],
4271 val[6usize],
4272 val[7usize],
4273 ],
4274 simd,
4275 }
4276 }
4277}
/// Extracts the lane array, discarding the SIMD token.
impl<S: Simd> From<u32x8<S>> for [u32; 8] {
    #[inline(always)]
    fn from(value: u32x8<S>) -> Self {
        value.val
    }
}
/// Derefs to the lane array for slice-like read access.
impl<S: Simd> core::ops::Deref for u32x8<S> {
    type Target = [u32; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Mutable deref to the lane array.
impl<S: Simd> core::ops::DerefMut for u32x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all eight lanes via the backend.
impl<S: Simd> SimdFrom<u32, S> for u32x8<S> {
    #[inline(always)]
    fn simd_from(value: u32, simd: S) -> Self {
        simd.splat_u32x8(value)
    }
}
/// Mask-driven lane select, delegated to the backend's `select_u32x8`.
impl<S: Simd> Select<u32x8<S>> for mask32x8<S> {
    #[inline(always)]
    fn select(self, if_true: u32x8<S>, if_false: u32x8<S>) -> u32x8<S> {
        self.simd.select_u32x8(self, if_true, if_false)
    }
}
/// Bitwise reinterpretation between `u32` lanes and raw bytes.
impl<S: Simd> Bytes for u32x8<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u32; 8]` and `[u8; 32]` have identical size (32 bytes)
        // and every bit pattern is valid for both element types.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size, and all bit patterns are valid `u32` values.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on [`u32x8`]; each call delegates to the active
/// backend through the stored `simd` token. `rhs` arguments accept
/// anything convertible with [`SimdInto`] (e.g. a scalar splat).
impl<S: Simd> u32x8<S> {
    #[inline(always)]
    pub fn not(self) -> u32x8<S> {
        self.simd.not_u32x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.add_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.sub_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.mul_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.and_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.or_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.xor_u32x8(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by the same `shift` amount.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u32x8<S> {
        self.simd.shr_u32x8(self, shift)
    }
    // Comparisons return a lane mask rather than a value vector.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.min_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.max_u32x8(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` with `rhs` into a 16-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.combine_u32x8(self, rhs.simd_into(self.simd))
    }
    /// Bit-level reinterpretation as 32 `u8` lanes.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x32<S> {
        self.simd.reinterpret_u8_u32x8(self)
    }
    /// Lane-wise numeric conversion to `f32`.
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x8<S> {
        self.simd.cvt_f32_u32x8(self)
    }
}
/// Core vector interface for [`u32x8`]: lane count, mask type, and the
/// 128-bit block type it is built from.
impl<S: Simd> crate::SimdBase<u32, S> for u32x8<S> {
    const N: usize = 8;
    type Mask = mask32x8<S>;
    type Block = u32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 8` (from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u32]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u32) -> Self {
        simd.splat_u32x8(val)
    }
    // Repeats the 4-lane block into both halves of the 8-lane vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Integer-vector trait surface for [`u32x8`]; mirrors the inherent
/// methods and adds zip/unzip lane shuffles, all backend-delegated.
impl<S: Simd> crate::SimdInt<u32, S> for u32x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.zip_low_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.zip_high_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.unzip_low_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.unzip_high_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.min_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.max_u32x8(self, rhs.simd_into(self.simd))
    }
}
4477impl<S: Simd> SimdCvtTruncate<f32x8<S>> for u32x8<S> {
4478 fn truncate_from(x: f32x8<S>) -> Self {
4479 x.simd.cvt_u32_f32x8(x)
4480 }
4481}
/// A lane mask for 8 x 32-bit vectors, stored as eight `i32` lanes
/// (32-byte aligned). Produced by the `u32x8`/`i32x8` comparisons above.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct mask32x8<S: Simd> {
    pub val: [i32; 8],
    pub simd: S,
}
4488impl<S: Simd> SimdFrom<[i32; 8], S> for mask32x8<S> {
4489 #[inline(always)]
4490 fn simd_from(val: [i32; 8], simd: S) -> Self {
4491 Self {
4492 val: [
4493 val[0usize],
4494 val[1usize],
4495 val[2usize],
4496 val[3usize],
4497 val[4usize],
4498 val[5usize],
4499 val[6usize],
4500 val[7usize],
4501 ],
4502 simd,
4503 }
4504 }
4505}
/// Extracts the mask-lane array, discarding the SIMD token.
impl<S: Simd> From<mask32x8<S>> for [i32; 8] {
    #[inline(always)]
    fn from(value: mask32x8<S>) -> Self {
        value.val
    }
}
/// Derefs to the mask-lane array for slice-like read access.
impl<S: Simd> core::ops::Deref for mask32x8<S> {
    type Target = [i32; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Mutable deref to the mask-lane array.
impl<S: Simd> core::ops::DerefMut for mask32x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar mask value into all eight lanes via the backend.
impl<S: Simd> SimdFrom<i32, S> for mask32x8<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_mask32x8(value)
    }
}
/// Mask-driven select between two masks, delegated to the backend.
impl<S: Simd> Select<mask32x8<S>> for mask32x8<S> {
    #[inline(always)]
    fn select(self, if_true: mask32x8<S>, if_false: mask32x8<S>) -> mask32x8<S> {
        self.simd.select_mask32x8(self, if_true, if_false)
    }
}
/// Bitwise reinterpretation between mask lanes and raw bytes.
impl<S: Simd> Bytes for mask32x8<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 8]` and `[u8; 32]` have identical size (32 bytes)
        // and every bit pattern is valid for both element types.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size, and all bit patterns are valid `i32` values.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Boolean lane operations on [`mask32x8`], each delegating to the
/// active backend through the stored `simd` token.
impl<S: Simd> mask32x8<S> {
    #[inline(always)]
    pub fn not(self) -> mask32x8<S> {
        self.simd.not_mask32x8(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.and_mask32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.or_mask32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.xor_mask32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_mask32x8(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` with `rhs` into a 16-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.combine_mask32x8(self, rhs.simd_into(self.simd))
    }
}
/// Core vector interface for [`mask32x8`].
impl<S: Simd> crate::SimdBase<i32, S> for mask32x8<S> {
    const N: usize = 8;
    type Mask = mask32x8<S>;
    type Block = mask32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    /// Builds a mask from a slice.
    ///
    /// Panics if `slice.len() != 8` (from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_mask32x8(val)
    }
    // Repeats the 4-lane block into both halves of the 8-lane mask.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Mask trait surface for [`mask32x8`].
impl<S: Simd> crate::SimdMask<i32, S> for mask32x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_mask32x8(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of four `f64` lanes (256 bits, 32-byte aligned).
///
/// `val` holds the lane values; `simd` is the capability token that
/// dispatches lane-wise operations to the active backend.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct f64x4<S: Simd> {
    pub val: [f64; 4],
    pub simd: S,
}
4623impl<S: Simd> SimdFrom<[f64; 4], S> for f64x4<S> {
4624 #[inline(always)]
4625 fn simd_from(val: [f64; 4], simd: S) -> Self {
4626 Self {
4627 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
4628 simd,
4629 }
4630 }
4631}
/// Extracts the lane array, discarding the SIMD token.
impl<S: Simd> From<f64x4<S>> for [f64; 4] {
    #[inline(always)]
    fn from(value: f64x4<S>) -> Self {
        value.val
    }
}
/// Derefs to the lane array for slice-like read access.
impl<S: Simd> core::ops::Deref for f64x4<S> {
    type Target = [f64; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Mutable deref to the lane array.
impl<S: Simd> core::ops::DerefMut for f64x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all four lanes via the backend.
impl<S: Simd> SimdFrom<f64, S> for f64x4<S> {
    #[inline(always)]
    fn simd_from(value: f64, simd: S) -> Self {
        simd.splat_f64x4(value)
    }
}
/// Mask-driven lane select, delegated to the backend's `select_f64x4`.
impl<S: Simd> Select<f64x4<S>> for mask64x4<S> {
    #[inline(always)]
    fn select(self, if_true: f64x4<S>, if_false: f64x4<S>) -> f64x4<S> {
        self.simd.select_f64x4(self, if_true, if_false)
    }
}
/// Bitwise reinterpretation between `f64` lanes and raw bytes.
impl<S: Simd> Bytes for f64x4<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f64; 4]` and `[u8; 32]` have identical size (32 bytes)
        // and every bit pattern is valid for both element types.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size; every 64-bit pattern is a valid `f64`
        // (possibly NaN).
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on [`f64x4`]; each call delegates to the active
/// backend through the stored `simd` token. `rhs` arguments accept
/// anything convertible with [`SimdInto`] (e.g. a scalar splat).
impl<S: Simd> f64x4<S> {
    #[inline(always)]
    pub fn abs(self) -> f64x4<S> {
        self.simd.abs_f64x4(self)
    }
    #[inline(always)]
    pub fn neg(self) -> f64x4<S> {
        self.simd.neg_f64x4(self)
    }
    #[inline(always)]
    pub fn sqrt(self) -> f64x4<S> {
        self.simd.sqrt_f64x4(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.add_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.sub_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.mul_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.div_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.copysign_f64x4(self, rhs.simd_into(self.simd))
    }
    // Comparisons return a lane mask rather than a value vector.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_lt_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_le_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_ge_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_gt_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_f64x4(self, rhs.simd_into(self.simd))
    }
    // `*_precise` variants: NaN handling semantics are backend-defined;
    // see the `Simd` trait for the exact contract.
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn floor(self) -> f64x4<S> {
        self.simd.floor_f64x4(self)
    }
    #[inline(always)]
    pub fn fract(self) -> f64x4<S> {
        self.simd.fract_f64x4(self)
    }
    #[inline(always)]
    pub fn trunc(self) -> f64x4<S> {
        self.simd.trunc_f64x4(self)
    }
    /// Concatenates `self` with `rhs` into an 8-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.combine_f64x4(self, rhs.simd_into(self.simd))
    }
    /// Bit-level reinterpretation as eight `f32` lanes.
    #[inline(always)]
    pub fn reinterpret_f32(self) -> f32x8<S> {
        self.simd.reinterpret_f32_f64x4(self)
    }
}
/// Core vector interface for [`f64x4`].
impl<S: Simd> crate::SimdBase<f64, S> for f64x4<S> {
    const N: usize = 4;
    type Mask = mask64x4<S>;
    type Block = f64x2<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[f64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f64] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 4` (from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f64]) -> Self {
        let mut val = [0.0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: f64) -> Self {
        simd.splat_f64x4(val)
    }
    // Repeats the 2-lane block into both halves of the 4-lane vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Float-vector trait surface for [`f64x4`]; mirrors the inherent
/// methods and adds zip/unzip shuffles and fused multiply ops, all
/// backend-delegated.
impl<S: Simd> crate::SimdFloat<f64, S> for f64x4<S> {
    #[inline(always)]
    fn abs(self) -> f64x4<S> {
        self.simd.abs_f64x4(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f64x4<S> {
        self.simd.sqrt_f64x4(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.copysign_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_lt_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_le_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_ge_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_gt_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.zip_low_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.zip_high_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.unzip_low_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.unzip_high_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    // madd/msub: multiply-add / multiply-subtract; fusion behavior is
    // backend-defined.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd
            .madd_f64x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd
            .msub_f64x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f64x4<S> {
        self.simd.floor_f64x4(self)
    }
    #[inline(always)]
    fn fract(self) -> f64x4<S> {
        self.simd.fract_f64x4(self)
    }
    #[inline(always)]
    fn trunc(self) -> f64x4<S> {
        self.simd.trunc_f64x4(self)
    }
}
/// A lane mask for 4 x 64-bit vectors, stored as four `i64` lanes
/// (32-byte aligned). Produced by the `f64x4` comparisons above.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct mask64x4<S: Simd> {
    pub val: [i64; 4],
    pub simd: S,
}
4895impl<S: Simd> SimdFrom<[i64; 4], S> for mask64x4<S> {
4896 #[inline(always)]
4897 fn simd_from(val: [i64; 4], simd: S) -> Self {
4898 Self {
4899 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
4900 simd,
4901 }
4902 }
4903}
/// Extracts the mask-lane array, discarding the SIMD token.
impl<S: Simd> From<mask64x4<S>> for [i64; 4] {
    #[inline(always)]
    fn from(value: mask64x4<S>) -> Self {
        value.val
    }
}
/// Derefs to the mask-lane array for slice-like read access.
impl<S: Simd> core::ops::Deref for mask64x4<S> {
    type Target = [i64; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Mutable deref to the mask-lane array.
impl<S: Simd> core::ops::DerefMut for mask64x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar mask value into all four lanes via the backend.
impl<S: Simd> SimdFrom<i64, S> for mask64x4<S> {
    #[inline(always)]
    fn simd_from(value: i64, simd: S) -> Self {
        simd.splat_mask64x4(value)
    }
}
/// Mask-driven select between two masks, delegated to the backend.
impl<S: Simd> Select<mask64x4<S>> for mask64x4<S> {
    #[inline(always)]
    fn select(self, if_true: mask64x4<S>, if_false: mask64x4<S>) -> mask64x4<S> {
        self.simd.select_mask64x4(self, if_true, if_false)
    }
}
/// Bitwise reinterpretation between mask lanes and raw bytes.
impl<S: Simd> Bytes for mask64x4<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i64; 4]` and `[u8; 32]` have identical size (32 bytes)
        // and every bit pattern is valid for both element types.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size, and all bit patterns are valid `i64` values.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Boolean lane operations on [`mask64x4`], each delegating to the
/// active backend through the stored `simd` token.
impl<S: Simd> mask64x4<S> {
    #[inline(always)]
    pub fn not(self) -> mask64x4<S> {
        self.simd.not_mask64x4(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.and_mask64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.or_mask64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.xor_mask64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_mask64x4(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` with `rhs` into an 8-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.combine_mask64x4(self, rhs.simd_into(self.simd))
    }
}
/// Core vector interface for [`mask64x4`].
impl<S: Simd> crate::SimdBase<i64, S> for mask64x4<S> {
    const N: usize = 4;
    type Mask = mask64x4<S>;
    type Block = mask64x2<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i64] {
        &mut self.val
    }
    /// Builds a mask from a slice.
    ///
    /// Panics if `slice.len() != 4` (from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i64]) -> Self {
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i64) -> Self {
        simd.splat_mask64x4(val)
    }
    // Repeats the 2-lane block into both halves of the 4-lane mask.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
/// Mask trait surface for [`mask64x4`].
impl<S: Simd> crate::SimdMask<i64, S> for mask64x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_mask64x4(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of sixteen `f32` lanes (512 bits, 64-byte aligned).
///
/// `val` holds the lane values; `simd` is the capability token that
/// dispatches lane-wise operations to the active backend.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct f32x16<S: Simd> {
    pub val: [f32; 16],
    pub simd: S,
}
5021impl<S: Simd> SimdFrom<[f32; 16], S> for f32x16<S> {
5022 #[inline(always)]
5023 fn simd_from(val: [f32; 16], simd: S) -> Self {
5024 Self {
5025 val: [
5026 val[0usize],
5027 val[1usize],
5028 val[2usize],
5029 val[3usize],
5030 val[4usize],
5031 val[5usize],
5032 val[6usize],
5033 val[7usize],
5034 val[8usize],
5035 val[9usize],
5036 val[10usize],
5037 val[11usize],
5038 val[12usize],
5039 val[13usize],
5040 val[14usize],
5041 val[15usize],
5042 ],
5043 simd,
5044 }
5045 }
5046}
/// Extracts the lane array, discarding the SIMD token.
impl<S: Simd> From<f32x16<S>> for [f32; 16] {
    #[inline(always)]
    fn from(value: f32x16<S>) -> Self {
        value.val
    }
}
/// Derefs to the lane array for slice-like read access.
impl<S: Simd> core::ops::Deref for f32x16<S> {
    type Target = [f32; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Mutable deref to the lane array.
impl<S: Simd> core::ops::DerefMut for f32x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Splats a scalar into all sixteen lanes via the backend.
impl<S: Simd> SimdFrom<f32, S> for f32x16<S> {
    #[inline(always)]
    fn simd_from(value: f32, simd: S) -> Self {
        simd.splat_f32x16(value)
    }
}
/// Mask-driven lane select, delegated to the backend's `select_f32x16`.
impl<S: Simd> Select<f32x16<S>> for mask32x16<S> {
    #[inline(always)]
    fn select(self, if_true: f32x16<S>, if_false: f32x16<S>) -> f32x16<S> {
        self.simd.select_f32x16(self, if_true, if_false)
    }
}
/// Bitwise reinterpretation between `f32` lanes and raw bytes.
impl<S: Simd> Bytes for f32x16<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f32; 16]` and `[u8; 64]` have identical size (64 bytes)
        // and every bit pattern is valid for both element types.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same size; every 32-bit pattern is a valid `f32`
        // (possibly NaN).
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on [`f32x16`]; each call delegates to the active
/// backend through the stored `simd` token. `rhs` arguments accept
/// anything convertible with [`SimdInto`] (e.g. a scalar splat).
impl<S: Simd> f32x16<S> {
    #[inline(always)]
    pub fn abs(self) -> f32x16<S> {
        self.simd.abs_f32x16(self)
    }
    #[inline(always)]
    pub fn neg(self) -> f32x16<S> {
        self.simd.neg_f32x16(self)
    }
    #[inline(always)]
    pub fn sqrt(self) -> f32x16<S> {
        self.simd.sqrt_f32x16(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.add_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.sub_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.mul_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.div_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.copysign_f32x16(self, rhs.simd_into(self.simd))
    }
    // Comparisons return a lane mask rather than a value vector.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_f32x16(self, rhs.simd_into(self.simd))
    }
    // `*_precise` variants: NaN handling semantics are backend-defined;
    // see the `Simd` trait for the exact contract.
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn floor(self) -> f32x16<S> {
        self.simd.floor_f32x16(self)
    }
    #[inline(always)]
    pub fn fract(self) -> f32x16<S> {
        self.simd.fract_f32x16(self)
    }
    #[inline(always)]
    pub fn trunc(self) -> f32x16<S> {
        self.simd.trunc_f32x16(self)
    }
    // Bit-level reinterpretations (no value conversion).
    #[inline(always)]
    pub fn reinterpret_f64(self) -> f64x8<S> {
        self.simd.reinterpret_f64_f32x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_i32(self) -> i32x16<S> {
        self.simd.reinterpret_i32_f32x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_f32x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_f32x16(self)
    }
    // Lane-wise numeric conversions.
    #[inline(always)]
    pub fn cvt_u32(self) -> u32x16<S> {
        self.simd.cvt_u32_f32x16(self)
    }
    #[inline(always)]
    pub fn cvt_i32(self) -> i32x16<S> {
        self.simd.cvt_i32_f32x16(self)
    }
}
/// Core vector interface for [`f32x16`].
impl<S: Simd> crate::SimdBase<f32, S> for f32x16<S> {
    const N: usize = 16;
    type Mask = mask32x16<S>;
    type Block = f32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[f32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 16` (from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f32]) -> Self {
        let mut val = [0.0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: f32) -> Self {
        simd.splat_f32x16(val)
    }
    // Repeats the 4-lane block four times: 4 -> 8 -> 16 lanes via two
    // `combine` steps.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Float-vector trait surface for [`f32x16`]; mirrors the inherent
/// methods and adds zip/unzip shuffles and fused multiply ops, all
/// backend-delegated.
impl<S: Simd> crate::SimdFloat<f32, S> for f32x16<S> {
    #[inline(always)]
    fn abs(self) -> f32x16<S> {
        self.simd.abs_f32x16(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f32x16<S> {
        self.simd.sqrt_f32x16(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.copysign_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.zip_low_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.zip_high_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.unzip_low_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.unzip_high_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    // madd/msub: multiply-add / multiply-subtract; fusion behavior is
    // backend-defined.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd
            .madd_f32x16(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd
            .msub_f32x16(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f32x16<S> {
        self.simd.floor_f32x16(self)
    }
    #[inline(always)]
    fn fract(self) -> f32x16<S> {
        self.simd.fract_f32x16(self)
    }
    #[inline(always)]
    fn trunc(self) -> f32x16<S> {
        self.simd.trunc_f32x16(self)
    }
}
5321impl<S: Simd> SimdCvtFloat<u32x16<S>> for f32x16<S> {
5322 fn float_from(x: u32x16<S>) -> Self {
5323 x.simd.cvt_f32_u32x16(x)
5324 }
5325}
5326impl<S: Simd> SimdCvtFloat<i32x16<S>> for f32x16<S> {
5327 fn float_from(x: i32x16<S>) -> Self {
5328 x.simd.cvt_f32_i32x16(x)
5329 }
5330}
/// A SIMD vector of sixty-four `i8` lanes (512 bits, 64-byte aligned).
///
/// `val` holds the lane values; `simd` is the capability token that
/// dispatches lane-wise operations to the active backend.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct i8x64<S: Simd> {
    pub val: [i8; 64],
    pub simd: S,
}
5337impl<S: Simd> SimdFrom<[i8; 64], S> for i8x64<S> {
5338 #[inline(always)]
5339 fn simd_from(val: [i8; 64], simd: S) -> Self {
5340 Self {
5341 val: [
5342 val[0usize],
5343 val[1usize],
5344 val[2usize],
5345 val[3usize],
5346 val[4usize],
5347 val[5usize],
5348 val[6usize],
5349 val[7usize],
5350 val[8usize],
5351 val[9usize],
5352 val[10usize],
5353 val[11usize],
5354 val[12usize],
5355 val[13usize],
5356 val[14usize],
5357 val[15usize],
5358 val[16usize],
5359 val[17usize],
5360 val[18usize],
5361 val[19usize],
5362 val[20usize],
5363 val[21usize],
5364 val[22usize],
5365 val[23usize],
5366 val[24usize],
5367 val[25usize],
5368 val[26usize],
5369 val[27usize],
5370 val[28usize],
5371 val[29usize],
5372 val[30usize],
5373 val[31usize],
5374 val[32usize],
5375 val[33usize],
5376 val[34usize],
5377 val[35usize],
5378 val[36usize],
5379 val[37usize],
5380 val[38usize],
5381 val[39usize],
5382 val[40usize],
5383 val[41usize],
5384 val[42usize],
5385 val[43usize],
5386 val[44usize],
5387 val[45usize],
5388 val[46usize],
5389 val[47usize],
5390 val[48usize],
5391 val[49usize],
5392 val[50usize],
5393 val[51usize],
5394 val[52usize],
5395 val[53usize],
5396 val[54usize],
5397 val[55usize],
5398 val[56usize],
5399 val[57usize],
5400 val[58usize],
5401 val[59usize],
5402 val[60usize],
5403 val[61usize],
5404 val[62usize],
5405 val[63usize],
5406 ],
5407 simd,
5408 }
5409 }
5410}
// Unwraps the vector into its underlying lane array, discarding the token.
impl<S: Simd> From<i8x64<S>> for [i8; 64] {
    #[inline(always)]
    fn from(value: i8x64<S>) -> Self {
        value.val
    }
}
// Lets an `i8x64` be used wherever a `&[i8; 64]` is expected (indexing,
// slice methods, iteration by reference).
impl<S: Simd> core::ops::Deref for i8x64<S> {
    type Target = [i8; 64];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for i8x64<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar-to-vector conversion: broadcasts `value` to all 64 lanes via the
// backend's splat operation.
impl<S: Simd> SimdFrom<i8, S> for i8x64<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_i8x64(value)
    }
}
// Lane-wise blend of two `i8x64` vectors driven by this mask, delegated to
// the backend's select operation.
impl<S: Simd> Select<i8x64<S>> for mask8x64<S> {
    #[inline(always)]
    fn select(self, if_true: i8x64<S>, if_false: i8x64<S>) -> i8x64<S> {
        self.simd.select_i8x64(self, if_true, if_false)
    }
}
// Bit-level reinterpretation between `i8x64` and its byte representation.
impl<S: Simd> Bytes for i8x64<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 64]` and `[u8; 64]` have identical size, and every
        // bit pattern is valid for both element types.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Element-wise operations on `i8x64`. Each method delegates to the matching
// backend primitive on the `simd` token; `rhs` accepts anything convertible
// into an `i8x64` (another vector or a scalar to splat).
impl<S: Simd> i8x64<S> {
    /// Bitwise NOT of every lane.
    #[inline(always)]
    pub fn not(self) -> i8x64<S> {
        self.simd.not_i8x64(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.add_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.sub_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.mul_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.and_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.or_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.xor_i8x64(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits.
    // NOTE(review): for a signed element type this is presumably an
    // arithmetic shift — confirm against the backend's `shr_i8x64`.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i8x64<S> {
        self.simd.shr_i8x64(self, shift)
    }
    /// Lane-wise comparisons returning a `mask8x64`.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.min_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.max_i8x64(self, rhs.simd_into(self.simd))
    }
    /// Bit-cast of the lanes to other element types (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_i8x64(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_i8x64(self)
    }
}
// Core vector interface: lane count, mask/block associated types, and
// slice/splat constructors.
impl<S: Simd> crate::SimdBase<i8, S> for i8x64<S> {
    const N: usize = 64;
    type Mask = mask8x64<S>;
    type Block = i8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    // Panics if `slice.len() != 64` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 64];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_i8x64(val)
    }
    // Repeats the 16-lane block four times by doubling twice.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
// Trait-level integer operations; these mirror the inherent methods so the
// type can be used through the generic `SimdInt` interface.
impl<S: Simd> crate::SimdInt<i8, S> for i8x64<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_i8x64(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes with `rhs`.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.zip_low_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.zip_high_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.unzip_low_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.unzip_high_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.min_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.max_i8x64(self, rhs.simd_into(self.simd))
    }
}
/// A 64-lane vector of `u8` values, tagged with the SIMD capability token `S`.
///
/// `align(64)` gives the array 512-bit alignment for full-width register
/// loads and stores.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct u8x64<S: Simd> {
    // Lane values, in memory order.
    pub val: [u8; 64],
    // Capability token used to dispatch to the backend.
    pub simd: S,
}
5613impl<S: Simd> SimdFrom<[u8; 64], S> for u8x64<S> {
5614 #[inline(always)]
5615 fn simd_from(val: [u8; 64], simd: S) -> Self {
5616 Self {
5617 val: [
5618 val[0usize],
5619 val[1usize],
5620 val[2usize],
5621 val[3usize],
5622 val[4usize],
5623 val[5usize],
5624 val[6usize],
5625 val[7usize],
5626 val[8usize],
5627 val[9usize],
5628 val[10usize],
5629 val[11usize],
5630 val[12usize],
5631 val[13usize],
5632 val[14usize],
5633 val[15usize],
5634 val[16usize],
5635 val[17usize],
5636 val[18usize],
5637 val[19usize],
5638 val[20usize],
5639 val[21usize],
5640 val[22usize],
5641 val[23usize],
5642 val[24usize],
5643 val[25usize],
5644 val[26usize],
5645 val[27usize],
5646 val[28usize],
5647 val[29usize],
5648 val[30usize],
5649 val[31usize],
5650 val[32usize],
5651 val[33usize],
5652 val[34usize],
5653 val[35usize],
5654 val[36usize],
5655 val[37usize],
5656 val[38usize],
5657 val[39usize],
5658 val[40usize],
5659 val[41usize],
5660 val[42usize],
5661 val[43usize],
5662 val[44usize],
5663 val[45usize],
5664 val[46usize],
5665 val[47usize],
5666 val[48usize],
5667 val[49usize],
5668 val[50usize],
5669 val[51usize],
5670 val[52usize],
5671 val[53usize],
5672 val[54usize],
5673 val[55usize],
5674 val[56usize],
5675 val[57usize],
5676 val[58usize],
5677 val[59usize],
5678 val[60usize],
5679 val[61usize],
5680 val[62usize],
5681 val[63usize],
5682 ],
5683 simd,
5684 }
5685 }
5686}
// Unwraps the vector into its underlying lane array, discarding the token.
impl<S: Simd> From<u8x64<S>> for [u8; 64] {
    #[inline(always)]
    fn from(value: u8x64<S>) -> Self {
        value.val
    }
}
// Lets a `u8x64` be used wherever a `&[u8; 64]` is expected.
impl<S: Simd> core::ops::Deref for u8x64<S> {
    type Target = [u8; 64];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for u8x64<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar-to-vector conversion: broadcasts `value` to all 64 lanes.
impl<S: Simd> SimdFrom<u8, S> for u8x64<S> {
    #[inline(always)]
    fn simd_from(value: u8, simd: S) -> Self {
        simd.splat_u8x64(value)
    }
}
// Lane-wise blend of two `u8x64` vectors driven by this mask.
impl<S: Simd> Select<u8x64<S>> for mask8x64<S> {
    #[inline(always)]
    fn select(self, if_true: u8x64<S>, if_false: u8x64<S>) -> u8x64<S> {
        self.simd.select_u8x64(self, if_true, if_false)
    }
}
// `u8x64` is already a byte vector, so both directions are identity
// conversions; the transmutes are `[u8; 64]` -> `[u8; 64]` no-ops kept for
// uniformity with the generated impls of the other element types.
impl<S: Simd> Bytes for u8x64<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: transmute between identical types is trivially valid.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: transmute between identical types is trivially valid.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Element-wise operations on `u8x64`, delegating to the backend via the
// `simd` token; `rhs` accepts anything convertible into a `u8x64`.
impl<S: Simd> u8x64<S> {
    /// Bitwise NOT of every lane.
    #[inline(always)]
    pub fn not(self) -> u8x64<S> {
        self.simd.not_u8x64(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.add_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.sub_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.mul_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.and_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.or_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.xor_u8x64(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits (logical shift for unsigned).
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u8x64<S> {
        self.simd.shr_u8x64(self, shift)
    }
    /// Lane-wise comparisons returning a `mask8x64`.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.min_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.max_u8x64(self, rhs.simd_into(self.simd))
    }
    /// Bit-cast of the lanes to 16 `u32` lanes (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_u8x64(self)
    }
}
// Core vector interface for `u8x64`.
impl<S: Simd> crate::SimdBase<u8, S> for u8x64<S> {
    const N: usize = 64;
    type Mask = mask8x64<S>;
    type Block = u8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.val
    }
    // Panics if `slice.len() != 64` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u8]) -> Self {
        let mut val = [0; 64];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u8) -> Self {
        simd.splat_u8x64(val)
    }
    // Repeats the 16-lane block four times by doubling twice.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
// Trait-level integer operations mirroring the inherent methods, so `u8x64`
// is usable through the generic `SimdInt` interface.
impl<S: Simd> crate::SimdInt<u8, S> for u8x64<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_u8x64(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes with `rhs`.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.zip_low_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.zip_high_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.unzip_low_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.unzip_high_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.min_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.max_u8x64(self, rhs.simd_into(self.simd))
    }
}
/// A 64-lane boolean mask stored as one `i8` per lane.
///
/// NOTE(review): lanes presumably use the all-ones (-1) / all-zeros
/// convention common to SIMD masks — confirm against the backend's
/// comparison ops before relying on exact lane values.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask8x64<S: Simd> {
    // Per-lane mask values.
    pub val: [i8; 64],
    // Capability token used to dispatch to the backend.
    pub simd: S,
}
5885impl<S: Simd> SimdFrom<[i8; 64], S> for mask8x64<S> {
5886 #[inline(always)]
5887 fn simd_from(val: [i8; 64], simd: S) -> Self {
5888 Self {
5889 val: [
5890 val[0usize],
5891 val[1usize],
5892 val[2usize],
5893 val[3usize],
5894 val[4usize],
5895 val[5usize],
5896 val[6usize],
5897 val[7usize],
5898 val[8usize],
5899 val[9usize],
5900 val[10usize],
5901 val[11usize],
5902 val[12usize],
5903 val[13usize],
5904 val[14usize],
5905 val[15usize],
5906 val[16usize],
5907 val[17usize],
5908 val[18usize],
5909 val[19usize],
5910 val[20usize],
5911 val[21usize],
5912 val[22usize],
5913 val[23usize],
5914 val[24usize],
5915 val[25usize],
5916 val[26usize],
5917 val[27usize],
5918 val[28usize],
5919 val[29usize],
5920 val[30usize],
5921 val[31usize],
5922 val[32usize],
5923 val[33usize],
5924 val[34usize],
5925 val[35usize],
5926 val[36usize],
5927 val[37usize],
5928 val[38usize],
5929 val[39usize],
5930 val[40usize],
5931 val[41usize],
5932 val[42usize],
5933 val[43usize],
5934 val[44usize],
5935 val[45usize],
5936 val[46usize],
5937 val[47usize],
5938 val[48usize],
5939 val[49usize],
5940 val[50usize],
5941 val[51usize],
5942 val[52usize],
5943 val[53usize],
5944 val[54usize],
5945 val[55usize],
5946 val[56usize],
5947 val[57usize],
5948 val[58usize],
5949 val[59usize],
5950 val[60usize],
5951 val[61usize],
5952 val[62usize],
5953 val[63usize],
5954 ],
5955 simd,
5956 }
5957 }
5958}
// Unwraps the mask into its underlying lane array, discarding the token.
impl<S: Simd> From<mask8x64<S>> for [i8; 64] {
    #[inline(always)]
    fn from(value: mask8x64<S>) -> Self {
        value.val
    }
}
// Lets a `mask8x64` be used wherever a `&[i8; 64]` is expected.
impl<S: Simd> core::ops::Deref for mask8x64<S> {
    type Target = [i8; 64];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for mask8x64<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Broadcasts a single mask value to all 64 lanes.
impl<S: Simd> SimdFrom<i8, S> for mask8x64<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_mask8x64(value)
    }
}
// Lane-wise blend of two masks driven by this mask.
impl<S: Simd> Select<mask8x64<S>> for mask8x64<S> {
    #[inline(always)]
    fn select(self, if_true: mask8x64<S>, if_false: mask8x64<S>) -> mask8x64<S> {
        self.simd.select_mask8x64(self, if_true, if_false)
    }
}
// Bit-level reinterpretation between `mask8x64` and its byte representation.
impl<S: Simd> Bytes for mask8x64<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 64]` and `[u8; 64]` have identical size, and every
        // bit pattern is valid for both element types.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise boolean algebra on masks, delegating to the backend.
impl<S: Simd> mask8x64<S> {
    /// Lane-wise logical NOT.
    #[inline(always)]
    pub fn not(self) -> mask8x64<S> {
        self.simd.not_mask8x64(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.and_mask8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.or_mask8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.xor_mask8x64(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality of two masks, itself returned as a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_mask8x64(self, rhs.simd_into(self.simd))
    }
}
// Core vector interface for `mask8x64` (element type is the raw `i8` lane).
impl<S: Simd> crate::SimdBase<i8, S> for mask8x64<S> {
    const N: usize = 64;
    type Mask = mask8x64<S>;
    type Block = mask8x16<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    // Panics if `slice.len() != 64` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 64];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_mask8x64(val)
    }
    // Repeats the 16-lane block four times by doubling twice.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
// Generic mask interface; mirrors the inherent `simd_eq`.
impl<S: Simd> crate::SimdMask<i8, S> for mask8x64<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_mask8x64(self, rhs.simd_into(self.simd))
    }
}
/// A 32-lane vector of `i16` values, tagged with the SIMD capability token `S`.
///
/// `align(64)` gives the array 512-bit alignment for full-width register
/// loads and stores.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct i16x32<S: Simd> {
    // Lane values, in memory order.
    pub val: [i16; 32],
    // Capability token used to dispatch to the backend.
    pub simd: S,
}
6073impl<S: Simd> SimdFrom<[i16; 32], S> for i16x32<S> {
6074 #[inline(always)]
6075 fn simd_from(val: [i16; 32], simd: S) -> Self {
6076 Self {
6077 val: [
6078 val[0usize],
6079 val[1usize],
6080 val[2usize],
6081 val[3usize],
6082 val[4usize],
6083 val[5usize],
6084 val[6usize],
6085 val[7usize],
6086 val[8usize],
6087 val[9usize],
6088 val[10usize],
6089 val[11usize],
6090 val[12usize],
6091 val[13usize],
6092 val[14usize],
6093 val[15usize],
6094 val[16usize],
6095 val[17usize],
6096 val[18usize],
6097 val[19usize],
6098 val[20usize],
6099 val[21usize],
6100 val[22usize],
6101 val[23usize],
6102 val[24usize],
6103 val[25usize],
6104 val[26usize],
6105 val[27usize],
6106 val[28usize],
6107 val[29usize],
6108 val[30usize],
6109 val[31usize],
6110 ],
6111 simd,
6112 }
6113 }
6114}
// Unwraps the vector into its underlying lane array, discarding the token.
impl<S: Simd> From<i16x32<S>> for [i16; 32] {
    #[inline(always)]
    fn from(value: i16x32<S>) -> Self {
        value.val
    }
}
// Lets an `i16x32` be used wherever a `&[i16; 32]` is expected.
impl<S: Simd> core::ops::Deref for i16x32<S> {
    type Target = [i16; 32];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for i16x32<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar-to-vector conversion: broadcasts `value` to all 32 lanes.
impl<S: Simd> SimdFrom<i16, S> for i16x32<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_i16x32(value)
    }
}
// Lane-wise blend of two `i16x32` vectors driven by this mask.
impl<S: Simd> Select<i16x32<S>> for mask16x32<S> {
    #[inline(always)]
    fn select(self, if_true: i16x32<S>, if_false: i16x32<S>) -> i16x32<S> {
        self.simd.select_i16x32(self, if_true, if_false)
    }
}
// Bit-level reinterpretation between `i16x32` and its 64-byte representation.
impl<S: Simd> Bytes for i16x32<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 32]` and `[u8; 64]` are both 64 bytes, and every
        // bit pattern is valid for both element types.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Element-wise operations on `i16x32`, delegating to the backend via the
// `simd` token; `rhs` accepts anything convertible into an `i16x32`.
impl<S: Simd> i16x32<S> {
    /// Bitwise NOT of every lane.
    #[inline(always)]
    pub fn not(self) -> i16x32<S> {
        self.simd.not_i16x32(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.add_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.sub_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.mul_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.and_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.or_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.xor_i16x32(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits.
    // NOTE(review): presumably an arithmetic shift for this signed element
    // type — confirm against the backend's `shr_i16x32`.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i16x32<S> {
        self.simd.shr_i16x32(self, shift)
    }
    /// Lane-wise comparisons returning a `mask16x32`.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_eq_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_lt_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_le_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_ge_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_gt_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.min_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.max_i16x32(self, rhs.simd_into(self.simd))
    }
    /// Bit-cast of the lanes to other element types (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_i16x32(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_i16x32(self)
    }
}
// Core vector interface for `i16x32`.
impl<S: Simd> crate::SimdBase<i16, S> for i16x32<S> {
    const N: usize = 32;
    type Mask = mask16x32<S>;
    type Block = i16x8<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    // Panics if `slice.len() != 32` (`copy_from_slice` requires equal lengths).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        let mut val = [0; 32];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_i16x32(val)
    }
    // Repeats the 8-lane block four times by doubling twice.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
// Trait-level integer operations mirroring the inherent methods, so `i16x32`
// is usable through the generic `SimdInt` interface.
impl<S: Simd> crate::SimdInt<i16, S> for i16x32<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_eq_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_lt_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_le_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_ge_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_gt_i16x32(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave of lanes with `rhs`.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.zip_low_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.zip_high_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.unzip_low_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.unzip_high_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.min_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.max_i16x32(self, rhs.simd_into(self.simd))
    }
}
/// A 32-lane vector of `u16` values, tagged with the SIMD capability token `S`.
///
/// `align(64)` gives the array 512-bit alignment for full-width register
/// loads and stores.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct u16x32<S: Simd> {
    // Lane values, in memory order.
    pub val: [u16; 32],
    // Capability token used to dispatch to the backend.
    pub simd: S,
}
6317impl<S: Simd> SimdFrom<[u16; 32], S> for u16x32<S> {
6318 #[inline(always)]
6319 fn simd_from(val: [u16; 32], simd: S) -> Self {
6320 Self {
6321 val: [
6322 val[0usize],
6323 val[1usize],
6324 val[2usize],
6325 val[3usize],
6326 val[4usize],
6327 val[5usize],
6328 val[6usize],
6329 val[7usize],
6330 val[8usize],
6331 val[9usize],
6332 val[10usize],
6333 val[11usize],
6334 val[12usize],
6335 val[13usize],
6336 val[14usize],
6337 val[15usize],
6338 val[16usize],
6339 val[17usize],
6340 val[18usize],
6341 val[19usize],
6342 val[20usize],
6343 val[21usize],
6344 val[22usize],
6345 val[23usize],
6346 val[24usize],
6347 val[25usize],
6348 val[26usize],
6349 val[27usize],
6350 val[28usize],
6351 val[29usize],
6352 val[30usize],
6353 val[31usize],
6354 ],
6355 simd,
6356 }
6357 }
6358}
// Unwraps the vector into its underlying lane array, discarding the token.
impl<S: Simd> From<u16x32<S>> for [u16; 32] {
    #[inline(always)]
    fn from(value: u16x32<S>) -> Self {
        value.val
    }
}
// Lets a `u16x32` be used wherever a `&[u16; 32]` is expected.
impl<S: Simd> core::ops::Deref for u16x32<S> {
    type Target = [u16; 32];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for u16x32<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar-to-vector conversion: broadcasts `value` to all 32 lanes.
impl<S: Simd> SimdFrom<u16, S> for u16x32<S> {
    #[inline(always)]
    fn simd_from(value: u16, simd: S) -> Self {
        simd.splat_u16x32(value)
    }
}
// Lane-wise blend of two `u16x32` vectors driven by this mask.
impl<S: Simd> Select<u16x32<S>> for mask16x32<S> {
    #[inline(always)]
    fn select(self, if_true: u16x32<S>, if_false: u16x32<S>) -> u16x32<S> {
        self.simd.select_u16x32(self, if_true, if_false)
    }
}
// Bit-level reinterpretation between `u16x32` and its 64-byte representation.
impl<S: Simd> Bytes for u16x32<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u16; 32]` and `[u8; 64]` are both 64 bytes, and every
        // bit pattern is valid for both element types.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
6411impl<S: Simd> u16x32<S> {
6412 #[inline(always)]
6413 pub fn not(self) -> u16x32<S> {
6414 self.simd.not_u16x32(self)
6415 }
6416 #[inline(always)]
6417 pub fn add(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6418 self.simd.add_u16x32(self, rhs.simd_into(self.simd))
6419 }
6420 #[inline(always)]
6421 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6422 self.simd.sub_u16x32(self, rhs.simd_into(self.simd))
6423 }
6424 #[inline(always)]
6425 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6426 self.simd.mul_u16x32(self, rhs.simd_into(self.simd))
6427 }
6428 #[inline(always)]
6429 pub fn and(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6430 self.simd.and_u16x32(self, rhs.simd_into(self.simd))
6431 }
6432 #[inline(always)]
6433 pub fn or(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6434 self.simd.or_u16x32(self, rhs.simd_into(self.simd))
6435 }
6436 #[inline(always)]
6437 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6438 self.simd.xor_u16x32(self, rhs.simd_into(self.simd))
6439 }
6440 #[inline(always)]
6441 pub fn shr(self, shift: u32) -> u16x32<S> {
6442 self.simd.shr_u16x32(self, shift)
6443 }
6444 #[inline(always)]
6445 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6446 self.simd.simd_eq_u16x32(self, rhs.simd_into(self.simd))
6447 }
6448 #[inline(always)]
6449 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6450 self.simd.simd_lt_u16x32(self, rhs.simd_into(self.simd))
6451 }
6452 #[inline(always)]
6453 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6454 self.simd.simd_le_u16x32(self, rhs.simd_into(self.simd))
6455 }
6456 #[inline(always)]
6457 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6458 self.simd.simd_ge_u16x32(self, rhs.simd_into(self.simd))
6459 }
6460 #[inline(always)]
6461 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6462 self.simd.simd_gt_u16x32(self, rhs.simd_into(self.simd))
6463 }
6464 #[inline(always)]
6465 pub fn min(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6466 self.simd.min_u16x32(self, rhs.simd_into(self.simd))
6467 }
6468 #[inline(always)]
6469 pub fn max(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6470 self.simd.max_u16x32(self, rhs.simd_into(self.simd))
6471 }
    /// Reinterprets the underlying 64 bytes as a `u8x64` (bit-cast, no
    /// per-lane conversion); delegated to the backend.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_u16x32(self)
    }
    /// Reinterprets the underlying 64 bytes as a `u32x16` (bit-cast, no
    /// per-lane conversion); delegated to the backend.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_u16x32(self)
    }
6480}
/// Core SIMD plumbing for `u16x32`: lane count, associated mask/block types,
/// slice access, and splat constructors.
impl<S: Simd> crate::SimdBase<u16, S> for u16x32<S> {
    const N: usize = 32;
    type Mask = mask16x32<S>;
    type Block = u16x8<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u16] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 32` (propagated from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u16]) -> Self {
        let mut val = [0; 32];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u16) -> Self {
        simd.splat_u16x32(val)
    }
    /// Fills all 32 lanes from an 8-lane block by doubling it twice
    /// via `combine` (8 -> 16 -> 32 lanes).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Integer-vector trait surface for `u16x32`. Every method is a thin
/// delegation to the corresponding `Simd` backend intrinsic, mirroring the
/// inherent methods of the same names.
impl<S: Simd> crate::SimdInt<u16, S> for u16x32<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_eq_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_lt_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_le_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_ge_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_gt_u16x32(self, rhs.simd_into(self.simd))
    }
    // Interleave/deinterleave primitives; exact lane placement is defined by
    // the backend implementation.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
        self.simd.zip_low_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
        self.simd.zip_high_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
        self.simd.unzip_low_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
        self.simd.unzip_high_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
        self.simd.min_u16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
        self.simd.max_u16x32(self, rhs.simd_into(self.simd))
    }
}
/// A 32-lane mask with one `i16` per lane, 64-byte aligned (512-bit width).
/// Lane values are presumably all-ones for "true" and zero for "false" —
/// TODO(review): confirm against the backend's mask convention.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask16x32<S: Simd> {
    // Raw per-lane mask storage.
    pub val: [i16; 32],
    // Backend capability token used to dispatch SIMD operations.
    pub simd: S,
}
6561impl<S: Simd> SimdFrom<[i16; 32], S> for mask16x32<S> {
6562 #[inline(always)]
6563 fn simd_from(val: [i16; 32], simd: S) -> Self {
6564 Self {
6565 val: [
6566 val[0usize],
6567 val[1usize],
6568 val[2usize],
6569 val[3usize],
6570 val[4usize],
6571 val[5usize],
6572 val[6usize],
6573 val[7usize],
6574 val[8usize],
6575 val[9usize],
6576 val[10usize],
6577 val[11usize],
6578 val[12usize],
6579 val[13usize],
6580 val[14usize],
6581 val[15usize],
6582 val[16usize],
6583 val[17usize],
6584 val[18usize],
6585 val[19usize],
6586 val[20usize],
6587 val[21usize],
6588 val[22usize],
6589 val[23usize],
6590 val[24usize],
6591 val[25usize],
6592 val[26usize],
6593 val[27usize],
6594 val[28usize],
6595 val[29usize],
6596 val[30usize],
6597 val[31usize],
6598 ],
6599 simd,
6600 }
6601 }
6602}
/// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<mask16x32<S>> for [i16; 32] {
    #[inline(always)]
    fn from(value: mask16x32<S>) -> Self {
        value.val
    }
}
/// Lets a mask be used directly as a `[i16; 32]` (read access).
impl<S: Simd> core::ops::Deref for mask16x32<S> {
    type Target = [i16; 32];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Lets a mask be used directly as a `[i16; 32]` (mutable access).
impl<S: Simd> core::ops::DerefMut for mask16x32<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Broadcasts a single `i16` into all 32 mask lanes via the backend splat.
impl<S: Simd> SimdFrom<i16, S> for mask16x32<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_mask16x32(value)
    }
}
/// Per-lane select between two masks, delegated to the backend.
impl<S: Simd> Select<mask16x32<S>> for mask16x32<S> {
    #[inline(always)]
    fn select(self, if_true: mask16x32<S>, if_false: mask16x32<S>) -> mask16x32<S> {
        self.simd.select_mask16x32(self, if_true, if_false)
    }
}
/// Byte-level reinterpretation of the mask's 64-byte storage.
impl<S: Simd> Bytes for mask16x32<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 32]` and `[u8; 64]` are both 64-byte plain-old-data
        // arrays, so transmuting between them is a size-preserving bit copy.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Inherent logical operations on `mask16x32`; each is a thin delegation to
/// the corresponding backend intrinsic.
impl<S: Simd> mask16x32<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> mask16x32<S> {
        self.simd.not_mask16x32(self)
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.and_mask16x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.or_mask16x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.xor_mask16x32(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality comparison, producing a new mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_eq_mask16x32(self, rhs.simd_into(self.simd))
    }
}
/// Core SIMD plumbing for `mask16x32`: lane count, associated types, slice
/// access, and splat constructors.
impl<S: Simd> crate::SimdBase<i16, S> for mask16x32<S> {
    const N: usize = 32;
    type Mask = mask16x32<S>;
    type Block = mask16x8<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    /// Builds a mask from a slice.
    ///
    /// Panics if `slice.len() != 32` (propagated from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        let mut val = [0; 32];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_mask16x32(val)
    }
    /// Fills all 32 lanes from an 8-lane block by doubling it twice
    /// via `combine` (8 -> 16 -> 32 lanes).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Mask trait surface for `mask16x32`; delegates to the backend.
impl<S: Simd> crate::SimdMask<i16, S> for mask16x32<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_eq_mask16x32(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of 16 `i32` lanes, 64-byte aligned (512-bit width).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct i32x16<S: Simd> {
    // Raw lane storage.
    pub val: [i32; 16],
    // Backend capability token used to dispatch SIMD operations.
    pub simd: S,
}
6717impl<S: Simd> SimdFrom<[i32; 16], S> for i32x16<S> {
6718 #[inline(always)]
6719 fn simd_from(val: [i32; 16], simd: S) -> Self {
6720 Self {
6721 val: [
6722 val[0usize],
6723 val[1usize],
6724 val[2usize],
6725 val[3usize],
6726 val[4usize],
6727 val[5usize],
6728 val[6usize],
6729 val[7usize],
6730 val[8usize],
6731 val[9usize],
6732 val[10usize],
6733 val[11usize],
6734 val[12usize],
6735 val[13usize],
6736 val[14usize],
6737 val[15usize],
6738 ],
6739 simd,
6740 }
6741 }
6742}
/// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<i32x16<S>> for [i32; 16] {
    #[inline(always)]
    fn from(value: i32x16<S>) -> Self {
        value.val
    }
}
/// Lets a vector be used directly as a `[i32; 16]` (read access).
impl<S: Simd> core::ops::Deref for i32x16<S> {
    type Target = [i32; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Lets a vector be used directly as a `[i32; 16]` (mutable access).
impl<S: Simd> core::ops::DerefMut for i32x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Broadcasts a single `i32` into all 16 lanes via the backend splat.
impl<S: Simd> SimdFrom<i32, S> for i32x16<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_i32x16(value)
    }
}
/// Per-lane select on `i32x16` driven by a `mask32x16`, delegated to the backend.
impl<S: Simd> Select<i32x16<S>> for mask32x16<S> {
    #[inline(always)]
    fn select(self, if_true: i32x16<S>, if_false: i32x16<S>) -> i32x16<S> {
        self.simd.select_i32x16(self, if_true, if_false)
    }
}
/// Byte-level reinterpretation of the vector's 64-byte storage.
impl<S: Simd> Bytes for i32x16<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 16]` and `[u8; 64]` are both 64-byte plain-old-data
        // arrays, so transmuting between them is a size-preserving bit copy.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Inherent lane-wise operations on `i32x16`; every method is a thin
/// delegation to the corresponding `Simd` backend intrinsic.
impl<S: Simd> i32x16<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> i32x16<S> {
        self.simd.not_i32x16(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.add_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.sub_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.mul_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.and_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.or_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.xor_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits. Arithmetic vs. logical
    /// behavior is defined by the backend's `shr_i32x16`.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i32x16<S> {
        self.simd.shr_i32x16(self, shift)
    }
    /// Lane-wise equality comparison, producing a per-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<` comparison.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=` comparison.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=` comparison.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>` comparison.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.min_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.max_i32x16(self, rhs.simd_into(self.simd))
    }
    /// Reinterprets the underlying 64 bytes as `u8x64` (bit-cast).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_i32x16(self)
    }
    /// Reinterprets the underlying 64 bytes as `u32x16` (bit-cast).
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_i32x16(self)
    }
    /// Converts each `i32` lane to `f32` (numeric conversion, not a bit-cast).
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x16<S> {
        self.simd.cvt_f32_i32x16(self)
    }
}
/// Core SIMD plumbing for `i32x16`: lane count, associated types, slice
/// access, and splat constructors.
impl<S: Simd> crate::SimdBase<i32, S> for i32x16<S> {
    const N: usize = 16;
    type Mask = mask32x16<S>;
    type Block = i32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 16` (propagated from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_i32x16(val)
    }
    /// Fills all 16 lanes from a 4-lane block by doubling it twice
    /// via `combine` (4 -> 8 -> 16 lanes).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Integer-vector trait surface for `i32x16`. Every method is a thin
/// delegation to the corresponding `Simd` backend intrinsic, mirroring the
/// inherent methods of the same names.
impl<S: Simd> crate::SimdInt<i32, S> for i32x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_i32x16(self, rhs.simd_into(self.simd))
    }
    // Interleave/deinterleave primitives; exact lane placement is defined by
    // the backend implementation.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.zip_low_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.zip_high_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.unzip_low_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.unzip_high_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.min_i32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.max_i32x16(self, rhs.simd_into(self.simd))
    }
}
6943impl<S: Simd> SimdCvtTruncate<f32x16<S>> for i32x16<S> {
6944 fn truncate_from(x: f32x16<S>) -> Self {
6945 x.simd.cvt_i32_f32x16(x)
6946 }
6947}
/// A SIMD vector of 16 `u32` lanes, 64-byte aligned (512-bit width).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct u32x16<S: Simd> {
    // Raw lane storage.
    pub val: [u32; 16],
    // Backend capability token used to dispatch SIMD operations.
    pub simd: S,
}
6954impl<S: Simd> SimdFrom<[u32; 16], S> for u32x16<S> {
6955 #[inline(always)]
6956 fn simd_from(val: [u32; 16], simd: S) -> Self {
6957 Self {
6958 val: [
6959 val[0usize],
6960 val[1usize],
6961 val[2usize],
6962 val[3usize],
6963 val[4usize],
6964 val[5usize],
6965 val[6usize],
6966 val[7usize],
6967 val[8usize],
6968 val[9usize],
6969 val[10usize],
6970 val[11usize],
6971 val[12usize],
6972 val[13usize],
6973 val[14usize],
6974 val[15usize],
6975 ],
6976 simd,
6977 }
6978 }
6979}
/// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<u32x16<S>> for [u32; 16] {
    #[inline(always)]
    fn from(value: u32x16<S>) -> Self {
        value.val
    }
}
/// Lets a vector be used directly as a `[u32; 16]` (read access).
impl<S: Simd> core::ops::Deref for u32x16<S> {
    type Target = [u32; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Lets a vector be used directly as a `[u32; 16]` (mutable access).
impl<S: Simd> core::ops::DerefMut for u32x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Broadcasts a single `u32` into all 16 lanes via the backend splat.
impl<S: Simd> SimdFrom<u32, S> for u32x16<S> {
    #[inline(always)]
    fn simd_from(value: u32, simd: S) -> Self {
        simd.splat_u32x16(value)
    }
}
/// Per-lane select on `u32x16` driven by a `mask32x16`, delegated to the backend.
impl<S: Simd> Select<u32x16<S>> for mask32x16<S> {
    #[inline(always)]
    fn select(self, if_true: u32x16<S>, if_false: u32x16<S>) -> u32x16<S> {
        self.simd.select_u32x16(self, if_true, if_false)
    }
}
/// Byte-level reinterpretation of the vector's 64-byte storage.
impl<S: Simd> Bytes for u32x16<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u32; 16]` and `[u8; 64]` are both 64-byte plain-old-data
        // arrays, so transmuting between them is a size-preserving bit copy.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Inherent lane-wise operations on `u32x16`; every method is a thin
/// delegation to the corresponding `Simd` backend intrinsic.
impl<S: Simd> u32x16<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> u32x16<S> {
        self.simd.not_u32x16(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.add_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.sub_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.mul_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.and_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.or_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.xor_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits, delegated to the backend.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u32x16<S> {
        self.simd.shr_u32x16(self, shift)
    }
    /// Lane-wise equality comparison, producing a per-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<` comparison.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=` comparison.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=` comparison.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>` comparison.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.min_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.max_u32x16(self, rhs.simd_into(self.simd))
    }
    /// Reinterprets the underlying 64 bytes as `u8x64` (bit-cast).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_u32x16(self)
    }
    /// Converts each `u32` lane to `f32` (numeric conversion, not a bit-cast).
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x16<S> {
        self.simd.cvt_f32_u32x16(self)
    }
}
/// Core SIMD plumbing for `u32x16`: lane count, associated types, slice
/// access, and splat constructors.
impl<S: Simd> crate::SimdBase<u32, S> for u32x16<S> {
    const N: usize = 16;
    type Mask = mask32x16<S>;
    type Block = u32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[u32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 16` (propagated from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u32]) -> Self {
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u32) -> Self {
        simd.splat_u32x16(val)
    }
    /// Fills all 16 lanes from a 4-lane block by doubling it twice
    /// via `combine` (4 -> 8 -> 16 lanes).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Integer-vector trait surface for `u32x16`. Every method is a thin
/// delegation to the corresponding `Simd` backend intrinsic, mirroring the
/// inherent methods of the same names.
impl<S: Simd> crate::SimdInt<u32, S> for u32x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_u32x16(self, rhs.simd_into(self.simd))
    }
    // Interleave/deinterleave primitives; exact lane placement is defined by
    // the backend implementation.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.zip_low_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.zip_high_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.unzip_low_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.unzip_high_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.min_u32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.max_u32x16(self, rhs.simd_into(self.simd))
    }
}
7176impl<S: Simd> SimdCvtTruncate<f32x16<S>> for u32x16<S> {
7177 fn truncate_from(x: f32x16<S>) -> Self {
7178 x.simd.cvt_u32_f32x16(x)
7179 }
7180}
/// A 16-lane mask with one `i32` per lane, 64-byte aligned (512-bit width).
/// Lane values are presumably all-ones for "true" and zero for "false" —
/// TODO(review): confirm against the backend's mask convention.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask32x16<S: Simd> {
    // Raw per-lane mask storage.
    pub val: [i32; 16],
    // Backend capability token used to dispatch SIMD operations.
    pub simd: S,
}
7187impl<S: Simd> SimdFrom<[i32; 16], S> for mask32x16<S> {
7188 #[inline(always)]
7189 fn simd_from(val: [i32; 16], simd: S) -> Self {
7190 Self {
7191 val: [
7192 val[0usize],
7193 val[1usize],
7194 val[2usize],
7195 val[3usize],
7196 val[4usize],
7197 val[5usize],
7198 val[6usize],
7199 val[7usize],
7200 val[8usize],
7201 val[9usize],
7202 val[10usize],
7203 val[11usize],
7204 val[12usize],
7205 val[13usize],
7206 val[14usize],
7207 val[15usize],
7208 ],
7209 simd,
7210 }
7211 }
7212}
/// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<mask32x16<S>> for [i32; 16] {
    #[inline(always)]
    fn from(value: mask32x16<S>) -> Self {
        value.val
    }
}
/// Lets a mask be used directly as a `[i32; 16]` (read access).
impl<S: Simd> core::ops::Deref for mask32x16<S> {
    type Target = [i32; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Lets a mask be used directly as a `[i32; 16]` (mutable access).
impl<S: Simd> core::ops::DerefMut for mask32x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Broadcasts a single `i32` into all 16 mask lanes via the backend splat.
impl<S: Simd> SimdFrom<i32, S> for mask32x16<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_mask32x16(value)
    }
}
/// Per-lane select between two masks, delegated to the backend.
impl<S: Simd> Select<mask32x16<S>> for mask32x16<S> {
    #[inline(always)]
    fn select(self, if_true: mask32x16<S>, if_false: mask32x16<S>) -> mask32x16<S> {
        self.simd.select_mask32x16(self, if_true, if_false)
    }
}
/// Byte-level reinterpretation of the mask's 64-byte storage.
impl<S: Simd> Bytes for mask32x16<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 16]` and `[u8; 64]` are both 64-byte plain-old-data
        // arrays, so transmuting between them is a size-preserving bit copy.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Inherent logical operations on `mask32x16`; each is a thin delegation to
/// the corresponding backend intrinsic.
impl<S: Simd> mask32x16<S> {
    /// Lane-wise bitwise NOT.
    #[inline(always)]
    pub fn not(self) -> mask32x16<S> {
        self.simd.not_mask32x16(self)
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.and_mask32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.or_mask32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.xor_mask32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality comparison, producing a new mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_mask32x16(self, rhs.simd_into(self.simd))
    }
}
/// Core SIMD plumbing for `mask32x16`: lane count, associated types, slice
/// access, and splat constructors.
impl<S: Simd> crate::SimdBase<i32, S> for mask32x16<S> {
    const N: usize = 16;
    type Mask = mask32x16<S>;
    type Block = mask32x4<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    /// Builds a mask from a slice.
    ///
    /// Panics if `slice.len() != 16` (propagated from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_mask32x16(val)
    }
    /// Fills all 16 lanes from a 4-lane block by doubling it twice
    /// via `combine` (4 -> 8 -> 16 lanes).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Mask trait surface for `mask32x16`; delegates to the backend.
impl<S: Simd> crate::SimdMask<i32, S> for mask32x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_mask32x16(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of 8 `f64` lanes, 64-byte aligned (512-bit width).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct f64x8<S: Simd> {
    // Raw lane storage.
    pub val: [f64; 8],
    // Backend capability token used to dispatch SIMD operations.
    pub simd: S,
}
7327impl<S: Simd> SimdFrom<[f64; 8], S> for f64x8<S> {
7328 #[inline(always)]
7329 fn simd_from(val: [f64; 8], simd: S) -> Self {
7330 Self {
7331 val: [
7332 val[0usize],
7333 val[1usize],
7334 val[2usize],
7335 val[3usize],
7336 val[4usize],
7337 val[5usize],
7338 val[6usize],
7339 val[7usize],
7340 ],
7341 simd,
7342 }
7343 }
7344}
/// Extracts the raw lane array, discarding the backend token.
impl<S: Simd> From<f64x8<S>> for [f64; 8] {
    #[inline(always)]
    fn from(value: f64x8<S>) -> Self {
        value.val
    }
}
/// Lets a vector be used directly as a `[f64; 8]` (read access).
impl<S: Simd> core::ops::Deref for f64x8<S> {
    type Target = [f64; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
/// Lets a vector be used directly as a `[f64; 8]` (mutable access).
impl<S: Simd> core::ops::DerefMut for f64x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
/// Broadcasts a single `f64` into all 8 lanes via the backend splat.
impl<S: Simd> SimdFrom<f64, S> for f64x8<S> {
    #[inline(always)]
    fn simd_from(value: f64, simd: S) -> Self {
        simd.splat_f64x8(value)
    }
}
/// Per-lane select on `f64x8` driven by a `mask64x8`, delegated to the backend.
impl<S: Simd> Select<f64x8<S>> for mask64x8<S> {
    #[inline(always)]
    fn select(self, if_true: f64x8<S>, if_false: f64x8<S>) -> f64x8<S> {
        self.simd.select_f64x8(self, if_true, if_false)
    }
}
/// Byte-level reinterpretation of the vector's 64-byte storage.
impl<S: Simd> Bytes for f64x8<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f64; 8]` and `[u8; 64]` are both 64-byte plain-old-data
        // arrays, so transmuting between them is a size-preserving bit copy.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        // Any 64-byte pattern is a valid `[f64; 8]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Inherent lane-wise operations on `f64x8`; every method is a thin
/// delegation to the corresponding `Simd` backend intrinsic.
impl<S: Simd> f64x8<S> {
    /// Lane-wise absolute value.
    #[inline(always)]
    pub fn abs(self) -> f64x8<S> {
        self.simd.abs_f64x8(self)
    }
    /// Lane-wise negation.
    #[inline(always)]
    pub fn neg(self) -> f64x8<S> {
        self.simd.neg_f64x8(self)
    }
    /// Lane-wise square root.
    #[inline(always)]
    pub fn sqrt(self) -> f64x8<S> {
        self.simd.sqrt_f64x8(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.add_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.sub_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.mul_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise division.
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.div_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise copysign (magnitude of `self`, sign taken from `rhs`,
    /// per the backend's `copysign_f64x8`).
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.copysign_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality comparison, producing a per-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_eq_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<` comparison.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_lt_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=` comparison.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_le_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=` comparison.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_ge_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>` comparison.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_gt_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum; NaN handling follows the backend's `max_f64x8`.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.max_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum via `max_precise_f64x8` — presumably stricter
    /// NaN/signed-zero semantics than `max`; see the backend's contract.
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.max_precise_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum; NaN handling follows the backend's `min_f64x8`.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.min_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum via `min_precise_f64x8` — presumably stricter
    /// NaN/signed-zero semantics than `min`; see the backend's contract.
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.min_precise_f64x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise floor (round toward negative infinity).
    #[inline(always)]
    pub fn floor(self) -> f64x8<S> {
        self.simd.floor_f64x8(self)
    }
    /// Lane-wise fractional part, as defined by the backend's `fract_f64x8`.
    #[inline(always)]
    pub fn fract(self) -> f64x8<S> {
        self.simd.fract_f64x8(self)
    }
    /// Lane-wise truncation (round toward zero).
    #[inline(always)]
    pub fn trunc(self) -> f64x8<S> {
        self.simd.trunc_f64x8(self)
    }
    /// Reinterprets the underlying 64 bytes as `f32x16` (bit-cast, no
    /// per-lane numeric conversion).
    #[inline(always)]
    pub fn reinterpret_f32(self) -> f32x16<S> {
        self.simd.reinterpret_f32_f64x8(self)
    }
}
/// Core SIMD plumbing for `f64x8`: lane count, associated types, slice
/// access, and splat constructors.
impl<S: Simd> crate::SimdBase<f64, S> for f64x8<S> {
    const N: usize = 8;
    type Mask = mask64x8<S>;
    type Block = f64x2<S>;
    #[inline(always)]
    fn as_slice(&self) -> &[f64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f64] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 8` (propagated from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f64]) -> Self {
        let mut val = [0.0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: f64) -> Self {
        simd.splat_f64x8(val)
    }
    /// Fills all 8 lanes from a 2-lane block by doubling it twice
    /// via `combine` (2 -> 4 -> 8 lanes).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Float-vector trait surface for `f64x8`. Every method is a thin delegation
/// to the corresponding `Simd` backend intrinsic, mirroring the inherent
/// methods of the same names.
impl<S: Simd> crate::SimdFloat<f64, S> for f64x8<S> {
    #[inline(always)]
    fn abs(self) -> f64x8<S> {
        self.simd.abs_f64x8(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f64x8<S> {
        self.simd.sqrt_f64x8(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.copysign_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_eq_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_lt_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_le_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_ge_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.simd_gt_f64x8(self, rhs.simd_into(self.simd))
    }
    // Interleave/deinterleave primitives; exact lane placement is defined by
    // the backend implementation.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.zip_low_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.zip_high_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.unzip_low_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.unzip_high_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.max_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.max_precise_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.min_f64x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.min_precise_f64x8(self, rhs.simd_into(self.simd))
    }
    // Multiply-add / multiply-subtract; operand roles and whether the
    // operation is fused are defined by the backend's madd/msub intrinsics.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd
            .madd_f64x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd
            .msub_f64x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f64x8<S> {
        self.simd.floor_f64x8(self)
    }
    #[inline(always)]
    fn fract(self) -> f64x8<S> {
        self.simd.fract_f64x8(self)
    }
    #[inline(always)]
    fn trunc(self) -> f64x8<S> {
        self.simd.trunc_f64x8(self)
    }
}
/// An 8-lane mask of 64-bit integer lanes, as produced by the `f64x8`
/// comparison operations (`simd_eq`, `simd_lt`, ...).
/// NOTE(review): lanes are presumably all-ones (-1) for true and 0 for
/// false — confirm against the backend comparison implementations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask64x8<S: Simd> {
    // Per-lane mask values.
    pub val: [i64; 8],
    // Backend handle used to dispatch the SIMD operations.
    pub simd: S,
}
7605impl<S: Simd> SimdFrom<[i64; 8], S> for mask64x8<S> {
7606 #[inline(always)]
7607 fn simd_from(val: [i64; 8], simd: S) -> Self {
7608 Self {
7609 val: [
7610 val[0usize],
7611 val[1usize],
7612 val[2usize],
7613 val[3usize],
7614 val[4usize],
7615 val[5usize],
7616 val[6usize],
7617 val[7usize],
7618 ],
7619 simd,
7620 }
7621 }
7622}
7623impl<S: Simd> From<mask64x8<S>> for [i64; 8] {
7624 #[inline(always)]
7625 fn from(value: mask64x8<S>) -> Self {
7626 value.val
7627 }
7628}
7629impl<S: Simd> core::ops::Deref for mask64x8<S> {
7630 type Target = [i64; 8];
7631 #[inline(always)]
7632 fn deref(&self) -> &Self::Target {
7633 &self.val
7634 }
7635}
7636impl<S: Simd> core::ops::DerefMut for mask64x8<S> {
7637 #[inline(always)]
7638 fn deref_mut(&mut self) -> &mut Self::Target {
7639 &mut self.val
7640 }
7641}
impl<S: Simd> SimdFrom<i64, S> for mask64x8<S> {
    /// Broadcasts the scalar `value` into all 8 lanes via the backend.
    #[inline(always)]
    fn simd_from(value: i64, simd: S) -> Self {
        simd.splat_mask64x8(value)
    }
}
impl<S: Simd> Select<mask64x8<S>> for mask64x8<S> {
    /// Lane-wise select: takes lanes from `if_true` where `self` is set,
    /// otherwise from `if_false` (per the backend's mask convention).
    #[inline(always)]
    fn select(self, if_true: mask64x8<S>, if_false: mask64x8<S>) -> mask64x8<S> {
        self.simd.select_mask64x8(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for mask64x8<S> {
    // Byte-level view of the mask: 8 x i64 reinterpreted as 64 bytes.
    type Bytes = u8x64<S>;
    /// Reinterprets the lanes as raw bytes, preserving the backend handle.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i64; 8]` and `[u8; 64]` have the same size (64 bytes)
        // and plain-integer data is valid for any byte pattern, so the
        // transmute only reinterprets the representation.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets raw bytes back into 8 x i64 mask lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute in the opposite direction; every byte
        // pattern is a valid `[i64; 8]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
7675impl<S: Simd> mask64x8<S> {
7676 #[inline(always)]
7677 pub fn not(self) -> mask64x8<S> {
7678 self.simd.not_mask64x8(self)
7679 }
7680 #[inline(always)]
7681 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7682 self.simd.and_mask64x8(self, rhs.simd_into(self.simd))
7683 }
7684 #[inline(always)]
7685 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7686 self.simd.or_mask64x8(self, rhs.simd_into(self.simd))
7687 }
7688 #[inline(always)]
7689 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7690 self.simd.xor_mask64x8(self, rhs.simd_into(self.simd))
7691 }
7692 #[inline(always)]
7693 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7694 self.simd.simd_eq_mask64x8(self, rhs.simd_into(self.simd))
7695 }
7696}
7697impl<S: Simd> crate::SimdBase<i64, S> for mask64x8<S> {
7698 const N: usize = 8;
7699 type Mask = mask64x8<S>;
7700 type Block = mask64x2<S>;
7701 #[inline(always)]
7702 fn as_slice(&self) -> &[i64] {
7703 &self.val
7704 }
7705 #[inline(always)]
7706 fn as_mut_slice(&mut self) -> &mut [i64] {
7707 &mut self.val
7708 }
7709 #[inline(always)]
7710 fn from_slice(simd: S, slice: &[i64]) -> Self {
7711 let mut val = [0; 8];
7712 val.copy_from_slice(slice);
7713 Self { val, simd }
7714 }
7715 #[inline(always)]
7716 fn splat(simd: S, val: i64) -> Self {
7717 simd.splat_mask64x8(val)
7718 }
7719 #[inline(always)]
7720 fn block_splat(block: Self::Block) -> Self {
7721 let block2 = block.combine(block);
7722 block2.combine(block2)
7723 }
7724}
7725impl<S: Simd> crate::SimdMask<i64, S> for mask64x8<S> {
7726 #[inline(always)]
7727 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7728 self.simd.simd_eq_mask64x8(self, rhs.simd_into(self.simd))
7729 }
7730}