1use crate::{Bytes, Select, Simd, SimdCvtFloat, SimdCvtTruncate, SimdFrom, SimdInto};
/// A SIMD vector of four `f32` lanes, carrying the `Simd` backend witness
/// used to dispatch operations on it.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct f32x4<S: Simd> {
    // Lane values in memory order.
    pub val: [f32; 4],
    // Capability witness; all lanewise ops delegate to this.
    pub simd: S,
}
13impl<S: Simd> SimdFrom<[f32; 4], S> for f32x4<S> {
14 #[inline(always)]
15 fn simd_from(val: [f32; 4], simd: S) -> Self {
16 Self {
17 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
18 simd,
19 }
20 }
21}
22impl<S: Simd> From<f32x4<S>> for [f32; 4] {
23 #[inline(always)]
24 fn from(value: f32x4<S>) -> Self {
25 value.val
26 }
27}
impl<S: Simd> core::ops::Deref for f32x4<S> {
    type Target = [f32; 4];
    /// Borrows the lanes as a plain array, enabling slice/array methods.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for f32x4<S> {
    /// Mutably borrows the lanes as a plain array.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<f32, S> for f32x4<S> {
    /// Broadcasts a scalar into all four lanes via the backend splat.
    #[inline(always)]
    fn simd_from(value: f32, simd: S) -> Self {
        simd.splat_f32x4(value)
    }
}
impl<S: Simd> Select<f32x4<S>> for mask32x4<S> {
    /// Lanewise select: picks each lane from `if_true` or `if_false`
    /// according to this mask, via the backend.
    #[inline(always)]
    fn select(self, if_true: f32x4<S>, if_false: f32x4<S>) -> f32x4<S> {
        self.simd.select_f32x4(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for f32x4<S> {
    type Bytes = u8x16<S>;
    /// Bitcasts the four `f32` lanes to 16 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f32; 4]` and `[u8; 16]` have the same size (16 bytes)
        // and alignment requirements compatible with a by-value transmute;
        // every bit pattern is a valid `u8`.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Bitcasts 16 raw bytes back to four `f32` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute; every bit pattern is a valid `f32`
        // (possibly NaN, which is still a valid value).
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lanewise operations on four `f32` lanes; every method delegates to the
/// `Simd` backend witness carried in `self.simd`. Binary operations accept
/// any `rhs` convertible to `f32x4` (e.g. a scalar or array) via `SimdInto`.
impl<S: Simd> f32x4<S> {
    #[inline(always)]
    pub fn abs(self) -> f32x4<S> {
        self.simd.abs_f32x4(self)
    }
    #[inline(always)]
    pub fn neg(self) -> f32x4<S> {
        self.simd.neg_f32x4(self)
    }
    #[inline(always)]
    pub fn sqrt(self) -> f32x4<S> {
        self.simd.sqrt_f32x4(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.add_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.sub_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.mul_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.div_f32x4(self, rhs.simd_into(self.simd))
    }
    // Copies the sign bit of each `rhs` lane onto the magnitude of `self`.
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.copysign_f32x4(self, rhs.simd_into(self.simd))
    }
    // Lanewise comparisons; each returns a mask with one lane per input lane.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_f32x4(self, rhs.simd_into(self.simd))
    }
    // NOTE(review): the `_precise` min/max variants presumably differ from the
    // plain ones in NaN/signed-zero handling — confirm against the `Simd`
    // trait's documentation for these ops.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn floor(self) -> f32x4<S> {
        self.simd.floor_f32x4(self)
    }
    #[inline(always)]
    pub fn fract(self) -> f32x4<S> {
        self.simd.fract_f32x4(self)
    }
    #[inline(always)]
    pub fn trunc(self) -> f32x4<S> {
        self.simd.trunc_f32x4(self)
    }
    // Concatenates `self` (low half) with `rhs` (high half) into a f32x8.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
        self.simd.combine_f32x4(self, rhs.simd_into(self.simd))
    }
    // `reinterpret_*` are bit-level reinterpretations (no value conversion).
    #[inline(always)]
    pub fn reinterpret_f64(self) -> f64x2<S> {
        self.simd.reinterpret_f64_f32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_i32(self) -> i32x4<S> {
        self.simd.reinterpret_i32_f32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_f32x4(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_f32x4(self)
    }
    // `cvt_*` are value conversions. NOTE(review): rounding mode and
    // out-of-range/NaN behavior are backend-defined — confirm before relying
    // on a specific result.
    #[inline(always)]
    pub fn cvt_u32(self) -> u32x4<S> {
        self.simd.cvt_u32_f32x4(self)
    }
    #[inline(always)]
    pub fn cvt_i32(self) -> i32x4<S> {
        self.simd.cvt_i32_f32x4(self)
    }
}
/// Generic vector plumbing for `f32x4`: lane count, mask/block types, and
/// slice/splat constructors.
impl<S: Simd> crate::SimdBase<f32, S> for f32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = f32x4<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[f32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f32] {
        &mut self.val
    }
    /// Loads lanes from a slice.
    ///
    /// Panics if `slice.len() != 4` (inherited from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f32]) -> Self {
        let mut val = [0.0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: f32) -> Self {
        simd.splat_f32x4(val)
    }
    // The block type is the vector itself, so block-splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
/// Float-specific trait surface for `f32x4`; each method forwards to the
/// backend witness, mirroring the inherent methods of the same names.
impl<S: Simd> crate::SimdFloat<f32, S> for f32x4<S> {
    #[inline(always)]
    fn abs(self) -> f32x4<S> {
        self.simd.abs_f32x4(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f32x4<S> {
        self.simd.sqrt_f32x4(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.copysign_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_f32x4(self, rhs.simd_into(self.simd))
    }
    // Interleave / deinterleave of lane pairs between `self` and `rhs`.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.zip_low_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.zip_high_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.unzip_low_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.unzip_high_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.max_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_f32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd.min_precise_f32x4(self, rhs.simd_into(self.simd))
    }
    // NOTE(review): whether madd/msub are fused (single rounding) or a
    // separate multiply + add is backend-defined — confirm if exact rounding
    // matters to callers.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd
            .madd_f32x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x4<S> {
        self.simd
            .msub_f32x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f32x4<S> {
        self.simd.floor_f32x4(self)
    }
    #[inline(always)]
    fn fract(self) -> f32x4<S> {
        self.simd.fract_f32x4(self)
    }
    #[inline(always)]
    fn trunc(self) -> f32x4<S> {
        self.simd.trunc_f32x4(self)
    }
}
303impl<S: Simd> SimdCvtFloat<u32x4<S>> for f32x4<S> {
304 fn float_from(x: u32x4<S>) -> Self {
305 x.simd.cvt_f32_u32x4(x)
306 }
307}
308impl<S: Simd> SimdCvtFloat<i32x4<S>> for f32x4<S> {
309 fn float_from(x: i32x4<S>) -> Self {
310 x.simd.cvt_f32_i32x4(x)
311 }
312}
/// A SIMD vector of sixteen `i8` lanes, carrying its `Simd` backend witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct i8x16<S: Simd> {
    // Lane values in memory order.
    pub val: [i8; 16],
    // Capability witness; all lanewise ops delegate to this.
    pub simd: S,
}
319impl<S: Simd> SimdFrom<[i8; 16], S> for i8x16<S> {
320 #[inline(always)]
321 fn simd_from(val: [i8; 16], simd: S) -> Self {
322 Self {
323 val: [
324 val[0usize],
325 val[1usize],
326 val[2usize],
327 val[3usize],
328 val[4usize],
329 val[5usize],
330 val[6usize],
331 val[7usize],
332 val[8usize],
333 val[9usize],
334 val[10usize],
335 val[11usize],
336 val[12usize],
337 val[13usize],
338 val[14usize],
339 val[15usize],
340 ],
341 simd,
342 }
343 }
344}
345impl<S: Simd> From<i8x16<S>> for [i8; 16] {
346 #[inline(always)]
347 fn from(value: i8x16<S>) -> Self {
348 value.val
349 }
350}
impl<S: Simd> core::ops::Deref for i8x16<S> {
    type Target = [i8; 16];
    /// Borrows the lanes as a plain array.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i8x16<S> {
    /// Mutably borrows the lanes as a plain array.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<i8, S> for i8x16<S> {
    /// Broadcasts a scalar into all sixteen lanes via the backend splat.
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_i8x16(value)
    }
}
impl<S: Simd> Select<i8x16<S>> for mask8x16<S> {
    /// Lanewise select: picks each lane from `if_true` or `if_false`
    /// according to this mask, via the backend.
    #[inline(always)]
    fn select(self, if_true: i8x16<S>, if_false: i8x16<S>) -> i8x16<S> {
        self.simd.select_i8x16(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for i8x16<S> {
    type Bytes = u8x16<S>;
    /// Bitcasts the sixteen `i8` lanes to 16 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 16]` and `[u8; 16]` have identical size and layout;
        // every bit pattern is a valid `u8`.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Bitcasts 16 raw bytes back to sixteen `i8` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute; every bit pattern is a valid `i8`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lanewise operations on sixteen `i8` lanes; every method delegates to the
/// `Simd` backend witness in `self.simd`. Binary operations accept any `rhs`
/// convertible to `i8x16` via `SimdInto`.
impl<S: Simd> i8x16<S> {
    #[inline(always)]
    pub fn not(self) -> i8x16<S> {
        self.simd.not_i8x16(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.add_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.sub_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.mul_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.and_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.or_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.xor_i8x16(self, rhs.simd_into(self.simd))
    }
    // Shifts: `shr`/`shl` shift all lanes by a uniform amount; `shrv` shifts
    // each lane by the corresponding lane of `rhs`. NOTE(review): behavior
    // for shift amounts >= 8 is backend-defined — confirm.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i8x16<S> {
        self.simd.shr_i8x16(self, shift)
    }
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.shrv_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> i8x16<S> {
        self.simd.shl_i8x16(self, shift)
    }
    // Lanewise comparisons, each returning a 16-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.min_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.max_i8x16(self, rhs.simd_into(self.simd))
    }
    // Concatenates `self` (low half) with `rhs` (high half) into an i8x32.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
        self.simd.combine_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn neg(self) -> i8x16<S> {
        self.simd.neg_i8x16(self)
    }
    // Bit-level reinterpretations (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_i8x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_i8x16(self)
    }
}
/// Generic vector plumbing for `i8x16`: lane count, mask/block types, and
/// slice/splat constructors.
impl<S: Simd> crate::SimdBase<i8, S> for i8x16<S> {
    const N: usize = 16;
    type Mask = mask8x16<S>;
    type Block = i8x16<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    /// Loads lanes from a slice.
    ///
    /// Panics if `slice.len() != 16` (inherited from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_i8x16(val)
    }
    // The block type is the vector itself, so block-splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
/// Integer-specific trait surface for `i8x16`; each method forwards to the
/// backend witness, mirroring the inherent methods of the same names.
impl<S: Simd> crate::SimdInt<i8, S> for i8x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_i8x16(self, rhs.simd_into(self.simd))
    }
    // Interleave / deinterleave of lane pairs between `self` and `rhs`.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.zip_low_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.zip_high_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.unzip_low_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.unzip_high_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.min_i8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i8x16<S> {
        self.simd.max_i8x16(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of sixteen `u8` lanes, carrying its `Simd` backend witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct u8x16<S: Simd> {
    // Lane values in memory order.
    pub val: [u8; 16],
    // Capability witness; all lanewise ops delegate to this.
    pub simd: S,
}
566impl<S: Simd> SimdFrom<[u8; 16], S> for u8x16<S> {
567 #[inline(always)]
568 fn simd_from(val: [u8; 16], simd: S) -> Self {
569 Self {
570 val: [
571 val[0usize],
572 val[1usize],
573 val[2usize],
574 val[3usize],
575 val[4usize],
576 val[5usize],
577 val[6usize],
578 val[7usize],
579 val[8usize],
580 val[9usize],
581 val[10usize],
582 val[11usize],
583 val[12usize],
584 val[13usize],
585 val[14usize],
586 val[15usize],
587 ],
588 simd,
589 }
590 }
591}
592impl<S: Simd> From<u8x16<S>> for [u8; 16] {
593 #[inline(always)]
594 fn from(value: u8x16<S>) -> Self {
595 value.val
596 }
597}
impl<S: Simd> core::ops::Deref for u8x16<S> {
    type Target = [u8; 16];
    /// Borrows the lanes as a plain array.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for u8x16<S> {
    /// Mutably borrows the lanes as a plain array.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<u8, S> for u8x16<S> {
    /// Broadcasts a scalar into all sixteen lanes via the backend splat.
    #[inline(always)]
    fn simd_from(value: u8, simd: S) -> Self {
        simd.splat_u8x16(value)
    }
}
impl<S: Simd> Select<u8x16<S>> for mask8x16<S> {
    /// Lanewise select: picks each lane from `if_true` or `if_false`
    /// according to this mask, via the backend.
    #[inline(always)]
    fn select(self, if_true: u8x16<S>, if_false: u8x16<S>) -> u8x16<S> {
        self.simd.select_u8x16(self, if_true, if_false)
    }
}
623impl<S: Simd> Bytes for u8x16<S> {
624 type Bytes = u8x16<S>;
625 #[inline(always)]
626 fn to_bytes(self) -> Self::Bytes {
627 unsafe {
628 u8x16 {
629 val: core::mem::transmute(self.val),
630 simd: self.simd,
631 }
632 }
633 }
634 #[inline(always)]
635 fn from_bytes(value: Self::Bytes) -> Self {
636 unsafe {
637 Self {
638 val: core::mem::transmute(value.val),
639 simd: value.simd,
640 }
641 }
642 }
643}
/// Lanewise operations on sixteen `u8` lanes; every method delegates to the
/// `Simd` backend witness in `self.simd`. Binary operations accept any `rhs`
/// convertible to `u8x16` via `SimdInto`.
impl<S: Simd> u8x16<S> {
    #[inline(always)]
    pub fn not(self) -> u8x16<S> {
        self.simd.not_u8x16(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.add_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.sub_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.mul_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.and_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.or_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.xor_u8x16(self, rhs.simd_into(self.simd))
    }
    // Shifts: `shr`/`shl` shift all lanes by a uniform amount; `shrv` shifts
    // each lane by the corresponding lane of `rhs`. NOTE(review): behavior
    // for shift amounts >= 8 is backend-defined — confirm.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u8x16<S> {
        self.simd.shr_u8x16(self, shift)
    }
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.shrv_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> u8x16<S> {
        self.simd.shl_u8x16(self, shift)
    }
    // Lanewise comparisons, each returning a 16-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.min_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.max_u8x16(self, rhs.simd_into(self.simd))
    }
    // Concatenates `self` (low half) with `rhs` (high half) into a u8x32.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
        self.simd.combine_u8x16(self, rhs.simd_into(self.simd))
    }
    // Bit-level reinterpretation (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_u8x16(self)
    }
}
/// Generic vector plumbing for `u8x16`: lane count, mask/block types, and
/// slice/splat constructors.
impl<S: Simd> crate::SimdBase<u8, S> for u8x16<S> {
    const N: usize = 16;
    type Mask = mask8x16<S>;
    type Block = u8x16<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[u8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.val
    }
    /// Loads lanes from a slice.
    ///
    /// Panics if `slice.len() != 16` (inherited from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u8]) -> Self {
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u8) -> Self {
        simd.splat_u8x16(val)
    }
    // The block type is the vector itself, so block-splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
/// Integer-specific trait surface for `u8x16`; each method forwards to the
/// backend witness, mirroring the inherent methods of the same names.
impl<S: Simd> crate::SimdInt<u8, S> for u8x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_lt_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_le_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_ge_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_gt_u8x16(self, rhs.simd_into(self.simd))
    }
    // Interleave / deinterleave of lane pairs between `self` and `rhs`.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.zip_low_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.zip_high_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.unzip_low_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.unzip_high_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.min_u8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u8x16<S> {
        self.simd.max_u8x16(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD mask with sixteen 8-bit lanes, carrying its `Simd` backend witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask8x16<S: Simd> {
    // Mask lanes stored as `i8`. NOTE(review): lanes are presumably 0
    // (false) or -1 / all-bits-set (true) — confirm the backend contract.
    pub val: [i8; 16],
    // Capability witness; all mask ops delegate to this.
    pub simd: S,
}
805impl<S: Simd> SimdFrom<[i8; 16], S> for mask8x16<S> {
806 #[inline(always)]
807 fn simd_from(val: [i8; 16], simd: S) -> Self {
808 Self {
809 val: [
810 val[0usize],
811 val[1usize],
812 val[2usize],
813 val[3usize],
814 val[4usize],
815 val[5usize],
816 val[6usize],
817 val[7usize],
818 val[8usize],
819 val[9usize],
820 val[10usize],
821 val[11usize],
822 val[12usize],
823 val[13usize],
824 val[14usize],
825 val[15usize],
826 ],
827 simd,
828 }
829 }
830}
831impl<S: Simd> From<mask8x16<S>> for [i8; 16] {
832 #[inline(always)]
833 fn from(value: mask8x16<S>) -> Self {
834 value.val
835 }
836}
impl<S: Simd> core::ops::Deref for mask8x16<S> {
    type Target = [i8; 16];
    /// Borrows the mask lanes as a plain array.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask8x16<S> {
    /// Mutably borrows the mask lanes as a plain array.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<i8, S> for mask8x16<S> {
    /// Broadcasts a scalar mask value into all sixteen lanes.
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_mask8x16(value)
    }
}
impl<S: Simd> Select<mask8x16<S>> for mask8x16<S> {
    /// Lanewise select between two masks according to this mask.
    #[inline(always)]
    fn select(self, if_true: mask8x16<S>, if_false: mask8x16<S>) -> mask8x16<S> {
        self.simd.select_mask8x16(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for mask8x16<S> {
    type Bytes = u8x16<S>;
    /// Bitcasts the sixteen `i8` mask lanes to 16 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 16]` and `[u8; 16]` have identical size and layout;
        // every bit pattern is a valid `u8`.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Bitcasts 16 raw bytes back to mask lanes.
    ///
    /// NOTE(review): this does not normalize lanes to 0/-1; callers
    /// presumably must supply valid mask bit patterns — confirm.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute; every bit pattern is a valid `i8`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Boolean lane algebra on `mask8x16`; every method delegates to the `Simd`
/// backend witness in `self.simd`.
impl<S: Simd> mask8x16<S> {
    #[inline(always)]
    pub fn not(self) -> mask8x16<S> {
        self.simd.not_mask8x16(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.and_mask8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.or_mask8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.xor_mask8x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_mask8x16(self, rhs.simd_into(self.simd))
    }
    // Concatenates `self` (low half) with `rhs` (high half) into a mask8x32.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.combine_mask8x16(self, rhs.simd_into(self.simd))
    }
}
/// Generic vector plumbing for `mask8x16`; the element type is the mask's
/// `i8` lane representation.
impl<S: Simd> crate::SimdBase<i8, S> for mask8x16<S> {
    const N: usize = 16;
    type Mask = mask8x16<S>;
    type Block = mask8x16<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    /// Loads lanes from a slice.
    ///
    /// Panics if `slice.len() != 16` (inherited from `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_mask8x16(val)
    }
    // The block type is the mask itself, so block-splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
/// Mask-specific trait surface for `mask8x16`.
impl<S: Simd> crate::SimdMask<i8, S> for mask8x16<S> {
    /// Lanewise equality of two masks, via the backend.
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x16<S> {
        self.simd.simd_eq_mask8x16(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of eight `i16` lanes, carrying its `Simd` backend witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct i16x8<S: Simd> {
    // Lane values in memory order.
    pub val: [i16; 8],
    // Capability witness; all lanewise ops delegate to this.
    pub simd: S,
}
952impl<S: Simd> SimdFrom<[i16; 8], S> for i16x8<S> {
953 #[inline(always)]
954 fn simd_from(val: [i16; 8], simd: S) -> Self {
955 Self {
956 val: [
957 val[0usize],
958 val[1usize],
959 val[2usize],
960 val[3usize],
961 val[4usize],
962 val[5usize],
963 val[6usize],
964 val[7usize],
965 ],
966 simd,
967 }
968 }
969}
970impl<S: Simd> From<i16x8<S>> for [i16; 8] {
971 #[inline(always)]
972 fn from(value: i16x8<S>) -> Self {
973 value.val
974 }
975}
impl<S: Simd> core::ops::Deref for i16x8<S> {
    type Target = [i16; 8];
    /// Borrows the lanes as a plain array.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i16x8<S> {
    /// Mutably borrows the lanes as a plain array.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<i16, S> for i16x8<S> {
    /// Broadcasts a scalar into all eight lanes via the backend splat.
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_i16x8(value)
    }
}
impl<S: Simd> Select<i16x8<S>> for mask16x8<S> {
    /// Lanewise select: picks each lane from `if_true` or `if_false`
    /// according to this mask, via the backend.
    #[inline(always)]
    fn select(self, if_true: i16x8<S>, if_false: i16x8<S>) -> i16x8<S> {
        self.simd.select_i16x8(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for i16x8<S> {
    type Bytes = u8x16<S>;
    /// Bitcasts the eight `i16` lanes to 16 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 8]` and `[u8; 16]` have the same size (16 bytes);
        // every bit pattern is a valid `u8`.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Bitcasts 16 raw bytes back to eight `i16` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute; every bit pattern is a valid `i16`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lanewise operations on eight `i16` lanes; every method delegates to the
/// `Simd` backend witness in `self.simd`. Binary operations accept any `rhs`
/// convertible to `i16x8` via `SimdInto`.
impl<S: Simd> i16x8<S> {
    #[inline(always)]
    pub fn not(self) -> i16x8<S> {
        self.simd.not_i16x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.add_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.sub_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.mul_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.and_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.or_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.xor_i16x8(self, rhs.simd_into(self.simd))
    }
    // Shifts: `shr`/`shl` shift all lanes by a uniform amount; `shrv` shifts
    // each lane by the corresponding lane of `rhs`. NOTE(review): behavior
    // for shift amounts >= 16 is backend-defined — confirm.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i16x8<S> {
        self.simd.shr_i16x8(self, shift)
    }
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.shrv_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> i16x8<S> {
        self.simd.shl_i16x8(self, shift)
    }
    // Lanewise comparisons, each returning an 8-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.min_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.max_i16x8(self, rhs.simd_into(self.simd))
    }
    // Concatenates `self` (low half) with `rhs` (high half) into an i16x16.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
        self.simd.combine_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn neg(self) -> i16x8<S> {
        self.simd.neg_i16x8(self)
    }
    // Bit-level reinterpretations (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_i16x8(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_i16x8(self)
    }
}
// Core SIMD vector interface for `i16x8`: lane count, mask/block types, and
// basic construction. All lane-wise work is dispatched through the witness.
impl<S: Simd> crate::SimdBase<i16, S> for i16x8<S> {
    const N: usize = 8;
    type Mask = mask16x8<S>;
    type Block = i16x8<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    // Panics if `slice.len() != 8` (copy_from_slice contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_i16x8(val)
    }
    // This vector is exactly one native block, so block_splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
// `SimdInt` trait plumbing for `i16x8`: forwards comparisons, zip/unzip and
// min/max to the `Simd` witness (mirrors the inherent methods on `i16x8`).
impl<S: Simd> crate::SimdInt<i16, S> for i16x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.zip_low_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.zip_high_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.unzip_low_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.unzip_high_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.min_i16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i16x8<S> {
        self.simd.max_i16x8(self, rhs.simd_into(self.simd))
    }
}
/// A 128-bit SIMD vector of eight `u16` lanes.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct u16x8<S: Simd> {
    /// Lane values.
    pub val: [u16; 8],
    /// SIMD capability witness; all lane-wise operations dispatch through it.
    pub simd: S,
}
1191impl<S: Simd> SimdFrom<[u16; 8], S> for u16x8<S> {
1192 #[inline(always)]
1193 fn simd_from(val: [u16; 8], simd: S) -> Self {
1194 Self {
1195 val: [
1196 val[0usize],
1197 val[1usize],
1198 val[2usize],
1199 val[3usize],
1200 val[4usize],
1201 val[5usize],
1202 val[6usize],
1203 val[7usize],
1204 ],
1205 simd,
1206 }
1207 }
1208}
// Extracts the lane array, discarding the SIMD witness.
impl<S: Simd> From<u16x8<S>> for [u16; 8] {
    #[inline(always)]
    fn from(value: u16x8<S>) -> Self {
        value.val
    }
}
// Lets the vector deref to its lane array for indexing and slicing.
impl<S: Simd> core::ops::Deref for u16x8<S> {
    type Target = [u16; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for u16x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Splat conversion: broadcasts one scalar into every lane.
impl<S: Simd> SimdFrom<u16, S> for u16x8<S> {
    #[inline(always)]
    fn simd_from(value: u16, simd: S) -> Self {
        simd.splat_u16x8(value)
    }
}
// Per-lane select driven by this mask: lanes are chosen from `if_true` or
// `if_false` according to the corresponding mask lane.
impl<S: Simd> Select<u16x8<S>> for mask16x8<S> {
    #[inline(always)]
    fn select(self, if_true: u16x8<S>, if_false: u16x8<S>) -> u16x8<S> {
        self.simd.select_u16x8(self, if_true, if_false)
    }
}
1240impl<S: Simd> Bytes for u16x8<S> {
1241 type Bytes = u8x16<S>;
1242 #[inline(always)]
1243 fn to_bytes(self) -> Self::Bytes {
1244 unsafe {
1245 u8x16 {
1246 val: core::mem::transmute(self.val),
1247 simd: self.simd,
1248 }
1249 }
1250 }
1251 #[inline(always)]
1252 fn from_bytes(value: Self::Bytes) -> Self {
1253 unsafe {
1254 Self {
1255 val: core::mem::transmute(value.val),
1256 simd: value.simd,
1257 }
1258 }
1259 }
1260}
// Lane-wise operations on `u16x8`. Every method delegates to the `Simd`
// witness; `rhs` accepts anything convertible via `SimdInto` (e.g. a scalar,
// which is splatted to all lanes).
impl<S: Simd> u16x8<S> {
    /// Bitwise NOT of each lane.
    #[inline(always)]
    pub fn not(self) -> u16x8<S> {
        self.simd.not_u16x8(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.add_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.sub_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.mul_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.and_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.or_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.xor_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u16x8<S> {
        self.simd.shr_u16x8(self, shift)
    }
    /// Right shift with per-lane shift counts taken from `rhs`.
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.shrv_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane left by `shift` bits.
    #[inline(always)]
    pub fn shl(self, shift: u32) -> u16x8<S> {
        self.simd.shl_u16x8(self, shift)
    }
    /// Lane-wise equality comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.min_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.max_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into a 16-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
        self.simd.combine_u16x8(self, rhs.simd_into(self.simd))
    }
    /// Bit-reinterprets the vector as 16 `u8` lanes.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_u16x8(self)
    }
    /// Bit-reinterprets the vector as 4 `u32` lanes.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_u16x8(self)
    }
}
// Core SIMD vector interface for `u16x8`: lane count, mask/block types, and
// basic construction.
impl<S: Simd> crate::SimdBase<u16, S> for u16x8<S> {
    const N: usize = 8;
    type Mask = mask16x8<S>;
    type Block = u16x8<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[u16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u16] {
        &mut self.val
    }
    // Panics if `slice.len() != 8` (copy_from_slice contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u16]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u16) -> Self {
        simd.splat_u16x8(val)
    }
    // This vector is exactly one native block, so block_splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
// `SimdInt` trait plumbing for `u16x8`: forwards comparisons, zip/unzip and
// min/max to the `Simd` witness (mirrors the inherent methods on `u16x8`).
impl<S: Simd> crate::SimdInt<u16, S> for u16x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_lt_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_le_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_ge_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_gt_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.zip_low_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.zip_high_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.unzip_low_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.unzip_high_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.min_u16x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u16x8<S> {
        self.simd.max_u16x8(self, rhs.simd_into(self.simd))
    }
}
/// A lane mask for 8×16-bit vectors; each lane is an `i16` truth value.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask16x8<S: Simd> {
    /// Mask lane values. NOTE(review): presumably all-ones for true and zero
    /// for false — confirm against the backend implementations.
    pub val: [i16; 8],
    /// SIMD capability witness; all lane-wise operations dispatch through it.
    pub simd: S,
}
1426impl<S: Simd> SimdFrom<[i16; 8], S> for mask16x8<S> {
1427 #[inline(always)]
1428 fn simd_from(val: [i16; 8], simd: S) -> Self {
1429 Self {
1430 val: [
1431 val[0usize],
1432 val[1usize],
1433 val[2usize],
1434 val[3usize],
1435 val[4usize],
1436 val[5usize],
1437 val[6usize],
1438 val[7usize],
1439 ],
1440 simd,
1441 }
1442 }
1443}
// Extracts the lane array, discarding the SIMD witness.
impl<S: Simd> From<mask16x8<S>> for [i16; 8] {
    #[inline(always)]
    fn from(value: mask16x8<S>) -> Self {
        value.val
    }
}
// Lets the mask deref to its lane array for indexing and slicing.
impl<S: Simd> core::ops::Deref for mask16x8<S> {
    type Target = [i16; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for mask16x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Splat conversion: broadcasts one scalar into every mask lane.
impl<S: Simd> SimdFrom<i16, S> for mask16x8<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_mask16x8(value)
    }
}
// Per-lane select between two masks, driven by this mask.
impl<S: Simd> Select<mask16x8<S>> for mask16x8<S> {
    #[inline(always)]
    fn select(self, if_true: mask16x8<S>, if_false: mask16x8<S>) -> mask16x8<S> {
        self.simd.select_mask16x8(self, if_true, if_false)
    }
}
1475impl<S: Simd> Bytes for mask16x8<S> {
1476 type Bytes = u8x16<S>;
1477 #[inline(always)]
1478 fn to_bytes(self) -> Self::Bytes {
1479 unsafe {
1480 u8x16 {
1481 val: core::mem::transmute(self.val),
1482 simd: self.simd,
1483 }
1484 }
1485 }
1486 #[inline(always)]
1487 fn from_bytes(value: Self::Bytes) -> Self {
1488 unsafe {
1489 Self {
1490 val: core::mem::transmute(value.val),
1491 simd: value.simd,
1492 }
1493 }
1494 }
1495}
// Lane-wise boolean operations on `mask16x8`, delegated to the `Simd` witness.
impl<S: Simd> mask16x8<S> {
    /// Bitwise NOT of each mask lane.
    #[inline(always)]
    pub fn not(self) -> mask16x8<S> {
        self.simd.not_mask16x8(self)
    }
    /// Lane-wise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.and_mask16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.or_mask16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.xor_mask16x8(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality comparison of two masks.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_mask16x8(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into a 16-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
        self.simd.combine_mask16x8(self, rhs.simd_into(self.simd))
    }
}
// Core SIMD vector interface for `mask16x8` (a mask is its own mask type).
impl<S: Simd> crate::SimdBase<i16, S> for mask16x8<S> {
    const N: usize = 8;
    type Mask = mask16x8<S>;
    type Block = mask16x8<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    // Panics if `slice.len() != 8` (copy_from_slice contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_mask16x8(val)
    }
    // This mask is exactly one native block, so block_splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
// `SimdMask` trait plumbing: lane-wise mask equality via the witness.
impl<S: Simd> crate::SimdMask<i16, S> for mask16x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x8<S> {
        self.simd.simd_eq_mask16x8(self, rhs.simd_into(self.simd))
    }
}
/// A 128-bit SIMD vector of four `i32` lanes.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct i32x4<S: Simd> {
    /// Lane values.
    pub val: [i32; 4],
    /// SIMD capability witness; all lane-wise operations dispatch through it.
    pub simd: S,
}
1565impl<S: Simd> SimdFrom<[i32; 4], S> for i32x4<S> {
1566 #[inline(always)]
1567 fn simd_from(val: [i32; 4], simd: S) -> Self {
1568 Self {
1569 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
1570 simd,
1571 }
1572 }
1573}
// Extracts the lane array, discarding the SIMD witness.
impl<S: Simd> From<i32x4<S>> for [i32; 4] {
    #[inline(always)]
    fn from(value: i32x4<S>) -> Self {
        value.val
    }
}
// Lets the vector deref to its lane array for indexing and slicing.
impl<S: Simd> core::ops::Deref for i32x4<S> {
    type Target = [i32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for i32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Splat conversion: broadcasts one scalar into every lane.
impl<S: Simd> SimdFrom<i32, S> for i32x4<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_i32x4(value)
    }
}
// Per-lane select driven by this mask: lanes are chosen from `if_true` or
// `if_false` according to the corresponding mask lane.
impl<S: Simd> Select<i32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: i32x4<S>, if_false: i32x4<S>) -> i32x4<S> {
        self.simd.select_i32x4(self, if_true, if_false)
    }
}
1605impl<S: Simd> Bytes for i32x4<S> {
1606 type Bytes = u8x16<S>;
1607 #[inline(always)]
1608 fn to_bytes(self) -> Self::Bytes {
1609 unsafe {
1610 u8x16 {
1611 val: core::mem::transmute(self.val),
1612 simd: self.simd,
1613 }
1614 }
1615 }
1616 #[inline(always)]
1617 fn from_bytes(value: Self::Bytes) -> Self {
1618 unsafe {
1619 Self {
1620 val: core::mem::transmute(value.val),
1621 simd: value.simd,
1622 }
1623 }
1624 }
1625}
// Lane-wise operations on `i32x4`. Every method delegates to the `Simd`
// witness; `rhs` accepts anything convertible via `SimdInto` (e.g. a scalar,
// which is splatted to all lanes).
impl<S: Simd> i32x4<S> {
    /// Bitwise NOT of each lane.
    #[inline(always)]
    pub fn not(self) -> i32x4<S> {
        self.simd.not_i32x4(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.add_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.sub_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.mul_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.and_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.or_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.xor_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i32x4<S> {
        self.simd.shr_i32x4(self, shift)
    }
    /// Right shift with per-lane shift counts taken from `rhs`.
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.shrv_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane left by `shift` bits.
    #[inline(always)]
    pub fn shl(self, shift: u32) -> i32x4<S> {
        self.simd.shl_i32x4(self, shift)
    }
    /// Lane-wise equality comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.min_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.max_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into an 8-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.combine_i32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise arithmetic negation.
    #[inline(always)]
    pub fn neg(self) -> i32x4<S> {
        self.simd.neg_i32x4(self)
    }
    /// Bit-reinterprets the vector as 16 `u8` lanes.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_i32x4(self)
    }
    /// Bit-reinterprets the vector as 4 `u32` lanes.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x4<S> {
        self.simd.reinterpret_u32_i32x4(self)
    }
    /// Converts each `i32` lane to `f32`.
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x4<S> {
        self.simd.cvt_f32_i32x4(self)
    }
}
// Core SIMD vector interface for `i32x4`: lane count, mask/block types, and
// basic construction.
impl<S: Simd> crate::SimdBase<i32, S> for i32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = i32x4<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    // Panics if `slice.len() != 4` (copy_from_slice contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_i32x4(val)
    }
    // This vector is exactly one native block, so block_splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
// `SimdInt` trait plumbing for `i32x4`: forwards comparisons, zip/unzip and
// min/max to the `Simd` witness (mirrors the inherent methods on `i32x4`).
impl<S: Simd> crate::SimdInt<i32, S> for i32x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.zip_low_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.zip_high_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.unzip_low_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.unzip_high_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.min_i32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i32x4<S> {
        self.simd.max_i32x4(self, rhs.simd_into(self.simd))
    }
}
1793impl<S: Simd> SimdCvtTruncate<f32x4<S>> for i32x4<S> {
1794 fn truncate_from(x: f32x4<S>) -> Self {
1795 x.simd.cvt_i32_f32x4(x)
1796 }
1797}
/// A 128-bit SIMD vector of four `u32` lanes.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct u32x4<S: Simd> {
    /// Lane values.
    pub val: [u32; 4],
    /// SIMD capability witness; all lane-wise operations dispatch through it.
    pub simd: S,
}
1804impl<S: Simd> SimdFrom<[u32; 4], S> for u32x4<S> {
1805 #[inline(always)]
1806 fn simd_from(val: [u32; 4], simd: S) -> Self {
1807 Self {
1808 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
1809 simd,
1810 }
1811 }
1812}
// Extracts the lane array, discarding the SIMD witness.
impl<S: Simd> From<u32x4<S>> for [u32; 4] {
    #[inline(always)]
    fn from(value: u32x4<S>) -> Self {
        value.val
    }
}
// Lets the vector deref to its lane array for indexing and slicing.
impl<S: Simd> core::ops::Deref for u32x4<S> {
    type Target = [u32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for u32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Splat conversion: broadcasts one scalar into every lane.
impl<S: Simd> SimdFrom<u32, S> for u32x4<S> {
    #[inline(always)]
    fn simd_from(value: u32, simd: S) -> Self {
        simd.splat_u32x4(value)
    }
}
// Per-lane select driven by this mask: lanes are chosen from `if_true` or
// `if_false` according to the corresponding mask lane.
impl<S: Simd> Select<u32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: u32x4<S>, if_false: u32x4<S>) -> u32x4<S> {
        self.simd.select_u32x4(self, if_true, if_false)
    }
}
1844impl<S: Simd> Bytes for u32x4<S> {
1845 type Bytes = u8x16<S>;
1846 #[inline(always)]
1847 fn to_bytes(self) -> Self::Bytes {
1848 unsafe {
1849 u8x16 {
1850 val: core::mem::transmute(self.val),
1851 simd: self.simd,
1852 }
1853 }
1854 }
1855 #[inline(always)]
1856 fn from_bytes(value: Self::Bytes) -> Self {
1857 unsafe {
1858 Self {
1859 val: core::mem::transmute(value.val),
1860 simd: value.simd,
1861 }
1862 }
1863 }
1864}
// Lane-wise operations on `u32x4`. Every method delegates to the `Simd`
// witness; `rhs` accepts anything convertible via `SimdInto` (e.g. a scalar,
// which is splatted to all lanes).
impl<S: Simd> u32x4<S> {
    /// Bitwise NOT of each lane.
    #[inline(always)]
    pub fn not(self) -> u32x4<S> {
        self.simd.not_u32x4(self)
    }
    /// Lane-wise addition.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.add_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise subtraction.
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.sub_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise multiplication.
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.mul_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.and_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.or_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.xor_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane right by `shift` bits.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u32x4<S> {
        self.simd.shr_u32x4(self, shift)
    }
    /// Right shift with per-lane shift counts taken from `rhs`.
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.shrv_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Shifts every lane left by `shift` bits.
    #[inline(always)]
    pub fn shl(self, shift: u32) -> u32x4<S> {
        self.simd.shl_u32x4(self, shift)
    }
    /// Lane-wise equality comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `<=` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>=` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise `>` comparison, yielding a mask.
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise minimum.
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.min_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise maximum.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.max_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into an 8-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.combine_u32x4(self, rhs.simd_into(self.simd))
    }
    /// Bit-reinterprets the vector as 16 `u8` lanes.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x16<S> {
        self.simd.reinterpret_u8_u32x4(self)
    }
    /// Converts each `u32` lane to `f32`.
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x4<S> {
        self.simd.cvt_f32_u32x4(self)
    }
}
// Core SIMD vector interface for `u32x4`: lane count, mask/block types, and
// basic construction.
impl<S: Simd> crate::SimdBase<u32, S> for u32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = u32x4<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[u32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u32] {
        &mut self.val
    }
    // Panics if `slice.len() != 4` (copy_from_slice contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u32]) -> Self {
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u32) -> Self {
        simd.splat_u32x4(val)
    }
    // This vector is exactly one native block, so block_splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
// `SimdInt` trait plumbing for `u32x4`: forwards comparisons, zip/unzip and
// min/max to the `Simd` witness (mirrors the inherent methods on `u32x4`).
impl<S: Simd> crate::SimdInt<u32, S> for u32x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_lt_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_le_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_ge_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_gt_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.zip_low_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.zip_high_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.unzip_low_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.unzip_high_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.min_u32x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u32x4<S> {
        self.simd.max_u32x4(self, rhs.simd_into(self.simd))
    }
}
2024impl<S: Simd> SimdCvtTruncate<f32x4<S>> for u32x4<S> {
2025 fn truncate_from(x: f32x4<S>) -> Self {
2026 x.simd.cvt_u32_f32x4(x)
2027 }
2028}
/// A lane mask for 4×32-bit vectors; each lane is an `i32` truth value.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask32x4<S: Simd> {
    /// Mask lane values. NOTE(review): presumably all-ones for true and zero
    /// for false — confirm against the backend implementations.
    pub val: [i32; 4],
    /// SIMD capability witness; all lane-wise operations dispatch through it.
    pub simd: S,
}
2035impl<S: Simd> SimdFrom<[i32; 4], S> for mask32x4<S> {
2036 #[inline(always)]
2037 fn simd_from(val: [i32; 4], simd: S) -> Self {
2038 Self {
2039 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
2040 simd,
2041 }
2042 }
2043}
// Extracts the lane array, discarding the SIMD witness.
impl<S: Simd> From<mask32x4<S>> for [i32; 4] {
    #[inline(always)]
    fn from(value: mask32x4<S>) -> Self {
        value.val
    }
}
// Lets the mask deref to its lane array for indexing and slicing.
impl<S: Simd> core::ops::Deref for mask32x4<S> {
    type Target = [i32; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
// Mutable counterpart of `Deref`: in-place access to the lane array.
impl<S: Simd> core::ops::DerefMut for mask32x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Splat conversion: broadcasts one scalar into every mask lane.
impl<S: Simd> SimdFrom<i32, S> for mask32x4<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_mask32x4(value)
    }
}
// Per-lane select between two masks, driven by this mask.
impl<S: Simd> Select<mask32x4<S>> for mask32x4<S> {
    #[inline(always)]
    fn select(self, if_true: mask32x4<S>, if_false: mask32x4<S>) -> mask32x4<S> {
        self.simd.select_mask32x4(self, if_true, if_false)
    }
}
2075impl<S: Simd> Bytes for mask32x4<S> {
2076 type Bytes = u8x16<S>;
2077 #[inline(always)]
2078 fn to_bytes(self) -> Self::Bytes {
2079 unsafe {
2080 u8x16 {
2081 val: core::mem::transmute(self.val),
2082 simd: self.simd,
2083 }
2084 }
2085 }
2086 #[inline(always)]
2087 fn from_bytes(value: Self::Bytes) -> Self {
2088 unsafe {
2089 Self {
2090 val: core::mem::transmute(value.val),
2091 simd: value.simd,
2092 }
2093 }
2094 }
2095}
// Lane-wise boolean operations on `mask32x4`, delegated to the `Simd` witness.
impl<S: Simd> mask32x4<S> {
    /// Bitwise NOT of each mask lane.
    #[inline(always)]
    pub fn not(self) -> mask32x4<S> {
        self.simd.not_mask32x4(self)
    }
    /// Lane-wise AND.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.and_mask32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise OR.
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.or_mask32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise XOR.
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.xor_mask32x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise equality comparison of two masks.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_mask32x4(self, rhs.simd_into(self.simd))
    }
    /// Concatenates `self` and `rhs` into an 8-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.combine_mask32x4(self, rhs.simd_into(self.simd))
    }
}
// Core SIMD vector interface for `mask32x4` (a mask is its own mask type).
impl<S: Simd> crate::SimdBase<i32, S> for mask32x4<S> {
    const N: usize = 4;
    type Mask = mask32x4<S>;
    type Block = mask32x4<S>;
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    // Panics if `slice.len() != 4` (copy_from_slice contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_mask32x4(val)
    }
    // This mask is exactly one native block, so block_splat is the identity.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block
    }
}
// `SimdMask` trait plumbing: lane-wise mask equality via the witness.
impl<S: Simd> crate::SimdMask<i32, S> for mask32x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x4<S> {
        self.simd.simd_eq_mask32x4(self, rhs.simd_into(self.simd))
    }
}
/// A 128-bit SIMD vector of two `f64` lanes.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct f64x2<S: Simd> {
    /// Lane values.
    pub val: [f64; 2],
    /// SIMD capability witness; all lane-wise operations dispatch through it.
    pub simd: S,
}
2165impl<S: Simd> SimdFrom<[f64; 2], S> for f64x2<S> {
2166 #[inline(always)]
2167 fn simd_from(val: [f64; 2], simd: S) -> Self {
2168 Self {
2169 val: [val[0usize], val[1usize]],
2170 simd,
2171 }
2172 }
2173}
2174impl<S: Simd> From<f64x2<S>> for [f64; 2] {
2175 #[inline(always)]
2176 fn from(value: f64x2<S>) -> Self {
2177 value.val
2178 }
2179}
2180impl<S: Simd> core::ops::Deref for f64x2<S> {
2181 type Target = [f64; 2];
2182 #[inline(always)]
2183 fn deref(&self) -> &Self::Target {
2184 &self.val
2185 }
2186}
2187impl<S: Simd> core::ops::DerefMut for f64x2<S> {
2188 #[inline(always)]
2189 fn deref_mut(&mut self) -> &mut Self::Target {
2190 &mut self.val
2191 }
2192}
2193impl<S: Simd> SimdFrom<f64, S> for f64x2<S> {
2194 #[inline(always)]
2195 fn simd_from(value: f64, simd: S) -> Self {
2196 simd.splat_f64x2(value)
2197 }
2198}
2199impl<S: Simd> Select<f64x2<S>> for mask64x2<S> {
2200 #[inline(always)]
2201 fn select(self, if_true: f64x2<S>, if_false: f64x2<S>) -> f64x2<S> {
2202 self.simd.select_f64x2(self, if_true, if_false)
2203 }
2204}
impl<S: Simd> Bytes for f64x2<S> {
    type Bytes = u8x16<S>;
    /// Reinterprets the two `f64` lanes as 16 raw bytes (native endianness).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [f64; 2] and [u8; 16] have identical size (16 bytes), and
        // every bit pattern is a valid u8, so the transmute is sound.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 16 raw bytes as two `f64` lanes (native endianness).
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: [u8; 16] and [f64; 2] have identical size, and every bit
        // pattern is a valid f64 (possibly NaN), so the transmute is sound.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
2226impl<S: Simd> f64x2<S> {
2227 #[inline(always)]
2228 pub fn abs(self) -> f64x2<S> {
2229 self.simd.abs_f64x2(self)
2230 }
2231 #[inline(always)]
2232 pub fn neg(self) -> f64x2<S> {
2233 self.simd.neg_f64x2(self)
2234 }
2235 #[inline(always)]
2236 pub fn sqrt(self) -> f64x2<S> {
2237 self.simd.sqrt_f64x2(self)
2238 }
2239 #[inline(always)]
2240 pub fn add(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2241 self.simd.add_f64x2(self, rhs.simd_into(self.simd))
2242 }
2243 #[inline(always)]
2244 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2245 self.simd.sub_f64x2(self, rhs.simd_into(self.simd))
2246 }
2247 #[inline(always)]
2248 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2249 self.simd.mul_f64x2(self, rhs.simd_into(self.simd))
2250 }
2251 #[inline(always)]
2252 pub fn div(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2253 self.simd.div_f64x2(self, rhs.simd_into(self.simd))
2254 }
2255 #[inline(always)]
2256 pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2257 self.simd.copysign_f64x2(self, rhs.simd_into(self.simd))
2258 }
2259 #[inline(always)]
2260 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2261 self.simd.simd_eq_f64x2(self, rhs.simd_into(self.simd))
2262 }
2263 #[inline(always)]
2264 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2265 self.simd.simd_lt_f64x2(self, rhs.simd_into(self.simd))
2266 }
2267 #[inline(always)]
2268 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2269 self.simd.simd_le_f64x2(self, rhs.simd_into(self.simd))
2270 }
2271 #[inline(always)]
2272 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2273 self.simd.simd_ge_f64x2(self, rhs.simd_into(self.simd))
2274 }
2275 #[inline(always)]
2276 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2277 self.simd.simd_gt_f64x2(self, rhs.simd_into(self.simd))
2278 }
2279 #[inline(always)]
2280 pub fn max(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2281 self.simd.max_f64x2(self, rhs.simd_into(self.simd))
2282 }
2283 #[inline(always)]
2284 pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2285 self.simd.max_precise_f64x2(self, rhs.simd_into(self.simd))
2286 }
2287 #[inline(always)]
2288 pub fn min(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2289 self.simd.min_f64x2(self, rhs.simd_into(self.simd))
2290 }
2291 #[inline(always)]
2292 pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2293 self.simd.min_precise_f64x2(self, rhs.simd_into(self.simd))
2294 }
2295 #[inline(always)]
2296 pub fn floor(self) -> f64x2<S> {
2297 self.simd.floor_f64x2(self)
2298 }
2299 #[inline(always)]
2300 pub fn fract(self) -> f64x2<S> {
2301 self.simd.fract_f64x2(self)
2302 }
2303 #[inline(always)]
2304 pub fn trunc(self) -> f64x2<S> {
2305 self.simd.trunc_f64x2(self)
2306 }
2307 #[inline(always)]
2308 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
2309 self.simd.combine_f64x2(self, rhs.simd_into(self.simd))
2310 }
2311 #[inline(always)]
2312 pub fn reinterpret_f32(self) -> f32x4<S> {
2313 self.simd.reinterpret_f32_f64x2(self)
2314 }
2315}
2316impl<S: Simd> crate::SimdBase<f64, S> for f64x2<S> {
2317 const N: usize = 2;
2318 type Mask = mask64x2<S>;
2319 type Block = f64x2<S>;
2320 #[inline(always)]
2321 fn witness(&self) -> S {
2322 self.simd
2323 }
2324 #[inline(always)]
2325 fn as_slice(&self) -> &[f64] {
2326 &self.val
2327 }
2328 #[inline(always)]
2329 fn as_mut_slice(&mut self) -> &mut [f64] {
2330 &mut self.val
2331 }
2332 #[inline(always)]
2333 fn from_slice(simd: S, slice: &[f64]) -> Self {
2334 let mut val = [0.0; 2];
2335 val.copy_from_slice(slice);
2336 Self { val, simd }
2337 }
2338 #[inline(always)]
2339 fn splat(simd: S, val: f64) -> Self {
2340 simd.splat_f64x2(val)
2341 }
2342 #[inline(always)]
2343 fn block_splat(block: Self::Block) -> Self {
2344 block
2345 }
2346}
2347impl<S: Simd> crate::SimdFloat<f64, S> for f64x2<S> {
2348 #[inline(always)]
2349 fn abs(self) -> f64x2<S> {
2350 self.simd.abs_f64x2(self)
2351 }
2352 #[inline(always)]
2353 fn sqrt(self) -> f64x2<S> {
2354 self.simd.sqrt_f64x2(self)
2355 }
2356 #[inline(always)]
2357 fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2358 self.simd.copysign_f64x2(self, rhs.simd_into(self.simd))
2359 }
2360 #[inline(always)]
2361 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2362 self.simd.simd_eq_f64x2(self, rhs.simd_into(self.simd))
2363 }
2364 #[inline(always)]
2365 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2366 self.simd.simd_lt_f64x2(self, rhs.simd_into(self.simd))
2367 }
2368 #[inline(always)]
2369 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2370 self.simd.simd_le_f64x2(self, rhs.simd_into(self.simd))
2371 }
2372 #[inline(always)]
2373 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2374 self.simd.simd_ge_f64x2(self, rhs.simd_into(self.simd))
2375 }
2376 #[inline(always)]
2377 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2378 self.simd.simd_gt_f64x2(self, rhs.simd_into(self.simd))
2379 }
2380 #[inline(always)]
2381 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2382 self.simd.zip_low_f64x2(self, rhs.simd_into(self.simd))
2383 }
2384 #[inline(always)]
2385 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2386 self.simd.zip_high_f64x2(self, rhs.simd_into(self.simd))
2387 }
2388 #[inline(always)]
2389 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2390 self.simd.unzip_low_f64x2(self, rhs.simd_into(self.simd))
2391 }
2392 #[inline(always)]
2393 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2394 self.simd.unzip_high_f64x2(self, rhs.simd_into(self.simd))
2395 }
2396 #[inline(always)]
2397 fn max(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2398 self.simd.max_f64x2(self, rhs.simd_into(self.simd))
2399 }
2400 #[inline(always)]
2401 fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2402 self.simd.max_precise_f64x2(self, rhs.simd_into(self.simd))
2403 }
2404 #[inline(always)]
2405 fn min(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2406 self.simd.min_f64x2(self, rhs.simd_into(self.simd))
2407 }
2408 #[inline(always)]
2409 fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x2<S> {
2410 self.simd.min_precise_f64x2(self, rhs.simd_into(self.simd))
2411 }
2412 #[inline(always)]
2413 fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x2<S> {
2414 self.simd
2415 .madd_f64x2(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
2416 }
2417 #[inline(always)]
2418 fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x2<S> {
2419 self.simd
2420 .msub_f64x2(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
2421 }
2422 #[inline(always)]
2423 fn floor(self) -> f64x2<S> {
2424 self.simd.floor_f64x2(self)
2425 }
2426 #[inline(always)]
2427 fn fract(self) -> f64x2<S> {
2428 self.simd.fract_f64x2(self)
2429 }
2430 #[inline(always)]
2431 fn trunc(self) -> f64x2<S> {
2432 self.simd.trunc_f64x2(self)
2433 }
2434}
/// A SIMD mask of two 64-bit lanes (all-ones = true, all-zeros = false),
/// tagged with the `Simd` capability witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(16))]
pub struct mask64x2<S: Simd> {
    /// The lane values.
    pub val: [i64; 2],
    /// Capability witness used to dispatch lane operations.
    pub simd: S,
}
2441impl<S: Simd> SimdFrom<[i64; 2], S> for mask64x2<S> {
2442 #[inline(always)]
2443 fn simd_from(val: [i64; 2], simd: S) -> Self {
2444 Self {
2445 val: [val[0usize], val[1usize]],
2446 simd,
2447 }
2448 }
2449}
2450impl<S: Simd> From<mask64x2<S>> for [i64; 2] {
2451 #[inline(always)]
2452 fn from(value: mask64x2<S>) -> Self {
2453 value.val
2454 }
2455}
2456impl<S: Simd> core::ops::Deref for mask64x2<S> {
2457 type Target = [i64; 2];
2458 #[inline(always)]
2459 fn deref(&self) -> &Self::Target {
2460 &self.val
2461 }
2462}
2463impl<S: Simd> core::ops::DerefMut for mask64x2<S> {
2464 #[inline(always)]
2465 fn deref_mut(&mut self) -> &mut Self::Target {
2466 &mut self.val
2467 }
2468}
2469impl<S: Simd> SimdFrom<i64, S> for mask64x2<S> {
2470 #[inline(always)]
2471 fn simd_from(value: i64, simd: S) -> Self {
2472 simd.splat_mask64x2(value)
2473 }
2474}
2475impl<S: Simd> Select<mask64x2<S>> for mask64x2<S> {
2476 #[inline(always)]
2477 fn select(self, if_true: mask64x2<S>, if_false: mask64x2<S>) -> mask64x2<S> {
2478 self.simd.select_mask64x2(self, if_true, if_false)
2479 }
2480}
impl<S: Simd> Bytes for mask64x2<S> {
    type Bytes = u8x16<S>;
    /// Reinterprets the two `i64` mask lanes as 16 raw bytes (native endianness).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [i64; 2] and [u8; 16] have identical size (16 bytes), and
        // every bit pattern is a valid u8, so the transmute is sound.
        unsafe {
            u8x16 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 16 raw bytes as two `i64` mask lanes (native endianness).
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: [u8; 16] and [i64; 2] have identical size, and every bit
        // pattern is a valid i64, so the transmute is sound.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
2502impl<S: Simd> mask64x2<S> {
2503 #[inline(always)]
2504 pub fn not(self) -> mask64x2<S> {
2505 self.simd.not_mask64x2(self)
2506 }
2507 #[inline(always)]
2508 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2509 self.simd.and_mask64x2(self, rhs.simd_into(self.simd))
2510 }
2511 #[inline(always)]
2512 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2513 self.simd.or_mask64x2(self, rhs.simd_into(self.simd))
2514 }
2515 #[inline(always)]
2516 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2517 self.simd.xor_mask64x2(self, rhs.simd_into(self.simd))
2518 }
2519 #[inline(always)]
2520 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2521 self.simd.simd_eq_mask64x2(self, rhs.simd_into(self.simd))
2522 }
2523 #[inline(always)]
2524 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
2525 self.simd.combine_mask64x2(self, rhs.simd_into(self.simd))
2526 }
2527}
2528impl<S: Simd> crate::SimdBase<i64, S> for mask64x2<S> {
2529 const N: usize = 2;
2530 type Mask = mask64x2<S>;
2531 type Block = mask64x2<S>;
2532 #[inline(always)]
2533 fn witness(&self) -> S {
2534 self.simd
2535 }
2536 #[inline(always)]
2537 fn as_slice(&self) -> &[i64] {
2538 &self.val
2539 }
2540 #[inline(always)]
2541 fn as_mut_slice(&mut self) -> &mut [i64] {
2542 &mut self.val
2543 }
2544 #[inline(always)]
2545 fn from_slice(simd: S, slice: &[i64]) -> Self {
2546 let mut val = [0; 2];
2547 val.copy_from_slice(slice);
2548 Self { val, simd }
2549 }
2550 #[inline(always)]
2551 fn splat(simd: S, val: i64) -> Self {
2552 simd.splat_mask64x2(val)
2553 }
2554 #[inline(always)]
2555 fn block_splat(block: Self::Block) -> Self {
2556 block
2557 }
2558}
2559impl<S: Simd> crate::SimdMask<i64, S> for mask64x2<S> {
2560 #[inline(always)]
2561 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x2<S> {
2562 self.simd.simd_eq_mask64x2(self, rhs.simd_into(self.simd))
2563 }
2564}
/// A SIMD vector of eight `f32` lanes, tagged with the `Simd` capability witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct f32x8<S: Simd> {
    /// The lane values.
    pub val: [f32; 8],
    /// Capability witness used to dispatch lane operations.
    pub simd: S,
}
2571impl<S: Simd> SimdFrom<[f32; 8], S> for f32x8<S> {
2572 #[inline(always)]
2573 fn simd_from(val: [f32; 8], simd: S) -> Self {
2574 Self {
2575 val: [
2576 val[0usize],
2577 val[1usize],
2578 val[2usize],
2579 val[3usize],
2580 val[4usize],
2581 val[5usize],
2582 val[6usize],
2583 val[7usize],
2584 ],
2585 simd,
2586 }
2587 }
2588}
2589impl<S: Simd> From<f32x8<S>> for [f32; 8] {
2590 #[inline(always)]
2591 fn from(value: f32x8<S>) -> Self {
2592 value.val
2593 }
2594}
2595impl<S: Simd> core::ops::Deref for f32x8<S> {
2596 type Target = [f32; 8];
2597 #[inline(always)]
2598 fn deref(&self) -> &Self::Target {
2599 &self.val
2600 }
2601}
2602impl<S: Simd> core::ops::DerefMut for f32x8<S> {
2603 #[inline(always)]
2604 fn deref_mut(&mut self) -> &mut Self::Target {
2605 &mut self.val
2606 }
2607}
2608impl<S: Simd> SimdFrom<f32, S> for f32x8<S> {
2609 #[inline(always)]
2610 fn simd_from(value: f32, simd: S) -> Self {
2611 simd.splat_f32x8(value)
2612 }
2613}
2614impl<S: Simd> Select<f32x8<S>> for mask32x8<S> {
2615 #[inline(always)]
2616 fn select(self, if_true: f32x8<S>, if_false: f32x8<S>) -> f32x8<S> {
2617 self.simd.select_f32x8(self, if_true, if_false)
2618 }
2619}
impl<S: Simd> Bytes for f32x8<S> {
    type Bytes = u8x32<S>;
    /// Reinterprets the eight `f32` lanes as 32 raw bytes (native endianness).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [f32; 8] and [u8; 32] have identical size (32 bytes), and
        // every bit pattern is a valid u8, so the transmute is sound.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 32 raw bytes as eight `f32` lanes (native endianness).
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: [u8; 32] and [f32; 8] have identical size, and every bit
        // pattern is a valid f32 (possibly NaN), so the transmute is sound.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
2641impl<S: Simd> f32x8<S> {
2642 #[inline(always)]
2643 pub fn abs(self) -> f32x8<S> {
2644 self.simd.abs_f32x8(self)
2645 }
2646 #[inline(always)]
2647 pub fn neg(self) -> f32x8<S> {
2648 self.simd.neg_f32x8(self)
2649 }
2650 #[inline(always)]
2651 pub fn sqrt(self) -> f32x8<S> {
2652 self.simd.sqrt_f32x8(self)
2653 }
2654 #[inline(always)]
2655 pub fn add(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2656 self.simd.add_f32x8(self, rhs.simd_into(self.simd))
2657 }
2658 #[inline(always)]
2659 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2660 self.simd.sub_f32x8(self, rhs.simd_into(self.simd))
2661 }
2662 #[inline(always)]
2663 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2664 self.simd.mul_f32x8(self, rhs.simd_into(self.simd))
2665 }
2666 #[inline(always)]
2667 pub fn div(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2668 self.simd.div_f32x8(self, rhs.simd_into(self.simd))
2669 }
2670 #[inline(always)]
2671 pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2672 self.simd.copysign_f32x8(self, rhs.simd_into(self.simd))
2673 }
2674 #[inline(always)]
2675 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2676 self.simd.simd_eq_f32x8(self, rhs.simd_into(self.simd))
2677 }
2678 #[inline(always)]
2679 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2680 self.simd.simd_lt_f32x8(self, rhs.simd_into(self.simd))
2681 }
2682 #[inline(always)]
2683 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2684 self.simd.simd_le_f32x8(self, rhs.simd_into(self.simd))
2685 }
2686 #[inline(always)]
2687 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2688 self.simd.simd_ge_f32x8(self, rhs.simd_into(self.simd))
2689 }
2690 #[inline(always)]
2691 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2692 self.simd.simd_gt_f32x8(self, rhs.simd_into(self.simd))
2693 }
2694 #[inline(always)]
2695 pub fn max(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2696 self.simd.max_f32x8(self, rhs.simd_into(self.simd))
2697 }
2698 #[inline(always)]
2699 pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2700 self.simd.max_precise_f32x8(self, rhs.simd_into(self.simd))
2701 }
2702 #[inline(always)]
2703 pub fn min(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2704 self.simd.min_f32x8(self, rhs.simd_into(self.simd))
2705 }
2706 #[inline(always)]
2707 pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2708 self.simd.min_precise_f32x8(self, rhs.simd_into(self.simd))
2709 }
2710 #[inline(always)]
2711 pub fn floor(self) -> f32x8<S> {
2712 self.simd.floor_f32x8(self)
2713 }
2714 #[inline(always)]
2715 pub fn fract(self) -> f32x8<S> {
2716 self.simd.fract_f32x8(self)
2717 }
2718 #[inline(always)]
2719 pub fn trunc(self) -> f32x8<S> {
2720 self.simd.trunc_f32x8(self)
2721 }
2722 #[inline(always)]
2723 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
2724 self.simd.combine_f32x8(self, rhs.simd_into(self.simd))
2725 }
2726 #[inline(always)]
2727 pub fn reinterpret_f64(self) -> f64x4<S> {
2728 self.simd.reinterpret_f64_f32x8(self)
2729 }
2730 #[inline(always)]
2731 pub fn reinterpret_i32(self) -> i32x8<S> {
2732 self.simd.reinterpret_i32_f32x8(self)
2733 }
2734 #[inline(always)]
2735 pub fn reinterpret_u8(self) -> u8x32<S> {
2736 self.simd.reinterpret_u8_f32x8(self)
2737 }
2738 #[inline(always)]
2739 pub fn reinterpret_u32(self) -> u32x8<S> {
2740 self.simd.reinterpret_u32_f32x8(self)
2741 }
2742 #[inline(always)]
2743 pub fn cvt_u32(self) -> u32x8<S> {
2744 self.simd.cvt_u32_f32x8(self)
2745 }
2746 #[inline(always)]
2747 pub fn cvt_i32(self) -> i32x8<S> {
2748 self.simd.cvt_i32_f32x8(self)
2749 }
2750}
2751impl<S: Simd> crate::SimdBase<f32, S> for f32x8<S> {
2752 const N: usize = 8;
2753 type Mask = mask32x8<S>;
2754 type Block = f32x4<S>;
2755 #[inline(always)]
2756 fn witness(&self) -> S {
2757 self.simd
2758 }
2759 #[inline(always)]
2760 fn as_slice(&self) -> &[f32] {
2761 &self.val
2762 }
2763 #[inline(always)]
2764 fn as_mut_slice(&mut self) -> &mut [f32] {
2765 &mut self.val
2766 }
2767 #[inline(always)]
2768 fn from_slice(simd: S, slice: &[f32]) -> Self {
2769 let mut val = [0.0; 8];
2770 val.copy_from_slice(slice);
2771 Self { val, simd }
2772 }
2773 #[inline(always)]
2774 fn splat(simd: S, val: f32) -> Self {
2775 simd.splat_f32x8(val)
2776 }
2777 #[inline(always)]
2778 fn block_splat(block: Self::Block) -> Self {
2779 block.combine(block)
2780 }
2781}
2782impl<S: Simd> crate::SimdFloat<f32, S> for f32x8<S> {
2783 #[inline(always)]
2784 fn abs(self) -> f32x8<S> {
2785 self.simd.abs_f32x8(self)
2786 }
2787 #[inline(always)]
2788 fn sqrt(self) -> f32x8<S> {
2789 self.simd.sqrt_f32x8(self)
2790 }
2791 #[inline(always)]
2792 fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2793 self.simd.copysign_f32x8(self, rhs.simd_into(self.simd))
2794 }
2795 #[inline(always)]
2796 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2797 self.simd.simd_eq_f32x8(self, rhs.simd_into(self.simd))
2798 }
2799 #[inline(always)]
2800 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2801 self.simd.simd_lt_f32x8(self, rhs.simd_into(self.simd))
2802 }
2803 #[inline(always)]
2804 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2805 self.simd.simd_le_f32x8(self, rhs.simd_into(self.simd))
2806 }
2807 #[inline(always)]
2808 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2809 self.simd.simd_ge_f32x8(self, rhs.simd_into(self.simd))
2810 }
2811 #[inline(always)]
2812 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
2813 self.simd.simd_gt_f32x8(self, rhs.simd_into(self.simd))
2814 }
2815 #[inline(always)]
2816 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2817 self.simd.zip_low_f32x8(self, rhs.simd_into(self.simd))
2818 }
2819 #[inline(always)]
2820 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2821 self.simd.zip_high_f32x8(self, rhs.simd_into(self.simd))
2822 }
2823 #[inline(always)]
2824 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2825 self.simd.unzip_low_f32x8(self, rhs.simd_into(self.simd))
2826 }
2827 #[inline(always)]
2828 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2829 self.simd.unzip_high_f32x8(self, rhs.simd_into(self.simd))
2830 }
2831 #[inline(always)]
2832 fn max(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2833 self.simd.max_f32x8(self, rhs.simd_into(self.simd))
2834 }
2835 #[inline(always)]
2836 fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2837 self.simd.max_precise_f32x8(self, rhs.simd_into(self.simd))
2838 }
2839 #[inline(always)]
2840 fn min(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2841 self.simd.min_f32x8(self, rhs.simd_into(self.simd))
2842 }
2843 #[inline(always)]
2844 fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x8<S> {
2845 self.simd.min_precise_f32x8(self, rhs.simd_into(self.simd))
2846 }
2847 #[inline(always)]
2848 fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x8<S> {
2849 self.simd
2850 .madd_f32x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
2851 }
2852 #[inline(always)]
2853 fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x8<S> {
2854 self.simd
2855 .msub_f32x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
2856 }
2857 #[inline(always)]
2858 fn floor(self) -> f32x8<S> {
2859 self.simd.floor_f32x8(self)
2860 }
2861 #[inline(always)]
2862 fn fract(self) -> f32x8<S> {
2863 self.simd.fract_f32x8(self)
2864 }
2865 #[inline(always)]
2866 fn trunc(self) -> f32x8<S> {
2867 self.simd.trunc_f32x8(self)
2868 }
2869}
2870impl<S: Simd> SimdCvtFloat<u32x8<S>> for f32x8<S> {
2871 fn float_from(x: u32x8<S>) -> Self {
2872 x.simd.cvt_f32_u32x8(x)
2873 }
2874}
2875impl<S: Simd> SimdCvtFloat<i32x8<S>> for f32x8<S> {
2876 fn float_from(x: i32x8<S>) -> Self {
2877 x.simd.cvt_f32_i32x8(x)
2878 }
2879}
/// A SIMD vector of 32 `i8` lanes, tagged with the `Simd` capability witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct i8x32<S: Simd> {
    /// The lane values.
    pub val: [i8; 32],
    /// Capability witness used to dispatch lane operations.
    pub simd: S,
}
2886impl<S: Simd> SimdFrom<[i8; 32], S> for i8x32<S> {
2887 #[inline(always)]
2888 fn simd_from(val: [i8; 32], simd: S) -> Self {
2889 Self {
2890 val: [
2891 val[0usize],
2892 val[1usize],
2893 val[2usize],
2894 val[3usize],
2895 val[4usize],
2896 val[5usize],
2897 val[6usize],
2898 val[7usize],
2899 val[8usize],
2900 val[9usize],
2901 val[10usize],
2902 val[11usize],
2903 val[12usize],
2904 val[13usize],
2905 val[14usize],
2906 val[15usize],
2907 val[16usize],
2908 val[17usize],
2909 val[18usize],
2910 val[19usize],
2911 val[20usize],
2912 val[21usize],
2913 val[22usize],
2914 val[23usize],
2915 val[24usize],
2916 val[25usize],
2917 val[26usize],
2918 val[27usize],
2919 val[28usize],
2920 val[29usize],
2921 val[30usize],
2922 val[31usize],
2923 ],
2924 simd,
2925 }
2926 }
2927}
2928impl<S: Simd> From<i8x32<S>> for [i8; 32] {
2929 #[inline(always)]
2930 fn from(value: i8x32<S>) -> Self {
2931 value.val
2932 }
2933}
2934impl<S: Simd> core::ops::Deref for i8x32<S> {
2935 type Target = [i8; 32];
2936 #[inline(always)]
2937 fn deref(&self) -> &Self::Target {
2938 &self.val
2939 }
2940}
2941impl<S: Simd> core::ops::DerefMut for i8x32<S> {
2942 #[inline(always)]
2943 fn deref_mut(&mut self) -> &mut Self::Target {
2944 &mut self.val
2945 }
2946}
2947impl<S: Simd> SimdFrom<i8, S> for i8x32<S> {
2948 #[inline(always)]
2949 fn simd_from(value: i8, simd: S) -> Self {
2950 simd.splat_i8x32(value)
2951 }
2952}
2953impl<S: Simd> Select<i8x32<S>> for mask8x32<S> {
2954 #[inline(always)]
2955 fn select(self, if_true: i8x32<S>, if_false: i8x32<S>) -> i8x32<S> {
2956 self.simd.select_i8x32(self, if_true, if_false)
2957 }
2958}
impl<S: Simd> Bytes for i8x32<S> {
    type Bytes = u8x32<S>;
    /// Reinterprets the 32 `i8` lanes as 32 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [i8; 32] and [u8; 32] have identical size and every bit
        // pattern is a valid u8, so the transmute is sound.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 32 raw bytes as 32 `i8` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: [u8; 32] and [i8; 32] have identical size and every bit
        // pattern is a valid i8, so the transmute is sound.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
2980impl<S: Simd> i8x32<S> {
2981 #[inline(always)]
2982 pub fn not(self) -> i8x32<S> {
2983 self.simd.not_i8x32(self)
2984 }
2985 #[inline(always)]
2986 pub fn add(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
2987 self.simd.add_i8x32(self, rhs.simd_into(self.simd))
2988 }
2989 #[inline(always)]
2990 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
2991 self.simd.sub_i8x32(self, rhs.simd_into(self.simd))
2992 }
2993 #[inline(always)]
2994 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
2995 self.simd.mul_i8x32(self, rhs.simd_into(self.simd))
2996 }
2997 #[inline(always)]
2998 pub fn and(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
2999 self.simd.and_i8x32(self, rhs.simd_into(self.simd))
3000 }
3001 #[inline(always)]
3002 pub fn or(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3003 self.simd.or_i8x32(self, rhs.simd_into(self.simd))
3004 }
3005 #[inline(always)]
3006 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3007 self.simd.xor_i8x32(self, rhs.simd_into(self.simd))
3008 }
3009 #[inline(always)]
3010 pub fn shr(self, shift: u32) -> i8x32<S> {
3011 self.simd.shr_i8x32(self, shift)
3012 }
3013 #[inline(always)]
3014 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3015 self.simd.shrv_i8x32(self, rhs.simd_into(self.simd))
3016 }
3017 #[inline(always)]
3018 pub fn shl(self, shift: u32) -> i8x32<S> {
3019 self.simd.shl_i8x32(self, shift)
3020 }
3021 #[inline(always)]
3022 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3023 self.simd.simd_eq_i8x32(self, rhs.simd_into(self.simd))
3024 }
3025 #[inline(always)]
3026 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3027 self.simd.simd_lt_i8x32(self, rhs.simd_into(self.simd))
3028 }
3029 #[inline(always)]
3030 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3031 self.simd.simd_le_i8x32(self, rhs.simd_into(self.simd))
3032 }
3033 #[inline(always)]
3034 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3035 self.simd.simd_ge_i8x32(self, rhs.simd_into(self.simd))
3036 }
3037 #[inline(always)]
3038 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3039 self.simd.simd_gt_i8x32(self, rhs.simd_into(self.simd))
3040 }
3041 #[inline(always)]
3042 pub fn min(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3043 self.simd.min_i8x32(self, rhs.simd_into(self.simd))
3044 }
3045 #[inline(always)]
3046 pub fn max(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3047 self.simd.max_i8x32(self, rhs.simd_into(self.simd))
3048 }
3049 #[inline(always)]
3050 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
3051 self.simd.combine_i8x32(self, rhs.simd_into(self.simd))
3052 }
3053 #[inline(always)]
3054 pub fn neg(self) -> i8x32<S> {
3055 self.simd.neg_i8x32(self)
3056 }
3057 #[inline(always)]
3058 pub fn reinterpret_u8(self) -> u8x32<S> {
3059 self.simd.reinterpret_u8_i8x32(self)
3060 }
3061 #[inline(always)]
3062 pub fn reinterpret_u32(self) -> u32x8<S> {
3063 self.simd.reinterpret_u32_i8x32(self)
3064 }
3065}
3066impl<S: Simd> crate::SimdBase<i8, S> for i8x32<S> {
3067 const N: usize = 32;
3068 type Mask = mask8x32<S>;
3069 type Block = i8x16<S>;
3070 #[inline(always)]
3071 fn witness(&self) -> S {
3072 self.simd
3073 }
3074 #[inline(always)]
3075 fn as_slice(&self) -> &[i8] {
3076 &self.val
3077 }
3078 #[inline(always)]
3079 fn as_mut_slice(&mut self) -> &mut [i8] {
3080 &mut self.val
3081 }
3082 #[inline(always)]
3083 fn from_slice(simd: S, slice: &[i8]) -> Self {
3084 let mut val = [0; 32];
3085 val.copy_from_slice(slice);
3086 Self { val, simd }
3087 }
3088 #[inline(always)]
3089 fn splat(simd: S, val: i8) -> Self {
3090 simd.splat_i8x32(val)
3091 }
3092 #[inline(always)]
3093 fn block_splat(block: Self::Block) -> Self {
3094 block.combine(block)
3095 }
3096}
3097impl<S: Simd> crate::SimdInt<i8, S> for i8x32<S> {
3098 #[inline(always)]
3099 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3100 self.simd.simd_eq_i8x32(self, rhs.simd_into(self.simd))
3101 }
3102 #[inline(always)]
3103 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3104 self.simd.simd_lt_i8x32(self, rhs.simd_into(self.simd))
3105 }
3106 #[inline(always)]
3107 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3108 self.simd.simd_le_i8x32(self, rhs.simd_into(self.simd))
3109 }
3110 #[inline(always)]
3111 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3112 self.simd.simd_ge_i8x32(self, rhs.simd_into(self.simd))
3113 }
3114 #[inline(always)]
3115 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3116 self.simd.simd_gt_i8x32(self, rhs.simd_into(self.simd))
3117 }
3118 #[inline(always)]
3119 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3120 self.simd.zip_low_i8x32(self, rhs.simd_into(self.simd))
3121 }
3122 #[inline(always)]
3123 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3124 self.simd.zip_high_i8x32(self, rhs.simd_into(self.simd))
3125 }
3126 #[inline(always)]
3127 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3128 self.simd.unzip_low_i8x32(self, rhs.simd_into(self.simd))
3129 }
3130 #[inline(always)]
3131 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3132 self.simd.unzip_high_i8x32(self, rhs.simd_into(self.simd))
3133 }
3134 #[inline(always)]
3135 fn min(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3136 self.simd.min_i8x32(self, rhs.simd_into(self.simd))
3137 }
3138 #[inline(always)]
3139 fn max(self, rhs: impl SimdInto<Self, S>) -> i8x32<S> {
3140 self.simd.max_i8x32(self, rhs.simd_into(self.simd))
3141 }
3142}
/// A SIMD vector of 32 `u8` lanes, tagged with the `Simd` capability witness.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct u8x32<S: Simd> {
    /// The lane values.
    pub val: [u8; 32],
    /// Capability witness used to dispatch lane operations.
    pub simd: S,
}
3149impl<S: Simd> SimdFrom<[u8; 32], S> for u8x32<S> {
3150 #[inline(always)]
3151 fn simd_from(val: [u8; 32], simd: S) -> Self {
3152 Self {
3153 val: [
3154 val[0usize],
3155 val[1usize],
3156 val[2usize],
3157 val[3usize],
3158 val[4usize],
3159 val[5usize],
3160 val[6usize],
3161 val[7usize],
3162 val[8usize],
3163 val[9usize],
3164 val[10usize],
3165 val[11usize],
3166 val[12usize],
3167 val[13usize],
3168 val[14usize],
3169 val[15usize],
3170 val[16usize],
3171 val[17usize],
3172 val[18usize],
3173 val[19usize],
3174 val[20usize],
3175 val[21usize],
3176 val[22usize],
3177 val[23usize],
3178 val[24usize],
3179 val[25usize],
3180 val[26usize],
3181 val[27usize],
3182 val[28usize],
3183 val[29usize],
3184 val[30usize],
3185 val[31usize],
3186 ],
3187 simd,
3188 }
3189 }
3190}
3191impl<S: Simd> From<u8x32<S>> for [u8; 32] {
3192 #[inline(always)]
3193 fn from(value: u8x32<S>) -> Self {
3194 value.val
3195 }
3196}
3197impl<S: Simd> core::ops::Deref for u8x32<S> {
3198 type Target = [u8; 32];
3199 #[inline(always)]
3200 fn deref(&self) -> &Self::Target {
3201 &self.val
3202 }
3203}
3204impl<S: Simd> core::ops::DerefMut for u8x32<S> {
3205 #[inline(always)]
3206 fn deref_mut(&mut self) -> &mut Self::Target {
3207 &mut self.val
3208 }
3209}
3210impl<S: Simd> SimdFrom<u8, S> for u8x32<S> {
3211 #[inline(always)]
3212 fn simd_from(value: u8, simd: S) -> Self {
3213 simd.splat_u8x32(value)
3214 }
3215}
3216impl<S: Simd> Select<u8x32<S>> for mask8x32<S> {
3217 #[inline(always)]
3218 fn select(self, if_true: u8x32<S>, if_false: u8x32<S>) -> u8x32<S> {
3219 self.simd.select_u8x32(self, if_true, if_false)
3220 }
3221}
impl<S: Simd> Bytes for u8x32<S> {
    // A byte vector is its own byte representation; both transmutes below are
    // identity conversions kept for uniformity with the other Bytes impls.
    type Bytes = u8x32<S>;
    /// Returns the lanes as raw bytes (identity for `u8x32`).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: [u8; 32] -> [u8; 32] is an identity transmute.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Builds the vector from raw bytes (identity for `u8x32`).
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: [u8; 32] -> [u8; 32] is an identity transmute.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
3243impl<S: Simd> u8x32<S> {
3244 #[inline(always)]
3245 pub fn not(self) -> u8x32<S> {
3246 self.simd.not_u8x32(self)
3247 }
3248 #[inline(always)]
3249 pub fn add(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3250 self.simd.add_u8x32(self, rhs.simd_into(self.simd))
3251 }
3252 #[inline(always)]
3253 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3254 self.simd.sub_u8x32(self, rhs.simd_into(self.simd))
3255 }
3256 #[inline(always)]
3257 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3258 self.simd.mul_u8x32(self, rhs.simd_into(self.simd))
3259 }
3260 #[inline(always)]
3261 pub fn and(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3262 self.simd.and_u8x32(self, rhs.simd_into(self.simd))
3263 }
3264 #[inline(always)]
3265 pub fn or(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3266 self.simd.or_u8x32(self, rhs.simd_into(self.simd))
3267 }
3268 #[inline(always)]
3269 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3270 self.simd.xor_u8x32(self, rhs.simd_into(self.simd))
3271 }
3272 #[inline(always)]
3273 pub fn shr(self, shift: u32) -> u8x32<S> {
3274 self.simd.shr_u8x32(self, shift)
3275 }
3276 #[inline(always)]
3277 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3278 self.simd.shrv_u8x32(self, rhs.simd_into(self.simd))
3279 }
3280 #[inline(always)]
3281 pub fn shl(self, shift: u32) -> u8x32<S> {
3282 self.simd.shl_u8x32(self, shift)
3283 }
3284 #[inline(always)]
3285 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3286 self.simd.simd_eq_u8x32(self, rhs.simd_into(self.simd))
3287 }
3288 #[inline(always)]
3289 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3290 self.simd.simd_lt_u8x32(self, rhs.simd_into(self.simd))
3291 }
3292 #[inline(always)]
3293 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3294 self.simd.simd_le_u8x32(self, rhs.simd_into(self.simd))
3295 }
3296 #[inline(always)]
3297 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3298 self.simd.simd_ge_u8x32(self, rhs.simd_into(self.simd))
3299 }
3300 #[inline(always)]
3301 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3302 self.simd.simd_gt_u8x32(self, rhs.simd_into(self.simd))
3303 }
3304 #[inline(always)]
3305 pub fn min(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3306 self.simd.min_u8x32(self, rhs.simd_into(self.simd))
3307 }
3308 #[inline(always)]
3309 pub fn max(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3310 self.simd.max_u8x32(self, rhs.simd_into(self.simd))
3311 }
3312 #[inline(always)]
3313 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
3314 self.simd.combine_u8x32(self, rhs.simd_into(self.simd))
3315 }
3316 #[inline(always)]
3317 pub fn reinterpret_u32(self) -> u32x8<S> {
3318 self.simd.reinterpret_u32_u8x32(self)
3319 }
3320}
3321impl<S: Simd> crate::SimdBase<u8, S> for u8x32<S> {
3322 const N: usize = 32;
3323 type Mask = mask8x32<S>;
3324 type Block = u8x16<S>;
3325 #[inline(always)]
3326 fn witness(&self) -> S {
3327 self.simd
3328 }
3329 #[inline(always)]
3330 fn as_slice(&self) -> &[u8] {
3331 &self.val
3332 }
3333 #[inline(always)]
3334 fn as_mut_slice(&mut self) -> &mut [u8] {
3335 &mut self.val
3336 }
3337 #[inline(always)]
3338 fn from_slice(simd: S, slice: &[u8]) -> Self {
3339 let mut val = [0; 32];
3340 val.copy_from_slice(slice);
3341 Self { val, simd }
3342 }
3343 #[inline(always)]
3344 fn splat(simd: S, val: u8) -> Self {
3345 simd.splat_u8x32(val)
3346 }
3347 #[inline(always)]
3348 fn block_splat(block: Self::Block) -> Self {
3349 block.combine(block)
3350 }
3351}
3352impl<S: Simd> crate::SimdInt<u8, S> for u8x32<S> {
3353 #[inline(always)]
3354 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3355 self.simd.simd_eq_u8x32(self, rhs.simd_into(self.simd))
3356 }
3357 #[inline(always)]
3358 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3359 self.simd.simd_lt_u8x32(self, rhs.simd_into(self.simd))
3360 }
3361 #[inline(always)]
3362 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3363 self.simd.simd_le_u8x32(self, rhs.simd_into(self.simd))
3364 }
3365 #[inline(always)]
3366 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3367 self.simd.simd_ge_u8x32(self, rhs.simd_into(self.simd))
3368 }
3369 #[inline(always)]
3370 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3371 self.simd.simd_gt_u8x32(self, rhs.simd_into(self.simd))
3372 }
3373 #[inline(always)]
3374 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3375 self.simd.zip_low_u8x32(self, rhs.simd_into(self.simd))
3376 }
3377 #[inline(always)]
3378 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3379 self.simd.zip_high_u8x32(self, rhs.simd_into(self.simd))
3380 }
3381 #[inline(always)]
3382 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3383 self.simd.unzip_low_u8x32(self, rhs.simd_into(self.simd))
3384 }
3385 #[inline(always)]
3386 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3387 self.simd.unzip_high_u8x32(self, rhs.simd_into(self.simd))
3388 }
3389 #[inline(always)]
3390 fn min(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3391 self.simd.min_u8x32(self, rhs.simd_into(self.simd))
3392 }
3393 #[inline(always)]
3394 fn max(self, rhs: impl SimdInto<Self, S>) -> u8x32<S> {
3395 self.simd.max_u8x32(self, rhs.simd_into(self.simd))
3396 }
3397}
/// A 32-lane, 8-bit mask vector, 32-byte aligned.
// NOTE(review): lanes presumably hold all-ones/all-zeros values produced by
// the backend's compare ops — confirm against the backend implementations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct mask8x32<S: Simd> {
    // Per-lane mask values.
    pub val: [i8; 32],
    // Witness of the SIMD backend in use.
    pub simd: S,
}
3404impl<S: Simd> SimdFrom<[i8; 32], S> for mask8x32<S> {
3405 #[inline(always)]
3406 fn simd_from(val: [i8; 32], simd: S) -> Self {
3407 Self {
3408 val: [
3409 val[0usize],
3410 val[1usize],
3411 val[2usize],
3412 val[3usize],
3413 val[4usize],
3414 val[5usize],
3415 val[6usize],
3416 val[7usize],
3417 val[8usize],
3418 val[9usize],
3419 val[10usize],
3420 val[11usize],
3421 val[12usize],
3422 val[13usize],
3423 val[14usize],
3424 val[15usize],
3425 val[16usize],
3426 val[17usize],
3427 val[18usize],
3428 val[19usize],
3429 val[20usize],
3430 val[21usize],
3431 val[22usize],
3432 val[23usize],
3433 val[24usize],
3434 val[25usize],
3435 val[26usize],
3436 val[27usize],
3437 val[28usize],
3438 val[29usize],
3439 val[30usize],
3440 val[31usize],
3441 ],
3442 simd,
3443 }
3444 }
3445}
3446impl<S: Simd> From<mask8x32<S>> for [i8; 32] {
3447 #[inline(always)]
3448 fn from(value: mask8x32<S>) -> Self {
3449 value.val
3450 }
3451}
3452impl<S: Simd> core::ops::Deref for mask8x32<S> {
3453 type Target = [i8; 32];
3454 #[inline(always)]
3455 fn deref(&self) -> &Self::Target {
3456 &self.val
3457 }
3458}
3459impl<S: Simd> core::ops::DerefMut for mask8x32<S> {
3460 #[inline(always)]
3461 fn deref_mut(&mut self) -> &mut Self::Target {
3462 &mut self.val
3463 }
3464}
/// Broadcasts a single scalar to every mask lane (splat) via the backend.
impl<S: Simd> SimdFrom<i8, S> for mask8x32<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_mask8x32(value)
    }
}
/// Lane-wise blend between two masks, driven by this mask.
impl<S: Simd> Select<mask8x32<S>> for mask8x32<S> {
    #[inline(always)]
    fn select(self, if_true: mask8x32<S>, if_false: mask8x32<S>) -> mask8x32<S> {
        self.simd.select_mask8x32(self, if_true, if_false)
    }
}
/// Reinterprets the mask's storage as raw bytes and back.
impl<S: Simd> Bytes for mask8x32<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 32]` and `[u8; 32]` have the same size and every
        // bit pattern is valid for both, so this only reinterprets bytes.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in reverse.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
3498impl<S: Simd> mask8x32<S> {
3499 #[inline(always)]
3500 pub fn not(self) -> mask8x32<S> {
3501 self.simd.not_mask8x32(self)
3502 }
3503 #[inline(always)]
3504 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3505 self.simd.and_mask8x32(self, rhs.simd_into(self.simd))
3506 }
3507 #[inline(always)]
3508 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3509 self.simd.or_mask8x32(self, rhs.simd_into(self.simd))
3510 }
3511 #[inline(always)]
3512 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3513 self.simd.xor_mask8x32(self, rhs.simd_into(self.simd))
3514 }
3515 #[inline(always)]
3516 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
3517 self.simd.simd_eq_mask8x32(self, rhs.simd_into(self.simd))
3518 }
3519 #[inline(always)]
3520 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
3521 self.simd.combine_mask8x32(self, rhs.simd_into(self.simd))
3522 }
3523}
3524impl<S: Simd> crate::SimdBase<i8, S> for mask8x32<S> {
3525 const N: usize = 32;
3526 type Mask = mask8x32<S>;
3527 type Block = mask8x16<S>;
3528 #[inline(always)]
3529 fn witness(&self) -> S {
3530 self.simd
3531 }
3532 #[inline(always)]
3533 fn as_slice(&self) -> &[i8] {
3534 &self.val
3535 }
3536 #[inline(always)]
3537 fn as_mut_slice(&mut self) -> &mut [i8] {
3538 &mut self.val
3539 }
3540 #[inline(always)]
3541 fn from_slice(simd: S, slice: &[i8]) -> Self {
3542 let mut val = [0; 32];
3543 val.copy_from_slice(slice);
3544 Self { val, simd }
3545 }
3546 #[inline(always)]
3547 fn splat(simd: S, val: i8) -> Self {
3548 simd.splat_mask8x32(val)
3549 }
3550 #[inline(always)]
3551 fn block_splat(block: Self::Block) -> Self {
3552 block.combine(block)
3553 }
3554}
/// `SimdMask` entry point: lane-wise equality between masks.
impl<S: Simd> crate::SimdMask<i8, S> for mask8x32<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x32<S> {
        self.simd.simd_eq_mask8x32(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of sixteen `i16` lanes, 32-byte aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct i16x16<S: Simd> {
    // Lane values.
    pub val: [i16; 16],
    // Witness of the SIMD backend in use.
    pub simd: S,
}
3567impl<S: Simd> SimdFrom<[i16; 16], S> for i16x16<S> {
3568 #[inline(always)]
3569 fn simd_from(val: [i16; 16], simd: S) -> Self {
3570 Self {
3571 val: [
3572 val[0usize],
3573 val[1usize],
3574 val[2usize],
3575 val[3usize],
3576 val[4usize],
3577 val[5usize],
3578 val[6usize],
3579 val[7usize],
3580 val[8usize],
3581 val[9usize],
3582 val[10usize],
3583 val[11usize],
3584 val[12usize],
3585 val[13usize],
3586 val[14usize],
3587 val[15usize],
3588 ],
3589 simd,
3590 }
3591 }
3592}
3593impl<S: Simd> From<i16x16<S>> for [i16; 16] {
3594 #[inline(always)]
3595 fn from(value: i16x16<S>) -> Self {
3596 value.val
3597 }
3598}
3599impl<S: Simd> core::ops::Deref for i16x16<S> {
3600 type Target = [i16; 16];
3601 #[inline(always)]
3602 fn deref(&self) -> &Self::Target {
3603 &self.val
3604 }
3605}
3606impl<S: Simd> core::ops::DerefMut for i16x16<S> {
3607 #[inline(always)]
3608 fn deref_mut(&mut self) -> &mut Self::Target {
3609 &mut self.val
3610 }
3611}
/// Broadcasts a single scalar to every lane (splat) via the backend.
impl<S: Simd> SimdFrom<i16, S> for i16x16<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_i16x16(value)
    }
}
/// Lane-wise blend between two `i16x16` vectors, driven by this mask.
impl<S: Simd> Select<i16x16<S>> for mask16x16<S> {
    #[inline(always)]
    fn select(self, if_true: i16x16<S>, if_false: i16x16<S>) -> i16x16<S> {
        self.simd.select_i16x16(self, if_true, if_false)
    }
}
/// Reinterprets the vector's storage as raw bytes and back.
impl<S: Simd> Bytes for i16x16<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 16]` and `[u8; 32]` are both 32 bytes and every
        // bit pattern is valid for both, so this only reinterprets bytes.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in reverse.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
3645impl<S: Simd> i16x16<S> {
3646 #[inline(always)]
3647 pub fn not(self) -> i16x16<S> {
3648 self.simd.not_i16x16(self)
3649 }
3650 #[inline(always)]
3651 pub fn add(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3652 self.simd.add_i16x16(self, rhs.simd_into(self.simd))
3653 }
3654 #[inline(always)]
3655 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3656 self.simd.sub_i16x16(self, rhs.simd_into(self.simd))
3657 }
3658 #[inline(always)]
3659 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3660 self.simd.mul_i16x16(self, rhs.simd_into(self.simd))
3661 }
3662 #[inline(always)]
3663 pub fn and(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3664 self.simd.and_i16x16(self, rhs.simd_into(self.simd))
3665 }
3666 #[inline(always)]
3667 pub fn or(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3668 self.simd.or_i16x16(self, rhs.simd_into(self.simd))
3669 }
3670 #[inline(always)]
3671 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3672 self.simd.xor_i16x16(self, rhs.simd_into(self.simd))
3673 }
3674 #[inline(always)]
3675 pub fn shr(self, shift: u32) -> i16x16<S> {
3676 self.simd.shr_i16x16(self, shift)
3677 }
3678 #[inline(always)]
3679 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3680 self.simd.shrv_i16x16(self, rhs.simd_into(self.simd))
3681 }
3682 #[inline(always)]
3683 pub fn shl(self, shift: u32) -> i16x16<S> {
3684 self.simd.shl_i16x16(self, shift)
3685 }
3686 #[inline(always)]
3687 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3688 self.simd.simd_eq_i16x16(self, rhs.simd_into(self.simd))
3689 }
3690 #[inline(always)]
3691 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3692 self.simd.simd_lt_i16x16(self, rhs.simd_into(self.simd))
3693 }
3694 #[inline(always)]
3695 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3696 self.simd.simd_le_i16x16(self, rhs.simd_into(self.simd))
3697 }
3698 #[inline(always)]
3699 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3700 self.simd.simd_ge_i16x16(self, rhs.simd_into(self.simd))
3701 }
3702 #[inline(always)]
3703 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3704 self.simd.simd_gt_i16x16(self, rhs.simd_into(self.simd))
3705 }
3706 #[inline(always)]
3707 pub fn min(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3708 self.simd.min_i16x16(self, rhs.simd_into(self.simd))
3709 }
3710 #[inline(always)]
3711 pub fn max(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3712 self.simd.max_i16x16(self, rhs.simd_into(self.simd))
3713 }
3714 #[inline(always)]
3715 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
3716 self.simd.combine_i16x16(self, rhs.simd_into(self.simd))
3717 }
3718 #[inline(always)]
3719 pub fn neg(self) -> i16x16<S> {
3720 self.simd.neg_i16x16(self)
3721 }
3722 #[inline(always)]
3723 pub fn reinterpret_u8(self) -> u8x32<S> {
3724 self.simd.reinterpret_u8_i16x16(self)
3725 }
3726 #[inline(always)]
3727 pub fn reinterpret_u32(self) -> u32x8<S> {
3728 self.simd.reinterpret_u32_i16x16(self)
3729 }
3730}
3731impl<S: Simd> crate::SimdBase<i16, S> for i16x16<S> {
3732 const N: usize = 16;
3733 type Mask = mask16x16<S>;
3734 type Block = i16x8<S>;
3735 #[inline(always)]
3736 fn witness(&self) -> S {
3737 self.simd
3738 }
3739 #[inline(always)]
3740 fn as_slice(&self) -> &[i16] {
3741 &self.val
3742 }
3743 #[inline(always)]
3744 fn as_mut_slice(&mut self) -> &mut [i16] {
3745 &mut self.val
3746 }
3747 #[inline(always)]
3748 fn from_slice(simd: S, slice: &[i16]) -> Self {
3749 let mut val = [0; 16];
3750 val.copy_from_slice(slice);
3751 Self { val, simd }
3752 }
3753 #[inline(always)]
3754 fn splat(simd: S, val: i16) -> Self {
3755 simd.splat_i16x16(val)
3756 }
3757 #[inline(always)]
3758 fn block_splat(block: Self::Block) -> Self {
3759 block.combine(block)
3760 }
3761}
3762impl<S: Simd> crate::SimdInt<i16, S> for i16x16<S> {
3763 #[inline(always)]
3764 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3765 self.simd.simd_eq_i16x16(self, rhs.simd_into(self.simd))
3766 }
3767 #[inline(always)]
3768 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3769 self.simd.simd_lt_i16x16(self, rhs.simd_into(self.simd))
3770 }
3771 #[inline(always)]
3772 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3773 self.simd.simd_le_i16x16(self, rhs.simd_into(self.simd))
3774 }
3775 #[inline(always)]
3776 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3777 self.simd.simd_ge_i16x16(self, rhs.simd_into(self.simd))
3778 }
3779 #[inline(always)]
3780 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3781 self.simd.simd_gt_i16x16(self, rhs.simd_into(self.simd))
3782 }
3783 #[inline(always)]
3784 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3785 self.simd.zip_low_i16x16(self, rhs.simd_into(self.simd))
3786 }
3787 #[inline(always)]
3788 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3789 self.simd.zip_high_i16x16(self, rhs.simd_into(self.simd))
3790 }
3791 #[inline(always)]
3792 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3793 self.simd.unzip_low_i16x16(self, rhs.simd_into(self.simd))
3794 }
3795 #[inline(always)]
3796 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3797 self.simd.unzip_high_i16x16(self, rhs.simd_into(self.simd))
3798 }
3799 #[inline(always)]
3800 fn min(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3801 self.simd.min_i16x16(self, rhs.simd_into(self.simd))
3802 }
3803 #[inline(always)]
3804 fn max(self, rhs: impl SimdInto<Self, S>) -> i16x16<S> {
3805 self.simd.max_i16x16(self, rhs.simd_into(self.simd))
3806 }
3807}
/// A SIMD vector of sixteen `u16` lanes, 32-byte aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct u16x16<S: Simd> {
    // Lane values.
    pub val: [u16; 16],
    // Witness of the SIMD backend in use.
    pub simd: S,
}
3814impl<S: Simd> SimdFrom<[u16; 16], S> for u16x16<S> {
3815 #[inline(always)]
3816 fn simd_from(val: [u16; 16], simd: S) -> Self {
3817 Self {
3818 val: [
3819 val[0usize],
3820 val[1usize],
3821 val[2usize],
3822 val[3usize],
3823 val[4usize],
3824 val[5usize],
3825 val[6usize],
3826 val[7usize],
3827 val[8usize],
3828 val[9usize],
3829 val[10usize],
3830 val[11usize],
3831 val[12usize],
3832 val[13usize],
3833 val[14usize],
3834 val[15usize],
3835 ],
3836 simd,
3837 }
3838 }
3839}
3840impl<S: Simd> From<u16x16<S>> for [u16; 16] {
3841 #[inline(always)]
3842 fn from(value: u16x16<S>) -> Self {
3843 value.val
3844 }
3845}
3846impl<S: Simd> core::ops::Deref for u16x16<S> {
3847 type Target = [u16; 16];
3848 #[inline(always)]
3849 fn deref(&self) -> &Self::Target {
3850 &self.val
3851 }
3852}
3853impl<S: Simd> core::ops::DerefMut for u16x16<S> {
3854 #[inline(always)]
3855 fn deref_mut(&mut self) -> &mut Self::Target {
3856 &mut self.val
3857 }
3858}
/// Broadcasts a single scalar to every lane (splat) via the backend.
impl<S: Simd> SimdFrom<u16, S> for u16x16<S> {
    #[inline(always)]
    fn simd_from(value: u16, simd: S) -> Self {
        simd.splat_u16x16(value)
    }
}
/// Lane-wise blend between two `u16x16` vectors, driven by this mask.
impl<S: Simd> Select<u16x16<S>> for mask16x16<S> {
    #[inline(always)]
    fn select(self, if_true: u16x16<S>, if_false: u16x16<S>) -> u16x16<S> {
        self.simd.select_u16x16(self, if_true, if_false)
    }
}
/// Reinterprets the vector's storage as raw bytes and back.
impl<S: Simd> Bytes for u16x16<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u16; 16]` and `[u8; 32]` are both 32 bytes and every
        // bit pattern is valid for both, so this only reinterprets bytes.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in reverse.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
3892impl<S: Simd> u16x16<S> {
3893 #[inline(always)]
3894 pub fn not(self) -> u16x16<S> {
3895 self.simd.not_u16x16(self)
3896 }
3897 #[inline(always)]
3898 pub fn add(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3899 self.simd.add_u16x16(self, rhs.simd_into(self.simd))
3900 }
3901 #[inline(always)]
3902 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3903 self.simd.sub_u16x16(self, rhs.simd_into(self.simd))
3904 }
3905 #[inline(always)]
3906 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3907 self.simd.mul_u16x16(self, rhs.simd_into(self.simd))
3908 }
3909 #[inline(always)]
3910 pub fn and(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3911 self.simd.and_u16x16(self, rhs.simd_into(self.simd))
3912 }
3913 #[inline(always)]
3914 pub fn or(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3915 self.simd.or_u16x16(self, rhs.simd_into(self.simd))
3916 }
3917 #[inline(always)]
3918 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3919 self.simd.xor_u16x16(self, rhs.simd_into(self.simd))
3920 }
3921 #[inline(always)]
3922 pub fn shr(self, shift: u32) -> u16x16<S> {
3923 self.simd.shr_u16x16(self, shift)
3924 }
3925 #[inline(always)]
3926 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3927 self.simd.shrv_u16x16(self, rhs.simd_into(self.simd))
3928 }
3929 #[inline(always)]
3930 pub fn shl(self, shift: u32) -> u16x16<S> {
3931 self.simd.shl_u16x16(self, shift)
3932 }
3933 #[inline(always)]
3934 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3935 self.simd.simd_eq_u16x16(self, rhs.simd_into(self.simd))
3936 }
3937 #[inline(always)]
3938 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3939 self.simd.simd_lt_u16x16(self, rhs.simd_into(self.simd))
3940 }
3941 #[inline(always)]
3942 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3943 self.simd.simd_le_u16x16(self, rhs.simd_into(self.simd))
3944 }
3945 #[inline(always)]
3946 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3947 self.simd.simd_ge_u16x16(self, rhs.simd_into(self.simd))
3948 }
3949 #[inline(always)]
3950 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
3951 self.simd.simd_gt_u16x16(self, rhs.simd_into(self.simd))
3952 }
3953 #[inline(always)]
3954 pub fn min(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3955 self.simd.min_u16x16(self, rhs.simd_into(self.simd))
3956 }
3957 #[inline(always)]
3958 pub fn max(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
3959 self.simd.max_u16x16(self, rhs.simd_into(self.simd))
3960 }
3961 #[inline(always)]
3962 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
3963 self.simd.combine_u16x16(self, rhs.simd_into(self.simd))
3964 }
3965 #[inline(always)]
3966 pub fn reinterpret_u8(self) -> u8x32<S> {
3967 self.simd.reinterpret_u8_u16x16(self)
3968 }
3969 #[inline(always)]
3970 pub fn reinterpret_u32(self) -> u32x8<S> {
3971 self.simd.reinterpret_u32_u16x16(self)
3972 }
3973}
3974impl<S: Simd> crate::SimdBase<u16, S> for u16x16<S> {
3975 const N: usize = 16;
3976 type Mask = mask16x16<S>;
3977 type Block = u16x8<S>;
3978 #[inline(always)]
3979 fn witness(&self) -> S {
3980 self.simd
3981 }
3982 #[inline(always)]
3983 fn as_slice(&self) -> &[u16] {
3984 &self.val
3985 }
3986 #[inline(always)]
3987 fn as_mut_slice(&mut self) -> &mut [u16] {
3988 &mut self.val
3989 }
3990 #[inline(always)]
3991 fn from_slice(simd: S, slice: &[u16]) -> Self {
3992 let mut val = [0; 16];
3993 val.copy_from_slice(slice);
3994 Self { val, simd }
3995 }
3996 #[inline(always)]
3997 fn splat(simd: S, val: u16) -> Self {
3998 simd.splat_u16x16(val)
3999 }
4000 #[inline(always)]
4001 fn block_splat(block: Self::Block) -> Self {
4002 block.combine(block)
4003 }
4004}
4005impl<S: Simd> crate::SimdInt<u16, S> for u16x16<S> {
4006 #[inline(always)]
4007 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4008 self.simd.simd_eq_u16x16(self, rhs.simd_into(self.simd))
4009 }
4010 #[inline(always)]
4011 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4012 self.simd.simd_lt_u16x16(self, rhs.simd_into(self.simd))
4013 }
4014 #[inline(always)]
4015 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4016 self.simd.simd_le_u16x16(self, rhs.simd_into(self.simd))
4017 }
4018 #[inline(always)]
4019 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4020 self.simd.simd_ge_u16x16(self, rhs.simd_into(self.simd))
4021 }
4022 #[inline(always)]
4023 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4024 self.simd.simd_gt_u16x16(self, rhs.simd_into(self.simd))
4025 }
4026 #[inline(always)]
4027 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
4028 self.simd.zip_low_u16x16(self, rhs.simd_into(self.simd))
4029 }
4030 #[inline(always)]
4031 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
4032 self.simd.zip_high_u16x16(self, rhs.simd_into(self.simd))
4033 }
4034 #[inline(always)]
4035 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
4036 self.simd.unzip_low_u16x16(self, rhs.simd_into(self.simd))
4037 }
4038 #[inline(always)]
4039 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
4040 self.simd.unzip_high_u16x16(self, rhs.simd_into(self.simd))
4041 }
4042 #[inline(always)]
4043 fn min(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
4044 self.simd.min_u16x16(self, rhs.simd_into(self.simd))
4045 }
4046 #[inline(always)]
4047 fn max(self, rhs: impl SimdInto<Self, S>) -> u16x16<S> {
4048 self.simd.max_u16x16(self, rhs.simd_into(self.simd))
4049 }
4050}
/// A 16-lane, 16-bit mask vector, 32-byte aligned.
// NOTE(review): lanes presumably hold all-ones/all-zeros values produced by
// the backend's compare ops — confirm against the backend implementations.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct mask16x16<S: Simd> {
    // Per-lane mask values.
    pub val: [i16; 16],
    // Witness of the SIMD backend in use.
    pub simd: S,
}
4057impl<S: Simd> SimdFrom<[i16; 16], S> for mask16x16<S> {
4058 #[inline(always)]
4059 fn simd_from(val: [i16; 16], simd: S) -> Self {
4060 Self {
4061 val: [
4062 val[0usize],
4063 val[1usize],
4064 val[2usize],
4065 val[3usize],
4066 val[4usize],
4067 val[5usize],
4068 val[6usize],
4069 val[7usize],
4070 val[8usize],
4071 val[9usize],
4072 val[10usize],
4073 val[11usize],
4074 val[12usize],
4075 val[13usize],
4076 val[14usize],
4077 val[15usize],
4078 ],
4079 simd,
4080 }
4081 }
4082}
4083impl<S: Simd> From<mask16x16<S>> for [i16; 16] {
4084 #[inline(always)]
4085 fn from(value: mask16x16<S>) -> Self {
4086 value.val
4087 }
4088}
4089impl<S: Simd> core::ops::Deref for mask16x16<S> {
4090 type Target = [i16; 16];
4091 #[inline(always)]
4092 fn deref(&self) -> &Self::Target {
4093 &self.val
4094 }
4095}
4096impl<S: Simd> core::ops::DerefMut for mask16x16<S> {
4097 #[inline(always)]
4098 fn deref_mut(&mut self) -> &mut Self::Target {
4099 &mut self.val
4100 }
4101}
/// Broadcasts a single scalar to every mask lane (splat) via the backend.
impl<S: Simd> SimdFrom<i16, S> for mask16x16<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_mask16x16(value)
    }
}
/// Lane-wise blend between two masks, driven by this mask.
impl<S: Simd> Select<mask16x16<S>> for mask16x16<S> {
    #[inline(always)]
    fn select(self, if_true: mask16x16<S>, if_false: mask16x16<S>) -> mask16x16<S> {
        self.simd.select_mask16x16(self, if_true, if_false)
    }
}
/// Reinterprets the mask's storage as raw bytes and back.
impl<S: Simd> Bytes for mask16x16<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 16]` and `[u8; 32]` are both 32 bytes and every
        // bit pattern is valid for both, so this only reinterprets bytes.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in reverse.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
4135impl<S: Simd> mask16x16<S> {
4136 #[inline(always)]
4137 pub fn not(self) -> mask16x16<S> {
4138 self.simd.not_mask16x16(self)
4139 }
4140 #[inline(always)]
4141 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4142 self.simd.and_mask16x16(self, rhs.simd_into(self.simd))
4143 }
4144 #[inline(always)]
4145 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4146 self.simd.or_mask16x16(self, rhs.simd_into(self.simd))
4147 }
4148 #[inline(always)]
4149 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4150 self.simd.xor_mask16x16(self, rhs.simd_into(self.simd))
4151 }
4152 #[inline(always)]
4153 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
4154 self.simd.simd_eq_mask16x16(self, rhs.simd_into(self.simd))
4155 }
4156 #[inline(always)]
4157 pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
4158 self.simd.combine_mask16x16(self, rhs.simd_into(self.simd))
4159 }
4160}
4161impl<S: Simd> crate::SimdBase<i16, S> for mask16x16<S> {
4162 const N: usize = 16;
4163 type Mask = mask16x16<S>;
4164 type Block = mask16x8<S>;
4165 #[inline(always)]
4166 fn witness(&self) -> S {
4167 self.simd
4168 }
4169 #[inline(always)]
4170 fn as_slice(&self) -> &[i16] {
4171 &self.val
4172 }
4173 #[inline(always)]
4174 fn as_mut_slice(&mut self) -> &mut [i16] {
4175 &mut self.val
4176 }
4177 #[inline(always)]
4178 fn from_slice(simd: S, slice: &[i16]) -> Self {
4179 let mut val = [0; 16];
4180 val.copy_from_slice(slice);
4181 Self { val, simd }
4182 }
4183 #[inline(always)]
4184 fn splat(simd: S, val: i16) -> Self {
4185 simd.splat_mask16x16(val)
4186 }
4187 #[inline(always)]
4188 fn block_splat(block: Self::Block) -> Self {
4189 block.combine(block)
4190 }
4191}
/// `SimdMask` entry point: lane-wise equality between masks.
impl<S: Simd> crate::SimdMask<i16, S> for mask16x16<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x16<S> {
        self.simd.simd_eq_mask16x16(self, rhs.simd_into(self.simd))
    }
}
/// A SIMD vector of eight `i32` lanes, 32-byte aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct i32x8<S: Simd> {
    // Lane values.
    pub val: [i32; 8],
    // Witness of the SIMD backend in use.
    pub simd: S,
}
4204impl<S: Simd> SimdFrom<[i32; 8], S> for i32x8<S> {
4205 #[inline(always)]
4206 fn simd_from(val: [i32; 8], simd: S) -> Self {
4207 Self {
4208 val: [
4209 val[0usize],
4210 val[1usize],
4211 val[2usize],
4212 val[3usize],
4213 val[4usize],
4214 val[5usize],
4215 val[6usize],
4216 val[7usize],
4217 ],
4218 simd,
4219 }
4220 }
4221}
4222impl<S: Simd> From<i32x8<S>> for [i32; 8] {
4223 #[inline(always)]
4224 fn from(value: i32x8<S>) -> Self {
4225 value.val
4226 }
4227}
4228impl<S: Simd> core::ops::Deref for i32x8<S> {
4229 type Target = [i32; 8];
4230 #[inline(always)]
4231 fn deref(&self) -> &Self::Target {
4232 &self.val
4233 }
4234}
4235impl<S: Simd> core::ops::DerefMut for i32x8<S> {
4236 #[inline(always)]
4237 fn deref_mut(&mut self) -> &mut Self::Target {
4238 &mut self.val
4239 }
4240}
/// Broadcasts a single scalar to every lane (splat) via the backend.
impl<S: Simd> SimdFrom<i32, S> for i32x8<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_i32x8(value)
    }
}
/// Lane-wise blend between two `i32x8` vectors, driven by this mask.
impl<S: Simd> Select<i32x8<S>> for mask32x8<S> {
    #[inline(always)]
    fn select(self, if_true: i32x8<S>, if_false: i32x8<S>) -> i32x8<S> {
        self.simd.select_i32x8(self, if_true, if_false)
    }
}
/// Reinterprets the vector's storage as raw bytes and back.
impl<S: Simd> Bytes for i32x8<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 8]` and `[u8; 32]` are both 32 bytes and every
        // bit pattern is valid for both, so this only reinterprets bytes.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in reverse.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise operations on `i32x8`. Every method is a thin delegation to the
// active SIMD backend `S`, so instruction selection lives in the `Simd` impl.
impl<S: Simd> i32x8<S> {
    /// Bitwise NOT of every lane.
    #[inline(always)]
    pub fn not(self) -> i32x8<S> {
        self.simd.not_i32x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.add_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.sub_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.mul_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.and_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.or_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.xor_i32x8(self, rhs.simd_into(self.simd))
    }
    /// Shift every lane right by the same scalar `shift` amount.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i32x8<S> {
        self.simd.shr_i32x8(self, shift)
    }
    /// Variable shift right: takes a vector of shift counts (presumably one
    /// per lane — backend-defined; confirm against `Simd::shrv_i32x8`).
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.shrv_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> i32x8<S> {
        self.simd.shl_i32x8(self, shift)
    }
    // Lane-wise comparisons; each produces a `mask32x8` (one mask lane per input lane).
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.min_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.max_i32x8(self, rhs.simd_into(self.simd))
    }
    /// Combine `self` and `rhs` into a double-width 16-lane vector
    /// (widening is established by the `i32x16` return type).
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
        self.simd.combine_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn neg(self) -> i32x8<S> {
        self.simd.neg_i32x8(self)
    }
    // `reinterpret_*` are bit-pattern casts (no value conversion); `cvt_f32`
    // is a numeric conversion to floating point.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x32<S> {
        self.simd.reinterpret_u8_i32x8(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x8<S> {
        self.simd.reinterpret_u32_i32x8(self)
    }
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x8<S> {
        self.simd.cvt_f32_i32x8(self)
    }
}
// Base vector interface for `i32x8`: lane count, slice views, construction.
impl<S: Simd> crate::SimdBase<i32, S> for i32x8<S> {
    // Eight 32-bit lanes; `Block` is the half-width (4-lane) building block.
    const N: usize = 8;
    type Mask = mask32x8<S>;
    type Block = i32x4<S>;
    /// Returns the backend capability token carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    /// Panics if `slice.len() != 8` (via `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_i32x8(val)
    }
    /// Repeats the 4-lane block into both halves of the 8-lane vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
// Trait-level integer ops for `i32x8`. The comparison methods duplicate the
// inherent ones above so generic code (`T: SimdInt`) can reach them.
impl<S: Simd> crate::SimdInt<i32, S> for i32x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_i32x8(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave primitives; exact lane layout is defined by
    // the backend's `zip_*`/`unzip_*` implementations.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.zip_low_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.zip_high_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.unzip_low_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.unzip_high_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.min_i32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i32x8<S> {
        self.simd.max_i32x8(self, rhs.simd_into(self.simd))
    }
}
4441impl<S: Simd> SimdCvtTruncate<f32x8<S>> for i32x8<S> {
4442 fn truncate_from(x: f32x8<S>) -> Self {
4443 x.simd.cvt_i32_f32x8(x)
4444 }
4445}
/// Eight `u32` lanes plus the backend capability token `simd`.
/// 32-byte aligned to match a 256-bit register.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct u32x8<S: Simd> {
    // Lane payload; `repr(C)` keeps it first so the type can be bit-cast.
    pub val: [u32; 8],
    // Zero-sized-or-small witness of the active SIMD level.
    pub simd: S,
}
4452impl<S: Simd> SimdFrom<[u32; 8], S> for u32x8<S> {
4453 #[inline(always)]
4454 fn simd_from(val: [u32; 8], simd: S) -> Self {
4455 Self {
4456 val: [
4457 val[0usize],
4458 val[1usize],
4459 val[2usize],
4460 val[3usize],
4461 val[4usize],
4462 val[5usize],
4463 val[6usize],
4464 val[7usize],
4465 ],
4466 simd,
4467 }
4468 }
4469}
// Conversions and lane-select plumbing for `u32x8`.
impl<S: Simd> From<u32x8<S>> for [u32; 8] {
    #[inline(always)]
    fn from(value: u32x8<S>) -> Self {
        value.val
    }
}
// Deref to the lane array so indexing and slice methods work directly.
impl<S: Simd> core::ops::Deref for u32x8<S> {
    type Target = [u32; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for u32x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar -> vector: broadcast the value into all eight lanes.
impl<S: Simd> SimdFrom<u32, S> for u32x8<S> {
    #[inline(always)]
    fn simd_from(value: u32, simd: S) -> Self {
        simd.splat_u32x8(value)
    }
}
// Mask-driven lane blend: picks from `if_true`/`if_false` per mask lane.
impl<S: Simd> Select<u32x8<S>> for mask32x8<S> {
    #[inline(always)]
    fn select(self, if_true: u32x8<S>, if_false: u32x8<S>) -> u32x8<S> {
        self.simd.select_u32x8(self, if_true, if_false)
    }
}
// Raw byte view of the lane data.
impl<S: Simd> Bytes for u32x8<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u32; 8]` and `[u8; 32]` are the same size (32 bytes) and
        // both are plain-old-data valid for every bit pattern.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: inverse of `to_bytes`; same-size POD-to-POD transmute.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise operations on `u32x8`; each delegates to the backend `S`.
// (Unsigned variant: no `neg`, comparisons use unsigned ordering per the
// `_u32x8` backend methods.)
impl<S: Simd> u32x8<S> {
    #[inline(always)]
    pub fn not(self) -> u32x8<S> {
        self.simd.not_u32x8(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.add_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.sub_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.mul_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.and_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.or_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.xor_u32x8(self, rhs.simd_into(self.simd))
    }
    /// Shift every lane right by the same scalar amount.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u32x8<S> {
        self.simd.shr_u32x8(self, shift)
    }
    /// Variable shift right with a vector of counts (presumably per-lane;
    /// backend-defined).
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.shrv_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> u32x8<S> {
        self.simd.shl_u32x8(self, shift)
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.min_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.max_u32x8(self, rhs.simd_into(self.simd))
    }
    /// Combine `self` and `rhs` into a double-width 16-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
        self.simd.combine_u32x8(self, rhs.simd_into(self.simd))
    }
    /// Bit-pattern cast to bytes (no value conversion).
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x32<S> {
        self.simd.reinterpret_u8_u32x8(self)
    }
    /// Numeric conversion of each lane to `f32`.
    #[inline(always)]
    pub fn cvt_f32(self) -> f32x8<S> {
        self.simd.cvt_f32_u32x8(self)
    }
}
// Base vector interface for `u32x8`.
impl<S: Simd> crate::SimdBase<u32, S> for u32x8<S> {
    const N: usize = 8;
    type Mask = mask32x8<S>;
    type Block = u32x4<S>;
    /// Returns the backend capability token carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[u32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u32] {
        &mut self.val
    }
    /// Panics if `slice.len() != 8` (via `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u32]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u32) -> Self {
        simd.splat_u32x8(val)
    }
    /// Repeats the 4-lane block into both halves of the 8-lane vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
// Trait-level integer ops for `u32x8`; mirrors the inherent methods so
// generic `T: SimdInt` code can use them.
impl<S: Simd> crate::SimdInt<u32, S> for u32x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_lt_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_le_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_ge_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_gt_u32x8(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave; lane layout is backend-defined.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.zip_low_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.zip_high_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.unzip_low_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.unzip_high_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.min_u32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u32x8<S> {
        self.simd.max_u32x8(self, rhs.simd_into(self.simd))
    }
}
4681impl<S: Simd> SimdCvtTruncate<f32x8<S>> for u32x8<S> {
4682 fn truncate_from(x: f32x8<S>) -> Self {
4683 x.simd.cvt_u32_f32x8(x)
4684 }
4685}
/// Eight 32-bit mask lanes (stored as `i32`; presumably all-ones/all-zeros
/// per lane — backend-defined, confirm against the `Simd` impls).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct mask32x8<S: Simd> {
    pub val: [i32; 8],
    pub simd: S,
}
4692impl<S: Simd> SimdFrom<[i32; 8], S> for mask32x8<S> {
4693 #[inline(always)]
4694 fn simd_from(val: [i32; 8], simd: S) -> Self {
4695 Self {
4696 val: [
4697 val[0usize],
4698 val[1usize],
4699 val[2usize],
4700 val[3usize],
4701 val[4usize],
4702 val[5usize],
4703 val[6usize],
4704 val[7usize],
4705 ],
4706 simd,
4707 }
4708 }
4709}
// Conversions and select plumbing for `mask32x8`.
impl<S: Simd> From<mask32x8<S>> for [i32; 8] {
    #[inline(always)]
    fn from(value: mask32x8<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for mask32x8<S> {
    type Target = [i32; 8];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask32x8<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar -> mask: broadcast the value into all eight lanes.
impl<S: Simd> SimdFrom<i32, S> for mask32x8<S> {
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_mask32x8(value)
    }
}
// Masks can themselves be blended by another mask.
impl<S: Simd> Select<mask32x8<S>> for mask32x8<S> {
    #[inline(always)]
    fn select(self, if_true: mask32x8<S>, if_false: mask32x8<S>) -> mask32x8<S> {
        self.simd.select_mask32x8(self, if_true, if_false)
    }
}
// Raw byte view of the mask lanes.
impl<S: Simd> Bytes for mask32x8<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 8]` and `[u8; 32]` are the same size (32 bytes) and
        // both are POD valid for every bit pattern.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: inverse of `to_bytes`; same-size POD-to-POD transmute.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise boolean algebra on masks; each delegates to the backend.
impl<S: Simd> mask32x8<S> {
    #[inline(always)]
    pub fn not(self) -> mask32x8<S> {
        self.simd.not_mask32x8(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.and_mask32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.or_mask32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.xor_mask32x8(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_mask32x8(self, rhs.simd_into(self.simd))
    }
    /// Combine `self` and `rhs` into a double-width 16-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.combine_mask32x8(self, rhs.simd_into(self.simd))
    }
}
// Base vector interface for `mask32x8` (a mask is its own `Mask` type).
impl<S: Simd> crate::SimdBase<i32, S> for mask32x8<S> {
    const N: usize = 8;
    type Mask = mask32x8<S>;
    type Block = mask32x4<S>;
    /// Returns the backend capability token carried by this mask.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i32] {
        &mut self.val
    }
    /// Panics if `slice.len() != 8` (via `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i32]) -> Self {
        let mut val = [0; 8];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i32) -> Self {
        simd.splat_mask32x8(val)
    }
    /// Repeats the 4-lane block into both halves of the 8-lane mask.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
// Trait-level mask interface; mirrors the inherent `simd_eq`.
impl<S: Simd> crate::SimdMask<i32, S> for mask32x8<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x8<S> {
        self.simd.simd_eq_mask32x8(self, rhs.simd_into(self.simd))
    }
}
/// Four `f64` lanes plus the backend capability token; 32-byte aligned to
/// match a 256-bit register.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct f64x4<S: Simd> {
    pub val: [f64; 4],
    pub simd: S,
}
4831impl<S: Simd> SimdFrom<[f64; 4], S> for f64x4<S> {
4832 #[inline(always)]
4833 fn simd_from(val: [f64; 4], simd: S) -> Self {
4834 Self {
4835 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
4836 simd,
4837 }
4838 }
4839}
// Conversions and select plumbing for `f64x4`.
impl<S: Simd> From<f64x4<S>> for [f64; 4] {
    #[inline(always)]
    fn from(value: f64x4<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for f64x4<S> {
    type Target = [f64; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for f64x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar -> vector broadcast.
impl<S: Simd> SimdFrom<f64, S> for f64x4<S> {
    #[inline(always)]
    fn simd_from(value: f64, simd: S) -> Self {
        simd.splat_f64x4(value)
    }
}
// Mask-driven lane blend using the matching 64-bit mask type.
impl<S: Simd> Select<f64x4<S>> for mask64x4<S> {
    #[inline(always)]
    fn select(self, if_true: f64x4<S>, if_false: f64x4<S>) -> f64x4<S> {
        self.simd.select_f64x4(self, if_true, if_false)
    }
}
// Raw byte view of the lane data.
impl<S: Simd> Bytes for f64x4<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f64; 4]` and `[u8; 32]` are the same size (32 bytes);
        // `u8` is valid for every bit pattern, so the cast is sound.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: inverse direction; `f64` is also valid for every bit
        // pattern (NaNs included), and the sizes match.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise floating-point operations on `f64x4`; each delegates to the
// backend `S`. NaN/rounding behavior is therefore backend-defined unless the
// method name pins it (e.g. `_precise` variants).
impl<S: Simd> f64x4<S> {
    #[inline(always)]
    pub fn abs(self) -> f64x4<S> {
        self.simd.abs_f64x4(self)
    }
    #[inline(always)]
    pub fn neg(self) -> f64x4<S> {
        self.simd.neg_f64x4(self)
    }
    #[inline(always)]
    pub fn sqrt(self) -> f64x4<S> {
        self.simd.sqrt_f64x4(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.add_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.sub_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.mul_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.div_f64x4(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise copy of `rhs`'s sign onto `self`'s magnitude.
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.copysign_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_lt_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_le_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_ge_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_gt_f64x4(self, rhs.simd_into(self.simd))
    }
    // `max`/`min` vs `*_precise`: the precise variants presumably have
    // stricter (IEEE-style) NaN handling — backend-defined; confirm against
    // the `Simd` trait docs.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn floor(self) -> f64x4<S> {
        self.simd.floor_f64x4(self)
    }
    #[inline(always)]
    pub fn fract(self) -> f64x4<S> {
        self.simd.fract_f64x4(self)
    }
    #[inline(always)]
    pub fn trunc(self) -> f64x4<S> {
        self.simd.trunc_f64x4(self)
    }
    /// Combine `self` and `rhs` into a double-width 8-lane vector.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
        self.simd.combine_f64x4(self, rhs.simd_into(self.simd))
    }
    /// Bit-pattern cast to `f32x8` (no numeric conversion).
    #[inline(always)]
    pub fn reinterpret_f32(self) -> f32x8<S> {
        self.simd.reinterpret_f32_f64x4(self)
    }
}
// Base vector interface for `f64x4`.
impl<S: Simd> crate::SimdBase<f64, S> for f64x4<S> {
    const N: usize = 4;
    type Mask = mask64x4<S>;
    type Block = f64x2<S>;
    /// Returns the backend capability token carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[f64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f64] {
        &mut self.val
    }
    /// Panics if `slice.len() != 4` (via `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f64]) -> Self {
        let mut val = [0.0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: f64) -> Self {
        simd.splat_f64x4(val)
    }
    /// Repeats the 2-lane block into both halves of the 4-lane vector.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
// Trait-level float ops for `f64x4`; mirrors the inherent methods (plus
// zip/unzip and fused-multiply forms) so generic `T: SimdFloat` code works.
impl<S: Simd> crate::SimdFloat<f64, S> for f64x4<S> {
    #[inline(always)]
    fn abs(self) -> f64x4<S> {
        self.simd.abs_f64x4(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f64x4<S> {
        self.simd.sqrt_f64x4(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.copysign_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_lt_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_le_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_ge_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_gt_f64x4(self, rhs.simd_into(self.simd))
    }
    // Interleave / de-interleave; lane layout is backend-defined.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.zip_low_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.zip_high_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.unzip_low_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.unzip_high_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.max_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_f64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd.min_precise_f64x4(self, rhs.simd_into(self.simd))
    }
    // Multiply-add / multiply-sub; operand roles and whether the operation is
    // fused (single rounding) are defined by the backend's `madd_f64x4`/
    // `msub_f64x4` — confirm there before relying on FMA semantics.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd
            .madd_f64x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x4<S> {
        self.simd
            .msub_f64x4(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f64x4<S> {
        self.simd.floor_f64x4(self)
    }
    #[inline(always)]
    fn fract(self) -> f64x4<S> {
        self.simd.fract_f64x4(self)
    }
    #[inline(always)]
    fn trunc(self) -> f64x4<S> {
        self.simd.trunc_f64x4(self)
    }
}
/// Four 64-bit mask lanes (stored as `i64`; lane encoding is backend-defined).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(32))]
pub struct mask64x4<S: Simd> {
    pub val: [i64; 4],
    pub simd: S,
}
5107impl<S: Simd> SimdFrom<[i64; 4], S> for mask64x4<S> {
5108 #[inline(always)]
5109 fn simd_from(val: [i64; 4], simd: S) -> Self {
5110 Self {
5111 val: [val[0usize], val[1usize], val[2usize], val[3usize]],
5112 simd,
5113 }
5114 }
5115}
// Conversions and select plumbing for `mask64x4`.
impl<S: Simd> From<mask64x4<S>> for [i64; 4] {
    #[inline(always)]
    fn from(value: mask64x4<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for mask64x4<S> {
    type Target = [i64; 4];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask64x4<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar -> mask broadcast.
impl<S: Simd> SimdFrom<i64, S> for mask64x4<S> {
    #[inline(always)]
    fn simd_from(value: i64, simd: S) -> Self {
        simd.splat_mask64x4(value)
    }
}
// Masks can themselves be blended by another mask.
impl<S: Simd> Select<mask64x4<S>> for mask64x4<S> {
    #[inline(always)]
    fn select(self, if_true: mask64x4<S>, if_false: mask64x4<S>) -> mask64x4<S> {
        self.simd.select_mask64x4(self, if_true, if_false)
    }
}
// Raw byte view of the mask lanes.
impl<S: Simd> Bytes for mask64x4<S> {
    type Bytes = u8x32<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i64; 4]` and `[u8; 32]` are the same size (32 bytes) and
        // both are POD valid for every bit pattern.
        unsafe {
            u8x32 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: inverse of `to_bytes`; same-size POD-to-POD transmute.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise boolean algebra on 64-bit masks; each delegates to the backend.
impl<S: Simd> mask64x4<S> {
    #[inline(always)]
    pub fn not(self) -> mask64x4<S> {
        self.simd.not_mask64x4(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.and_mask64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.or_mask64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.xor_mask64x4(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_mask64x4(self, rhs.simd_into(self.simd))
    }
    /// Combine `self` and `rhs` into a double-width 8-lane mask.
    #[inline(always)]
    pub fn combine(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
        self.simd.combine_mask64x4(self, rhs.simd_into(self.simd))
    }
}
// Base vector interface for `mask64x4`.
impl<S: Simd> crate::SimdBase<i64, S> for mask64x4<S> {
    const N: usize = 4;
    type Mask = mask64x4<S>;
    type Block = mask64x2<S>;
    /// Returns the backend capability token carried by this mask.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i64] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i64] {
        &mut self.val
    }
    /// Panics if `slice.len() != 4` (via `copy_from_slice`).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i64]) -> Self {
        let mut val = [0; 4];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i64) -> Self {
        simd.splat_mask64x4(val)
    }
    /// Repeats the 2-lane block into both halves of the 4-lane mask.
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        block.combine(block)
    }
}
// Trait-level mask interface; mirrors the inherent `simd_eq`.
impl<S: Simd> crate::SimdMask<i64, S> for mask64x4<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x4<S> {
        self.simd.simd_eq_mask64x4(self, rhs.simd_into(self.simd))
    }
}
/// Sixteen `f32` lanes plus the backend capability token; 64-byte aligned to
/// match a 512-bit register.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct f32x16<S: Simd> {
    pub val: [f32; 16],
    pub simd: S,
}
5237impl<S: Simd> SimdFrom<[f32; 16], S> for f32x16<S> {
5238 #[inline(always)]
5239 fn simd_from(val: [f32; 16], simd: S) -> Self {
5240 Self {
5241 val: [
5242 val[0usize],
5243 val[1usize],
5244 val[2usize],
5245 val[3usize],
5246 val[4usize],
5247 val[5usize],
5248 val[6usize],
5249 val[7usize],
5250 val[8usize],
5251 val[9usize],
5252 val[10usize],
5253 val[11usize],
5254 val[12usize],
5255 val[13usize],
5256 val[14usize],
5257 val[15usize],
5258 ],
5259 simd,
5260 }
5261 }
5262}
// Conversions and select plumbing for `f32x16`.
impl<S: Simd> From<f32x16<S>> for [f32; 16] {
    #[inline(always)]
    fn from(value: f32x16<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for f32x16<S> {
    type Target = [f32; 16];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for f32x16<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar -> vector broadcast.
impl<S: Simd> SimdFrom<f32, S> for f32x16<S> {
    #[inline(always)]
    fn simd_from(value: f32, simd: S) -> Self {
        simd.splat_f32x16(value)
    }
}
// Mask-driven lane blend using the matching 32-bit, 16-lane mask.
impl<S: Simd> Select<f32x16<S>> for mask32x16<S> {
    #[inline(always)]
    fn select(self, if_true: f32x16<S>, if_false: f32x16<S>) -> f32x16<S> {
        self.simd.select_f32x16(self, if_true, if_false)
    }
}
// Raw byte view of the lane data.
impl<S: Simd> Bytes for f32x16<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[f32; 16]` and `[u8; 64]` are the same size (64 bytes);
        // `u8` is valid for every bit pattern.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: inverse direction; `f32` is valid for every bit pattern
        // (NaNs included), and the sizes match.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
// Lane-wise floating-point operations on `f32x16`; each delegates to the
// backend `S`. This is the widest f32 type here, so there is no `combine`.
impl<S: Simd> f32x16<S> {
    #[inline(always)]
    pub fn abs(self) -> f32x16<S> {
        self.simd.abs_f32x16(self)
    }
    #[inline(always)]
    pub fn neg(self) -> f32x16<S> {
        self.simd.neg_f32x16(self)
    }
    #[inline(always)]
    pub fn sqrt(self) -> f32x16<S> {
        self.simd.sqrt_f32x16(self)
    }
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.add_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.sub_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.mul_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn div(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.div_f32x16(self, rhs.simd_into(self.simd))
    }
    /// Lane-wise copy of `rhs`'s sign onto `self`'s magnitude.
    #[inline(always)]
    pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.copysign_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_f32x16(self, rhs.simd_into(self.simd))
    }
    // `max`/`min` vs `*_precise`: NaN handling of the precise variants is
    // backend-defined — confirm against the `Simd` trait docs.
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn floor(self) -> f32x16<S> {
        self.simd.floor_f32x16(self)
    }
    #[inline(always)]
    pub fn fract(self) -> f32x16<S> {
        self.simd.fract_f32x16(self)
    }
    #[inline(always)]
    pub fn trunc(self) -> f32x16<S> {
        self.simd.trunc_f32x16(self)
    }
    // `reinterpret_*` are bit-pattern casts; `cvt_*` are numeric conversions.
    #[inline(always)]
    pub fn reinterpret_f64(self) -> f64x8<S> {
        self.simd.reinterpret_f64_f32x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_i32(self) -> i32x16<S> {
        self.simd.reinterpret_i32_f32x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_f32x16(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_f32x16(self)
    }
    #[inline(always)]
    pub fn cvt_u32(self) -> u32x16<S> {
        self.simd.cvt_u32_f32x16(self)
    }
    #[inline(always)]
    pub fn cvt_i32(self) -> i32x16<S> {
        self.simd.cvt_i32_f32x16(self)
    }
}
/// Core SIMD-vector plumbing for `f32x16`: lane count, associated mask/block
/// types, and construction from slices or a splatted scalar.
impl<S: Simd> crate::SimdBase<f32, S> for f32x16<S> {
    const N: usize = 16;
    type Mask = mask32x16<S>;
    /// 4-lane building block; four of these are combined into one `f32x16`.
    type Block = f32x4<S>;
    /// Returns the backend capability witness carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[f32] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [f32] {
        &mut self.val
    }
    /// Builds a vector from a slice.
    ///
    /// Panics if `slice.len() != 16` (`copy_from_slice` contract).
    #[inline(always)]
    fn from_slice(simd: S, slice: &[f32]) -> Self {
        let mut val = [0.0; 16];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    /// Broadcasts `val` into all 16 lanes.
    #[inline(always)]
    fn splat(simd: S, val: f32) -> Self {
        simd.splat_f32x16(val)
    }
    /// Repeats a 4-lane block across all 16 lanes by doubling twice (4 -> 8 -> 16).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Floating-point trait surface for `f32x16`. Every method is a thin
/// forwarder to the corresponding intrinsic on the `simd` backend witness;
/// `rhs`/`op` arguments accept anything convertible to this vector type
/// (e.g. a scalar `f32`, splatted via `SimdFrom<f32, S>`).
impl<S: Simd> crate::SimdFloat<f32, S> for f32x16<S> {
    #[inline(always)]
    fn abs(self) -> f32x16<S> {
        self.simd.abs_f32x16(self)
    }
    #[inline(always)]
    fn sqrt(self) -> f32x16<S> {
        self.simd.sqrt_f32x16(self)
    }
    #[inline(always)]
    fn copysign(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.copysign_f32x16(self, rhs.simd_into(self.simd))
    }
    // Lane-wise comparisons; each returns a per-lane mask.
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_eq_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_lt_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_le_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_ge_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
        self.simd.simd_gt_f32x16(self, rhs.simd_into(self.simd))
    }
    // Lane interleave / de-interleave; exact lane ordering is defined by the
    // backend's zip/unzip intrinsics — see the `Simd` trait.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.zip_low_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.zip_high_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.unzip_low_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.unzip_high_f32x16(self, rhs.simd_into(self.simd))
    }
    // `min`/`max` vs the `_precise` variants: NaN handling differs between
    // them — presumably `_precise` follows IEEE semantics; confirm against
    // the `Simd` trait documentation.
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.max_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_f32x16(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd.min_precise_f32x16(self, rhs.simd_into(self.simd))
    }
    // Multiply-add / multiply-subtract. Operand roles and whether the
    // operation is fused are defined by the backend intrinsics.
    #[inline(always)]
    fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd
            .madd_f32x16(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f32x16<S> {
        self.simd
            .msub_f32x16(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
    }
    #[inline(always)]
    fn floor(self) -> f32x16<S> {
        self.simd.floor_f32x16(self)
    }
    #[inline(always)]
    fn fract(self) -> f32x16<S> {
        self.simd.fract_f32x16(self)
    }
    #[inline(always)]
    fn trunc(self) -> f32x16<S> {
        self.simd.trunc_f32x16(self)
    }
}
5541impl<S: Simd> SimdCvtFloat<u32x16<S>> for f32x16<S> {
5542 fn float_from(x: u32x16<S>) -> Self {
5543 x.simd.cvt_f32_u32x16(x)
5544 }
5545}
5546impl<S: Simd> SimdCvtFloat<i32x16<S>> for f32x16<S> {
5547 fn float_from(x: i32x16<S>) -> Self {
5548 x.simd.cvt_f32_i32x16(x)
5549 }
5550}
/// 64 lanes of `i8`, stored in a 64-byte-aligned buffer.
///
/// `simd` selects the backend implementation used by this vector's
/// operations (every method forwards through it).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct i8x64<S: Simd> {
    pub val: [i8; 64],
    pub simd: S,
}
5557impl<S: Simd> SimdFrom<[i8; 64], S> for i8x64<S> {
5558 #[inline(always)]
5559 fn simd_from(val: [i8; 64], simd: S) -> Self {
5560 Self {
5561 val: [
5562 val[0usize],
5563 val[1usize],
5564 val[2usize],
5565 val[3usize],
5566 val[4usize],
5567 val[5usize],
5568 val[6usize],
5569 val[7usize],
5570 val[8usize],
5571 val[9usize],
5572 val[10usize],
5573 val[11usize],
5574 val[12usize],
5575 val[13usize],
5576 val[14usize],
5577 val[15usize],
5578 val[16usize],
5579 val[17usize],
5580 val[18usize],
5581 val[19usize],
5582 val[20usize],
5583 val[21usize],
5584 val[22usize],
5585 val[23usize],
5586 val[24usize],
5587 val[25usize],
5588 val[26usize],
5589 val[27usize],
5590 val[28usize],
5591 val[29usize],
5592 val[30usize],
5593 val[31usize],
5594 val[32usize],
5595 val[33usize],
5596 val[34usize],
5597 val[35usize],
5598 val[36usize],
5599 val[37usize],
5600 val[38usize],
5601 val[39usize],
5602 val[40usize],
5603 val[41usize],
5604 val[42usize],
5605 val[43usize],
5606 val[44usize],
5607 val[45usize],
5608 val[46usize],
5609 val[47usize],
5610 val[48usize],
5611 val[49usize],
5612 val[50usize],
5613 val[51usize],
5614 val[52usize],
5615 val[53usize],
5616 val[54usize],
5617 val[55usize],
5618 val[56usize],
5619 val[57usize],
5620 val[58usize],
5621 val[59usize],
5622 val[60usize],
5623 val[61usize],
5624 val[62usize],
5625 val[63usize],
5626 ],
5627 simd,
5628 }
5629 }
5630}
impl<S: Simd> From<i8x64<S>> for [i8; 64] {
    /// Unwraps the vector into its plain lane array, dropping the witness.
    #[inline(always)]
    fn from(value: i8x64<S>) -> Self {
        value.val
    }
}
// Deref to the lane array so slice/array methods and indexing work directly.
impl<S: Simd> core::ops::Deref for i8x64<S> {
    type Target = [i8; 64];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i8x64<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar conversion: broadcast the value into all 64 lanes.
impl<S: Simd> SimdFrom<i8, S> for i8x64<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_i8x64(value)
    }
}
// Per-lane select: lanes come from `if_true` where the mask lane is set,
// otherwise from `if_false` (delegated to the backend).
impl<S: Simd> Select<i8x64<S>> for mask8x64<S> {
    #[inline(always)]
    fn select(self, if_true: i8x64<S>, if_false: i8x64<S>) -> i8x64<S> {
        self.simd.select_i8x64(self, if_true, if_false)
    }
}
/// Raw byte view of the vector, used for bit-level reinterpretation.
impl<S: Simd> Bytes for i8x64<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 64]` and `[u8; 64]` have identical size and
        // alignment, and every bit pattern is valid for both.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `i8x64`. Every method forwards to the matching
/// intrinsic on the `simd` backend witness; `rhs` arguments accept anything
/// convertible to this vector (e.g. a scalar `i8`, splatted via the
/// `SimdFrom<i8, S>` impl above).
impl<S: Simd> i8x64<S> {
    #[inline(always)]
    pub fn not(self) -> i8x64<S> {
        self.simd.not_i8x64(self)
    }
    // Arithmetic. Overflow behavior (wrapping vs saturating) is defined by
    // the backend intrinsics — confirm in the `Simd` trait docs.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.add_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.sub_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.mul_i8x64(self, rhs.simd_into(self.simd))
    }
    // Bitwise ops.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.and_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.or_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.xor_i8x64(self, rhs.simd_into(self.simd))
    }
    // Shifts: `shr`/`shl` take a uniform bit count, `shrv` a per-lane count.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i8x64<S> {
        self.simd.shr_i8x64(self, shift)
    }
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.shrv_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> i8x64<S> {
        self.simd.shl_i8x64(self, shift)
    }
    // Lane-wise comparisons, producing a per-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.min_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.max_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn neg(self) -> i8x64<S> {
        self.simd.neg_i8x64(self)
    }
    // Bit-level casts to same-width vectors of other element types.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_i8x64(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_i8x64(self)
    }
}
/// Core SIMD-vector plumbing for `i8x64` (lane count, mask/block types,
/// slice and splat construction).
impl<S: Simd> crate::SimdBase<i8, S> for i8x64<S> {
    const N: usize = 64;
    type Mask = mask8x64<S>;
    /// 16-lane building block; four of these make one `i8x64`.
    type Block = i8x16<S>;
    /// Returns the backend capability witness carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    /// Builds a vector from a slice. Panics if `slice.len() != 64`.
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 64];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_i8x64(val)
    }
    /// Repeats a 16-lane block across all 64 lanes (16 -> 32 -> 64).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Integer trait surface for `i8x64`; mirrors the inherent methods so
/// generic code can go through `SimdInt`. All methods forward to the backend.
impl<S: Simd> crate::SimdInt<i8, S> for i8x64<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_i8x64(self, rhs.simd_into(self.simd))
    }
    // Lane interleave / de-interleave; lane ordering is backend-defined.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.zip_low_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.zip_high_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.unzip_low_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.unzip_high_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.min_i8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> i8x64<S> {
        self.simd.max_i8x64(self, rhs.simd_into(self.simd))
    }
}
/// 64 lanes of `u8`, stored in a 64-byte-aligned buffer.
///
/// `simd` selects the backend implementation used by this vector's
/// operations. This type also serves as the raw-byte view (`Bytes::Bytes`)
/// for the other 512-bit vectors in this file.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct u8x64<S: Simd> {
    pub val: [u8; 64],
    pub simd: S,
}
5849impl<S: Simd> SimdFrom<[u8; 64], S> for u8x64<S> {
5850 #[inline(always)]
5851 fn simd_from(val: [u8; 64], simd: S) -> Self {
5852 Self {
5853 val: [
5854 val[0usize],
5855 val[1usize],
5856 val[2usize],
5857 val[3usize],
5858 val[4usize],
5859 val[5usize],
5860 val[6usize],
5861 val[7usize],
5862 val[8usize],
5863 val[9usize],
5864 val[10usize],
5865 val[11usize],
5866 val[12usize],
5867 val[13usize],
5868 val[14usize],
5869 val[15usize],
5870 val[16usize],
5871 val[17usize],
5872 val[18usize],
5873 val[19usize],
5874 val[20usize],
5875 val[21usize],
5876 val[22usize],
5877 val[23usize],
5878 val[24usize],
5879 val[25usize],
5880 val[26usize],
5881 val[27usize],
5882 val[28usize],
5883 val[29usize],
5884 val[30usize],
5885 val[31usize],
5886 val[32usize],
5887 val[33usize],
5888 val[34usize],
5889 val[35usize],
5890 val[36usize],
5891 val[37usize],
5892 val[38usize],
5893 val[39usize],
5894 val[40usize],
5895 val[41usize],
5896 val[42usize],
5897 val[43usize],
5898 val[44usize],
5899 val[45usize],
5900 val[46usize],
5901 val[47usize],
5902 val[48usize],
5903 val[49usize],
5904 val[50usize],
5905 val[51usize],
5906 val[52usize],
5907 val[53usize],
5908 val[54usize],
5909 val[55usize],
5910 val[56usize],
5911 val[57usize],
5912 val[58usize],
5913 val[59usize],
5914 val[60usize],
5915 val[61usize],
5916 val[62usize],
5917 val[63usize],
5918 ],
5919 simd,
5920 }
5921 }
5922}
impl<S: Simd> From<u8x64<S>> for [u8; 64] {
    /// Unwraps the vector into its plain lane array, dropping the witness.
    #[inline(always)]
    fn from(value: u8x64<S>) -> Self {
        value.val
    }
}
// Deref to the lane array so slice/array methods and indexing work directly.
impl<S: Simd> core::ops::Deref for u8x64<S> {
    type Target = [u8; 64];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for u8x64<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar conversion: broadcast the value into all 64 lanes.
impl<S: Simd> SimdFrom<u8, S> for u8x64<S> {
    #[inline(always)]
    fn simd_from(value: u8, simd: S) -> Self {
        simd.splat_u8x64(value)
    }
}
// Per-lane select: lanes from `if_true` where the mask lane is set,
// otherwise from `if_false` (delegated to the backend).
impl<S: Simd> Select<u8x64<S>> for mask8x64<S> {
    #[inline(always)]
    fn select(self, if_true: u8x64<S>, if_false: u8x64<S>) -> u8x64<S> {
        self.simd.select_u8x64(self, if_true, if_false)
    }
}
/// Byte view of `u8x64` — the identity case; the transmute is
/// `[u8; 64] -> [u8; 64]` and exists only to keep the generated code uniform.
impl<S: Simd> Bytes for u8x64<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: identity transmute between identical array types.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: identity transmute between identical array types.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `u8x64`. Every method forwards to the matching
/// intrinsic on the `simd` backend witness; `rhs` arguments accept anything
/// convertible to this vector (e.g. a scalar `u8`, splatted via
/// `SimdFrom<u8, S>`).
impl<S: Simd> u8x64<S> {
    #[inline(always)]
    pub fn not(self) -> u8x64<S> {
        self.simd.not_u8x64(self)
    }
    // Arithmetic. Overflow behavior is defined by the backend intrinsics.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.add_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.sub_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.mul_u8x64(self, rhs.simd_into(self.simd))
    }
    // Bitwise ops.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.and_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.or_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.xor_u8x64(self, rhs.simd_into(self.simd))
    }
    // Shifts: `shr`/`shl` take a uniform bit count, `shrv` a per-lane count.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> u8x64<S> {
        self.simd.shr_u8x64(self, shift)
    }
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.shrv_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> u8x64<S> {
        self.simd.shl_u8x64(self, shift)
    }
    // Lane-wise comparisons, producing a per-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.min_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.max_u8x64(self, rhs.simd_into(self.simd))
    }
    /// Bit-level cast to a same-width vector of `u32` lanes.
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_u8x64(self)
    }
}
/// Core SIMD-vector plumbing for `u8x64` (lane count, mask/block types,
/// slice and splat construction).
impl<S: Simd> crate::SimdBase<u8, S> for u8x64<S> {
    const N: usize = 64;
    type Mask = mask8x64<S>;
    /// 16-lane building block; four of these make one `u8x64`.
    type Block = u8x16<S>;
    /// Returns the backend capability witness carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[u8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.val
    }
    /// Builds a vector from a slice. Panics if `slice.len() != 64`.
    #[inline(always)]
    fn from_slice(simd: S, slice: &[u8]) -> Self {
        let mut val = [0; 64];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: u8) -> Self {
        simd.splat_u8x64(val)
    }
    /// Repeats a 16-lane block across all 64 lanes (16 -> 32 -> 64).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
/// Integer trait surface for `u8x64`; mirrors the inherent methods so
/// generic code can go through `SimdInt`. All methods forward to the backend.
impl<S: Simd> crate::SimdInt<u8, S> for u8x64<S> {
    #[inline(always)]
    fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_lt_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_le_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_ge_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_gt_u8x64(self, rhs.simd_into(self.simd))
    }
    // Lane interleave / de-interleave; lane ordering is backend-defined.
    #[inline(always)]
    fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.zip_low_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.zip_high_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.unzip_low_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.unzip_high_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn min(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.min_u8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    fn max(self, rhs: impl SimdInto<Self, S>) -> u8x64<S> {
        self.simd.max_u8x64(self, rhs.simd_into(self.simd))
    }
}
/// 64-lane mask with one `i8` per lane, 64-byte aligned.
///
/// Produced by the comparison methods of the 8-bit vectors and consumed by
/// `Select`. The exact lane encoding (presumably all-ones / all-zeros) is
/// determined by the backend — confirm against the `Simd` trait.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask8x64<S: Simd> {
    pub val: [i8; 64],
    pub simd: S,
}
6133impl<S: Simd> SimdFrom<[i8; 64], S> for mask8x64<S> {
6134 #[inline(always)]
6135 fn simd_from(val: [i8; 64], simd: S) -> Self {
6136 Self {
6137 val: [
6138 val[0usize],
6139 val[1usize],
6140 val[2usize],
6141 val[3usize],
6142 val[4usize],
6143 val[5usize],
6144 val[6usize],
6145 val[7usize],
6146 val[8usize],
6147 val[9usize],
6148 val[10usize],
6149 val[11usize],
6150 val[12usize],
6151 val[13usize],
6152 val[14usize],
6153 val[15usize],
6154 val[16usize],
6155 val[17usize],
6156 val[18usize],
6157 val[19usize],
6158 val[20usize],
6159 val[21usize],
6160 val[22usize],
6161 val[23usize],
6162 val[24usize],
6163 val[25usize],
6164 val[26usize],
6165 val[27usize],
6166 val[28usize],
6167 val[29usize],
6168 val[30usize],
6169 val[31usize],
6170 val[32usize],
6171 val[33usize],
6172 val[34usize],
6173 val[35usize],
6174 val[36usize],
6175 val[37usize],
6176 val[38usize],
6177 val[39usize],
6178 val[40usize],
6179 val[41usize],
6180 val[42usize],
6181 val[43usize],
6182 val[44usize],
6183 val[45usize],
6184 val[46usize],
6185 val[47usize],
6186 val[48usize],
6187 val[49usize],
6188 val[50usize],
6189 val[51usize],
6190 val[52usize],
6191 val[53usize],
6192 val[54usize],
6193 val[55usize],
6194 val[56usize],
6195 val[57usize],
6196 val[58usize],
6197 val[59usize],
6198 val[60usize],
6199 val[61usize],
6200 val[62usize],
6201 val[63usize],
6202 ],
6203 simd,
6204 }
6205 }
6206}
impl<S: Simd> From<mask8x64<S>> for [i8; 64] {
    /// Unwraps the mask into its plain lane array, dropping the witness.
    #[inline(always)]
    fn from(value: mask8x64<S>) -> Self {
        value.val
    }
}
// Deref to the lane array so slice/array methods and indexing work directly.
impl<S: Simd> core::ops::Deref for mask8x64<S> {
    type Target = [i8; 64];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask8x64<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar conversion: broadcast the value into all 64 mask lanes.
impl<S: Simd> SimdFrom<i8, S> for mask8x64<S> {
    #[inline(always)]
    fn simd_from(value: i8, simd: S) -> Self {
        simd.splat_mask8x64(value)
    }
}
// Masks can themselves be selected between, lane by lane.
impl<S: Simd> Select<mask8x64<S>> for mask8x64<S> {
    #[inline(always)]
    fn select(self, if_true: mask8x64<S>, if_false: mask8x64<S>) -> mask8x64<S> {
        self.simd.select_mask8x64(self, if_true, if_false)
    }
}
/// Raw byte view of the mask, for bit-level reinterpretation.
impl<S: Simd> Bytes for mask8x64<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i8; 64]` and `[u8; 64]` have identical size and
        // alignment, and every bit pattern is valid for both.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise boolean algebra on `mask8x64`; each method forwards to the
/// backend intrinsic on the `simd` witness.
impl<S: Simd> mask8x64<S> {
    #[inline(always)]
    pub fn not(self) -> mask8x64<S> {
        self.simd.not_mask8x64(self)
    }
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.and_mask8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.or_mask8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.xor_mask8x64(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
        self.simd.simd_eq_mask8x64(self, rhs.simd_into(self.simd))
    }
}
/// Core SIMD-vector plumbing for `mask8x64` (lane count, mask/block types,
/// slice and splat construction). A mask is its own `Mask` type.
impl<S: Simd> crate::SimdBase<i8, S> for mask8x64<S> {
    const N: usize = 64;
    type Mask = mask8x64<S>;
    /// 16-lane building block; four of these make one `mask8x64`.
    type Block = mask8x16<S>;
    /// Returns the backend capability witness carried by this mask.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i8] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i8] {
        &mut self.val
    }
    /// Builds a mask from a slice. Panics if `slice.len() != 64`.
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i8]) -> Self {
        let mut val = [0; 64];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i8) -> Self {
        simd.splat_mask8x64(val)
    }
    /// Repeats a 16-lane block across all 64 lanes (16 -> 32 -> 64).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
6313impl<S: Simd> crate::SimdMask<i8, S> for mask8x64<S> {
6314 #[inline(always)]
6315 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask8x64<S> {
6316 self.simd.simd_eq_mask8x64(self, rhs.simd_into(self.simd))
6317 }
6318}
/// 32 lanes of `i16`, stored in a 64-byte-aligned buffer.
///
/// `simd` selects the backend implementation used by this vector's
/// operations (every method forwards through it).
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct i16x32<S: Simd> {
    pub val: [i16; 32],
    pub simd: S,
}
6325impl<S: Simd> SimdFrom<[i16; 32], S> for i16x32<S> {
6326 #[inline(always)]
6327 fn simd_from(val: [i16; 32], simd: S) -> Self {
6328 Self {
6329 val: [
6330 val[0usize],
6331 val[1usize],
6332 val[2usize],
6333 val[3usize],
6334 val[4usize],
6335 val[5usize],
6336 val[6usize],
6337 val[7usize],
6338 val[8usize],
6339 val[9usize],
6340 val[10usize],
6341 val[11usize],
6342 val[12usize],
6343 val[13usize],
6344 val[14usize],
6345 val[15usize],
6346 val[16usize],
6347 val[17usize],
6348 val[18usize],
6349 val[19usize],
6350 val[20usize],
6351 val[21usize],
6352 val[22usize],
6353 val[23usize],
6354 val[24usize],
6355 val[25usize],
6356 val[26usize],
6357 val[27usize],
6358 val[28usize],
6359 val[29usize],
6360 val[30usize],
6361 val[31usize],
6362 ],
6363 simd,
6364 }
6365 }
6366}
impl<S: Simd> From<i16x32<S>> for [i16; 32] {
    /// Unwraps the vector into its plain lane array, dropping the witness.
    #[inline(always)]
    fn from(value: i16x32<S>) -> Self {
        value.val
    }
}
// Deref to the lane array so slice/array methods and indexing work directly.
impl<S: Simd> core::ops::Deref for i16x32<S> {
    type Target = [i16; 32];
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i16x32<S> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
// Scalar conversion: broadcast the value into all 32 lanes.
impl<S: Simd> SimdFrom<i16, S> for i16x32<S> {
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_i16x32(value)
    }
}
// Per-lane select: lanes from `if_true` where the mask lane is set,
// otherwise from `if_false` (delegated to the backend).
impl<S: Simd> Select<i16x32<S>> for mask16x32<S> {
    #[inline(always)]
    fn select(self, if_true: i16x32<S>, if_false: i16x32<S>) -> i16x32<S> {
        self.simd.select_i16x32(self, if_true, if_false)
    }
}
/// Raw byte view of the vector, used for bit-level reinterpretation.
impl<S: Simd> Bytes for i16x32<S> {
    type Bytes = u8x64<S>;
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 32]` and `[u8; 64]` are both 64 bytes with no
        // padding, and every bit pattern is valid for both.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same layout argument as `to_bytes`, in the other direction.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
/// Lane-wise operations on `i16x32`. Every method forwards to the matching
/// intrinsic on the `simd` backend witness; `rhs` arguments accept anything
/// convertible to this vector (e.g. a scalar `i16`, splatted via
/// `SimdFrom<i16, S>`).
impl<S: Simd> i16x32<S> {
    #[inline(always)]
    pub fn not(self) -> i16x32<S> {
        self.simd.not_i16x32(self)
    }
    // Arithmetic. Overflow behavior is defined by the backend intrinsics.
    #[inline(always)]
    pub fn add(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.add_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.sub_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.mul_i16x32(self, rhs.simd_into(self.simd))
    }
    // Bitwise ops.
    #[inline(always)]
    pub fn and(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.and_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn or(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.or_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.xor_i16x32(self, rhs.simd_into(self.simd))
    }
    // Shifts: `shr`/`shl` take a uniform bit count, `shrv` a per-lane count.
    #[inline(always)]
    pub fn shr(self, shift: u32) -> i16x32<S> {
        self.simd.shr_i16x32(self, shift)
    }
    #[inline(always)]
    pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.shrv_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn shl(self, shift: u32) -> i16x32<S> {
        self.simd.shl_i16x32(self, shift)
    }
    // Lane-wise comparisons, producing a per-lane mask.
    #[inline(always)]
    pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_eq_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_lt_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_le_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_ge_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
        self.simd.simd_gt_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn min(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.min_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn max(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
        self.simd.max_i16x32(self, rhs.simd_into(self.simd))
    }
    #[inline(always)]
    pub fn neg(self) -> i16x32<S> {
        self.simd.neg_i16x32(self)
    }
    // Bit-level casts to same-width vectors of other element types.
    #[inline(always)]
    pub fn reinterpret_u8(self) -> u8x64<S> {
        self.simd.reinterpret_u8_i16x32(self)
    }
    #[inline(always)]
    pub fn reinterpret_u32(self) -> u32x16<S> {
        self.simd.reinterpret_u32_i16x32(self)
    }
}
/// Core SIMD-vector plumbing for `i16x32` (lane count, mask/block types,
/// slice and splat construction).
impl<S: Simd> crate::SimdBase<i16, S> for i16x32<S> {
    const N: usize = 32;
    type Mask = mask16x32<S>;
    /// 8-lane building block; four of these make one `i16x32`.
    type Block = i16x8<S>;
    /// Returns the backend capability witness carried by this vector.
    #[inline(always)]
    fn witness(&self) -> S {
        self.simd
    }
    #[inline(always)]
    fn as_slice(&self) -> &[i16] {
        &self.val
    }
    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [i16] {
        &mut self.val
    }
    /// Builds a vector from a slice. Panics if `slice.len() != 32`.
    #[inline(always)]
    fn from_slice(simd: S, slice: &[i16]) -> Self {
        let mut val = [0; 32];
        val.copy_from_slice(slice);
        Self { val, simd }
    }
    #[inline(always)]
    fn splat(simd: S, val: i16) -> Self {
        simd.splat_i16x32(val)
    }
    /// Repeats an 8-lane block across all 32 lanes (8 -> 16 -> 32).
    #[inline(always)]
    fn block_splat(block: Self::Block) -> Self {
        let block2 = block.combine(block);
        block2.combine(block2)
    }
}
6533impl<S: Simd> crate::SimdInt<i16, S> for i16x32<S> {
6534 #[inline(always)]
6535 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6536 self.simd.simd_eq_i16x32(self, rhs.simd_into(self.simd))
6537 }
6538 #[inline(always)]
6539 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6540 self.simd.simd_lt_i16x32(self, rhs.simd_into(self.simd))
6541 }
6542 #[inline(always)]
6543 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6544 self.simd.simd_le_i16x32(self, rhs.simd_into(self.simd))
6545 }
6546 #[inline(always)]
6547 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6548 self.simd.simd_ge_i16x32(self, rhs.simd_into(self.simd))
6549 }
6550 #[inline(always)]
6551 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6552 self.simd.simd_gt_i16x32(self, rhs.simd_into(self.simd))
6553 }
6554 #[inline(always)]
6555 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
6556 self.simd.zip_low_i16x32(self, rhs.simd_into(self.simd))
6557 }
6558 #[inline(always)]
6559 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
6560 self.simd.zip_high_i16x32(self, rhs.simd_into(self.simd))
6561 }
6562 #[inline(always)]
6563 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
6564 self.simd.unzip_low_i16x32(self, rhs.simd_into(self.simd))
6565 }
6566 #[inline(always)]
6567 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
6568 self.simd.unzip_high_i16x32(self, rhs.simd_into(self.simd))
6569 }
6570 #[inline(always)]
6571 fn min(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
6572 self.simd.min_i16x32(self, rhs.simd_into(self.simd))
6573 }
6574 #[inline(always)]
6575 fn max(self, rhs: impl SimdInto<Self, S>) -> i16x32<S> {
6576 self.simd.max_i16x32(self, rhs.simd_into(self.simd))
6577 }
6578}
/// A SIMD vector of 32 `u16` lanes plus the `Simd` capability witness.
///
/// `#[repr(C, align(64))]` pins the layout so the lane array can be safely
/// transmuted to raw bytes (see the `Bytes` impl) and keeps the value
/// 64-byte (512-bit) aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct u16x32<S: Simd> {
    // Lane values, in lane order.
    pub val: [u16; 32],
    // Witness used to dispatch every operation to the active backend.
    pub simd: S,
}
6585impl<S: Simd> SimdFrom<[u16; 32], S> for u16x32<S> {
6586 #[inline(always)]
6587 fn simd_from(val: [u16; 32], simd: S) -> Self {
6588 Self {
6589 val: [
6590 val[0usize],
6591 val[1usize],
6592 val[2usize],
6593 val[3usize],
6594 val[4usize],
6595 val[5usize],
6596 val[6usize],
6597 val[7usize],
6598 val[8usize],
6599 val[9usize],
6600 val[10usize],
6601 val[11usize],
6602 val[12usize],
6603 val[13usize],
6604 val[14usize],
6605 val[15usize],
6606 val[16usize],
6607 val[17usize],
6608 val[18usize],
6609 val[19usize],
6610 val[20usize],
6611 val[21usize],
6612 val[22usize],
6613 val[23usize],
6614 val[24usize],
6615 val[25usize],
6616 val[26usize],
6617 val[27usize],
6618 val[28usize],
6619 val[29usize],
6620 val[30usize],
6621 val[31usize],
6622 ],
6623 simd,
6624 }
6625 }
6626}
impl<S: Simd> From<u16x32<S>> for [u16; 32] {
    /// Extracts the lane array, discarding the `Simd` witness.
    #[inline(always)]
    fn from(value: u16x32<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for u16x32<S> {
    type Target = [u16; 32];
    /// Lets the vector be used anywhere a `&[u16; 32]` is expected.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for u16x32<S> {
    /// Mutable access to the lane array through deref coercion.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<u16, S> for u16x32<S> {
    /// Broadcasts a single `u16` to all 32 lanes.
    #[inline(always)]
    fn simd_from(value: u16, simd: S) -> Self {
        simd.splat_u16x32(value)
    }
}
impl<S: Simd> Select<u16x32<S>> for mask16x32<S> {
    /// Lane-wise select: takes `if_true` lanes where the mask is set and
    /// `if_false` lanes elsewhere (exact semantics delegated to the backend).
    #[inline(always)]
    fn select(self, if_true: u16x32<S>, if_false: u16x32<S>) -> u16x32<S> {
        self.simd.select_u16x32(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for u16x32<S> {
    type Bytes = u8x64<S>;
    /// Reinterprets the 32 `u16` lanes as 64 raw bytes (native byte order).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u16; 32]` and `[u8; 64]` have the same size (64 bytes),
        // every bit pattern is valid for both, and `repr(C)` fixes the layout.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 64 raw bytes as 32 `u16` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute between plain-old-data arrays; every
        // bit pattern is a valid `[u16; 32]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
6679impl<S: Simd> u16x32<S> {
6680 #[inline(always)]
6681 pub fn not(self) -> u16x32<S> {
6682 self.simd.not_u16x32(self)
6683 }
6684 #[inline(always)]
6685 pub fn add(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6686 self.simd.add_u16x32(self, rhs.simd_into(self.simd))
6687 }
6688 #[inline(always)]
6689 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6690 self.simd.sub_u16x32(self, rhs.simd_into(self.simd))
6691 }
6692 #[inline(always)]
6693 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6694 self.simd.mul_u16x32(self, rhs.simd_into(self.simd))
6695 }
6696 #[inline(always)]
6697 pub fn and(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6698 self.simd.and_u16x32(self, rhs.simd_into(self.simd))
6699 }
6700 #[inline(always)]
6701 pub fn or(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6702 self.simd.or_u16x32(self, rhs.simd_into(self.simd))
6703 }
6704 #[inline(always)]
6705 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6706 self.simd.xor_u16x32(self, rhs.simd_into(self.simd))
6707 }
6708 #[inline(always)]
6709 pub fn shr(self, shift: u32) -> u16x32<S> {
6710 self.simd.shr_u16x32(self, shift)
6711 }
6712 #[inline(always)]
6713 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6714 self.simd.shrv_u16x32(self, rhs.simd_into(self.simd))
6715 }
6716 #[inline(always)]
6717 pub fn shl(self, shift: u32) -> u16x32<S> {
6718 self.simd.shl_u16x32(self, shift)
6719 }
6720 #[inline(always)]
6721 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6722 self.simd.simd_eq_u16x32(self, rhs.simd_into(self.simd))
6723 }
6724 #[inline(always)]
6725 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6726 self.simd.simd_lt_u16x32(self, rhs.simd_into(self.simd))
6727 }
6728 #[inline(always)]
6729 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6730 self.simd.simd_le_u16x32(self, rhs.simd_into(self.simd))
6731 }
6732 #[inline(always)]
6733 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6734 self.simd.simd_ge_u16x32(self, rhs.simd_into(self.simd))
6735 }
6736 #[inline(always)]
6737 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6738 self.simd.simd_gt_u16x32(self, rhs.simd_into(self.simd))
6739 }
6740 #[inline(always)]
6741 pub fn min(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6742 self.simd.min_u16x32(self, rhs.simd_into(self.simd))
6743 }
6744 #[inline(always)]
6745 pub fn max(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6746 self.simd.max_u16x32(self, rhs.simd_into(self.simd))
6747 }
6748 #[inline(always)]
6749 pub fn reinterpret_u8(self) -> u8x64<S> {
6750 self.simd.reinterpret_u8_u16x32(self)
6751 }
6752 #[inline(always)]
6753 pub fn reinterpret_u32(self) -> u32x16<S> {
6754 self.simd.reinterpret_u32_u16x32(self)
6755 }
6756}
6757impl<S: Simd> crate::SimdBase<u16, S> for u16x32<S> {
6758 const N: usize = 32;
6759 type Mask = mask16x32<S>;
6760 type Block = u16x8<S>;
6761 #[inline(always)]
6762 fn witness(&self) -> S {
6763 self.simd
6764 }
6765 #[inline(always)]
6766 fn as_slice(&self) -> &[u16] {
6767 &self.val
6768 }
6769 #[inline(always)]
6770 fn as_mut_slice(&mut self) -> &mut [u16] {
6771 &mut self.val
6772 }
6773 #[inline(always)]
6774 fn from_slice(simd: S, slice: &[u16]) -> Self {
6775 let mut val = [0; 32];
6776 val.copy_from_slice(slice);
6777 Self { val, simd }
6778 }
6779 #[inline(always)]
6780 fn splat(simd: S, val: u16) -> Self {
6781 simd.splat_u16x32(val)
6782 }
6783 #[inline(always)]
6784 fn block_splat(block: Self::Block) -> Self {
6785 let block2 = block.combine(block);
6786 block2.combine(block2)
6787 }
6788}
6789impl<S: Simd> crate::SimdInt<u16, S> for u16x32<S> {
6790 #[inline(always)]
6791 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6792 self.simd.simd_eq_u16x32(self, rhs.simd_into(self.simd))
6793 }
6794 #[inline(always)]
6795 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6796 self.simd.simd_lt_u16x32(self, rhs.simd_into(self.simd))
6797 }
6798 #[inline(always)]
6799 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6800 self.simd.simd_le_u16x32(self, rhs.simd_into(self.simd))
6801 }
6802 #[inline(always)]
6803 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6804 self.simd.simd_ge_u16x32(self, rhs.simd_into(self.simd))
6805 }
6806 #[inline(always)]
6807 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6808 self.simd.simd_gt_u16x32(self, rhs.simd_into(self.simd))
6809 }
6810 #[inline(always)]
6811 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6812 self.simd.zip_low_u16x32(self, rhs.simd_into(self.simd))
6813 }
6814 #[inline(always)]
6815 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6816 self.simd.zip_high_u16x32(self, rhs.simd_into(self.simd))
6817 }
6818 #[inline(always)]
6819 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6820 self.simd.unzip_low_u16x32(self, rhs.simd_into(self.simd))
6821 }
6822 #[inline(always)]
6823 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6824 self.simd.unzip_high_u16x32(self, rhs.simd_into(self.simd))
6825 }
6826 #[inline(always)]
6827 fn min(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6828 self.simd.min_u16x32(self, rhs.simd_into(self.simd))
6829 }
6830 #[inline(always)]
6831 fn max(self, rhs: impl SimdInto<Self, S>) -> u16x32<S> {
6832 self.simd.max_u16x32(self, rhs.simd_into(self.simd))
6833 }
6834}
/// A lane mask for 32 x 16-bit vectors, stored as 32 `i16` values plus the
/// `Simd` witness.
///
/// NOTE(review): lane encoding is presumably 0 = false / all-ones = true,
/// as is usual for SIMD masks — confirm against the backend.
/// `#[repr(C, align(64))]` pins layout/alignment for byte reinterpretation.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask16x32<S: Simd> {
    // Per-lane mask values.
    pub val: [i16; 32],
    // Witness used to dispatch operations to the active backend.
    pub simd: S,
}
6841impl<S: Simd> SimdFrom<[i16; 32], S> for mask16x32<S> {
6842 #[inline(always)]
6843 fn simd_from(val: [i16; 32], simd: S) -> Self {
6844 Self {
6845 val: [
6846 val[0usize],
6847 val[1usize],
6848 val[2usize],
6849 val[3usize],
6850 val[4usize],
6851 val[5usize],
6852 val[6usize],
6853 val[7usize],
6854 val[8usize],
6855 val[9usize],
6856 val[10usize],
6857 val[11usize],
6858 val[12usize],
6859 val[13usize],
6860 val[14usize],
6861 val[15usize],
6862 val[16usize],
6863 val[17usize],
6864 val[18usize],
6865 val[19usize],
6866 val[20usize],
6867 val[21usize],
6868 val[22usize],
6869 val[23usize],
6870 val[24usize],
6871 val[25usize],
6872 val[26usize],
6873 val[27usize],
6874 val[28usize],
6875 val[29usize],
6876 val[30usize],
6877 val[31usize],
6878 ],
6879 simd,
6880 }
6881 }
6882}
impl<S: Simd> From<mask16x32<S>> for [i16; 32] {
    /// Extracts the lane array, discarding the `Simd` witness.
    #[inline(always)]
    fn from(value: mask16x32<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for mask16x32<S> {
    type Target = [i16; 32];
    /// Lets the mask be used anywhere a `&[i16; 32]` is expected.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask16x32<S> {
    /// Mutable access to the lane array through deref coercion.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<i16, S> for mask16x32<S> {
    /// Broadcasts a single `i16` mask value to all 32 lanes.
    #[inline(always)]
    fn simd_from(value: i16, simd: S) -> Self {
        simd.splat_mask16x32(value)
    }
}
impl<S: Simd> Select<mask16x32<S>> for mask16x32<S> {
    /// Lane-wise select between two masks, using `self` as the condition
    /// (exact semantics delegated to the backend).
    #[inline(always)]
    fn select(self, if_true: mask16x32<S>, if_false: mask16x32<S>) -> mask16x32<S> {
        self.simd.select_mask16x32(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for mask16x32<S> {
    type Bytes = u8x64<S>;
    /// Reinterprets the 32 `i16` mask lanes as 64 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i16; 32]` and `[u8; 64]` have the same size (64 bytes),
        // every bit pattern is valid for both, and `repr(C)` fixes the layout.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 64 raw bytes as 32 `i16` mask lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute between plain-old-data arrays; every
        // bit pattern is a valid `[i16; 32]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
6935impl<S: Simd> mask16x32<S> {
6936 #[inline(always)]
6937 pub fn not(self) -> mask16x32<S> {
6938 self.simd.not_mask16x32(self)
6939 }
6940 #[inline(always)]
6941 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6942 self.simd.and_mask16x32(self, rhs.simd_into(self.simd))
6943 }
6944 #[inline(always)]
6945 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6946 self.simd.or_mask16x32(self, rhs.simd_into(self.simd))
6947 }
6948 #[inline(always)]
6949 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6950 self.simd.xor_mask16x32(self, rhs.simd_into(self.simd))
6951 }
6952 #[inline(always)]
6953 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6954 self.simd.simd_eq_mask16x32(self, rhs.simd_into(self.simd))
6955 }
6956}
6957impl<S: Simd> crate::SimdBase<i16, S> for mask16x32<S> {
6958 const N: usize = 32;
6959 type Mask = mask16x32<S>;
6960 type Block = mask16x8<S>;
6961 #[inline(always)]
6962 fn witness(&self) -> S {
6963 self.simd
6964 }
6965 #[inline(always)]
6966 fn as_slice(&self) -> &[i16] {
6967 &self.val
6968 }
6969 #[inline(always)]
6970 fn as_mut_slice(&mut self) -> &mut [i16] {
6971 &mut self.val
6972 }
6973 #[inline(always)]
6974 fn from_slice(simd: S, slice: &[i16]) -> Self {
6975 let mut val = [0; 32];
6976 val.copy_from_slice(slice);
6977 Self { val, simd }
6978 }
6979 #[inline(always)]
6980 fn splat(simd: S, val: i16) -> Self {
6981 simd.splat_mask16x32(val)
6982 }
6983 #[inline(always)]
6984 fn block_splat(block: Self::Block) -> Self {
6985 let block2 = block.combine(block);
6986 block2.combine(block2)
6987 }
6988}
6989impl<S: Simd> crate::SimdMask<i16, S> for mask16x32<S> {
6990 #[inline(always)]
6991 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask16x32<S> {
6992 self.simd.simd_eq_mask16x32(self, rhs.simd_into(self.simd))
6993 }
6994}
/// A SIMD vector of 16 `i32` lanes plus the `Simd` capability witness.
///
/// `#[repr(C, align(64))]` pins the layout so the lane array can be safely
/// transmuted to raw bytes (see the `Bytes` impl) and keeps the value
/// 64-byte (512-bit) aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct i32x16<S: Simd> {
    // Lane values, in lane order.
    pub val: [i32; 16],
    // Witness used to dispatch every operation to the active backend.
    pub simd: S,
}
7001impl<S: Simd> SimdFrom<[i32; 16], S> for i32x16<S> {
7002 #[inline(always)]
7003 fn simd_from(val: [i32; 16], simd: S) -> Self {
7004 Self {
7005 val: [
7006 val[0usize],
7007 val[1usize],
7008 val[2usize],
7009 val[3usize],
7010 val[4usize],
7011 val[5usize],
7012 val[6usize],
7013 val[7usize],
7014 val[8usize],
7015 val[9usize],
7016 val[10usize],
7017 val[11usize],
7018 val[12usize],
7019 val[13usize],
7020 val[14usize],
7021 val[15usize],
7022 ],
7023 simd,
7024 }
7025 }
7026}
impl<S: Simd> From<i32x16<S>> for [i32; 16] {
    /// Extracts the lane array, discarding the `Simd` witness.
    #[inline(always)]
    fn from(value: i32x16<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for i32x16<S> {
    type Target = [i32; 16];
    /// Lets the vector be used anywhere a `&[i32; 16]` is expected.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for i32x16<S> {
    /// Mutable access to the lane array through deref coercion.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<i32, S> for i32x16<S> {
    /// Broadcasts a single `i32` to all 16 lanes.
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_i32x16(value)
    }
}
impl<S: Simd> Select<i32x16<S>> for mask32x16<S> {
    /// Lane-wise select: takes `if_true` lanes where the mask is set and
    /// `if_false` lanes elsewhere (exact semantics delegated to the backend).
    #[inline(always)]
    fn select(self, if_true: i32x16<S>, if_false: i32x16<S>) -> i32x16<S> {
        self.simd.select_i32x16(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for i32x16<S> {
    type Bytes = u8x64<S>;
    /// Reinterprets the 16 `i32` lanes as 64 raw bytes (native byte order).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 16]` and `[u8; 64]` have the same size (64 bytes),
        // every bit pattern is valid for both, and `repr(C)` fixes the layout.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 64 raw bytes as 16 `i32` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute between plain-old-data arrays; every
        // bit pattern is a valid `[i32; 16]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
7079impl<S: Simd> i32x16<S> {
7080 #[inline(always)]
7081 pub fn not(self) -> i32x16<S> {
7082 self.simd.not_i32x16(self)
7083 }
7084 #[inline(always)]
7085 pub fn add(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7086 self.simd.add_i32x16(self, rhs.simd_into(self.simd))
7087 }
7088 #[inline(always)]
7089 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7090 self.simd.sub_i32x16(self, rhs.simd_into(self.simd))
7091 }
7092 #[inline(always)]
7093 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7094 self.simd.mul_i32x16(self, rhs.simd_into(self.simd))
7095 }
7096 #[inline(always)]
7097 pub fn and(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7098 self.simd.and_i32x16(self, rhs.simd_into(self.simd))
7099 }
7100 #[inline(always)]
7101 pub fn or(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7102 self.simd.or_i32x16(self, rhs.simd_into(self.simd))
7103 }
7104 #[inline(always)]
7105 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7106 self.simd.xor_i32x16(self, rhs.simd_into(self.simd))
7107 }
7108 #[inline(always)]
7109 pub fn shr(self, shift: u32) -> i32x16<S> {
7110 self.simd.shr_i32x16(self, shift)
7111 }
7112 #[inline(always)]
7113 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7114 self.simd.shrv_i32x16(self, rhs.simd_into(self.simd))
7115 }
7116 #[inline(always)]
7117 pub fn shl(self, shift: u32) -> i32x16<S> {
7118 self.simd.shl_i32x16(self, shift)
7119 }
7120 #[inline(always)]
7121 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7122 self.simd.simd_eq_i32x16(self, rhs.simd_into(self.simd))
7123 }
7124 #[inline(always)]
7125 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7126 self.simd.simd_lt_i32x16(self, rhs.simd_into(self.simd))
7127 }
7128 #[inline(always)]
7129 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7130 self.simd.simd_le_i32x16(self, rhs.simd_into(self.simd))
7131 }
7132 #[inline(always)]
7133 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7134 self.simd.simd_ge_i32x16(self, rhs.simd_into(self.simd))
7135 }
7136 #[inline(always)]
7137 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7138 self.simd.simd_gt_i32x16(self, rhs.simd_into(self.simd))
7139 }
7140 #[inline(always)]
7141 pub fn min(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7142 self.simd.min_i32x16(self, rhs.simd_into(self.simd))
7143 }
7144 #[inline(always)]
7145 pub fn max(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7146 self.simd.max_i32x16(self, rhs.simd_into(self.simd))
7147 }
7148 #[inline(always)]
7149 pub fn neg(self) -> i32x16<S> {
7150 self.simd.neg_i32x16(self)
7151 }
7152 #[inline(always)]
7153 pub fn reinterpret_u8(self) -> u8x64<S> {
7154 self.simd.reinterpret_u8_i32x16(self)
7155 }
7156 #[inline(always)]
7157 pub fn reinterpret_u32(self) -> u32x16<S> {
7158 self.simd.reinterpret_u32_i32x16(self)
7159 }
7160 #[inline(always)]
7161 pub fn cvt_f32(self) -> f32x16<S> {
7162 self.simd.cvt_f32_i32x16(self)
7163 }
7164}
7165impl<S: Simd> crate::SimdBase<i32, S> for i32x16<S> {
7166 const N: usize = 16;
7167 type Mask = mask32x16<S>;
7168 type Block = i32x4<S>;
7169 #[inline(always)]
7170 fn witness(&self) -> S {
7171 self.simd
7172 }
7173 #[inline(always)]
7174 fn as_slice(&self) -> &[i32] {
7175 &self.val
7176 }
7177 #[inline(always)]
7178 fn as_mut_slice(&mut self) -> &mut [i32] {
7179 &mut self.val
7180 }
7181 #[inline(always)]
7182 fn from_slice(simd: S, slice: &[i32]) -> Self {
7183 let mut val = [0; 16];
7184 val.copy_from_slice(slice);
7185 Self { val, simd }
7186 }
7187 #[inline(always)]
7188 fn splat(simd: S, val: i32) -> Self {
7189 simd.splat_i32x16(val)
7190 }
7191 #[inline(always)]
7192 fn block_splat(block: Self::Block) -> Self {
7193 let block2 = block.combine(block);
7194 block2.combine(block2)
7195 }
7196}
7197impl<S: Simd> crate::SimdInt<i32, S> for i32x16<S> {
7198 #[inline(always)]
7199 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7200 self.simd.simd_eq_i32x16(self, rhs.simd_into(self.simd))
7201 }
7202 #[inline(always)]
7203 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7204 self.simd.simd_lt_i32x16(self, rhs.simd_into(self.simd))
7205 }
7206 #[inline(always)]
7207 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7208 self.simd.simd_le_i32x16(self, rhs.simd_into(self.simd))
7209 }
7210 #[inline(always)]
7211 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7212 self.simd.simd_ge_i32x16(self, rhs.simd_into(self.simd))
7213 }
7214 #[inline(always)]
7215 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7216 self.simd.simd_gt_i32x16(self, rhs.simd_into(self.simd))
7217 }
7218 #[inline(always)]
7219 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7220 self.simd.zip_low_i32x16(self, rhs.simd_into(self.simd))
7221 }
7222 #[inline(always)]
7223 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7224 self.simd.zip_high_i32x16(self, rhs.simd_into(self.simd))
7225 }
7226 #[inline(always)]
7227 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7228 self.simd.unzip_low_i32x16(self, rhs.simd_into(self.simd))
7229 }
7230 #[inline(always)]
7231 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7232 self.simd.unzip_high_i32x16(self, rhs.simd_into(self.simd))
7233 }
7234 #[inline(always)]
7235 fn min(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7236 self.simd.min_i32x16(self, rhs.simd_into(self.simd))
7237 }
7238 #[inline(always)]
7239 fn max(self, rhs: impl SimdInto<Self, S>) -> i32x16<S> {
7240 self.simd.max_i32x16(self, rhs.simd_into(self.simd))
7241 }
7242}
7243impl<S: Simd> SimdCvtTruncate<f32x16<S>> for i32x16<S> {
7244 fn truncate_from(x: f32x16<S>) -> Self {
7245 x.simd.cvt_i32_f32x16(x)
7246 }
7247}
/// A SIMD vector of 16 `u32` lanes plus the `Simd` capability witness.
///
/// `#[repr(C, align(64))]` pins the layout so the lane array can be safely
/// transmuted to raw bytes (see the `Bytes` impl) and keeps the value
/// 64-byte (512-bit) aligned.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct u32x16<S: Simd> {
    // Lane values, in lane order.
    pub val: [u32; 16],
    // Witness used to dispatch every operation to the active backend.
    pub simd: S,
}
7254impl<S: Simd> SimdFrom<[u32; 16], S> for u32x16<S> {
7255 #[inline(always)]
7256 fn simd_from(val: [u32; 16], simd: S) -> Self {
7257 Self {
7258 val: [
7259 val[0usize],
7260 val[1usize],
7261 val[2usize],
7262 val[3usize],
7263 val[4usize],
7264 val[5usize],
7265 val[6usize],
7266 val[7usize],
7267 val[8usize],
7268 val[9usize],
7269 val[10usize],
7270 val[11usize],
7271 val[12usize],
7272 val[13usize],
7273 val[14usize],
7274 val[15usize],
7275 ],
7276 simd,
7277 }
7278 }
7279}
impl<S: Simd> From<u32x16<S>> for [u32; 16] {
    /// Extracts the lane array, discarding the `Simd` witness.
    #[inline(always)]
    fn from(value: u32x16<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for u32x16<S> {
    type Target = [u32; 16];
    /// Lets the vector be used anywhere a `&[u32; 16]` is expected.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for u32x16<S> {
    /// Mutable access to the lane array through deref coercion.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<u32, S> for u32x16<S> {
    /// Broadcasts a single `u32` to all 16 lanes.
    #[inline(always)]
    fn simd_from(value: u32, simd: S) -> Self {
        simd.splat_u32x16(value)
    }
}
impl<S: Simd> Select<u32x16<S>> for mask32x16<S> {
    /// Lane-wise select: takes `if_true` lanes where the mask is set and
    /// `if_false` lanes elsewhere (exact semantics delegated to the backend).
    #[inline(always)]
    fn select(self, if_true: u32x16<S>, if_false: u32x16<S>) -> u32x16<S> {
        self.simd.select_u32x16(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for u32x16<S> {
    type Bytes = u8x64<S>;
    /// Reinterprets the 16 `u32` lanes as 64 raw bytes (native byte order).
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[u32; 16]` and `[u8; 64]` have the same size (64 bytes),
        // every bit pattern is valid for both, and `repr(C)` fixes the layout.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 64 raw bytes as 16 `u32` lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute between plain-old-data arrays; every
        // bit pattern is a valid `[u32; 16]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
7332impl<S: Simd> u32x16<S> {
7333 #[inline(always)]
7334 pub fn not(self) -> u32x16<S> {
7335 self.simd.not_u32x16(self)
7336 }
7337 #[inline(always)]
7338 pub fn add(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7339 self.simd.add_u32x16(self, rhs.simd_into(self.simd))
7340 }
7341 #[inline(always)]
7342 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7343 self.simd.sub_u32x16(self, rhs.simd_into(self.simd))
7344 }
7345 #[inline(always)]
7346 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7347 self.simd.mul_u32x16(self, rhs.simd_into(self.simd))
7348 }
7349 #[inline(always)]
7350 pub fn and(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7351 self.simd.and_u32x16(self, rhs.simd_into(self.simd))
7352 }
7353 #[inline(always)]
7354 pub fn or(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7355 self.simd.or_u32x16(self, rhs.simd_into(self.simd))
7356 }
7357 #[inline(always)]
7358 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7359 self.simd.xor_u32x16(self, rhs.simd_into(self.simd))
7360 }
7361 #[inline(always)]
7362 pub fn shr(self, shift: u32) -> u32x16<S> {
7363 self.simd.shr_u32x16(self, shift)
7364 }
7365 #[inline(always)]
7366 pub fn shrv(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7367 self.simd.shrv_u32x16(self, rhs.simd_into(self.simd))
7368 }
7369 #[inline(always)]
7370 pub fn shl(self, shift: u32) -> u32x16<S> {
7371 self.simd.shl_u32x16(self, shift)
7372 }
7373 #[inline(always)]
7374 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7375 self.simd.simd_eq_u32x16(self, rhs.simd_into(self.simd))
7376 }
7377 #[inline(always)]
7378 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7379 self.simd.simd_lt_u32x16(self, rhs.simd_into(self.simd))
7380 }
7381 #[inline(always)]
7382 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7383 self.simd.simd_le_u32x16(self, rhs.simd_into(self.simd))
7384 }
7385 #[inline(always)]
7386 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7387 self.simd.simd_ge_u32x16(self, rhs.simd_into(self.simd))
7388 }
7389 #[inline(always)]
7390 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7391 self.simd.simd_gt_u32x16(self, rhs.simd_into(self.simd))
7392 }
7393 #[inline(always)]
7394 pub fn min(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7395 self.simd.min_u32x16(self, rhs.simd_into(self.simd))
7396 }
7397 #[inline(always)]
7398 pub fn max(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7399 self.simd.max_u32x16(self, rhs.simd_into(self.simd))
7400 }
7401 #[inline(always)]
7402 pub fn reinterpret_u8(self) -> u8x64<S> {
7403 self.simd.reinterpret_u8_u32x16(self)
7404 }
7405 #[inline(always)]
7406 pub fn cvt_f32(self) -> f32x16<S> {
7407 self.simd.cvt_f32_u32x16(self)
7408 }
7409}
7410impl<S: Simd> crate::SimdBase<u32, S> for u32x16<S> {
7411 const N: usize = 16;
7412 type Mask = mask32x16<S>;
7413 type Block = u32x4<S>;
7414 #[inline(always)]
7415 fn witness(&self) -> S {
7416 self.simd
7417 }
7418 #[inline(always)]
7419 fn as_slice(&self) -> &[u32] {
7420 &self.val
7421 }
7422 #[inline(always)]
7423 fn as_mut_slice(&mut self) -> &mut [u32] {
7424 &mut self.val
7425 }
7426 #[inline(always)]
7427 fn from_slice(simd: S, slice: &[u32]) -> Self {
7428 let mut val = [0; 16];
7429 val.copy_from_slice(slice);
7430 Self { val, simd }
7431 }
7432 #[inline(always)]
7433 fn splat(simd: S, val: u32) -> Self {
7434 simd.splat_u32x16(val)
7435 }
7436 #[inline(always)]
7437 fn block_splat(block: Self::Block) -> Self {
7438 let block2 = block.combine(block);
7439 block2.combine(block2)
7440 }
7441}
7442impl<S: Simd> crate::SimdInt<u32, S> for u32x16<S> {
7443 #[inline(always)]
7444 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7445 self.simd.simd_eq_u32x16(self, rhs.simd_into(self.simd))
7446 }
7447 #[inline(always)]
7448 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7449 self.simd.simd_lt_u32x16(self, rhs.simd_into(self.simd))
7450 }
7451 #[inline(always)]
7452 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7453 self.simd.simd_le_u32x16(self, rhs.simd_into(self.simd))
7454 }
7455 #[inline(always)]
7456 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7457 self.simd.simd_ge_u32x16(self, rhs.simd_into(self.simd))
7458 }
7459 #[inline(always)]
7460 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7461 self.simd.simd_gt_u32x16(self, rhs.simd_into(self.simd))
7462 }
7463 #[inline(always)]
7464 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7465 self.simd.zip_low_u32x16(self, rhs.simd_into(self.simd))
7466 }
7467 #[inline(always)]
7468 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7469 self.simd.zip_high_u32x16(self, rhs.simd_into(self.simd))
7470 }
7471 #[inline(always)]
7472 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7473 self.simd.unzip_low_u32x16(self, rhs.simd_into(self.simd))
7474 }
7475 #[inline(always)]
7476 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7477 self.simd.unzip_high_u32x16(self, rhs.simd_into(self.simd))
7478 }
7479 #[inline(always)]
7480 fn min(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7481 self.simd.min_u32x16(self, rhs.simd_into(self.simd))
7482 }
7483 #[inline(always)]
7484 fn max(self, rhs: impl SimdInto<Self, S>) -> u32x16<S> {
7485 self.simd.max_u32x16(self, rhs.simd_into(self.simd))
7486 }
7487}
7488impl<S: Simd> SimdCvtTruncate<f32x16<S>> for u32x16<S> {
7489 fn truncate_from(x: f32x16<S>) -> Self {
7490 x.simd.cvt_u32_f32x16(x)
7491 }
7492}
/// A lane mask for 16 x 32-bit vectors, stored as 16 `i32` values plus the
/// `Simd` witness.
///
/// NOTE(review): lane encoding is presumably 0 = false / all-ones = true,
/// as is usual for SIMD masks — confirm against the backend.
/// `#[repr(C, align(64))]` pins layout/alignment for byte reinterpretation.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask32x16<S: Simd> {
    // Per-lane mask values.
    pub val: [i32; 16],
    // Witness used to dispatch operations to the active backend.
    pub simd: S,
}
7499impl<S: Simd> SimdFrom<[i32; 16], S> for mask32x16<S> {
7500 #[inline(always)]
7501 fn simd_from(val: [i32; 16], simd: S) -> Self {
7502 Self {
7503 val: [
7504 val[0usize],
7505 val[1usize],
7506 val[2usize],
7507 val[3usize],
7508 val[4usize],
7509 val[5usize],
7510 val[6usize],
7511 val[7usize],
7512 val[8usize],
7513 val[9usize],
7514 val[10usize],
7515 val[11usize],
7516 val[12usize],
7517 val[13usize],
7518 val[14usize],
7519 val[15usize],
7520 ],
7521 simd,
7522 }
7523 }
7524}
impl<S: Simd> From<mask32x16<S>> for [i32; 16] {
    /// Extracts the lane array, discarding the `Simd` witness.
    #[inline(always)]
    fn from(value: mask32x16<S>) -> Self {
        value.val
    }
}
impl<S: Simd> core::ops::Deref for mask32x16<S> {
    type Target = [i32; 16];
    /// Lets the mask be used anywhere a `&[i32; 16]` is expected.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.val
    }
}
impl<S: Simd> core::ops::DerefMut for mask32x16<S> {
    /// Mutable access to the lane array through deref coercion.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.val
    }
}
impl<S: Simd> SimdFrom<i32, S> for mask32x16<S> {
    /// Broadcasts a single `i32` mask value to all 16 lanes.
    #[inline(always)]
    fn simd_from(value: i32, simd: S) -> Self {
        simd.splat_mask32x16(value)
    }
}
impl<S: Simd> Select<mask32x16<S>> for mask32x16<S> {
    /// Lane-wise select between two masks, using `self` as the condition
    /// (exact semantics delegated to the backend).
    #[inline(always)]
    fn select(self, if_true: mask32x16<S>, if_false: mask32x16<S>) -> mask32x16<S> {
        self.simd.select_mask32x16(self, if_true, if_false)
    }
}
impl<S: Simd> Bytes for mask32x16<S> {
    type Bytes = u8x64<S>;
    /// Reinterprets the 16 `i32` mask lanes as 64 raw bytes.
    #[inline(always)]
    fn to_bytes(self) -> Self::Bytes {
        // SAFETY: `[i32; 16]` and `[u8; 64]` have the same size (64 bytes),
        // every bit pattern is valid for both, and `repr(C)` fixes the layout.
        unsafe {
            u8x64 {
                val: core::mem::transmute(self.val),
                simd: self.simd,
            }
        }
    }
    /// Reinterprets 64 raw bytes as 16 `i32` mask lanes.
    #[inline(always)]
    fn from_bytes(value: Self::Bytes) -> Self {
        // SAFETY: same-size transmute between plain-old-data arrays; every
        // bit pattern is a valid `[i32; 16]`.
        unsafe {
            Self {
                val: core::mem::transmute(value.val),
                simd: value.simd,
            }
        }
    }
}
7577impl<S: Simd> mask32x16<S> {
7578 #[inline(always)]
7579 pub fn not(self) -> mask32x16<S> {
7580 self.simd.not_mask32x16(self)
7581 }
7582 #[inline(always)]
7583 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7584 self.simd.and_mask32x16(self, rhs.simd_into(self.simd))
7585 }
7586 #[inline(always)]
7587 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7588 self.simd.or_mask32x16(self, rhs.simd_into(self.simd))
7589 }
7590 #[inline(always)]
7591 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7592 self.simd.xor_mask32x16(self, rhs.simd_into(self.simd))
7593 }
7594 #[inline(always)]
7595 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7596 self.simd.simd_eq_mask32x16(self, rhs.simd_into(self.simd))
7597 }
7598}
7599impl<S: Simd> crate::SimdBase<i32, S> for mask32x16<S> {
7600 const N: usize = 16;
7601 type Mask = mask32x16<S>;
7602 type Block = mask32x4<S>;
7603 #[inline(always)]
7604 fn witness(&self) -> S {
7605 self.simd
7606 }
7607 #[inline(always)]
7608 fn as_slice(&self) -> &[i32] {
7609 &self.val
7610 }
7611 #[inline(always)]
7612 fn as_mut_slice(&mut self) -> &mut [i32] {
7613 &mut self.val
7614 }
7615 #[inline(always)]
7616 fn from_slice(simd: S, slice: &[i32]) -> Self {
7617 let mut val = [0; 16];
7618 val.copy_from_slice(slice);
7619 Self { val, simd }
7620 }
7621 #[inline(always)]
7622 fn splat(simd: S, val: i32) -> Self {
7623 simd.splat_mask32x16(val)
7624 }
7625 #[inline(always)]
7626 fn block_splat(block: Self::Block) -> Self {
7627 let block2 = block.combine(block);
7628 block2.combine(block2)
7629 }
7630}
7631impl<S: Simd> crate::SimdMask<i32, S> for mask32x16<S> {
7632 #[inline(always)]
7633 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask32x16<S> {
7634 self.simd.simd_eq_mask32x16(self, rhs.simd_into(self.simd))
7635 }
7636}
/// A 512-bit vector of eight `f64` lanes, tagged with the SIMD capability `S`.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct f64x8<S: Simd> {
    /// The lane values.
    pub val: [f64; 8],
    /// The SIMD capability token used to dispatch operations.
    pub simd: S,
}
7643impl<S: Simd> SimdFrom<[f64; 8], S> for f64x8<S> {
7644 #[inline(always)]
7645 fn simd_from(val: [f64; 8], simd: S) -> Self {
7646 Self {
7647 val: [
7648 val[0usize],
7649 val[1usize],
7650 val[2usize],
7651 val[3usize],
7652 val[4usize],
7653 val[5usize],
7654 val[6usize],
7655 val[7usize],
7656 ],
7657 simd,
7658 }
7659 }
7660}
7661impl<S: Simd> From<f64x8<S>> for [f64; 8] {
7662 #[inline(always)]
7663 fn from(value: f64x8<S>) -> Self {
7664 value.val
7665 }
7666}
7667impl<S: Simd> core::ops::Deref for f64x8<S> {
7668 type Target = [f64; 8];
7669 #[inline(always)]
7670 fn deref(&self) -> &Self::Target {
7671 &self.val
7672 }
7673}
7674impl<S: Simd> core::ops::DerefMut for f64x8<S> {
7675 #[inline(always)]
7676 fn deref_mut(&mut self) -> &mut Self::Target {
7677 &mut self.val
7678 }
7679}
7680impl<S: Simd> SimdFrom<f64, S> for f64x8<S> {
7681 #[inline(always)]
7682 fn simd_from(value: f64, simd: S) -> Self {
7683 simd.splat_f64x8(value)
7684 }
7685}
7686impl<S: Simd> Select<f64x8<S>> for mask64x8<S> {
7687 #[inline(always)]
7688 fn select(self, if_true: f64x8<S>, if_false: f64x8<S>) -> f64x8<S> {
7689 self.simd.select_f64x8(self, if_true, if_false)
7690 }
7691}
7692impl<S: Simd> Bytes for f64x8<S> {
7693 type Bytes = u8x64<S>;
7694 #[inline(always)]
7695 fn to_bytes(self) -> Self::Bytes {
7696 unsafe {
7697 u8x64 {
7698 val: core::mem::transmute(self.val),
7699 simd: self.simd,
7700 }
7701 }
7702 }
7703 #[inline(always)]
7704 fn from_bytes(value: Self::Bytes) -> Self {
7705 unsafe {
7706 Self {
7707 val: core::mem::transmute(value.val),
7708 simd: value.simd,
7709 }
7710 }
7711 }
7712}
7713impl<S: Simd> f64x8<S> {
7714 #[inline(always)]
7715 pub fn abs(self) -> f64x8<S> {
7716 self.simd.abs_f64x8(self)
7717 }
7718 #[inline(always)]
7719 pub fn neg(self) -> f64x8<S> {
7720 self.simd.neg_f64x8(self)
7721 }
7722 #[inline(always)]
7723 pub fn sqrt(self) -> f64x8<S> {
7724 self.simd.sqrt_f64x8(self)
7725 }
7726 #[inline(always)]
7727 pub fn add(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7728 self.simd.add_f64x8(self, rhs.simd_into(self.simd))
7729 }
7730 #[inline(always)]
7731 pub fn sub(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7732 self.simd.sub_f64x8(self, rhs.simd_into(self.simd))
7733 }
7734 #[inline(always)]
7735 pub fn mul(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7736 self.simd.mul_f64x8(self, rhs.simd_into(self.simd))
7737 }
7738 #[inline(always)]
7739 pub fn div(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7740 self.simd.div_f64x8(self, rhs.simd_into(self.simd))
7741 }
7742 #[inline(always)]
7743 pub fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7744 self.simd.copysign_f64x8(self, rhs.simd_into(self.simd))
7745 }
7746 #[inline(always)]
7747 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7748 self.simd.simd_eq_f64x8(self, rhs.simd_into(self.simd))
7749 }
7750 #[inline(always)]
7751 pub fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7752 self.simd.simd_lt_f64x8(self, rhs.simd_into(self.simd))
7753 }
7754 #[inline(always)]
7755 pub fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7756 self.simd.simd_le_f64x8(self, rhs.simd_into(self.simd))
7757 }
7758 #[inline(always)]
7759 pub fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7760 self.simd.simd_ge_f64x8(self, rhs.simd_into(self.simd))
7761 }
7762 #[inline(always)]
7763 pub fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7764 self.simd.simd_gt_f64x8(self, rhs.simd_into(self.simd))
7765 }
7766 #[inline(always)]
7767 pub fn max(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7768 self.simd.max_f64x8(self, rhs.simd_into(self.simd))
7769 }
7770 #[inline(always)]
7771 pub fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7772 self.simd.max_precise_f64x8(self, rhs.simd_into(self.simd))
7773 }
7774 #[inline(always)]
7775 pub fn min(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7776 self.simd.min_f64x8(self, rhs.simd_into(self.simd))
7777 }
7778 #[inline(always)]
7779 pub fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7780 self.simd.min_precise_f64x8(self, rhs.simd_into(self.simd))
7781 }
7782 #[inline(always)]
7783 pub fn floor(self) -> f64x8<S> {
7784 self.simd.floor_f64x8(self)
7785 }
7786 #[inline(always)]
7787 pub fn fract(self) -> f64x8<S> {
7788 self.simd.fract_f64x8(self)
7789 }
7790 #[inline(always)]
7791 pub fn trunc(self) -> f64x8<S> {
7792 self.simd.trunc_f64x8(self)
7793 }
7794 #[inline(always)]
7795 pub fn reinterpret_f32(self) -> f32x16<S> {
7796 self.simd.reinterpret_f32_f64x8(self)
7797 }
7798}
7799impl<S: Simd> crate::SimdBase<f64, S> for f64x8<S> {
7800 const N: usize = 8;
7801 type Mask = mask64x8<S>;
7802 type Block = f64x2<S>;
7803 #[inline(always)]
7804 fn witness(&self) -> S {
7805 self.simd
7806 }
7807 #[inline(always)]
7808 fn as_slice(&self) -> &[f64] {
7809 &self.val
7810 }
7811 #[inline(always)]
7812 fn as_mut_slice(&mut self) -> &mut [f64] {
7813 &mut self.val
7814 }
7815 #[inline(always)]
7816 fn from_slice(simd: S, slice: &[f64]) -> Self {
7817 let mut val = [0.0; 8];
7818 val.copy_from_slice(slice);
7819 Self { val, simd }
7820 }
7821 #[inline(always)]
7822 fn splat(simd: S, val: f64) -> Self {
7823 simd.splat_f64x8(val)
7824 }
7825 #[inline(always)]
7826 fn block_splat(block: Self::Block) -> Self {
7827 let block2 = block.combine(block);
7828 block2.combine(block2)
7829 }
7830}
7831impl<S: Simd> crate::SimdFloat<f64, S> for f64x8<S> {
7832 #[inline(always)]
7833 fn abs(self) -> f64x8<S> {
7834 self.simd.abs_f64x8(self)
7835 }
7836 #[inline(always)]
7837 fn sqrt(self) -> f64x8<S> {
7838 self.simd.sqrt_f64x8(self)
7839 }
7840 #[inline(always)]
7841 fn copysign(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7842 self.simd.copysign_f64x8(self, rhs.simd_into(self.simd))
7843 }
7844 #[inline(always)]
7845 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7846 self.simd.simd_eq_f64x8(self, rhs.simd_into(self.simd))
7847 }
7848 #[inline(always)]
7849 fn simd_lt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7850 self.simd.simd_lt_f64x8(self, rhs.simd_into(self.simd))
7851 }
7852 #[inline(always)]
7853 fn simd_le(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7854 self.simd.simd_le_f64x8(self, rhs.simd_into(self.simd))
7855 }
7856 #[inline(always)]
7857 fn simd_ge(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7858 self.simd.simd_ge_f64x8(self, rhs.simd_into(self.simd))
7859 }
7860 #[inline(always)]
7861 fn simd_gt(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
7862 self.simd.simd_gt_f64x8(self, rhs.simd_into(self.simd))
7863 }
7864 #[inline(always)]
7865 fn zip_low(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7866 self.simd.zip_low_f64x8(self, rhs.simd_into(self.simd))
7867 }
7868 #[inline(always)]
7869 fn zip_high(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7870 self.simd.zip_high_f64x8(self, rhs.simd_into(self.simd))
7871 }
7872 #[inline(always)]
7873 fn unzip_low(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7874 self.simd.unzip_low_f64x8(self, rhs.simd_into(self.simd))
7875 }
7876 #[inline(always)]
7877 fn unzip_high(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7878 self.simd.unzip_high_f64x8(self, rhs.simd_into(self.simd))
7879 }
7880 #[inline(always)]
7881 fn max(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7882 self.simd.max_f64x8(self, rhs.simd_into(self.simd))
7883 }
7884 #[inline(always)]
7885 fn max_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7886 self.simd.max_precise_f64x8(self, rhs.simd_into(self.simd))
7887 }
7888 #[inline(always)]
7889 fn min(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7890 self.simd.min_f64x8(self, rhs.simd_into(self.simd))
7891 }
7892 #[inline(always)]
7893 fn min_precise(self, rhs: impl SimdInto<Self, S>) -> f64x8<S> {
7894 self.simd.min_precise_f64x8(self, rhs.simd_into(self.simd))
7895 }
7896 #[inline(always)]
7897 fn madd(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x8<S> {
7898 self.simd
7899 .madd_f64x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
7900 }
7901 #[inline(always)]
7902 fn msub(self, op1: impl SimdInto<Self, S>, op2: impl SimdInto<Self, S>) -> f64x8<S> {
7903 self.simd
7904 .msub_f64x8(self, op1.simd_into(self.simd), op2.simd_into(self.simd))
7905 }
7906 #[inline(always)]
7907 fn floor(self) -> f64x8<S> {
7908 self.simd.floor_f64x8(self)
7909 }
7910 #[inline(always)]
7911 fn fract(self) -> f64x8<S> {
7912 self.simd.fract_f64x8(self)
7913 }
7914 #[inline(always)]
7915 fn trunc(self) -> f64x8<S> {
7916 self.simd.trunc_f64x8(self)
7917 }
7918}
/// A 512-bit mask of eight 64-bit lanes, tagged with the SIMD capability `S`.
#[derive(Clone, Copy, Debug)]
#[repr(C, align(64))]
pub struct mask64x8<S: Simd> {
    /// The per-lane mask words.
    pub val: [i64; 8],
    /// The SIMD capability token used to dispatch operations.
    pub simd: S,
}
7925impl<S: Simd> SimdFrom<[i64; 8], S> for mask64x8<S> {
7926 #[inline(always)]
7927 fn simd_from(val: [i64; 8], simd: S) -> Self {
7928 Self {
7929 val: [
7930 val[0usize],
7931 val[1usize],
7932 val[2usize],
7933 val[3usize],
7934 val[4usize],
7935 val[5usize],
7936 val[6usize],
7937 val[7usize],
7938 ],
7939 simd,
7940 }
7941 }
7942}
7943impl<S: Simd> From<mask64x8<S>> for [i64; 8] {
7944 #[inline(always)]
7945 fn from(value: mask64x8<S>) -> Self {
7946 value.val
7947 }
7948}
7949impl<S: Simd> core::ops::Deref for mask64x8<S> {
7950 type Target = [i64; 8];
7951 #[inline(always)]
7952 fn deref(&self) -> &Self::Target {
7953 &self.val
7954 }
7955}
7956impl<S: Simd> core::ops::DerefMut for mask64x8<S> {
7957 #[inline(always)]
7958 fn deref_mut(&mut self) -> &mut Self::Target {
7959 &mut self.val
7960 }
7961}
7962impl<S: Simd> SimdFrom<i64, S> for mask64x8<S> {
7963 #[inline(always)]
7964 fn simd_from(value: i64, simd: S) -> Self {
7965 simd.splat_mask64x8(value)
7966 }
7967}
7968impl<S: Simd> Select<mask64x8<S>> for mask64x8<S> {
7969 #[inline(always)]
7970 fn select(self, if_true: mask64x8<S>, if_false: mask64x8<S>) -> mask64x8<S> {
7971 self.simd.select_mask64x8(self, if_true, if_false)
7972 }
7973}
7974impl<S: Simd> Bytes for mask64x8<S> {
7975 type Bytes = u8x64<S>;
7976 #[inline(always)]
7977 fn to_bytes(self) -> Self::Bytes {
7978 unsafe {
7979 u8x64 {
7980 val: core::mem::transmute(self.val),
7981 simd: self.simd,
7982 }
7983 }
7984 }
7985 #[inline(always)]
7986 fn from_bytes(value: Self::Bytes) -> Self {
7987 unsafe {
7988 Self {
7989 val: core::mem::transmute(value.val),
7990 simd: value.simd,
7991 }
7992 }
7993 }
7994}
7995impl<S: Simd> mask64x8<S> {
7996 #[inline(always)]
7997 pub fn not(self) -> mask64x8<S> {
7998 self.simd.not_mask64x8(self)
7999 }
8000 #[inline(always)]
8001 pub fn and(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
8002 self.simd.and_mask64x8(self, rhs.simd_into(self.simd))
8003 }
8004 #[inline(always)]
8005 pub fn or(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
8006 self.simd.or_mask64x8(self, rhs.simd_into(self.simd))
8007 }
8008 #[inline(always)]
8009 pub fn xor(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
8010 self.simd.xor_mask64x8(self, rhs.simd_into(self.simd))
8011 }
8012 #[inline(always)]
8013 pub fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
8014 self.simd.simd_eq_mask64x8(self, rhs.simd_into(self.simd))
8015 }
8016}
8017impl<S: Simd> crate::SimdBase<i64, S> for mask64x8<S> {
8018 const N: usize = 8;
8019 type Mask = mask64x8<S>;
8020 type Block = mask64x2<S>;
8021 #[inline(always)]
8022 fn witness(&self) -> S {
8023 self.simd
8024 }
8025 #[inline(always)]
8026 fn as_slice(&self) -> &[i64] {
8027 &self.val
8028 }
8029 #[inline(always)]
8030 fn as_mut_slice(&mut self) -> &mut [i64] {
8031 &mut self.val
8032 }
8033 #[inline(always)]
8034 fn from_slice(simd: S, slice: &[i64]) -> Self {
8035 let mut val = [0; 8];
8036 val.copy_from_slice(slice);
8037 Self { val, simd }
8038 }
8039 #[inline(always)]
8040 fn splat(simd: S, val: i64) -> Self {
8041 simd.splat_mask64x8(val)
8042 }
8043 #[inline(always)]
8044 fn block_splat(block: Self::Block) -> Self {
8045 let block2 = block.combine(block);
8046 block2.combine(block2)
8047 }
8048}
8049impl<S: Simd> crate::SimdMask<i64, S> for mask64x8<S> {
8050 #[inline(always)]
8051 fn simd_eq(self, rhs: impl SimdInto<Self, S>) -> mask64x8<S> {
8052 self.simd.simd_eq_mask64x8(self, rhs.simd_into(self.simd))
8053 }
8054}