use crate::{Simd, SimdInto};
use crate::{
    f32x4, f32x8, f32x16, f64x2, f64x4, f64x8, i8x16, i8x32, i8x64, i16x8, i16x16, i16x32, i32x4,
    i32x8, i32x16, mask8x16, mask8x32, mask8x64, mask16x8, mask16x16, mask16x32, mask32x4,
    mask32x8, mask32x16, mask64x2, mask64x4, mask64x8, u8x16, u8x32, u8x64, u16x8, u16x16, u16x32,
    u32x4, u32x8, u32x16,
};
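// Everything below is mechanically generated operator plumbing: for each vector
// type, the standard arithmetic, bitwise, and shift operators forward to the
// corresponding method on the `Simd` backend stored in `self.simd`, and the
// vector-with-scalar variants splat the scalar through `simd_into` first.
//
// A minimal usage sketch (hedged: `a` and `b` are assumed to be `f32x4<S>`
// values already obtained from some backend; only operators defined in this
// file are used):
//
//     let sum = a + b;            // lane-wise add via `Simd::add_f32x4`
//     let scaled = sum * 0.5;     // scalar rhs is splat with `simd_into`
//     let flipped = 1.0 - scaled; // scalar lhs goes through the reverse impl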
impl<S: Simd> core::ops::Neg for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn neg(self) -> Self::Output {
        self.simd.neg_f32x4(self)
    }
}
impl<S: Simd> core::ops::Add for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_f32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for f32x4<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_f32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<f32> for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: f32) -> Self::Output {
        self.simd.add_f32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<f32> for f32x4<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: f32) {
        *self = self.simd.add_f32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<f32x4<S>> for f32 {
    type Output = f32x4<S>;
    #[inline(always)]
    fn add(self, rhs: f32x4<S>) -> Self::Output {
        rhs.simd.add_f32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_f32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for f32x4<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_f32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<f32> for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: f32) -> Self::Output {
        self.simd.sub_f32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<f32> for f32x4<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: f32) {
        *self = self.simd.sub_f32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<f32x4<S>> for f32 {
    type Output = f32x4<S>;
    #[inline(always)]
    fn sub(self, rhs: f32x4<S>) -> Self::Output {
        rhs.simd.sub_f32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_f32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for f32x4<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_f32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<f32> for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: f32) -> Self::Output {
        self.simd.mul_f32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<f32> for f32x4<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: f32) {
        *self = self.simd.mul_f32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<f32x4<S>> for f32 {
    type Output = f32x4<S>;
    #[inline(always)]
    fn mul(self, rhs: f32x4<S>) -> Self::Output {
        rhs.simd.mul_f32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Div for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn div(self, rhs: Self) -> Self::Output {
        self.simd.div_f32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::DivAssign for f32x4<S> {
    #[inline(always)]
    fn div_assign(&mut self, rhs: Self) {
        *self = self.simd.div_f32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Div<f32> for f32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn div(self, rhs: f32) -> Self::Output {
        self.simd.div_f32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::DivAssign<f32> for f32x4<S> {
    #[inline(always)]
    fn div_assign(&mut self, rhs: f32) {
        *self = self.simd.div_f32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Div<f32x4<S>> for f32 {
    type Output = f32x4<S>;
    #[inline(always)]
    fn div(self, rhs: f32x4<S>) -> Self::Output {
        rhs.simd.div_f32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Neg for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn neg(self) -> Self::Output {
        self.simd.neg_i8x16(self)
    }
}
impl<S: Simd> core::ops::Add for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for i8x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<i8> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: i8) -> Self::Output {
        self.simd.add_i8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<i8> for i8x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: i8) {
        *self = self.simd.add_i8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<i8x16<S>> for i8 {
    type Output = i8x16<S>;
    #[inline(always)]
    fn add(self, rhs: i8x16<S>) -> Self::Output {
        rhs.simd.add_i8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for i8x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<i8> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: i8) -> Self::Output {
        self.simd.sub_i8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<i8> for i8x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: i8) {
        *self = self.simd.sub_i8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<i8x16<S>> for i8 {
    type Output = i8x16<S>;
    #[inline(always)]
    fn sub(self, rhs: i8x16<S>) -> Self::Output {
        rhs.simd.sub_i8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for i8x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<i8> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: i8) -> Self::Output {
        self.simd.mul_i8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<i8> for i8x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: i8) {
        *self = self.simd.mul_i8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<i8x16<S>> for i8 {
    type Output = i8x16<S>;
    #[inline(always)]
    fn mul(self, rhs: i8x16<S>) -> Self::Output {
        rhs.simd.mul_i8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for i8x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i8> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i8) -> Self::Output {
        self.simd.and_i8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i8> for i8x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i8) {
        *self = self.simd.and_i8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<i8x16<S>> for i8 {
    type Output = i8x16<S>;
    #[inline(always)]
    fn bitand(self, rhs: i8x16<S>) -> Self::Output {
        rhs.simd.and_i8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for i8x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i8> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i8) -> Self::Output {
        self.simd.or_i8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i8> for i8x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i8) {
        *self = self.simd.or_i8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<i8x16<S>> for i8 {
    type Output = i8x16<S>;
    #[inline(always)]
    fn bitor(self, rhs: i8x16<S>) -> Self::Output {
        rhs.simd.or_i8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for i8x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i8> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i8) -> Self::Output {
        self.simd.xor_i8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i8> for i8x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i8) {
        *self = self.simd.xor_i8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<i8x16<S>> for i8 {
    type Output = i8x16<S>;
    #[inline(always)]
    fn bitxor(self, rhs: i8x16<S>) -> Self::Output {
        rhs.simd.xor_i8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for i8x16<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for i8x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_i8x16(*self, rhs);
    }
}
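// The shift impls above take a `u32` count applied uniformly to every lane
// (`shl_*` / `shr_*`); the impls below take another vector as the count and
// forward to the backend's `shrv_*` method, presumably a per-lane
// (variable-count) shift.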
impl<S: Simd> core::ops::Shr for i8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_i8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for i8x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_i8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for u8x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<u8> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: u8) -> Self::Output {
        self.simd.add_u8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<u8> for u8x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: u8) {
        *self = self.simd.add_u8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<u8x16<S>> for u8 {
    type Output = u8x16<S>;
    #[inline(always)]
    fn add(self, rhs: u8x16<S>) -> Self::Output {
        rhs.simd.add_u8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for u8x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<u8> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: u8) -> Self::Output {
        self.simd.sub_u8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<u8> for u8x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: u8) {
        *self = self.simd.sub_u8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<u8x16<S>> for u8 {
    type Output = u8x16<S>;
    #[inline(always)]
    fn sub(self, rhs: u8x16<S>) -> Self::Output {
        rhs.simd.sub_u8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for u8x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<u8> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: u8) -> Self::Output {
        self.simd.mul_u8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<u8> for u8x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: u8) {
        *self = self.simd.mul_u8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<u8x16<S>> for u8 {
    type Output = u8x16<S>;
    #[inline(always)]
    fn mul(self, rhs: u8x16<S>) -> Self::Output {
        rhs.simd.mul_u8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for u8x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<u8> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: u8) -> Self::Output {
        self.simd.and_u8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<u8> for u8x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: u8) {
        *self = self.simd.and_u8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<u8x16<S>> for u8 {
    type Output = u8x16<S>;
    #[inline(always)]
    fn bitand(self, rhs: u8x16<S>) -> Self::Output {
        rhs.simd.and_u8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for u8x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<u8> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: u8) -> Self::Output {
        self.simd.or_u8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<u8> for u8x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: u8) {
        *self = self.simd.or_u8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<u8x16<S>> for u8 {
    type Output = u8x16<S>;
    #[inline(always)]
    fn bitor(self, rhs: u8x16<S>) -> Self::Output {
        rhs.simd.or_u8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for u8x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<u8> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: u8) -> Self::Output {
        self.simd.xor_u8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<u8> for u8x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: u8) {
        *self = self.simd.xor_u8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<u8x16<S>> for u8 {
    type Output = u8x16<S>;
    #[inline(always)]
    fn bitxor(self, rhs: u8x16<S>) -> Self::Output {
        rhs.simd.xor_u8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for u8x16<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for u8x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_u8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for u8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_u8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for u8x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_u8x16(*self, rhs);
    }
}
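// Mask vectors only get the bitwise operators plus `Not`; their scalar operands
// are `i8`/`i16`/`i32` values splat through `simd_into`, presumably matching the
// masks' lane width. A hedged sketch (assuming `m` and `n` are `mask8x16<S>`
// values from the same backend):
//
//     let keep = !m & n; // via `not_mask8x16` and `and_mask8x16`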
impl<S: Simd> core::ops::BitAnd for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_mask8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for mask8x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_mask8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i8> for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i8) -> Self::Output {
        self.simd.and_mask8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i8> for mask8x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i8) {
        *self = self.simd.and_mask8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<mask8x16<S>> for i8 {
    type Output = mask8x16<S>;
    #[inline(always)]
    fn bitand(self, rhs: mask8x16<S>) -> Self::Output {
        rhs.simd.and_mask8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_mask8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for mask8x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_mask8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i8> for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i8) -> Self::Output {
        self.simd.or_mask8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i8> for mask8x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i8) {
        *self = self.simd.or_mask8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<mask8x16<S>> for i8 {
    type Output = mask8x16<S>;
    #[inline(always)]
    fn bitor(self, rhs: mask8x16<S>) -> Self::Output {
        rhs.simd.or_mask8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_mask8x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for mask8x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_mask8x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i8> for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i8) -> Self::Output {
        self.simd.xor_mask8x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i8> for mask8x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i8) {
        *self = self.simd.xor_mask8x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<mask8x16<S>> for i8 {
    type Output = mask8x16<S>;
    #[inline(always)]
    fn bitxor(self, rhs: mask8x16<S>) -> Self::Output {
        rhs.simd.xor_mask8x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Not for mask8x16<S> {
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        self.simd.not_mask8x16(self)
    }
}
impl<S: Simd> core::ops::Neg for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn neg(self) -> Self::Output {
        self.simd.neg_i16x8(self)
    }
}
impl<S: Simd> core::ops::Add for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for i16x8<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<i16> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: i16) -> Self::Output {
        self.simd.add_i16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<i16> for i16x8<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: i16) {
        *self = self.simd.add_i16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<i16x8<S>> for i16 {
    type Output = i16x8<S>;
    #[inline(always)]
    fn add(self, rhs: i16x8<S>) -> Self::Output {
        rhs.simd.add_i16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for i16x8<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<i16> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: i16) -> Self::Output {
        self.simd.sub_i16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<i16> for i16x8<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: i16) {
        *self = self.simd.sub_i16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<i16x8<S>> for i16 {
    type Output = i16x8<S>;
    #[inline(always)]
    fn sub(self, rhs: i16x8<S>) -> Self::Output {
        rhs.simd.sub_i16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for i16x8<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<i16> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: i16) -> Self::Output {
        self.simd.mul_i16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<i16> for i16x8<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: i16) {
        *self = self.simd.mul_i16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<i16x8<S>> for i16 {
    type Output = i16x8<S>;
    #[inline(always)]
    fn mul(self, rhs: i16x8<S>) -> Self::Output {
        rhs.simd.mul_i16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for i16x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i16> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i16) -> Self::Output {
        self.simd.and_i16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i16> for i16x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i16) {
        *self = self.simd.and_i16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<i16x8<S>> for i16 {
    type Output = i16x8<S>;
    #[inline(always)]
    fn bitand(self, rhs: i16x8<S>) -> Self::Output {
        rhs.simd.and_i16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for i16x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i16> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i16) -> Self::Output {
        self.simd.or_i16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i16> for i16x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i16) {
        *self = self.simd.or_i16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<i16x8<S>> for i16 {
    type Output = i16x8<S>;
    #[inline(always)]
    fn bitor(self, rhs: i16x8<S>) -> Self::Output {
        rhs.simd.or_i16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for i16x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i16> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i16) -> Self::Output {
        self.simd.xor_i16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i16> for i16x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i16) {
        *self = self.simd.xor_i16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<i16x8<S>> for i16 {
    type Output = i16x8<S>;
    #[inline(always)]
    fn bitxor(self, rhs: i16x8<S>) -> Self::Output {
        rhs.simd.xor_i16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for i16x8<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for i16x8<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for i16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_i16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for i16x8<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_i16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for u16x8<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<u16> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: u16) -> Self::Output {
        self.simd.add_u16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<u16> for u16x8<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: u16) {
        *self = self.simd.add_u16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<u16x8<S>> for u16 {
    type Output = u16x8<S>;
    #[inline(always)]
    fn add(self, rhs: u16x8<S>) -> Self::Output {
        rhs.simd.add_u16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for u16x8<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<u16> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: u16) -> Self::Output {
        self.simd.sub_u16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<u16> for u16x8<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: u16) {
        *self = self.simd.sub_u16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<u16x8<S>> for u16 {
    type Output = u16x8<S>;
    #[inline(always)]
    fn sub(self, rhs: u16x8<S>) -> Self::Output {
        rhs.simd.sub_u16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for u16x8<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<u16> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: u16) -> Self::Output {
        self.simd.mul_u16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<u16> for u16x8<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: u16) {
        *self = self.simd.mul_u16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<u16x8<S>> for u16 {
    type Output = u16x8<S>;
    #[inline(always)]
    fn mul(self, rhs: u16x8<S>) -> Self::Output {
        rhs.simd.mul_u16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for u16x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<u16> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: u16) -> Self::Output {
        self.simd.and_u16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<u16> for u16x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: u16) {
        *self = self.simd.and_u16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<u16x8<S>> for u16 {
    type Output = u16x8<S>;
    #[inline(always)]
    fn bitand(self, rhs: u16x8<S>) -> Self::Output {
        rhs.simd.and_u16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for u16x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<u16> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: u16) -> Self::Output {
        self.simd.or_u16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<u16> for u16x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: u16) {
        *self = self.simd.or_u16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<u16x8<S>> for u16 {
    type Output = u16x8<S>;
    #[inline(always)]
    fn bitor(self, rhs: u16x8<S>) -> Self::Output {
        rhs.simd.or_u16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for u16x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<u16> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: u16) -> Self::Output {
        self.simd.xor_u16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<u16> for u16x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: u16) {
        *self = self.simd.xor_u16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<u16x8<S>> for u16 {
    type Output = u16x8<S>;
    #[inline(always)]
    fn bitxor(self, rhs: u16x8<S>) -> Self::Output {
        rhs.simd.xor_u16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for u16x8<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for u16x8<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for u16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_u16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for u16x8<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_u16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_mask16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for mask16x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_mask16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i16> for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i16) -> Self::Output {
        self.simd.and_mask16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i16> for mask16x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i16) {
        *self = self.simd.and_mask16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<mask16x8<S>> for i16 {
    type Output = mask16x8<S>;
    #[inline(always)]
    fn bitand(self, rhs: mask16x8<S>) -> Self::Output {
        rhs.simd.and_mask16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_mask16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for mask16x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_mask16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i16> for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i16) -> Self::Output {
        self.simd.or_mask16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i16> for mask16x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i16) {
        *self = self.simd.or_mask16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<mask16x8<S>> for i16 {
    type Output = mask16x8<S>;
    #[inline(always)]
    fn bitor(self, rhs: mask16x8<S>) -> Self::Output {
        rhs.simd.or_mask16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_mask16x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for mask16x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_mask16x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i16> for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i16) -> Self::Output {
        self.simd.xor_mask16x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i16> for mask16x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i16) {
        *self = self.simd.xor_mask16x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<mask16x8<S>> for i16 {
    type Output = mask16x8<S>;
    #[inline(always)]
    fn bitxor(self, rhs: mask16x8<S>) -> Self::Output {
        rhs.simd.xor_mask16x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Not for mask16x8<S> {
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        self.simd.not_mask16x8(self)
    }
}
impl<S: Simd> core::ops::Neg for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn neg(self) -> Self::Output {
        self.simd.neg_i32x4(self)
    }
}
impl<S: Simd> core::ops::Add for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for i32x4<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<i32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: i32) -> Self::Output {
        self.simd.add_i32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<i32> for i32x4<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: i32) {
        *self = self.simd.add_i32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<i32x4<S>> for i32 {
    type Output = i32x4<S>;
    #[inline(always)]
    fn add(self, rhs: i32x4<S>) -> Self::Output {
        rhs.simd.add_i32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for i32x4<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<i32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: i32) -> Self::Output {
        self.simd.sub_i32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<i32> for i32x4<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: i32) {
        *self = self.simd.sub_i32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<i32x4<S>> for i32 {
    type Output = i32x4<S>;
    #[inline(always)]
    fn sub(self, rhs: i32x4<S>) -> Self::Output {
        rhs.simd.sub_i32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for i32x4<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<i32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: i32) -> Self::Output {
        self.simd.mul_i32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<i32> for i32x4<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: i32) {
        *self = self.simd.mul_i32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<i32x4<S>> for i32 {
    type Output = i32x4<S>;
    #[inline(always)]
    fn mul(self, rhs: i32x4<S>) -> Self::Output {
        rhs.simd.mul_i32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for i32x4<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i32) -> Self::Output {
        self.simd.and_i32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i32> for i32x4<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i32) {
        *self = self.simd.and_i32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<i32x4<S>> for i32 {
    type Output = i32x4<S>;
    #[inline(always)]
    fn bitand(self, rhs: i32x4<S>) -> Self::Output {
        rhs.simd.and_i32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for i32x4<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i32) -> Self::Output {
        self.simd.or_i32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i32> for i32x4<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i32) {
        *self = self.simd.or_i32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<i32x4<S>> for i32 {
    type Output = i32x4<S>;
    #[inline(always)]
    fn bitor(self, rhs: i32x4<S>) -> Self::Output {
        rhs.simd.or_i32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for i32x4<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i32) -> Self::Output {
        self.simd.xor_i32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i32> for i32x4<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i32) {
        *self = self.simd.xor_i32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<i32x4<S>> for i32 {
    type Output = i32x4<S>;
    #[inline(always)]
    fn bitxor(self, rhs: i32x4<S>) -> Self::Output {
        rhs.simd.xor_i32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for i32x4<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for i32x4<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for i32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_i32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for i32x4<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_i32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for u32x4<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: u32) -> Self::Output {
        self.simd.add_u32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: u32) {
        *self = self.simd.add_u32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<u32x4<S>> for u32 {
    type Output = u32x4<S>;
    #[inline(always)]
    fn add(self, rhs: u32x4<S>) -> Self::Output {
        rhs.simd.add_u32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for u32x4<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: u32) -> Self::Output {
        self.simd.sub_u32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: u32) {
        *self = self.simd.sub_u32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<u32x4<S>> for u32 {
    type Output = u32x4<S>;
    #[inline(always)]
    fn sub(self, rhs: u32x4<S>) -> Self::Output {
        rhs.simd.sub_u32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for u32x4<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: u32) -> Self::Output {
        self.simd.mul_u32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: u32) {
        *self = self.simd.mul_u32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<u32x4<S>> for u32 {
    type Output = u32x4<S>;
    #[inline(always)]
    fn mul(self, rhs: u32x4<S>) -> Self::Output {
        rhs.simd.mul_u32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for u32x4<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: u32) -> Self::Output {
        self.simd.and_u32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: u32) {
        *self = self.simd.and_u32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<u32x4<S>> for u32 {
    type Output = u32x4<S>;
    #[inline(always)]
    fn bitand(self, rhs: u32x4<S>) -> Self::Output {
        rhs.simd.and_u32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for u32x4<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: u32) -> Self::Output {
        self.simd.or_u32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: u32) {
        *self = self.simd.or_u32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<u32x4<S>> for u32 {
    type Output = u32x4<S>;
    #[inline(always)]
    fn bitor(self, rhs: u32x4<S>) -> Self::Output {
        rhs.simd.or_u32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for u32x4<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: u32) -> Self::Output {
        self.simd.xor_u32x4(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: u32) {
        *self = self.simd.xor_u32x4(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<u32x4<S>> for u32 {
    type Output = u32x4<S>;
    #[inline(always)]
    fn bitxor(self, rhs: u32x4<S>) -> Self::Output {
        rhs.simd.xor_u32x4(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for u32x4<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_u32x4(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for u32x4<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_u32x4(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for u32x4<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_u32x4(*self, rhs);
    }
}
1807impl<S: Simd> core::ops::BitAnd for mask32x4<S> {
1808 type Output = Self;
1809 #[inline(always)]
1810 fn bitand(self, rhs: Self) -> Self::Output {
1811 self.simd.and_mask32x4(self, rhs)
1812 }
1813}
1814impl<S: Simd> core::ops::BitAndAssign for mask32x4<S> {
1815 #[inline(always)]
1816 fn bitand_assign(&mut self, rhs: Self) {
1817 *self = self.simd.and_mask32x4(*self, rhs);
1818 }
1819}
1820impl<S: Simd> core::ops::BitAnd<i32> for mask32x4<S> {
1821 type Output = Self;
1822 #[inline(always)]
1823 fn bitand(self, rhs: i32) -> Self::Output {
1824 self.simd.and_mask32x4(self, rhs.simd_into(self.simd))
1825 }
1826}
1827impl<S: Simd> core::ops::BitAndAssign<i32> for mask32x4<S> {
1828 #[inline(always)]
1829 fn bitand_assign(&mut self, rhs: i32) {
1830 *self = self.simd.and_mask32x4(*self, rhs.simd_into(self.simd));
1831 }
1832}
1833impl<S: Simd> core::ops::BitAnd<mask32x4<S>> for i32 {
1834 type Output = mask32x4<S>;
1835 #[inline(always)]
1836 fn bitand(self, rhs: mask32x4<S>) -> Self::Output {
1837 rhs.simd.and_mask32x4(self.simd_into(rhs.simd), rhs)
1838 }
1839}
1840impl<S: Simd> core::ops::BitOr for mask32x4<S> {
1841 type Output = Self;
1842 #[inline(always)]
1843 fn bitor(self, rhs: Self) -> Self::Output {
1844 self.simd.or_mask32x4(self, rhs)
1845 }
1846}
1847impl<S: Simd> core::ops::BitOrAssign for mask32x4<S> {
1848 #[inline(always)]
1849 fn bitor_assign(&mut self, rhs: Self) {
1850 *self = self.simd.or_mask32x4(*self, rhs);
1851 }
1852}
1853impl<S: Simd> core::ops::BitOr<i32> for mask32x4<S> {
1854 type Output = Self;
1855 #[inline(always)]
1856 fn bitor(self, rhs: i32) -> Self::Output {
1857 self.simd.or_mask32x4(self, rhs.simd_into(self.simd))
1858 }
1859}
1860impl<S: Simd> core::ops::BitOrAssign<i32> for mask32x4<S> {
1861 #[inline(always)]
1862 fn bitor_assign(&mut self, rhs: i32) {
1863 *self = self.simd.or_mask32x4(*self, rhs.simd_into(self.simd));
1864 }
1865}
1866impl<S: Simd> core::ops::BitOr<mask32x4<S>> for i32 {
1867 type Output = mask32x4<S>;
1868 #[inline(always)]
1869 fn bitor(self, rhs: mask32x4<S>) -> Self::Output {
1870 rhs.simd.or_mask32x4(self.simd_into(rhs.simd), rhs)
1871 }
1872}
1873impl<S: Simd> core::ops::BitXor for mask32x4<S> {
1874 type Output = Self;
1875 #[inline(always)]
1876 fn bitxor(self, rhs: Self) -> Self::Output {
1877 self.simd.xor_mask32x4(self, rhs)
1878 }
1879}
1880impl<S: Simd> core::ops::BitXorAssign for mask32x4<S> {
1881 #[inline(always)]
1882 fn bitxor_assign(&mut self, rhs: Self) {
1883 *self = self.simd.xor_mask32x4(*self, rhs);
1884 }
1885}
1886impl<S: Simd> core::ops::BitXor<i32> for mask32x4<S> {
1887 type Output = Self;
1888 #[inline(always)]
1889 fn bitxor(self, rhs: i32) -> Self::Output {
1890 self.simd.xor_mask32x4(self, rhs.simd_into(self.simd))
1891 }
1892}
1893impl<S: Simd> core::ops::BitXorAssign<i32> for mask32x4<S> {
1894 #[inline(always)]
1895 fn bitxor_assign(&mut self, rhs: i32) {
1896 *self = self.simd.xor_mask32x4(*self, rhs.simd_into(self.simd));
1897 }
1898}
1899impl<S: Simd> core::ops::BitXor<mask32x4<S>> for i32 {
1900 type Output = mask32x4<S>;
1901 #[inline(always)]
1902 fn bitxor(self, rhs: mask32x4<S>) -> Self::Output {
1903 rhs.simd.xor_mask32x4(self.simd_into(rhs.simd), rhs)
1904 }
1905}
1906impl<S: Simd> core::ops::Not for mask32x4<S> {
1907 type Output = Self;
1908 #[inline(always)]
1909 fn not(self) -> Self::Output {
1910 self.simd.not_mask32x4(self)
1911 }
1912}
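// Illustrative sketch only: mask vectors compose with `&`, `|`, `^` and `!`
// exactly like scalar booleans, so compound predicates stay branch-free.
// `mask32x4_neither` is a hypothetical name introduced for this example.
#[allow(dead_code)]
#[inline(always)]
fn mask32x4_neither<S: Simd>(a: mask32x4<S>, b: mask32x4<S>) -> mask32x4<S> {
    // Lanes that are set in neither input: `BitOr` then `Not` from above.
    !(a | b)
}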
1913impl<S: Simd> core::ops::Neg for f64x2<S> {
1914 type Output = Self;
1915 #[inline(always)]
1916 fn neg(self) -> Self::Output {
1917 self.simd.neg_f64x2(self)
1918 }
1919}
1920impl<S: Simd> core::ops::Add for f64x2<S> {
1921 type Output = Self;
1922 #[inline(always)]
1923 fn add(self, rhs: Self) -> Self::Output {
1924 self.simd.add_f64x2(self, rhs)
1925 }
1926}
1927impl<S: Simd> core::ops::AddAssign for f64x2<S> {
1928 #[inline(always)]
1929 fn add_assign(&mut self, rhs: Self) {
1930 *self = self.simd.add_f64x2(*self, rhs);
1931 }
1932}
1933impl<S: Simd> core::ops::Add<f64> for f64x2<S> {
1934 type Output = Self;
1935 #[inline(always)]
1936 fn add(self, rhs: f64) -> Self::Output {
1937 self.simd.add_f64x2(self, rhs.simd_into(self.simd))
1938 }
1939}
1940impl<S: Simd> core::ops::AddAssign<f64> for f64x2<S> {
1941 #[inline(always)]
1942 fn add_assign(&mut self, rhs: f64) {
1943 *self = self.simd.add_f64x2(*self, rhs.simd_into(self.simd));
1944 }
1945}
1946impl<S: Simd> core::ops::Add<f64x2<S>> for f64 {
1947 type Output = f64x2<S>;
1948 #[inline(always)]
1949 fn add(self, rhs: f64x2<S>) -> Self::Output {
1950 rhs.simd.add_f64x2(self.simd_into(rhs.simd), rhs)
1951 }
1952}
1953impl<S: Simd> core::ops::Sub for f64x2<S> {
1954 type Output = Self;
1955 #[inline(always)]
1956 fn sub(self, rhs: Self) -> Self::Output {
1957 self.simd.sub_f64x2(self, rhs)
1958 }
1959}
1960impl<S: Simd> core::ops::SubAssign for f64x2<S> {
1961 #[inline(always)]
1962 fn sub_assign(&mut self, rhs: Self) {
1963 *self = self.simd.sub_f64x2(*self, rhs);
1964 }
1965}
1966impl<S: Simd> core::ops::Sub<f64> for f64x2<S> {
1967 type Output = Self;
1968 #[inline(always)]
1969 fn sub(self, rhs: f64) -> Self::Output {
1970 self.simd.sub_f64x2(self, rhs.simd_into(self.simd))
1971 }
1972}
1973impl<S: Simd> core::ops::SubAssign<f64> for f64x2<S> {
1974 #[inline(always)]
1975 fn sub_assign(&mut self, rhs: f64) {
1976 *self = self.simd.sub_f64x2(*self, rhs.simd_into(self.simd));
1977 }
1978}
1979impl<S: Simd> core::ops::Sub<f64x2<S>> for f64 {
1980 type Output = f64x2<S>;
1981 #[inline(always)]
1982 fn sub(self, rhs: f64x2<S>) -> Self::Output {
1983 rhs.simd.sub_f64x2(self.simd_into(rhs.simd), rhs)
1984 }
1985}
1986impl<S: Simd> core::ops::Mul for f64x2<S> {
1987 type Output = Self;
1988 #[inline(always)]
1989 fn mul(self, rhs: Self) -> Self::Output {
1990 self.simd.mul_f64x2(self, rhs)
1991 }
1992}
1993impl<S: Simd> core::ops::MulAssign for f64x2<S> {
1994 #[inline(always)]
1995 fn mul_assign(&mut self, rhs: Self) {
1996 *self = self.simd.mul_f64x2(*self, rhs);
1997 }
1998}
1999impl<S: Simd> core::ops::Mul<f64> for f64x2<S> {
2000 type Output = Self;
2001 #[inline(always)]
2002 fn mul(self, rhs: f64) -> Self::Output {
2003 self.simd.mul_f64x2(self, rhs.simd_into(self.simd))
2004 }
2005}
2006impl<S: Simd> core::ops::MulAssign<f64> for f64x2<S> {
2007 #[inline(always)]
2008 fn mul_assign(&mut self, rhs: f64) {
2009 *self = self.simd.mul_f64x2(*self, rhs.simd_into(self.simd));
2010 }
2011}
2012impl<S: Simd> core::ops::Mul<f64x2<S>> for f64 {
2013 type Output = f64x2<S>;
2014 #[inline(always)]
2015 fn mul(self, rhs: f64x2<S>) -> Self::Output {
2016 rhs.simd.mul_f64x2(self.simd_into(rhs.simd), rhs)
2017 }
2018}
2019impl<S: Simd> core::ops::Div for f64x2<S> {
2020 type Output = Self;
2021 #[inline(always)]
2022 fn div(self, rhs: Self) -> Self::Output {
2023 self.simd.div_f64x2(self, rhs)
2024 }
2025}
2026impl<S: Simd> core::ops::DivAssign for f64x2<S> {
2027 #[inline(always)]
2028 fn div_assign(&mut self, rhs: Self) {
2029 *self = self.simd.div_f64x2(*self, rhs);
2030 }
2031}
2032impl<S: Simd> core::ops::Div<f64> for f64x2<S> {
2033 type Output = Self;
2034 #[inline(always)]
2035 fn div(self, rhs: f64) -> Self::Output {
2036 self.simd.div_f64x2(self, rhs.simd_into(self.simd))
2037 }
2038}
2039impl<S: Simd> core::ops::DivAssign<f64> for f64x2<S> {
2040 #[inline(always)]
2041 fn div_assign(&mut self, rhs: f64) {
2042 *self = self.simd.div_f64x2(*self, rhs.simd_into(self.simd));
2043 }
2044}
2045impl<S: Simd> core::ops::Div<f64x2<S>> for f64 {
2046 type Output = f64x2<S>;
2047 #[inline(always)]
2048 fn div(self, rhs: f64x2<S>) -> Self::Output {
2049 rhs.simd.div_f64x2(self.simd_into(rhs.simd), rhs)
2050 }
2051}
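// Illustrative sketch only: with the scalar/vector `Mul` and `Add` impls
// above, an element-wise affine transform reads like scalar code.
// `scale_and_bias_f64x2` is a hypothetical helper used purely as an example.
#[allow(dead_code)]
#[inline(always)]
fn scale_and_bias_f64x2<S: Simd>(x: f64x2<S>, scale: f64, bias: f64) -> f64x2<S> {
    // `Mul<f64>` and `Add<f64>` splat the scalars via `SimdInto` before the
    // lane-wise operation.
    x * scale + bias
}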
2052impl<S: Simd> core::ops::BitAnd for mask64x2<S> {
2053 type Output = Self;
2054 #[inline(always)]
2055 fn bitand(self, rhs: Self) -> Self::Output {
2056 self.simd.and_mask64x2(self, rhs)
2057 }
2058}
2059impl<S: Simd> core::ops::BitAndAssign for mask64x2<S> {
2060 #[inline(always)]
2061 fn bitand_assign(&mut self, rhs: Self) {
2062 *self = self.simd.and_mask64x2(*self, rhs);
2063 }
2064}
2065impl<S: Simd> core::ops::BitAnd<i64> for mask64x2<S> {
2066 type Output = Self;
2067 #[inline(always)]
2068 fn bitand(self, rhs: i64) -> Self::Output {
2069 self.simd.and_mask64x2(self, rhs.simd_into(self.simd))
2070 }
2071}
2072impl<S: Simd> core::ops::BitAndAssign<i64> for mask64x2<S> {
2073 #[inline(always)]
2074 fn bitand_assign(&mut self, rhs: i64) {
2075 *self = self.simd.and_mask64x2(*self, rhs.simd_into(self.simd));
2076 }
2077}
2078impl<S: Simd> core::ops::BitAnd<mask64x2<S>> for i64 {
2079 type Output = mask64x2<S>;
2080 #[inline(always)]
2081 fn bitand(self, rhs: mask64x2<S>) -> Self::Output {
2082 rhs.simd.and_mask64x2(self.simd_into(rhs.simd), rhs)
2083 }
2084}
2085impl<S: Simd> core::ops::BitOr for mask64x2<S> {
2086 type Output = Self;
2087 #[inline(always)]
2088 fn bitor(self, rhs: Self) -> Self::Output {
2089 self.simd.or_mask64x2(self, rhs)
2090 }
2091}
2092impl<S: Simd> core::ops::BitOrAssign for mask64x2<S> {
2093 #[inline(always)]
2094 fn bitor_assign(&mut self, rhs: Self) {
2095 *self = self.simd.or_mask64x2(*self, rhs);
2096 }
2097}
2098impl<S: Simd> core::ops::BitOr<i64> for mask64x2<S> {
2099 type Output = Self;
2100 #[inline(always)]
2101 fn bitor(self, rhs: i64) -> Self::Output {
2102 self.simd.or_mask64x2(self, rhs.simd_into(self.simd))
2103 }
2104}
2105impl<S: Simd> core::ops::BitOrAssign<i64> for mask64x2<S> {
2106 #[inline(always)]
2107 fn bitor_assign(&mut self, rhs: i64) {
2108 *self = self.simd.or_mask64x2(*self, rhs.simd_into(self.simd));
2109 }
2110}
2111impl<S: Simd> core::ops::BitOr<mask64x2<S>> for i64 {
2112 type Output = mask64x2<S>;
2113 #[inline(always)]
2114 fn bitor(self, rhs: mask64x2<S>) -> Self::Output {
2115 rhs.simd.or_mask64x2(self.simd_into(rhs.simd), rhs)
2116 }
2117}
2118impl<S: Simd> core::ops::BitXor for mask64x2<S> {
2119 type Output = Self;
2120 #[inline(always)]
2121 fn bitxor(self, rhs: Self) -> Self::Output {
2122 self.simd.xor_mask64x2(self, rhs)
2123 }
2124}
2125impl<S: Simd> core::ops::BitXorAssign for mask64x2<S> {
2126 #[inline(always)]
2127 fn bitxor_assign(&mut self, rhs: Self) {
2128 *self = self.simd.xor_mask64x2(*self, rhs);
2129 }
2130}
2131impl<S: Simd> core::ops::BitXor<i64> for mask64x2<S> {
2132 type Output = Self;
2133 #[inline(always)]
2134 fn bitxor(self, rhs: i64) -> Self::Output {
2135 self.simd.xor_mask64x2(self, rhs.simd_into(self.simd))
2136 }
2137}
2138impl<S: Simd> core::ops::BitXorAssign<i64> for mask64x2<S> {
2139 #[inline(always)]
2140 fn bitxor_assign(&mut self, rhs: i64) {
2141 *self = self.simd.xor_mask64x2(*self, rhs.simd_into(self.simd));
2142 }
2143}
2144impl<S: Simd> core::ops::BitXor<mask64x2<S>> for i64 {
2145 type Output = mask64x2<S>;
2146 #[inline(always)]
2147 fn bitxor(self, rhs: mask64x2<S>) -> Self::Output {
2148 rhs.simd.xor_mask64x2(self.simd_into(rhs.simd), rhs)
2149 }
2150}
2151impl<S: Simd> core::ops::Not for mask64x2<S> {
2152 type Output = Self;
2153 #[inline(always)]
2154 fn not(self) -> Self::Output {
2155 self.simd.not_mask64x2(self)
2156 }
2157}
2158impl<S: Simd> core::ops::Neg for f32x8<S> {
2159 type Output = Self;
2160 #[inline(always)]
2161 fn neg(self) -> Self::Output {
2162 self.simd.neg_f32x8(self)
2163 }
2164}
2165impl<S: Simd> core::ops::Add for f32x8<S> {
2166 type Output = Self;
2167 #[inline(always)]
2168 fn add(self, rhs: Self) -> Self::Output {
2169 self.simd.add_f32x8(self, rhs)
2170 }
2171}
2172impl<S: Simd> core::ops::AddAssign for f32x8<S> {
2173 #[inline(always)]
2174 fn add_assign(&mut self, rhs: Self) {
2175 *self = self.simd.add_f32x8(*self, rhs);
2176 }
2177}
2178impl<S: Simd> core::ops::Add<f32> for f32x8<S> {
2179 type Output = Self;
2180 #[inline(always)]
2181 fn add(self, rhs: f32) -> Self::Output {
2182 self.simd.add_f32x8(self, rhs.simd_into(self.simd))
2183 }
2184}
2185impl<S: Simd> core::ops::AddAssign<f32> for f32x8<S> {
2186 #[inline(always)]
2187 fn add_assign(&mut self, rhs: f32) {
2188 *self = self.simd.add_f32x8(*self, rhs.simd_into(self.simd));
2189 }
2190}
2191impl<S: Simd> core::ops::Add<f32x8<S>> for f32 {
2192 type Output = f32x8<S>;
2193 #[inline(always)]
2194 fn add(self, rhs: f32x8<S>) -> Self::Output {
2195 rhs.simd.add_f32x8(self.simd_into(rhs.simd), rhs)
2196 }
2197}
2198impl<S: Simd> core::ops::Sub for f32x8<S> {
2199 type Output = Self;
2200 #[inline(always)]
2201 fn sub(self, rhs: Self) -> Self::Output {
2202 self.simd.sub_f32x8(self, rhs)
2203 }
2204}
2205impl<S: Simd> core::ops::SubAssign for f32x8<S> {
2206 #[inline(always)]
2207 fn sub_assign(&mut self, rhs: Self) {
2208 *self = self.simd.sub_f32x8(*self, rhs);
2209 }
2210}
2211impl<S: Simd> core::ops::Sub<f32> for f32x8<S> {
2212 type Output = Self;
2213 #[inline(always)]
2214 fn sub(self, rhs: f32) -> Self::Output {
2215 self.simd.sub_f32x8(self, rhs.simd_into(self.simd))
2216 }
2217}
2218impl<S: Simd> core::ops::SubAssign<f32> for f32x8<S> {
2219 #[inline(always)]
2220 fn sub_assign(&mut self, rhs: f32) {
2221 *self = self.simd.sub_f32x8(*self, rhs.simd_into(self.simd));
2222 }
2223}
2224impl<S: Simd> core::ops::Sub<f32x8<S>> for f32 {
2225 type Output = f32x8<S>;
2226 #[inline(always)]
2227 fn sub(self, rhs: f32x8<S>) -> Self::Output {
2228 rhs.simd.sub_f32x8(self.simd_into(rhs.simd), rhs)
2229 }
2230}
2231impl<S: Simd> core::ops::Mul for f32x8<S> {
2232 type Output = Self;
2233 #[inline(always)]
2234 fn mul(self, rhs: Self) -> Self::Output {
2235 self.simd.mul_f32x8(self, rhs)
2236 }
2237}
2238impl<S: Simd> core::ops::MulAssign for f32x8<S> {
2239 #[inline(always)]
2240 fn mul_assign(&mut self, rhs: Self) {
2241 *self = self.simd.mul_f32x8(*self, rhs);
2242 }
2243}
2244impl<S: Simd> core::ops::Mul<f32> for f32x8<S> {
2245 type Output = Self;
2246 #[inline(always)]
2247 fn mul(self, rhs: f32) -> Self::Output {
2248 self.simd.mul_f32x8(self, rhs.simd_into(self.simd))
2249 }
2250}
2251impl<S: Simd> core::ops::MulAssign<f32> for f32x8<S> {
2252 #[inline(always)]
2253 fn mul_assign(&mut self, rhs: f32) {
2254 *self = self.simd.mul_f32x8(*self, rhs.simd_into(self.simd));
2255 }
2256}
2257impl<S: Simd> core::ops::Mul<f32x8<S>> for f32 {
2258 type Output = f32x8<S>;
2259 #[inline(always)]
2260 fn mul(self, rhs: f32x8<S>) -> Self::Output {
2261 rhs.simd.mul_f32x8(self.simd_into(rhs.simd), rhs)
2262 }
2263}
2264impl<S: Simd> core::ops::Div for f32x8<S> {
2265 type Output = Self;
2266 #[inline(always)]
2267 fn div(self, rhs: Self) -> Self::Output {
2268 self.simd.div_f32x8(self, rhs)
2269 }
2270}
2271impl<S: Simd> core::ops::DivAssign for f32x8<S> {
2272 #[inline(always)]
2273 fn div_assign(&mut self, rhs: Self) {
2274 *self = self.simd.div_f32x8(*self, rhs);
2275 }
2276}
2277impl<S: Simd> core::ops::Div<f32> for f32x8<S> {
2278 type Output = Self;
2279 #[inline(always)]
2280 fn div(self, rhs: f32) -> Self::Output {
2281 self.simd.div_f32x8(self, rhs.simd_into(self.simd))
2282 }
2283}
2284impl<S: Simd> core::ops::DivAssign<f32> for f32x8<S> {
2285 #[inline(always)]
2286 fn div_assign(&mut self, rhs: f32) {
2287 *self = self.simd.div_f32x8(*self, rhs.simd_into(self.simd));
2288 }
2289}
2290impl<S: Simd> core::ops::Div<f32x8<S>> for f32 {
2291 type Output = f32x8<S>;
2292 #[inline(always)]
2293 fn div(self, rhs: f32x8<S>) -> Self::Output {
2294 rhs.simd.div_f32x8(self.simd_into(rhs.simd), rhs)
2295 }
2296}
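// Illustrative sketch only: linear interpolation across eight lanes, written
// entirely with the operator impls above. `lerp_f32x8` is a hypothetical
// helper name used only for this example.
#[allow(dead_code)]
#[inline(always)]
fn lerp_f32x8<S: Simd>(a: f32x8<S>, b: f32x8<S>, t: f32) -> f32x8<S> {
    // (1 - t) * a + t * b, using `Mul<f32>` (scalar splat) and vector `Add`.
    a * (1.0 - t) + b * t
}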
2297impl<S: Simd> core::ops::Neg for i8x32<S> {
2298 type Output = Self;
2299 #[inline(always)]
2300 fn neg(self) -> Self::Output {
2301 self.simd.neg_i8x32(self)
2302 }
2303}
2304impl<S: Simd> core::ops::Add for i8x32<S> {
2305 type Output = Self;
2306 #[inline(always)]
2307 fn add(self, rhs: Self) -> Self::Output {
2308 self.simd.add_i8x32(self, rhs)
2309 }
2310}
2311impl<S: Simd> core::ops::AddAssign for i8x32<S> {
2312 #[inline(always)]
2313 fn add_assign(&mut self, rhs: Self) {
2314 *self = self.simd.add_i8x32(*self, rhs);
2315 }
2316}
2317impl<S: Simd> core::ops::Add<i8> for i8x32<S> {
2318 type Output = Self;
2319 #[inline(always)]
2320 fn add(self, rhs: i8) -> Self::Output {
2321 self.simd.add_i8x32(self, rhs.simd_into(self.simd))
2322 }
2323}
2324impl<S: Simd> core::ops::AddAssign<i8> for i8x32<S> {
2325 #[inline(always)]
2326 fn add_assign(&mut self, rhs: i8) {
2327 *self = self.simd.add_i8x32(*self, rhs.simd_into(self.simd));
2328 }
2329}
2330impl<S: Simd> core::ops::Add<i8x32<S>> for i8 {
2331 type Output = i8x32<S>;
2332 #[inline(always)]
2333 fn add(self, rhs: i8x32<S>) -> Self::Output {
2334 rhs.simd.add_i8x32(self.simd_into(rhs.simd), rhs)
2335 }
2336}
2337impl<S: Simd> core::ops::Sub for i8x32<S> {
2338 type Output = Self;
2339 #[inline(always)]
2340 fn sub(self, rhs: Self) -> Self::Output {
2341 self.simd.sub_i8x32(self, rhs)
2342 }
2343}
2344impl<S: Simd> core::ops::SubAssign for i8x32<S> {
2345 #[inline(always)]
2346 fn sub_assign(&mut self, rhs: Self) {
2347 *self = self.simd.sub_i8x32(*self, rhs);
2348 }
2349}
2350impl<S: Simd> core::ops::Sub<i8> for i8x32<S> {
2351 type Output = Self;
2352 #[inline(always)]
2353 fn sub(self, rhs: i8) -> Self::Output {
2354 self.simd.sub_i8x32(self, rhs.simd_into(self.simd))
2355 }
2356}
2357impl<S: Simd> core::ops::SubAssign<i8> for i8x32<S> {
2358 #[inline(always)]
2359 fn sub_assign(&mut self, rhs: i8) {
2360 *self = self.simd.sub_i8x32(*self, rhs.simd_into(self.simd));
2361 }
2362}
2363impl<S: Simd> core::ops::Sub<i8x32<S>> for i8 {
2364 type Output = i8x32<S>;
2365 #[inline(always)]
2366 fn sub(self, rhs: i8x32<S>) -> Self::Output {
2367 rhs.simd.sub_i8x32(self.simd_into(rhs.simd), rhs)
2368 }
2369}
2370impl<S: Simd> core::ops::Mul for i8x32<S> {
2371 type Output = Self;
2372 #[inline(always)]
2373 fn mul(self, rhs: Self) -> Self::Output {
2374 self.simd.mul_i8x32(self, rhs)
2375 }
2376}
2377impl<S: Simd> core::ops::MulAssign for i8x32<S> {
2378 #[inline(always)]
2379 fn mul_assign(&mut self, rhs: Self) {
2380 *self = self.simd.mul_i8x32(*self, rhs);
2381 }
2382}
2383impl<S: Simd> core::ops::Mul<i8> for i8x32<S> {
2384 type Output = Self;
2385 #[inline(always)]
2386 fn mul(self, rhs: i8) -> Self::Output {
2387 self.simd.mul_i8x32(self, rhs.simd_into(self.simd))
2388 }
2389}
2390impl<S: Simd> core::ops::MulAssign<i8> for i8x32<S> {
2391 #[inline(always)]
2392 fn mul_assign(&mut self, rhs: i8) {
2393 *self = self.simd.mul_i8x32(*self, rhs.simd_into(self.simd));
2394 }
2395}
2396impl<S: Simd> core::ops::Mul<i8x32<S>> for i8 {
2397 type Output = i8x32<S>;
2398 #[inline(always)]
2399 fn mul(self, rhs: i8x32<S>) -> Self::Output {
2400 rhs.simd.mul_i8x32(self.simd_into(rhs.simd), rhs)
2401 }
2402}
2403impl<S: Simd> core::ops::BitAnd for i8x32<S> {
2404 type Output = Self;
2405 #[inline(always)]
2406 fn bitand(self, rhs: Self) -> Self::Output {
2407 self.simd.and_i8x32(self, rhs)
2408 }
2409}
2410impl<S: Simd> core::ops::BitAndAssign for i8x32<S> {
2411 #[inline(always)]
2412 fn bitand_assign(&mut self, rhs: Self) {
2413 *self = self.simd.and_i8x32(*self, rhs);
2414 }
2415}
2416impl<S: Simd> core::ops::BitAnd<i8> for i8x32<S> {
2417 type Output = Self;
2418 #[inline(always)]
2419 fn bitand(self, rhs: i8) -> Self::Output {
2420 self.simd.and_i8x32(self, rhs.simd_into(self.simd))
2421 }
2422}
2423impl<S: Simd> core::ops::BitAndAssign<i8> for i8x32<S> {
2424 #[inline(always)]
2425 fn bitand_assign(&mut self, rhs: i8) {
2426 *self = self.simd.and_i8x32(*self, rhs.simd_into(self.simd));
2427 }
2428}
2429impl<S: Simd> core::ops::BitAnd<i8x32<S>> for i8 {
2430 type Output = i8x32<S>;
2431 #[inline(always)]
2432 fn bitand(self, rhs: i8x32<S>) -> Self::Output {
2433 rhs.simd.and_i8x32(self.simd_into(rhs.simd), rhs)
2434 }
2435}
2436impl<S: Simd> core::ops::BitOr for i8x32<S> {
2437 type Output = Self;
2438 #[inline(always)]
2439 fn bitor(self, rhs: Self) -> Self::Output {
2440 self.simd.or_i8x32(self, rhs)
2441 }
2442}
2443impl<S: Simd> core::ops::BitOrAssign for i8x32<S> {
2444 #[inline(always)]
2445 fn bitor_assign(&mut self, rhs: Self) {
2446 *self = self.simd.or_i8x32(*self, rhs);
2447 }
2448}
2449impl<S: Simd> core::ops::BitOr<i8> for i8x32<S> {
2450 type Output = Self;
2451 #[inline(always)]
2452 fn bitor(self, rhs: i8) -> Self::Output {
2453 self.simd.or_i8x32(self, rhs.simd_into(self.simd))
2454 }
2455}
2456impl<S: Simd> core::ops::BitOrAssign<i8> for i8x32<S> {
2457 #[inline(always)]
2458 fn bitor_assign(&mut self, rhs: i8) {
2459 *self = self.simd.or_i8x32(*self, rhs.simd_into(self.simd));
2460 }
2461}
2462impl<S: Simd> core::ops::BitOr<i8x32<S>> for i8 {
2463 type Output = i8x32<S>;
2464 #[inline(always)]
2465 fn bitor(self, rhs: i8x32<S>) -> Self::Output {
2466 rhs.simd.or_i8x32(self.simd_into(rhs.simd), rhs)
2467 }
2468}
2469impl<S: Simd> core::ops::BitXor for i8x32<S> {
2470 type Output = Self;
2471 #[inline(always)]
2472 fn bitxor(self, rhs: Self) -> Self::Output {
2473 self.simd.xor_i8x32(self, rhs)
2474 }
2475}
2476impl<S: Simd> core::ops::BitXorAssign for i8x32<S> {
2477 #[inline(always)]
2478 fn bitxor_assign(&mut self, rhs: Self) {
2479 *self = self.simd.xor_i8x32(*self, rhs);
2480 }
2481}
2482impl<S: Simd> core::ops::BitXor<i8> for i8x32<S> {
2483 type Output = Self;
2484 #[inline(always)]
2485 fn bitxor(self, rhs: i8) -> Self::Output {
2486 self.simd.xor_i8x32(self, rhs.simd_into(self.simd))
2487 }
2488}
2489impl<S: Simd> core::ops::BitXorAssign<i8> for i8x32<S> {
2490 #[inline(always)]
2491 fn bitxor_assign(&mut self, rhs: i8) {
2492 *self = self.simd.xor_i8x32(*self, rhs.simd_into(self.simd));
2493 }
2494}
2495impl<S: Simd> core::ops::BitXor<i8x32<S>> for i8 {
2496 type Output = i8x32<S>;
2497 #[inline(always)]
2498 fn bitxor(self, rhs: i8x32<S>) -> Self::Output {
2499 rhs.simd.xor_i8x32(self.simd_into(rhs.simd), rhs)
2500 }
2501}
2502impl<S: Simd> core::ops::Shl<u32> for i8x32<S> {
2503 type Output = Self;
2504 #[inline(always)]
2505 fn shl(self, rhs: u32) -> Self::Output {
2506 self.simd.shl_i8x32(self, rhs)
2507 }
2508}
2509impl<S: Simd> core::ops::Shr<u32> for i8x32<S> {
2510 type Output = Self;
2511 #[inline(always)]
2512 fn shr(self, rhs: u32) -> Self::Output {
2513 self.simd.shr_i8x32(self, rhs)
2514 }
2515}
2516impl<S: Simd> core::ops::ShlAssign<u32> for i8x32<S> {
2517 #[inline(always)]
2518 fn shl_assign(&mut self, rhs: u32) {
2519 *self = self.simd.shl_i8x32(*self, rhs);
2520 }
2521}
2522impl<S: Simd> core::ops::ShrAssign<u32> for i8x32<S> {
2523 #[inline(always)]
2524 fn shr_assign(&mut self, rhs: u32) {
2525 *self = self.simd.shr_i8x32(*self, rhs);
2526 }
2527}
2528impl<S: Simd> core::ops::Shr for i8x32<S> {
2529 type Output = Self;
2530 #[inline(always)]
2531 fn shr(self, rhs: Self) -> Self::Output {
2532 self.simd.shrv_i8x32(self, rhs)
2533 }
2534}
2535impl<S: Simd> core::ops::ShrAssign for i8x32<S> {
2536 #[inline(always)]
2537 fn shr_assign(&mut self, rhs: Self) {
2538 *self = self.simd.shrv_i8x32(*self, rhs);
2539 }
2540}
2541impl<S: Simd> core::ops::Add for u8x32<S> {
2542 type Output = Self;
2543 #[inline(always)]
2544 fn add(self, rhs: Self) -> Self::Output {
2545 self.simd.add_u8x32(self, rhs)
2546 }
2547}
2548impl<S: Simd> core::ops::AddAssign for u8x32<S> {
2549 #[inline(always)]
2550 fn add_assign(&mut self, rhs: Self) {
2551 *self = self.simd.add_u8x32(*self, rhs);
2552 }
2553}
2554impl<S: Simd> core::ops::Add<u8> for u8x32<S> {
2555 type Output = Self;
2556 #[inline(always)]
2557 fn add(self, rhs: u8) -> Self::Output {
2558 self.simd.add_u8x32(self, rhs.simd_into(self.simd))
2559 }
2560}
2561impl<S: Simd> core::ops::AddAssign<u8> for u8x32<S> {
2562 #[inline(always)]
2563 fn add_assign(&mut self, rhs: u8) {
2564 *self = self.simd.add_u8x32(*self, rhs.simd_into(self.simd));
2565 }
2566}
2567impl<S: Simd> core::ops::Add<u8x32<S>> for u8 {
2568 type Output = u8x32<S>;
2569 #[inline(always)]
2570 fn add(self, rhs: u8x32<S>) -> Self::Output {
2571 rhs.simd.add_u8x32(self.simd_into(rhs.simd), rhs)
2572 }
2573}
2574impl<S: Simd> core::ops::Sub for u8x32<S> {
2575 type Output = Self;
2576 #[inline(always)]
2577 fn sub(self, rhs: Self) -> Self::Output {
2578 self.simd.sub_u8x32(self, rhs)
2579 }
2580}
2581impl<S: Simd> core::ops::SubAssign for u8x32<S> {
2582 #[inline(always)]
2583 fn sub_assign(&mut self, rhs: Self) {
2584 *self = self.simd.sub_u8x32(*self, rhs);
2585 }
2586}
2587impl<S: Simd> core::ops::Sub<u8> for u8x32<S> {
2588 type Output = Self;
2589 #[inline(always)]
2590 fn sub(self, rhs: u8) -> Self::Output {
2591 self.simd.sub_u8x32(self, rhs.simd_into(self.simd))
2592 }
2593}
2594impl<S: Simd> core::ops::SubAssign<u8> for u8x32<S> {
2595 #[inline(always)]
2596 fn sub_assign(&mut self, rhs: u8) {
2597 *self = self.simd.sub_u8x32(*self, rhs.simd_into(self.simd));
2598 }
2599}
2600impl<S: Simd> core::ops::Sub<u8x32<S>> for u8 {
2601 type Output = u8x32<S>;
2602 #[inline(always)]
2603 fn sub(self, rhs: u8x32<S>) -> Self::Output {
2604 rhs.simd.sub_u8x32(self.simd_into(rhs.simd), rhs)
2605 }
2606}
2607impl<S: Simd> core::ops::Mul for u8x32<S> {
2608 type Output = Self;
2609 #[inline(always)]
2610 fn mul(self, rhs: Self) -> Self::Output {
2611 self.simd.mul_u8x32(self, rhs)
2612 }
2613}
2614impl<S: Simd> core::ops::MulAssign for u8x32<S> {
2615 #[inline(always)]
2616 fn mul_assign(&mut self, rhs: Self) {
2617 *self = self.simd.mul_u8x32(*self, rhs);
2618 }
2619}
2620impl<S: Simd> core::ops::Mul<u8> for u8x32<S> {
2621 type Output = Self;
2622 #[inline(always)]
2623 fn mul(self, rhs: u8) -> Self::Output {
2624 self.simd.mul_u8x32(self, rhs.simd_into(self.simd))
2625 }
2626}
2627impl<S: Simd> core::ops::MulAssign<u8> for u8x32<S> {
2628 #[inline(always)]
2629 fn mul_assign(&mut self, rhs: u8) {
2630 *self = self.simd.mul_u8x32(*self, rhs.simd_into(self.simd));
2631 }
2632}
2633impl<S: Simd> core::ops::Mul<u8x32<S>> for u8 {
2634 type Output = u8x32<S>;
2635 #[inline(always)]
2636 fn mul(self, rhs: u8x32<S>) -> Self::Output {
2637 rhs.simd.mul_u8x32(self.simd_into(rhs.simd), rhs)
2638 }
2639}
2640impl<S: Simd> core::ops::BitAnd for u8x32<S> {
2641 type Output = Self;
2642 #[inline(always)]
2643 fn bitand(self, rhs: Self) -> Self::Output {
2644 self.simd.and_u8x32(self, rhs)
2645 }
2646}
2647impl<S: Simd> core::ops::BitAndAssign for u8x32<S> {
2648 #[inline(always)]
2649 fn bitand_assign(&mut self, rhs: Self) {
2650 *self = self.simd.and_u8x32(*self, rhs);
2651 }
2652}
2653impl<S: Simd> core::ops::BitAnd<u8> for u8x32<S> {
2654 type Output = Self;
2655 #[inline(always)]
2656 fn bitand(self, rhs: u8) -> Self::Output {
2657 self.simd.and_u8x32(self, rhs.simd_into(self.simd))
2658 }
2659}
2660impl<S: Simd> core::ops::BitAndAssign<u8> for u8x32<S> {
2661 #[inline(always)]
2662 fn bitand_assign(&mut self, rhs: u8) {
2663 *self = self.simd.and_u8x32(*self, rhs.simd_into(self.simd));
2664 }
2665}
2666impl<S: Simd> core::ops::BitAnd<u8x32<S>> for u8 {
2667 type Output = u8x32<S>;
2668 #[inline(always)]
2669 fn bitand(self, rhs: u8x32<S>) -> Self::Output {
2670 rhs.simd.and_u8x32(self.simd_into(rhs.simd), rhs)
2671 }
2672}
2673impl<S: Simd> core::ops::BitOr for u8x32<S> {
2674 type Output = Self;
2675 #[inline(always)]
2676 fn bitor(self, rhs: Self) -> Self::Output {
2677 self.simd.or_u8x32(self, rhs)
2678 }
2679}
2680impl<S: Simd> core::ops::BitOrAssign for u8x32<S> {
2681 #[inline(always)]
2682 fn bitor_assign(&mut self, rhs: Self) {
2683 *self = self.simd.or_u8x32(*self, rhs);
2684 }
2685}
2686impl<S: Simd> core::ops::BitOr<u8> for u8x32<S> {
2687 type Output = Self;
2688 #[inline(always)]
2689 fn bitor(self, rhs: u8) -> Self::Output {
2690 self.simd.or_u8x32(self, rhs.simd_into(self.simd))
2691 }
2692}
2693impl<S: Simd> core::ops::BitOrAssign<u8> for u8x32<S> {
2694 #[inline(always)]
2695 fn bitor_assign(&mut self, rhs: u8) {
2696 *self = self.simd.or_u8x32(*self, rhs.simd_into(self.simd));
2697 }
2698}
2699impl<S: Simd> core::ops::BitOr<u8x32<S>> for u8 {
2700 type Output = u8x32<S>;
2701 #[inline(always)]
2702 fn bitor(self, rhs: u8x32<S>) -> Self::Output {
2703 rhs.simd.or_u8x32(self.simd_into(rhs.simd), rhs)
2704 }
2705}
2706impl<S: Simd> core::ops::BitXor for u8x32<S> {
2707 type Output = Self;
2708 #[inline(always)]
2709 fn bitxor(self, rhs: Self) -> Self::Output {
2710 self.simd.xor_u8x32(self, rhs)
2711 }
2712}
2713impl<S: Simd> core::ops::BitXorAssign for u8x32<S> {
2714 #[inline(always)]
2715 fn bitxor_assign(&mut self, rhs: Self) {
2716 *self = self.simd.xor_u8x32(*self, rhs);
2717 }
2718}
2719impl<S: Simd> core::ops::BitXor<u8> for u8x32<S> {
2720 type Output = Self;
2721 #[inline(always)]
2722 fn bitxor(self, rhs: u8) -> Self::Output {
2723 self.simd.xor_u8x32(self, rhs.simd_into(self.simd))
2724 }
2725}
2726impl<S: Simd> core::ops::BitXorAssign<u8> for u8x32<S> {
2727 #[inline(always)]
2728 fn bitxor_assign(&mut self, rhs: u8) {
2729 *self = self.simd.xor_u8x32(*self, rhs.simd_into(self.simd));
2730 }
2731}
2732impl<S: Simd> core::ops::BitXor<u8x32<S>> for u8 {
2733 type Output = u8x32<S>;
2734 #[inline(always)]
2735 fn bitxor(self, rhs: u8x32<S>) -> Self::Output {
2736 rhs.simd.xor_u8x32(self.simd_into(rhs.simd), rhs)
2737 }
2738}
2739impl<S: Simd> core::ops::Shl<u32> for u8x32<S> {
2740 type Output = Self;
2741 #[inline(always)]
2742 fn shl(self, rhs: u32) -> Self::Output {
2743 self.simd.shl_u8x32(self, rhs)
2744 }
2745}
2746impl<S: Simd> core::ops::Shr<u32> for u8x32<S> {
2747 type Output = Self;
2748 #[inline(always)]
2749 fn shr(self, rhs: u32) -> Self::Output {
2750 self.simd.shr_u8x32(self, rhs)
2751 }
2752}
2753impl<S: Simd> core::ops::ShlAssign<u32> for u8x32<S> {
2754 #[inline(always)]
2755 fn shl_assign(&mut self, rhs: u32) {
2756 *self = self.simd.shl_u8x32(*self, rhs);
2757 }
2758}
2759impl<S: Simd> core::ops::ShrAssign<u32> for u8x32<S> {
2760 #[inline(always)]
2761 fn shr_assign(&mut self, rhs: u32) {
2762 *self = self.simd.shr_u8x32(*self, rhs);
2763 }
2764}
2765impl<S: Simd> core::ops::Shr for u8x32<S> {
2766 type Output = Self;
2767 #[inline(always)]
2768 fn shr(self, rhs: Self) -> Self::Output {
2769 self.simd.shrv_u8x32(self, rhs)
2770 }
2771}
2772impl<S: Simd> core::ops::ShrAssign for u8x32<S> {
2773 #[inline(always)]
2774 fn shr_assign(&mut self, rhs: Self) {
2775 *self = self.simd.shrv_u8x32(*self, rhs);
2776 }
2777}
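// Illustrative sketch only: a uniform shift plus a splatted scalar mask keeps
// per-byte bit manipulation in operator syntax. `high_nibble_u8x32` is a
// hypothetical helper used only as an example.
#[allow(dead_code)]
#[inline(always)]
fn high_nibble_u8x32<S: Simd>(x: u8x32<S>) -> u8x32<S> {
    // `Shr<u32>` shifts every byte by the same count; `BitAnd<u8>` splats 0x0f.
    (x >> 4u32) & 0x0f_u8
}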
2778impl<S: Simd> core::ops::BitAnd for mask8x32<S> {
2779 type Output = Self;
2780 #[inline(always)]
2781 fn bitand(self, rhs: Self) -> Self::Output {
2782 self.simd.and_mask8x32(self, rhs)
2783 }
2784}
2785impl<S: Simd> core::ops::BitAndAssign for mask8x32<S> {
2786 #[inline(always)]
2787 fn bitand_assign(&mut self, rhs: Self) {
2788 *self = self.simd.and_mask8x32(*self, rhs);
2789 }
2790}
2791impl<S: Simd> core::ops::BitAnd<i8> for mask8x32<S> {
2792 type Output = Self;
2793 #[inline(always)]
2794 fn bitand(self, rhs: i8) -> Self::Output {
2795 self.simd.and_mask8x32(self, rhs.simd_into(self.simd))
2796 }
2797}
2798impl<S: Simd> core::ops::BitAndAssign<i8> for mask8x32<S> {
2799 #[inline(always)]
2800 fn bitand_assign(&mut self, rhs: i8) {
2801 *self = self.simd.and_mask8x32(*self, rhs.simd_into(self.simd));
2802 }
2803}
2804impl<S: Simd> core::ops::BitAnd<mask8x32<S>> for i8 {
2805 type Output = mask8x32<S>;
2806 #[inline(always)]
2807 fn bitand(self, rhs: mask8x32<S>) -> Self::Output {
2808 rhs.simd.and_mask8x32(self.simd_into(rhs.simd), rhs)
2809 }
2810}
2811impl<S: Simd> core::ops::BitOr for mask8x32<S> {
2812 type Output = Self;
2813 #[inline(always)]
2814 fn bitor(self, rhs: Self) -> Self::Output {
2815 self.simd.or_mask8x32(self, rhs)
2816 }
2817}
2818impl<S: Simd> core::ops::BitOrAssign for mask8x32<S> {
2819 #[inline(always)]
2820 fn bitor_assign(&mut self, rhs: Self) {
2821 *self = self.simd.or_mask8x32(*self, rhs);
2822 }
2823}
2824impl<S: Simd> core::ops::BitOr<i8> for mask8x32<S> {
2825 type Output = Self;
2826 #[inline(always)]
2827 fn bitor(self, rhs: i8) -> Self::Output {
2828 self.simd.or_mask8x32(self, rhs.simd_into(self.simd))
2829 }
2830}
2831impl<S: Simd> core::ops::BitOrAssign<i8> for mask8x32<S> {
2832 #[inline(always)]
2833 fn bitor_assign(&mut self, rhs: i8) {
2834 *self = self.simd.or_mask8x32(*self, rhs.simd_into(self.simd));
2835 }
2836}
2837impl<S: Simd> core::ops::BitOr<mask8x32<S>> for i8 {
2838 type Output = mask8x32<S>;
2839 #[inline(always)]
2840 fn bitor(self, rhs: mask8x32<S>) -> Self::Output {
2841 rhs.simd.or_mask8x32(self.simd_into(rhs.simd), rhs)
2842 }
2843}
2844impl<S: Simd> core::ops::BitXor for mask8x32<S> {
2845 type Output = Self;
2846 #[inline(always)]
2847 fn bitxor(self, rhs: Self) -> Self::Output {
2848 self.simd.xor_mask8x32(self, rhs)
2849 }
2850}
2851impl<S: Simd> core::ops::BitXorAssign for mask8x32<S> {
2852 #[inline(always)]
2853 fn bitxor_assign(&mut self, rhs: Self) {
2854 *self = self.simd.xor_mask8x32(*self, rhs);
2855 }
2856}
2857impl<S: Simd> core::ops::BitXor<i8> for mask8x32<S> {
2858 type Output = Self;
2859 #[inline(always)]
2860 fn bitxor(self, rhs: i8) -> Self::Output {
2861 self.simd.xor_mask8x32(self, rhs.simd_into(self.simd))
2862 }
2863}
2864impl<S: Simd> core::ops::BitXorAssign<i8> for mask8x32<S> {
2865 #[inline(always)]
2866 fn bitxor_assign(&mut self, rhs: i8) {
2867 *self = self.simd.xor_mask8x32(*self, rhs.simd_into(self.simd));
2868 }
2869}
2870impl<S: Simd> core::ops::BitXor<mask8x32<S>> for i8 {
2871 type Output = mask8x32<S>;
2872 #[inline(always)]
2873 fn bitxor(self, rhs: mask8x32<S>) -> Self::Output {
2874 rhs.simd.xor_mask8x32(self.simd_into(rhs.simd), rhs)
2875 }
2876}
2877impl<S: Simd> core::ops::Not for mask8x32<S> {
2878 type Output = Self;
2879 #[inline(always)]
2880 fn not(self) -> Self::Output {
2881 self.simd.not_mask8x32(self)
2882 }
2883}
2884impl<S: Simd> core::ops::Neg for i16x16<S> {
2885 type Output = Self;
2886 #[inline(always)]
2887 fn neg(self) -> Self::Output {
2888 self.simd.neg_i16x16(self)
2889 }
2890}
2891impl<S: Simd> core::ops::Add for i16x16<S> {
2892 type Output = Self;
2893 #[inline(always)]
2894 fn add(self, rhs: Self) -> Self::Output {
2895 self.simd.add_i16x16(self, rhs)
2896 }
2897}
2898impl<S: Simd> core::ops::AddAssign for i16x16<S> {
2899 #[inline(always)]
2900 fn add_assign(&mut self, rhs: Self) {
2901 *self = self.simd.add_i16x16(*self, rhs);
2902 }
2903}
2904impl<S: Simd> core::ops::Add<i16> for i16x16<S> {
2905 type Output = Self;
2906 #[inline(always)]
2907 fn add(self, rhs: i16) -> Self::Output {
2908 self.simd.add_i16x16(self, rhs.simd_into(self.simd))
2909 }
2910}
2911impl<S: Simd> core::ops::AddAssign<i16> for i16x16<S> {
2912 #[inline(always)]
2913 fn add_assign(&mut self, rhs: i16) {
2914 *self = self.simd.add_i16x16(*self, rhs.simd_into(self.simd));
2915 }
2916}
2917impl<S: Simd> core::ops::Add<i16x16<S>> for i16 {
2918 type Output = i16x16<S>;
2919 #[inline(always)]
2920 fn add(self, rhs: i16x16<S>) -> Self::Output {
2921 rhs.simd.add_i16x16(self.simd_into(rhs.simd), rhs)
2922 }
2923}
2924impl<S: Simd> core::ops::Sub for i16x16<S> {
2925 type Output = Self;
2926 #[inline(always)]
2927 fn sub(self, rhs: Self) -> Self::Output {
2928 self.simd.sub_i16x16(self, rhs)
2929 }
2930}
2931impl<S: Simd> core::ops::SubAssign for i16x16<S> {
2932 #[inline(always)]
2933 fn sub_assign(&mut self, rhs: Self) {
2934 *self = self.simd.sub_i16x16(*self, rhs);
2935 }
2936}
2937impl<S: Simd> core::ops::Sub<i16> for i16x16<S> {
2938 type Output = Self;
2939 #[inline(always)]
2940 fn sub(self, rhs: i16) -> Self::Output {
2941 self.simd.sub_i16x16(self, rhs.simd_into(self.simd))
2942 }
2943}
2944impl<S: Simd> core::ops::SubAssign<i16> for i16x16<S> {
2945 #[inline(always)]
2946 fn sub_assign(&mut self, rhs: i16) {
2947 *self = self.simd.sub_i16x16(*self, rhs.simd_into(self.simd));
2948 }
2949}
2950impl<S: Simd> core::ops::Sub<i16x16<S>> for i16 {
2951 type Output = i16x16<S>;
2952 #[inline(always)]
2953 fn sub(self, rhs: i16x16<S>) -> Self::Output {
2954 rhs.simd.sub_i16x16(self.simd_into(rhs.simd), rhs)
2955 }
2956}
2957impl<S: Simd> core::ops::Mul for i16x16<S> {
2958 type Output = Self;
2959 #[inline(always)]
2960 fn mul(self, rhs: Self) -> Self::Output {
2961 self.simd.mul_i16x16(self, rhs)
2962 }
2963}
2964impl<S: Simd> core::ops::MulAssign for i16x16<S> {
2965 #[inline(always)]
2966 fn mul_assign(&mut self, rhs: Self) {
2967 *self = self.simd.mul_i16x16(*self, rhs);
2968 }
2969}
2970impl<S: Simd> core::ops::Mul<i16> for i16x16<S> {
2971 type Output = Self;
2972 #[inline(always)]
2973 fn mul(self, rhs: i16) -> Self::Output {
2974 self.simd.mul_i16x16(self, rhs.simd_into(self.simd))
2975 }
2976}
2977impl<S: Simd> core::ops::MulAssign<i16> for i16x16<S> {
2978 #[inline(always)]
2979 fn mul_assign(&mut self, rhs: i16) {
2980 *self = self.simd.mul_i16x16(*self, rhs.simd_into(self.simd));
2981 }
2982}
2983impl<S: Simd> core::ops::Mul<i16x16<S>> for i16 {
2984 type Output = i16x16<S>;
2985 #[inline(always)]
2986 fn mul(self, rhs: i16x16<S>) -> Self::Output {
2987 rhs.simd.mul_i16x16(self.simd_into(rhs.simd), rhs)
2988 }
2989}
2990impl<S: Simd> core::ops::BitAnd for i16x16<S> {
2991 type Output = Self;
2992 #[inline(always)]
2993 fn bitand(self, rhs: Self) -> Self::Output {
2994 self.simd.and_i16x16(self, rhs)
2995 }
2996}
2997impl<S: Simd> core::ops::BitAndAssign for i16x16<S> {
2998 #[inline(always)]
2999 fn bitand_assign(&mut self, rhs: Self) {
3000 *self = self.simd.and_i16x16(*self, rhs);
3001 }
3002}
3003impl<S: Simd> core::ops::BitAnd<i16> for i16x16<S> {
3004 type Output = Self;
3005 #[inline(always)]
3006 fn bitand(self, rhs: i16) -> Self::Output {
3007 self.simd.and_i16x16(self, rhs.simd_into(self.simd))
3008 }
3009}
3010impl<S: Simd> core::ops::BitAndAssign<i16> for i16x16<S> {
3011 #[inline(always)]
3012 fn bitand_assign(&mut self, rhs: i16) {
3013 *self = self.simd.and_i16x16(*self, rhs.simd_into(self.simd));
3014 }
3015}
3016impl<S: Simd> core::ops::BitAnd<i16x16<S>> for i16 {
3017 type Output = i16x16<S>;
3018 #[inline(always)]
3019 fn bitand(self, rhs: i16x16<S>) -> Self::Output {
3020 rhs.simd.and_i16x16(self.simd_into(rhs.simd), rhs)
3021 }
3022}
3023impl<S: Simd> core::ops::BitOr for i16x16<S> {
3024 type Output = Self;
3025 #[inline(always)]
3026 fn bitor(self, rhs: Self) -> Self::Output {
3027 self.simd.or_i16x16(self, rhs)
3028 }
3029}
3030impl<S: Simd> core::ops::BitOrAssign for i16x16<S> {
3031 #[inline(always)]
3032 fn bitor_assign(&mut self, rhs: Self) {
3033 *self = self.simd.or_i16x16(*self, rhs);
3034 }
3035}
3036impl<S: Simd> core::ops::BitOr<i16> for i16x16<S> {
3037 type Output = Self;
3038 #[inline(always)]
3039 fn bitor(self, rhs: i16) -> Self::Output {
3040 self.simd.or_i16x16(self, rhs.simd_into(self.simd))
3041 }
3042}
3043impl<S: Simd> core::ops::BitOrAssign<i16> for i16x16<S> {
3044 #[inline(always)]
3045 fn bitor_assign(&mut self, rhs: i16) {
3046 *self = self.simd.or_i16x16(*self, rhs.simd_into(self.simd));
3047 }
3048}
3049impl<S: Simd> core::ops::BitOr<i16x16<S>> for i16 {
3050 type Output = i16x16<S>;
3051 #[inline(always)]
3052 fn bitor(self, rhs: i16x16<S>) -> Self::Output {
3053 rhs.simd.or_i16x16(self.simd_into(rhs.simd), rhs)
3054 }
3055}
3056impl<S: Simd> core::ops::BitXor for i16x16<S> {
3057 type Output = Self;
3058 #[inline(always)]
3059 fn bitxor(self, rhs: Self) -> Self::Output {
3060 self.simd.xor_i16x16(self, rhs)
3061 }
3062}
3063impl<S: Simd> core::ops::BitXorAssign for i16x16<S> {
3064 #[inline(always)]
3065 fn bitxor_assign(&mut self, rhs: Self) {
3066 *self = self.simd.xor_i16x16(*self, rhs);
3067 }
3068}
3069impl<S: Simd> core::ops::BitXor<i16> for i16x16<S> {
3070 type Output = Self;
3071 #[inline(always)]
3072 fn bitxor(self, rhs: i16) -> Self::Output {
3073 self.simd.xor_i16x16(self, rhs.simd_into(self.simd))
3074 }
3075}
3076impl<S: Simd> core::ops::BitXorAssign<i16> for i16x16<S> {
3077 #[inline(always)]
3078 fn bitxor_assign(&mut self, rhs: i16) {
3079 *self = self.simd.xor_i16x16(*self, rhs.simd_into(self.simd));
3080 }
3081}
3082impl<S: Simd> core::ops::BitXor<i16x16<S>> for i16 {
3083 type Output = i16x16<S>;
3084 #[inline(always)]
3085 fn bitxor(self, rhs: i16x16<S>) -> Self::Output {
3086 rhs.simd.xor_i16x16(self.simd_into(rhs.simd), rhs)
3087 }
3088}
3089impl<S: Simd> core::ops::Shl<u32> for i16x16<S> {
3090 type Output = Self;
3091 #[inline(always)]
3092 fn shl(self, rhs: u32) -> Self::Output {
3093 self.simd.shl_i16x16(self, rhs)
3094 }
3095}
3096impl<S: Simd> core::ops::Shr<u32> for i16x16<S> {
3097 type Output = Self;
3098 #[inline(always)]
3099 fn shr(self, rhs: u32) -> Self::Output {
3100 self.simd.shr_i16x16(self, rhs)
3101 }
3102}
3103impl<S: Simd> core::ops::ShlAssign<u32> for i16x16<S> {
3104 #[inline(always)]
3105 fn shl_assign(&mut self, rhs: u32) {
3106 *self = self.simd.shl_i16x16(*self, rhs);
3107 }
3108}
3109impl<S: Simd> core::ops::ShrAssign<u32> for i16x16<S> {
3110 #[inline(always)]
3111 fn shr_assign(&mut self, rhs: u32) {
3112 *self = self.simd.shr_i16x16(*self, rhs);
3113 }
3114}
3115impl<S: Simd> core::ops::Shr for i16x16<S> {
3116 type Output = Self;
3117 #[inline(always)]
3118 fn shr(self, rhs: Self) -> Self::Output {
3119 self.simd.shrv_i16x16(self, rhs)
3120 }
3121}
3122impl<S: Simd> core::ops::ShrAssign for i16x16<S> {
3123 #[inline(always)]
3124 fn shr_assign(&mut self, rhs: Self) {
3125 *self = self.simd.shrv_i16x16(*self, rhs);
3126 }
3127}
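// Illustrative sketch only: halving both operands before the add keeps the
// intermediate sum in range of `i16`. Exact rounding depends on the backend's
// `shr_i16x16` semantics; `midpoint_i16x16` is a hypothetical helper name.
#[allow(dead_code)]
#[inline(always)]
fn midpoint_i16x16<S: Simd>(a: i16x16<S>, b: i16x16<S>) -> i16x16<S> {
    // Approximate (a + b) / 2 without widening, via `Shr<u32>` and `Add`.
    (a >> 1u32) + (b >> 1u32)
}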
3128impl<S: Simd> core::ops::Add for u16x16<S> {
3129 type Output = Self;
3130 #[inline(always)]
3131 fn add(self, rhs: Self) -> Self::Output {
3132 self.simd.add_u16x16(self, rhs)
3133 }
3134}
3135impl<S: Simd> core::ops::AddAssign for u16x16<S> {
3136 #[inline(always)]
3137 fn add_assign(&mut self, rhs: Self) {
3138 *self = self.simd.add_u16x16(*self, rhs);
3139 }
3140}
3141impl<S: Simd> core::ops::Add<u16> for u16x16<S> {
3142 type Output = Self;
3143 #[inline(always)]
3144 fn add(self, rhs: u16) -> Self::Output {
3145 self.simd.add_u16x16(self, rhs.simd_into(self.simd))
3146 }
3147}
3148impl<S: Simd> core::ops::AddAssign<u16> for u16x16<S> {
3149 #[inline(always)]
3150 fn add_assign(&mut self, rhs: u16) {
3151 *self = self.simd.add_u16x16(*self, rhs.simd_into(self.simd));
3152 }
3153}
3154impl<S: Simd> core::ops::Add<u16x16<S>> for u16 {
3155 type Output = u16x16<S>;
3156 #[inline(always)]
3157 fn add(self, rhs: u16x16<S>) -> Self::Output {
3158 rhs.simd.add_u16x16(self.simd_into(rhs.simd), rhs)
3159 }
3160}
3161impl<S: Simd> core::ops::Sub for u16x16<S> {
3162 type Output = Self;
3163 #[inline(always)]
3164 fn sub(self, rhs: Self) -> Self::Output {
3165 self.simd.sub_u16x16(self, rhs)
3166 }
3167}
3168impl<S: Simd> core::ops::SubAssign for u16x16<S> {
3169 #[inline(always)]
3170 fn sub_assign(&mut self, rhs: Self) {
3171 *self = self.simd.sub_u16x16(*self, rhs);
3172 }
3173}
3174impl<S: Simd> core::ops::Sub<u16> for u16x16<S> {
3175 type Output = Self;
3176 #[inline(always)]
3177 fn sub(self, rhs: u16) -> Self::Output {
3178 self.simd.sub_u16x16(self, rhs.simd_into(self.simd))
3179 }
3180}
3181impl<S: Simd> core::ops::SubAssign<u16> for u16x16<S> {
3182 #[inline(always)]
3183 fn sub_assign(&mut self, rhs: u16) {
3184 *self = self.simd.sub_u16x16(*self, rhs.simd_into(self.simd));
3185 }
3186}
3187impl<S: Simd> core::ops::Sub<u16x16<S>> for u16 {
3188 type Output = u16x16<S>;
3189 #[inline(always)]
3190 fn sub(self, rhs: u16x16<S>) -> Self::Output {
3191 rhs.simd.sub_u16x16(self.simd_into(rhs.simd), rhs)
3192 }
3193}
3194impl<S: Simd> core::ops::Mul for u16x16<S> {
3195 type Output = Self;
3196 #[inline(always)]
3197 fn mul(self, rhs: Self) -> Self::Output {
3198 self.simd.mul_u16x16(self, rhs)
3199 }
3200}
3201impl<S: Simd> core::ops::MulAssign for u16x16<S> {
3202 #[inline(always)]
3203 fn mul_assign(&mut self, rhs: Self) {
3204 *self = self.simd.mul_u16x16(*self, rhs);
3205 }
3206}
3207impl<S: Simd> core::ops::Mul<u16> for u16x16<S> {
3208 type Output = Self;
3209 #[inline(always)]
3210 fn mul(self, rhs: u16) -> Self::Output {
3211 self.simd.mul_u16x16(self, rhs.simd_into(self.simd))
3212 }
3213}
3214impl<S: Simd> core::ops::MulAssign<u16> for u16x16<S> {
3215 #[inline(always)]
3216 fn mul_assign(&mut self, rhs: u16) {
3217 *self = self.simd.mul_u16x16(*self, rhs.simd_into(self.simd));
3218 }
3219}
3220impl<S: Simd> core::ops::Mul<u16x16<S>> for u16 {
3221 type Output = u16x16<S>;
3222 #[inline(always)]
3223 fn mul(self, rhs: u16x16<S>) -> Self::Output {
3224 rhs.simd.mul_u16x16(self.simd_into(rhs.simd), rhs)
3225 }
3226}
3227impl<S: Simd> core::ops::BitAnd for u16x16<S> {
3228 type Output = Self;
3229 #[inline(always)]
3230 fn bitand(self, rhs: Self) -> Self::Output {
3231 self.simd.and_u16x16(self, rhs)
3232 }
3233}
3234impl<S: Simd> core::ops::BitAndAssign for u16x16<S> {
3235 #[inline(always)]
3236 fn bitand_assign(&mut self, rhs: Self) {
3237 *self = self.simd.and_u16x16(*self, rhs);
3238 }
3239}
3240impl<S: Simd> core::ops::BitAnd<u16> for u16x16<S> {
3241 type Output = Self;
3242 #[inline(always)]
3243 fn bitand(self, rhs: u16) -> Self::Output {
3244 self.simd.and_u16x16(self, rhs.simd_into(self.simd))
3245 }
3246}
3247impl<S: Simd> core::ops::BitAndAssign<u16> for u16x16<S> {
3248 #[inline(always)]
3249 fn bitand_assign(&mut self, rhs: u16) {
3250 *self = self.simd.and_u16x16(*self, rhs.simd_into(self.simd));
3251 }
3252}
3253impl<S: Simd> core::ops::BitAnd<u16x16<S>> for u16 {
3254 type Output = u16x16<S>;
3255 #[inline(always)]
3256 fn bitand(self, rhs: u16x16<S>) -> Self::Output {
3257 rhs.simd.and_u16x16(self.simd_into(rhs.simd), rhs)
3258 }
3259}
3260impl<S: Simd> core::ops::BitOr for u16x16<S> {
3261 type Output = Self;
3262 #[inline(always)]
3263 fn bitor(self, rhs: Self) -> Self::Output {
3264 self.simd.or_u16x16(self, rhs)
3265 }
3266}
3267impl<S: Simd> core::ops::BitOrAssign for u16x16<S> {
3268 #[inline(always)]
3269 fn bitor_assign(&mut self, rhs: Self) {
3270 *self = self.simd.or_u16x16(*self, rhs);
3271 }
3272}
3273impl<S: Simd> core::ops::BitOr<u16> for u16x16<S> {
3274 type Output = Self;
3275 #[inline(always)]
3276 fn bitor(self, rhs: u16) -> Self::Output {
3277 self.simd.or_u16x16(self, rhs.simd_into(self.simd))
3278 }
3279}
3280impl<S: Simd> core::ops::BitOrAssign<u16> for u16x16<S> {
3281 #[inline(always)]
3282 fn bitor_assign(&mut self, rhs: u16) {
3283 *self = self.simd.or_u16x16(*self, rhs.simd_into(self.simd));
3284 }
3285}
3286impl<S: Simd> core::ops::BitOr<u16x16<S>> for u16 {
3287 type Output = u16x16<S>;
3288 #[inline(always)]
3289 fn bitor(self, rhs: u16x16<S>) -> Self::Output {
3290 rhs.simd.or_u16x16(self.simd_into(rhs.simd), rhs)
3291 }
3292}
3293impl<S: Simd> core::ops::BitXor for u16x16<S> {
3294 type Output = Self;
3295 #[inline(always)]
3296 fn bitxor(self, rhs: Self) -> Self::Output {
3297 self.simd.xor_u16x16(self, rhs)
3298 }
3299}
3300impl<S: Simd> core::ops::BitXorAssign for u16x16<S> {
3301 #[inline(always)]
3302 fn bitxor_assign(&mut self, rhs: Self) {
3303 *self = self.simd.xor_u16x16(*self, rhs);
3304 }
3305}
3306impl<S: Simd> core::ops::BitXor<u16> for u16x16<S> {
3307 type Output = Self;
3308 #[inline(always)]
3309 fn bitxor(self, rhs: u16) -> Self::Output {
3310 self.simd.xor_u16x16(self, rhs.simd_into(self.simd))
3311 }
3312}
3313impl<S: Simd> core::ops::BitXorAssign<u16> for u16x16<S> {
3314 #[inline(always)]
3315 fn bitxor_assign(&mut self, rhs: u16) {
3316 *self = self.simd.xor_u16x16(*self, rhs.simd_into(self.simd));
3317 }
3318}
3319impl<S: Simd> core::ops::BitXor<u16x16<S>> for u16 {
3320 type Output = u16x16<S>;
3321 #[inline(always)]
3322 fn bitxor(self, rhs: u16x16<S>) -> Self::Output {
3323 rhs.simd.xor_u16x16(self.simd_into(rhs.simd), rhs)
3324 }
3325}
3326impl<S: Simd> core::ops::Shl<u32> for u16x16<S> {
3327 type Output = Self;
3328 #[inline(always)]
3329 fn shl(self, rhs: u32) -> Self::Output {
3330 self.simd.shl_u16x16(self, rhs)
3331 }
3332}
3333impl<S: Simd> core::ops::Shr<u32> for u16x16<S> {
3334 type Output = Self;
3335 #[inline(always)]
3336 fn shr(self, rhs: u32) -> Self::Output {
3337 self.simd.shr_u16x16(self, rhs)
3338 }
3339}
3340impl<S: Simd> core::ops::ShlAssign<u32> for u16x16<S> {
3341 #[inline(always)]
3342 fn shl_assign(&mut self, rhs: u32) {
3343 *self = self.simd.shl_u16x16(*self, rhs);
3344 }
3345}
3346impl<S: Simd> core::ops::ShrAssign<u32> for u16x16<S> {
3347 #[inline(always)]
3348 fn shr_assign(&mut self, rhs: u32) {
3349 *self = self.simd.shr_u16x16(*self, rhs);
3350 }
3351}
3352impl<S: Simd> core::ops::Shr for u16x16<S> {
3353 type Output = Self;
3354 #[inline(always)]
3355 fn shr(self, rhs: Self) -> Self::Output {
3356 self.simd.shrv_u16x16(self, rhs)
3357 }
3358}
3359impl<S: Simd> core::ops::ShrAssign for u16x16<S> {
3360 #[inline(always)]
3361 fn shr_assign(&mut self, rhs: Self) {
3362 *self = self.simd.shrv_u16x16(*self, rhs);
3363 }
3364}
3365impl<S: Simd> core::ops::BitAnd for mask16x16<S> {
3366 type Output = Self;
3367 #[inline(always)]
3368 fn bitand(self, rhs: Self) -> Self::Output {
3369 self.simd.and_mask16x16(self, rhs)
3370 }
3371}
3372impl<S: Simd> core::ops::BitAndAssign for mask16x16<S> {
3373 #[inline(always)]
3374 fn bitand_assign(&mut self, rhs: Self) {
3375 *self = self.simd.and_mask16x16(*self, rhs);
3376 }
3377}
3378impl<S: Simd> core::ops::BitAnd<i16> for mask16x16<S> {
3379 type Output = Self;
3380 #[inline(always)]
3381 fn bitand(self, rhs: i16) -> Self::Output {
3382 self.simd.and_mask16x16(self, rhs.simd_into(self.simd))
3383 }
3384}
3385impl<S: Simd> core::ops::BitAndAssign<i16> for mask16x16<S> {
3386 #[inline(always)]
3387 fn bitand_assign(&mut self, rhs: i16) {
3388 *self = self.simd.and_mask16x16(*self, rhs.simd_into(self.simd));
3389 }
3390}
3391impl<S: Simd> core::ops::BitAnd<mask16x16<S>> for i16 {
3392 type Output = mask16x16<S>;
3393 #[inline(always)]
3394 fn bitand(self, rhs: mask16x16<S>) -> Self::Output {
3395 rhs.simd.and_mask16x16(self.simd_into(rhs.simd), rhs)
3396 }
3397}
3398impl<S: Simd> core::ops::BitOr for mask16x16<S> {
3399 type Output = Self;
3400 #[inline(always)]
3401 fn bitor(self, rhs: Self) -> Self::Output {
3402 self.simd.or_mask16x16(self, rhs)
3403 }
3404}
3405impl<S: Simd> core::ops::BitOrAssign for mask16x16<S> {
3406 #[inline(always)]
3407 fn bitor_assign(&mut self, rhs: Self) {
3408 *self = self.simd.or_mask16x16(*self, rhs);
3409 }
3410}
3411impl<S: Simd> core::ops::BitOr<i16> for mask16x16<S> {
3412 type Output = Self;
3413 #[inline(always)]
3414 fn bitor(self, rhs: i16) -> Self::Output {
3415 self.simd.or_mask16x16(self, rhs.simd_into(self.simd))
3416 }
3417}
3418impl<S: Simd> core::ops::BitOrAssign<i16> for mask16x16<S> {
3419 #[inline(always)]
3420 fn bitor_assign(&mut self, rhs: i16) {
3421 *self = self.simd.or_mask16x16(*self, rhs.simd_into(self.simd));
3422 }
3423}
3424impl<S: Simd> core::ops::BitOr<mask16x16<S>> for i16 {
3425 type Output = mask16x16<S>;
3426 #[inline(always)]
3427 fn bitor(self, rhs: mask16x16<S>) -> Self::Output {
3428 rhs.simd.or_mask16x16(self.simd_into(rhs.simd), rhs)
3429 }
3430}
3431impl<S: Simd> core::ops::BitXor for mask16x16<S> {
3432 type Output = Self;
3433 #[inline(always)]
3434 fn bitxor(self, rhs: Self) -> Self::Output {
3435 self.simd.xor_mask16x16(self, rhs)
3436 }
3437}
3438impl<S: Simd> core::ops::BitXorAssign for mask16x16<S> {
3439 #[inline(always)]
3440 fn bitxor_assign(&mut self, rhs: Self) {
3441 *self = self.simd.xor_mask16x16(*self, rhs);
3442 }
3443}
3444impl<S: Simd> core::ops::BitXor<i16> for mask16x16<S> {
3445 type Output = Self;
3446 #[inline(always)]
3447 fn bitxor(self, rhs: i16) -> Self::Output {
3448 self.simd.xor_mask16x16(self, rhs.simd_into(self.simd))
3449 }
3450}
3451impl<S: Simd> core::ops::BitXorAssign<i16> for mask16x16<S> {
3452 #[inline(always)]
3453 fn bitxor_assign(&mut self, rhs: i16) {
3454 *self = self.simd.xor_mask16x16(*self, rhs.simd_into(self.simd));
3455 }
3456}
3457impl<S: Simd> core::ops::BitXor<mask16x16<S>> for i16 {
3458 type Output = mask16x16<S>;
3459 #[inline(always)]
3460 fn bitxor(self, rhs: mask16x16<S>) -> Self::Output {
3461 rhs.simd.xor_mask16x16(self.simd_into(rhs.simd), rhs)
3462 }
3463}
3464impl<S: Simd> core::ops::Not for mask16x16<S> {
3465 type Output = Self;
3466 #[inline(always)]
3467 fn not(self) -> Self::Output {
3468 self.simd.not_mask16x16(self)
3469 }
3470}
3471impl<S: Simd> core::ops::Neg for i32x8<S> {
3472 type Output = Self;
3473 #[inline(always)]
3474 fn neg(self) -> Self::Output {
3475 self.simd.neg_i32x8(self)
3476 }
3477}
3478impl<S: Simd> core::ops::Add for i32x8<S> {
3479 type Output = Self;
3480 #[inline(always)]
3481 fn add(self, rhs: Self) -> Self::Output {
3482 self.simd.add_i32x8(self, rhs)
3483 }
3484}
3485impl<S: Simd> core::ops::AddAssign for i32x8<S> {
3486 #[inline(always)]
3487 fn add_assign(&mut self, rhs: Self) {
3488 *self = self.simd.add_i32x8(*self, rhs);
3489 }
3490}
3491impl<S: Simd> core::ops::Add<i32> for i32x8<S> {
3492 type Output = Self;
3493 #[inline(always)]
3494 fn add(self, rhs: i32) -> Self::Output {
3495 self.simd.add_i32x8(self, rhs.simd_into(self.simd))
3496 }
3497}
3498impl<S: Simd> core::ops::AddAssign<i32> for i32x8<S> {
3499 #[inline(always)]
3500 fn add_assign(&mut self, rhs: i32) {
3501 *self = self.simd.add_i32x8(*self, rhs.simd_into(self.simd));
3502 }
3503}
3504impl<S: Simd> core::ops::Add<i32x8<S>> for i32 {
3505 type Output = i32x8<S>;
3506 #[inline(always)]
3507 fn add(self, rhs: i32x8<S>) -> Self::Output {
3508 rhs.simd.add_i32x8(self.simd_into(rhs.simd), rhs)
3509 }
3510}
3511impl<S: Simd> core::ops::Sub for i32x8<S> {
3512 type Output = Self;
3513 #[inline(always)]
3514 fn sub(self, rhs: Self) -> Self::Output {
3515 self.simd.sub_i32x8(self, rhs)
3516 }
3517}
3518impl<S: Simd> core::ops::SubAssign for i32x8<S> {
3519 #[inline(always)]
3520 fn sub_assign(&mut self, rhs: Self) {
3521 *self = self.simd.sub_i32x8(*self, rhs);
3522 }
3523}
3524impl<S: Simd> core::ops::Sub<i32> for i32x8<S> {
3525 type Output = Self;
3526 #[inline(always)]
3527 fn sub(self, rhs: i32) -> Self::Output {
3528 self.simd.sub_i32x8(self, rhs.simd_into(self.simd))
3529 }
3530}
3531impl<S: Simd> core::ops::SubAssign<i32> for i32x8<S> {
3532 #[inline(always)]
3533 fn sub_assign(&mut self, rhs: i32) {
3534 *self = self.simd.sub_i32x8(*self, rhs.simd_into(self.simd));
3535 }
3536}
3537impl<S: Simd> core::ops::Sub<i32x8<S>> for i32 {
3538 type Output = i32x8<S>;
3539 #[inline(always)]
3540 fn sub(self, rhs: i32x8<S>) -> Self::Output {
3541 rhs.simd.sub_i32x8(self.simd_into(rhs.simd), rhs)
3542 }
3543}
3544impl<S: Simd> core::ops::Mul for i32x8<S> {
3545 type Output = Self;
3546 #[inline(always)]
3547 fn mul(self, rhs: Self) -> Self::Output {
3548 self.simd.mul_i32x8(self, rhs)
3549 }
3550}
3551impl<S: Simd> core::ops::MulAssign for i32x8<S> {
3552 #[inline(always)]
3553 fn mul_assign(&mut self, rhs: Self) {
3554 *self = self.simd.mul_i32x8(*self, rhs);
3555 }
3556}
3557impl<S: Simd> core::ops::Mul<i32> for i32x8<S> {
3558 type Output = Self;
3559 #[inline(always)]
3560 fn mul(self, rhs: i32) -> Self::Output {
3561 self.simd.mul_i32x8(self, rhs.simd_into(self.simd))
3562 }
3563}
3564impl<S: Simd> core::ops::MulAssign<i32> for i32x8<S> {
3565 #[inline(always)]
3566 fn mul_assign(&mut self, rhs: i32) {
3567 *self = self.simd.mul_i32x8(*self, rhs.simd_into(self.simd));
3568 }
3569}
3570impl<S: Simd> core::ops::Mul<i32x8<S>> for i32 {
3571 type Output = i32x8<S>;
3572 #[inline(always)]
3573 fn mul(self, rhs: i32x8<S>) -> Self::Output {
3574 rhs.simd.mul_i32x8(self.simd_into(rhs.simd), rhs)
3575 }
3576}
3577impl<S: Simd> core::ops::BitAnd for i32x8<S> {
3578 type Output = Self;
3579 #[inline(always)]
3580 fn bitand(self, rhs: Self) -> Self::Output {
3581 self.simd.and_i32x8(self, rhs)
3582 }
3583}
3584impl<S: Simd> core::ops::BitAndAssign for i32x8<S> {
3585 #[inline(always)]
3586 fn bitand_assign(&mut self, rhs: Self) {
3587 *self = self.simd.and_i32x8(*self, rhs);
3588 }
3589}
3590impl<S: Simd> core::ops::BitAnd<i32> for i32x8<S> {
3591 type Output = Self;
3592 #[inline(always)]
3593 fn bitand(self, rhs: i32) -> Self::Output {
3594 self.simd.and_i32x8(self, rhs.simd_into(self.simd))
3595 }
3596}
3597impl<S: Simd> core::ops::BitAndAssign<i32> for i32x8<S> {
3598 #[inline(always)]
3599 fn bitand_assign(&mut self, rhs: i32) {
3600 *self = self.simd.and_i32x8(*self, rhs.simd_into(self.simd));
3601 }
3602}
3603impl<S: Simd> core::ops::BitAnd<i32x8<S>> for i32 {
3604 type Output = i32x8<S>;
3605 #[inline(always)]
3606 fn bitand(self, rhs: i32x8<S>) -> Self::Output {
3607 rhs.simd.and_i32x8(self.simd_into(rhs.simd), rhs)
3608 }
3609}
3610impl<S: Simd> core::ops::BitOr for i32x8<S> {
3611 type Output = Self;
3612 #[inline(always)]
3613 fn bitor(self, rhs: Self) -> Self::Output {
3614 self.simd.or_i32x8(self, rhs)
3615 }
3616}
3617impl<S: Simd> core::ops::BitOrAssign for i32x8<S> {
3618 #[inline(always)]
3619 fn bitor_assign(&mut self, rhs: Self) {
3620 *self = self.simd.or_i32x8(*self, rhs);
3621 }
3622}
3623impl<S: Simd> core::ops::BitOr<i32> for i32x8<S> {
3624 type Output = Self;
3625 #[inline(always)]
3626 fn bitor(self, rhs: i32) -> Self::Output {
3627 self.simd.or_i32x8(self, rhs.simd_into(self.simd))
3628 }
3629}
3630impl<S: Simd> core::ops::BitOrAssign<i32> for i32x8<S> {
3631 #[inline(always)]
3632 fn bitor_assign(&mut self, rhs: i32) {
3633 *self = self.simd.or_i32x8(*self, rhs.simd_into(self.simd));
3634 }
3635}
3636impl<S: Simd> core::ops::BitOr<i32x8<S>> for i32 {
3637 type Output = i32x8<S>;
3638 #[inline(always)]
3639 fn bitor(self, rhs: i32x8<S>) -> Self::Output {
3640 rhs.simd.or_i32x8(self.simd_into(rhs.simd), rhs)
3641 }
3642}
3643impl<S: Simd> core::ops::BitXor for i32x8<S> {
3644 type Output = Self;
3645 #[inline(always)]
3646 fn bitxor(self, rhs: Self) -> Self::Output {
3647 self.simd.xor_i32x8(self, rhs)
3648 }
3649}
3650impl<S: Simd> core::ops::BitXorAssign for i32x8<S> {
3651 #[inline(always)]
3652 fn bitxor_assign(&mut self, rhs: Self) {
3653 *self = self.simd.xor_i32x8(*self, rhs);
3654 }
3655}
3656impl<S: Simd> core::ops::BitXor<i32> for i32x8<S> {
3657 type Output = Self;
3658 #[inline(always)]
3659 fn bitxor(self, rhs: i32) -> Self::Output {
3660 self.simd.xor_i32x8(self, rhs.simd_into(self.simd))
3661 }
3662}
3663impl<S: Simd> core::ops::BitXorAssign<i32> for i32x8<S> {
3664 #[inline(always)]
3665 fn bitxor_assign(&mut self, rhs: i32) {
3666 *self = self.simd.xor_i32x8(*self, rhs.simd_into(self.simd));
3667 }
3668}
3669impl<S: Simd> core::ops::BitXor<i32x8<S>> for i32 {
3670 type Output = i32x8<S>;
3671 #[inline(always)]
3672 fn bitxor(self, rhs: i32x8<S>) -> Self::Output {
3673 rhs.simd.xor_i32x8(self.simd_into(rhs.simd), rhs)
3674 }
3675}
3676impl<S: Simd> core::ops::Shl<u32> for i32x8<S> {
3677 type Output = Self;
3678 #[inline(always)]
3679 fn shl(self, rhs: u32) -> Self::Output {
3680 self.simd.shl_i32x8(self, rhs)
3681 }
3682}
3683impl<S: Simd> core::ops::Shr<u32> for i32x8<S> {
3684 type Output = Self;
3685 #[inline(always)]
3686 fn shr(self, rhs: u32) -> Self::Output {
3687 self.simd.shr_i32x8(self, rhs)
3688 }
3689}
3690impl<S: Simd> core::ops::ShlAssign<u32> for i32x8<S> {
3691 #[inline(always)]
3692 fn shl_assign(&mut self, rhs: u32) {
3693 *self = self.simd.shl_i32x8(*self, rhs);
3694 }
3695}
3696impl<S: Simd> core::ops::ShrAssign<u32> for i32x8<S> {
3697 #[inline(always)]
3698 fn shr_assign(&mut self, rhs: u32) {
3699 *self = self.simd.shr_i32x8(*self, rhs);
3700 }
3701}
3702impl<S: Simd> core::ops::Shr for i32x8<S> {
3703 type Output = Self;
3704 #[inline(always)]
3705 fn shr(self, rhs: Self) -> Self::Output {
3706 self.simd.shrv_i32x8(self, rhs)
3707 }
3708}
3709impl<S: Simd> core::ops::ShrAssign for i32x8<S> {
3710 #[inline(always)]
3711 fn shr_assign(&mut self, rhs: Self) {
3712 *self = self.simd.shrv_i32x8(*self, rhs);
3713 }
3714}
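// The shift impls above come in two forms: `Shl<u32>`/`Shr<u32>` shift every lane
// by the same scalar count (forwarding to `shl_i32x8`/`shr_i32x8`), while
// `Shr<i32x8<S>>` shifts each lane by the count held in the corresponding lane of
// the right-hand vector (forwarding to `shrv_i32x8`). A minimal sketch, assuming
// `a` and `counts` are existing `i32x8<S>` values (for this signed type the right
// shift is presumably arithmetic, i.e. sign-extending):
//
//     let halved = a >> 1u32;    // every lane shifted by the same amount
//     let varied = a >> counts;  // per-lane shift counts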
3715impl<S: Simd> core::ops::Add for u32x8<S> {
3716 type Output = Self;
3717 #[inline(always)]
3718 fn add(self, rhs: Self) -> Self::Output {
3719 self.simd.add_u32x8(self, rhs)
3720 }
3721}
3722impl<S: Simd> core::ops::AddAssign for u32x8<S> {
3723 #[inline(always)]
3724 fn add_assign(&mut self, rhs: Self) {
3725 *self = self.simd.add_u32x8(*self, rhs);
3726 }
3727}
3728impl<S: Simd> core::ops::Add<u32> for u32x8<S> {
3729 type Output = Self;
3730 #[inline(always)]
3731 fn add(self, rhs: u32) -> Self::Output {
3732 self.simd.add_u32x8(self, rhs.simd_into(self.simd))
3733 }
3734}
3735impl<S: Simd> core::ops::AddAssign<u32> for u32x8<S> {
3736 #[inline(always)]
3737 fn add_assign(&mut self, rhs: u32) {
3738 *self = self.simd.add_u32x8(*self, rhs.simd_into(self.simd));
3739 }
3740}
3741impl<S: Simd> core::ops::Add<u32x8<S>> for u32 {
3742 type Output = u32x8<S>;
3743 #[inline(always)]
3744 fn add(self, rhs: u32x8<S>) -> Self::Output {
3745 rhs.simd.add_u32x8(self.simd_into(rhs.simd), rhs)
3746 }
3747}
3748impl<S: Simd> core::ops::Sub for u32x8<S> {
3749 type Output = Self;
3750 #[inline(always)]
3751 fn sub(self, rhs: Self) -> Self::Output {
3752 self.simd.sub_u32x8(self, rhs)
3753 }
3754}
3755impl<S: Simd> core::ops::SubAssign for u32x8<S> {
3756 #[inline(always)]
3757 fn sub_assign(&mut self, rhs: Self) {
3758 *self = self.simd.sub_u32x8(*self, rhs);
3759 }
3760}
3761impl<S: Simd> core::ops::Sub<u32> for u32x8<S> {
3762 type Output = Self;
3763 #[inline(always)]
3764 fn sub(self, rhs: u32) -> Self::Output {
3765 self.simd.sub_u32x8(self, rhs.simd_into(self.simd))
3766 }
3767}
3768impl<S: Simd> core::ops::SubAssign<u32> for u32x8<S> {
3769 #[inline(always)]
3770 fn sub_assign(&mut self, rhs: u32) {
3771 *self = self.simd.sub_u32x8(*self, rhs.simd_into(self.simd));
3772 }
3773}
3774impl<S: Simd> core::ops::Sub<u32x8<S>> for u32 {
3775 type Output = u32x8<S>;
3776 #[inline(always)]
3777 fn sub(self, rhs: u32x8<S>) -> Self::Output {
3778 rhs.simd.sub_u32x8(self.simd_into(rhs.simd), rhs)
3779 }
3780}
3781impl<S: Simd> core::ops::Mul for u32x8<S> {
3782 type Output = Self;
3783 #[inline(always)]
3784 fn mul(self, rhs: Self) -> Self::Output {
3785 self.simd.mul_u32x8(self, rhs)
3786 }
3787}
3788impl<S: Simd> core::ops::MulAssign for u32x8<S> {
3789 #[inline(always)]
3790 fn mul_assign(&mut self, rhs: Self) {
3791 *self = self.simd.mul_u32x8(*self, rhs);
3792 }
3793}
3794impl<S: Simd> core::ops::Mul<u32> for u32x8<S> {
3795 type Output = Self;
3796 #[inline(always)]
3797 fn mul(self, rhs: u32) -> Self::Output {
3798 self.simd.mul_u32x8(self, rhs.simd_into(self.simd))
3799 }
3800}
3801impl<S: Simd> core::ops::MulAssign<u32> for u32x8<S> {
3802 #[inline(always)]
3803 fn mul_assign(&mut self, rhs: u32) {
3804 *self = self.simd.mul_u32x8(*self, rhs.simd_into(self.simd));
3805 }
3806}
3807impl<S: Simd> core::ops::Mul<u32x8<S>> for u32 {
3808 type Output = u32x8<S>;
3809 #[inline(always)]
3810 fn mul(self, rhs: u32x8<S>) -> Self::Output {
3811 rhs.simd.mul_u32x8(self.simd_into(rhs.simd), rhs)
3812 }
3813}
3814impl<S: Simd> core::ops::BitAnd for u32x8<S> {
3815 type Output = Self;
3816 #[inline(always)]
3817 fn bitand(self, rhs: Self) -> Self::Output {
3818 self.simd.and_u32x8(self, rhs)
3819 }
3820}
3821impl<S: Simd> core::ops::BitAndAssign for u32x8<S> {
3822 #[inline(always)]
3823 fn bitand_assign(&mut self, rhs: Self) {
3824 *self = self.simd.and_u32x8(*self, rhs);
3825 }
3826}
3827impl<S: Simd> core::ops::BitAnd<u32> for u32x8<S> {
3828 type Output = Self;
3829 #[inline(always)]
3830 fn bitand(self, rhs: u32) -> Self::Output {
3831 self.simd.and_u32x8(self, rhs.simd_into(self.simd))
3832 }
3833}
3834impl<S: Simd> core::ops::BitAndAssign<u32> for u32x8<S> {
3835 #[inline(always)]
3836 fn bitand_assign(&mut self, rhs: u32) {
3837 *self = self.simd.and_u32x8(*self, rhs.simd_into(self.simd));
3838 }
3839}
3840impl<S: Simd> core::ops::BitAnd<u32x8<S>> for u32 {
3841 type Output = u32x8<S>;
3842 #[inline(always)]
3843 fn bitand(self, rhs: u32x8<S>) -> Self::Output {
3844 rhs.simd.and_u32x8(self.simd_into(rhs.simd), rhs)
3845 }
3846}
3847impl<S: Simd> core::ops::BitOr for u32x8<S> {
3848 type Output = Self;
3849 #[inline(always)]
3850 fn bitor(self, rhs: Self) -> Self::Output {
3851 self.simd.or_u32x8(self, rhs)
3852 }
3853}
3854impl<S: Simd> core::ops::BitOrAssign for u32x8<S> {
3855 #[inline(always)]
3856 fn bitor_assign(&mut self, rhs: Self) {
3857 *self = self.simd.or_u32x8(*self, rhs);
3858 }
3859}
3860impl<S: Simd> core::ops::BitOr<u32> for u32x8<S> {
3861 type Output = Self;
3862 #[inline(always)]
3863 fn bitor(self, rhs: u32) -> Self::Output {
3864 self.simd.or_u32x8(self, rhs.simd_into(self.simd))
3865 }
3866}
3867impl<S: Simd> core::ops::BitOrAssign<u32> for u32x8<S> {
3868 #[inline(always)]
3869 fn bitor_assign(&mut self, rhs: u32) {
3870 *self = self.simd.or_u32x8(*self, rhs.simd_into(self.simd));
3871 }
3872}
3873impl<S: Simd> core::ops::BitOr<u32x8<S>> for u32 {
3874 type Output = u32x8<S>;
3875 #[inline(always)]
3876 fn bitor(self, rhs: u32x8<S>) -> Self::Output {
3877 rhs.simd.or_u32x8(self.simd_into(rhs.simd), rhs)
3878 }
3879}
3880impl<S: Simd> core::ops::BitXor for u32x8<S> {
3881 type Output = Self;
3882 #[inline(always)]
3883 fn bitxor(self, rhs: Self) -> Self::Output {
3884 self.simd.xor_u32x8(self, rhs)
3885 }
3886}
3887impl<S: Simd> core::ops::BitXorAssign for u32x8<S> {
3888 #[inline(always)]
3889 fn bitxor_assign(&mut self, rhs: Self) {
3890 *self = self.simd.xor_u32x8(*self, rhs);
3891 }
3892}
3893impl<S: Simd> core::ops::BitXor<u32> for u32x8<S> {
3894 type Output = Self;
3895 #[inline(always)]
3896 fn bitxor(self, rhs: u32) -> Self::Output {
3897 self.simd.xor_u32x8(self, rhs.simd_into(self.simd))
3898 }
3899}
3900impl<S: Simd> core::ops::BitXorAssign<u32> for u32x8<S> {
3901 #[inline(always)]
3902 fn bitxor_assign(&mut self, rhs: u32) {
3903 *self = self.simd.xor_u32x8(*self, rhs.simd_into(self.simd));
3904 }
3905}
3906impl<S: Simd> core::ops::BitXor<u32x8<S>> for u32 {
3907 type Output = u32x8<S>;
3908 #[inline(always)]
3909 fn bitxor(self, rhs: u32x8<S>) -> Self::Output {
3910 rhs.simd.xor_u32x8(self.simd_into(rhs.simd), rhs)
3911 }
3912}
3913impl<S: Simd> core::ops::Shl<u32> for u32x8<S> {
3914 type Output = Self;
3915 #[inline(always)]
3916 fn shl(self, rhs: u32) -> Self::Output {
3917 self.simd.shl_u32x8(self, rhs)
3918 }
3919}
3920impl<S: Simd> core::ops::Shr<u32> for u32x8<S> {
3921 type Output = Self;
3922 #[inline(always)]
3923 fn shr(self, rhs: u32) -> Self::Output {
3924 self.simd.shr_u32x8(self, rhs)
3925 }
3926}
3927impl<S: Simd> core::ops::ShlAssign<u32> for u32x8<S> {
3928 #[inline(always)]
3929 fn shl_assign(&mut self, rhs: u32) {
3930 *self = self.simd.shl_u32x8(*self, rhs);
3931 }
3932}
3933impl<S: Simd> core::ops::ShrAssign<u32> for u32x8<S> {
3934 #[inline(always)]
3935 fn shr_assign(&mut self, rhs: u32) {
3936 *self = self.simd.shr_u32x8(*self, rhs);
3937 }
3938}
3939impl<S: Simd> core::ops::Shr for u32x8<S> {
3940 type Output = Self;
3941 #[inline(always)]
3942 fn shr(self, rhs: Self) -> Self::Output {
3943 self.simd.shrv_u32x8(self, rhs)
3944 }
3945}
3946impl<S: Simd> core::ops::ShrAssign for u32x8<S> {
3947 #[inline(always)]
3948 fn shr_assign(&mut self, rhs: Self) {
3949 *self = self.simd.shrv_u32x8(*self, rhs);
3950 }
3951}
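// Scalar operands are accepted on either side of the arithmetic and bitwise
// operators: the `Add<u32> for u32x8<S>` family splats the scalar across the
// lanes via `simd_into`, and the mirrored `Add<u32x8<S>> for u32` impls make the
// commuted form work as well. A minimal sketch, assuming `v` is an existing
// `u32x8<S>` value:
//
//     let a = v + 1u32;        // vector + scalar
//     let b = 1u32 + v;        // scalar + vector, lane-wise the same result
//     let low = v & 0xFFu32;   // scalars broadcast for bitwise ops too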
3952impl<S: Simd> core::ops::BitAnd for mask32x8<S> {
3953 type Output = Self;
3954 #[inline(always)]
3955 fn bitand(self, rhs: Self) -> Self::Output {
3956 self.simd.and_mask32x8(self, rhs)
3957 }
3958}
3959impl<S: Simd> core::ops::BitAndAssign for mask32x8<S> {
3960 #[inline(always)]
3961 fn bitand_assign(&mut self, rhs: Self) {
3962 *self = self.simd.and_mask32x8(*self, rhs);
3963 }
3964}
3965impl<S: Simd> core::ops::BitAnd<i32> for mask32x8<S> {
3966 type Output = Self;
3967 #[inline(always)]
3968 fn bitand(self, rhs: i32) -> Self::Output {
3969 self.simd.and_mask32x8(self, rhs.simd_into(self.simd))
3970 }
3971}
3972impl<S: Simd> core::ops::BitAndAssign<i32> for mask32x8<S> {
3973 #[inline(always)]
3974 fn bitand_assign(&mut self, rhs: i32) {
3975 *self = self.simd.and_mask32x8(*self, rhs.simd_into(self.simd));
3976 }
3977}
3978impl<S: Simd> core::ops::BitAnd<mask32x8<S>> for i32 {
3979 type Output = mask32x8<S>;
3980 #[inline(always)]
3981 fn bitand(self, rhs: mask32x8<S>) -> Self::Output {
3982 rhs.simd.and_mask32x8(self.simd_into(rhs.simd), rhs)
3983 }
3984}
3985impl<S: Simd> core::ops::BitOr for mask32x8<S> {
3986 type Output = Self;
3987 #[inline(always)]
3988 fn bitor(self, rhs: Self) -> Self::Output {
3989 self.simd.or_mask32x8(self, rhs)
3990 }
3991}
3992impl<S: Simd> core::ops::BitOrAssign for mask32x8<S> {
3993 #[inline(always)]
3994 fn bitor_assign(&mut self, rhs: Self) {
3995 *self = self.simd.or_mask32x8(*self, rhs);
3996 }
3997}
3998impl<S: Simd> core::ops::BitOr<i32> for mask32x8<S> {
3999 type Output = Self;
4000 #[inline(always)]
4001 fn bitor(self, rhs: i32) -> Self::Output {
4002 self.simd.or_mask32x8(self, rhs.simd_into(self.simd))
4003 }
4004}
4005impl<S: Simd> core::ops::BitOrAssign<i32> for mask32x8<S> {
4006 #[inline(always)]
4007 fn bitor_assign(&mut self, rhs: i32) {
4008 *self = self.simd.or_mask32x8(*self, rhs.simd_into(self.simd));
4009 }
4010}
4011impl<S: Simd> core::ops::BitOr<mask32x8<S>> for i32 {
4012 type Output = mask32x8<S>;
4013 #[inline(always)]
4014 fn bitor(self, rhs: mask32x8<S>) -> Self::Output {
4015 rhs.simd.or_mask32x8(self.simd_into(rhs.simd), rhs)
4016 }
4017}
4018impl<S: Simd> core::ops::BitXor for mask32x8<S> {
4019 type Output = Self;
4020 #[inline(always)]
4021 fn bitxor(self, rhs: Self) -> Self::Output {
4022 self.simd.xor_mask32x8(self, rhs)
4023 }
4024}
4025impl<S: Simd> core::ops::BitXorAssign for mask32x8<S> {
4026 #[inline(always)]
4027 fn bitxor_assign(&mut self, rhs: Self) {
4028 *self = self.simd.xor_mask32x8(*self, rhs);
4029 }
4030}
4031impl<S: Simd> core::ops::BitXor<i32> for mask32x8<S> {
4032 type Output = Self;
4033 #[inline(always)]
4034 fn bitxor(self, rhs: i32) -> Self::Output {
4035 self.simd.xor_mask32x8(self, rhs.simd_into(self.simd))
4036 }
4037}
4038impl<S: Simd> core::ops::BitXorAssign<i32> for mask32x8<S> {
4039 #[inline(always)]
4040 fn bitxor_assign(&mut self, rhs: i32) {
4041 *self = self.simd.xor_mask32x8(*self, rhs.simd_into(self.simd));
4042 }
4043}
4044impl<S: Simd> core::ops::BitXor<mask32x8<S>> for i32 {
4045 type Output = mask32x8<S>;
4046 #[inline(always)]
4047 fn bitxor(self, rhs: mask32x8<S>) -> Self::Output {
4048 rhs.simd.xor_mask32x8(self.simd_into(rhs.simd), rhs)
4049 }
4050}
4051impl<S: Simd> core::ops::Not for mask32x8<S> {
4052 type Output = Self;
4053 #[inline(always)]
4054 fn not(self) -> Self::Output {
4055 self.simd.not_mask32x8(self)
4056 }
4057}
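// Masks support the usual boolean algebra: `&`, `|`, `^`, and `!` forward to the
// backend's mask ops, and a scalar `i32` right-hand side is splat across the
// lanes (the scalar type always matches the mask's lane width). A minimal sketch,
// assuming `ge_lo` and `le_hi` are existing `mask32x8<S>` values, e.g. produced
// by lane-wise comparisons:
//
//     let inside  = ge_lo & le_hi;   // lanes inside the range
//     let outside = !inside;         // complement of the mask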
4058impl<S: Simd> core::ops::Neg for f64x4<S> {
4059 type Output = Self;
4060 #[inline(always)]
4061 fn neg(self) -> Self::Output {
4062 self.simd.neg_f64x4(self)
4063 }
4064}
4065impl<S: Simd> core::ops::Add for f64x4<S> {
4066 type Output = Self;
4067 #[inline(always)]
4068 fn add(self, rhs: Self) -> Self::Output {
4069 self.simd.add_f64x4(self, rhs)
4070 }
4071}
4072impl<S: Simd> core::ops::AddAssign for f64x4<S> {
4073 #[inline(always)]
4074 fn add_assign(&mut self, rhs: Self) {
4075 *self = self.simd.add_f64x4(*self, rhs);
4076 }
4077}
4078impl<S: Simd> core::ops::Add<f64> for f64x4<S> {
4079 type Output = Self;
4080 #[inline(always)]
4081 fn add(self, rhs: f64) -> Self::Output {
4082 self.simd.add_f64x4(self, rhs.simd_into(self.simd))
4083 }
4084}
4085impl<S: Simd> core::ops::AddAssign<f64> for f64x4<S> {
4086 #[inline(always)]
4087 fn add_assign(&mut self, rhs: f64) {
4088 *self = self.simd.add_f64x4(*self, rhs.simd_into(self.simd));
4089 }
4090}
4091impl<S: Simd> core::ops::Add<f64x4<S>> for f64 {
4092 type Output = f64x4<S>;
4093 #[inline(always)]
4094 fn add(self, rhs: f64x4<S>) -> Self::Output {
4095 rhs.simd.add_f64x4(self.simd_into(rhs.simd), rhs)
4096 }
4097}
4098impl<S: Simd> core::ops::Sub for f64x4<S> {
4099 type Output = Self;
4100 #[inline(always)]
4101 fn sub(self, rhs: Self) -> Self::Output {
4102 self.simd.sub_f64x4(self, rhs)
4103 }
4104}
4105impl<S: Simd> core::ops::SubAssign for f64x4<S> {
4106 #[inline(always)]
4107 fn sub_assign(&mut self, rhs: Self) {
4108 *self = self.simd.sub_f64x4(*self, rhs);
4109 }
4110}
4111impl<S: Simd> core::ops::Sub<f64> for f64x4<S> {
4112 type Output = Self;
4113 #[inline(always)]
4114 fn sub(self, rhs: f64) -> Self::Output {
4115 self.simd.sub_f64x4(self, rhs.simd_into(self.simd))
4116 }
4117}
4118impl<S: Simd> core::ops::SubAssign<f64> for f64x4<S> {
4119 #[inline(always)]
4120 fn sub_assign(&mut self, rhs: f64) {
4121 *self = self.simd.sub_f64x4(*self, rhs.simd_into(self.simd));
4122 }
4123}
4124impl<S: Simd> core::ops::Sub<f64x4<S>> for f64 {
4125 type Output = f64x4<S>;
4126 #[inline(always)]
4127 fn sub(self, rhs: f64x4<S>) -> Self::Output {
4128 rhs.simd.sub_f64x4(self.simd_into(rhs.simd), rhs)
4129 }
4130}
4131impl<S: Simd> core::ops::Mul for f64x4<S> {
4132 type Output = Self;
4133 #[inline(always)]
4134 fn mul(self, rhs: Self) -> Self::Output {
4135 self.simd.mul_f64x4(self, rhs)
4136 }
4137}
4138impl<S: Simd> core::ops::MulAssign for f64x4<S> {
4139 #[inline(always)]
4140 fn mul_assign(&mut self, rhs: Self) {
4141 *self = self.simd.mul_f64x4(*self, rhs);
4142 }
4143}
4144impl<S: Simd> core::ops::Mul<f64> for f64x4<S> {
4145 type Output = Self;
4146 #[inline(always)]
4147 fn mul(self, rhs: f64) -> Self::Output {
4148 self.simd.mul_f64x4(self, rhs.simd_into(self.simd))
4149 }
4150}
4151impl<S: Simd> core::ops::MulAssign<f64> for f64x4<S> {
4152 #[inline(always)]
4153 fn mul_assign(&mut self, rhs: f64) {
4154 *self = self.simd.mul_f64x4(*self, rhs.simd_into(self.simd));
4155 }
4156}
4157impl<S: Simd> core::ops::Mul<f64x4<S>> for f64 {
4158 type Output = f64x4<S>;
4159 #[inline(always)]
4160 fn mul(self, rhs: f64x4<S>) -> Self::Output {
4161 rhs.simd.mul_f64x4(self.simd_into(rhs.simd), rhs)
4162 }
4163}
4164impl<S: Simd> core::ops::Div for f64x4<S> {
4165 type Output = Self;
4166 #[inline(always)]
4167 fn div(self, rhs: Self) -> Self::Output {
4168 self.simd.div_f64x4(self, rhs)
4169 }
4170}
4171impl<S: Simd> core::ops::DivAssign for f64x4<S> {
4172 #[inline(always)]
4173 fn div_assign(&mut self, rhs: Self) {
4174 *self = self.simd.div_f64x4(*self, rhs);
4175 }
4176}
4177impl<S: Simd> core::ops::Div<f64> for f64x4<S> {
4178 type Output = Self;
4179 #[inline(always)]
4180 fn div(self, rhs: f64) -> Self::Output {
4181 self.simd.div_f64x4(self, rhs.simd_into(self.simd))
4182 }
4183}
4184impl<S: Simd> core::ops::DivAssign<f64> for f64x4<S> {
4185 #[inline(always)]
4186 fn div_assign(&mut self, rhs: f64) {
4187 *self = self.simd.div_f64x4(*self, rhs.simd_into(self.simd));
4188 }
4189}
4190impl<S: Simd> core::ops::Div<f64x4<S>> for f64 {
4191 type Output = f64x4<S>;
4192 #[inline(always)]
4193 fn div(self, rhs: f64x4<S>) -> Self::Output {
4194 rhs.simd.div_f64x4(self.simd_into(rhs.simd), rhs)
4195 }
4196}
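// The float impls above let scalar and vector operands mix freely within one
// expression; every scalar is broadcast to all four lanes before the operation.
// A minimal sketch of a lane-wise linear interpolation, using only operators
// defined in this file (`a` and `b` are `f64x4<S>` values, `t` a scalar weight):
//
//     fn lerp<S: Simd>(a: f64x4<S>, b: f64x4<S>, t: f64) -> f64x4<S> {
//         a + (b - a) * t
//     }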
4197impl<S: Simd> core::ops::BitAnd for mask64x4<S> {
4198 type Output = Self;
4199 #[inline(always)]
4200 fn bitand(self, rhs: Self) -> Self::Output {
4201 self.simd.and_mask64x4(self, rhs)
4202 }
4203}
4204impl<S: Simd> core::ops::BitAndAssign for mask64x4<S> {
4205 #[inline(always)]
4206 fn bitand_assign(&mut self, rhs: Self) {
4207 *self = self.simd.and_mask64x4(*self, rhs);
4208 }
4209}
4210impl<S: Simd> core::ops::BitAnd<i64> for mask64x4<S> {
4211 type Output = Self;
4212 #[inline(always)]
4213 fn bitand(self, rhs: i64) -> Self::Output {
4214 self.simd.and_mask64x4(self, rhs.simd_into(self.simd))
4215 }
4216}
4217impl<S: Simd> core::ops::BitAndAssign<i64> for mask64x4<S> {
4218 #[inline(always)]
4219 fn bitand_assign(&mut self, rhs: i64) {
4220 *self = self.simd.and_mask64x4(*self, rhs.simd_into(self.simd));
4221 }
4222}
4223impl<S: Simd> core::ops::BitAnd<mask64x4<S>> for i64 {
4224 type Output = mask64x4<S>;
4225 #[inline(always)]
4226 fn bitand(self, rhs: mask64x4<S>) -> Self::Output {
4227 rhs.simd.and_mask64x4(self.simd_into(rhs.simd), rhs)
4228 }
4229}
4230impl<S: Simd> core::ops::BitOr for mask64x4<S> {
4231 type Output = Self;
4232 #[inline(always)]
4233 fn bitor(self, rhs: Self) -> Self::Output {
4234 self.simd.or_mask64x4(self, rhs)
4235 }
4236}
4237impl<S: Simd> core::ops::BitOrAssign for mask64x4<S> {
4238 #[inline(always)]
4239 fn bitor_assign(&mut self, rhs: Self) {
4240 *self = self.simd.or_mask64x4(*self, rhs);
4241 }
4242}
4243impl<S: Simd> core::ops::BitOr<i64> for mask64x4<S> {
4244 type Output = Self;
4245 #[inline(always)]
4246 fn bitor(self, rhs: i64) -> Self::Output {
4247 self.simd.or_mask64x4(self, rhs.simd_into(self.simd))
4248 }
4249}
4250impl<S: Simd> core::ops::BitOrAssign<i64> for mask64x4<S> {
4251 #[inline(always)]
4252 fn bitor_assign(&mut self, rhs: i64) {
4253 *self = self.simd.or_mask64x4(*self, rhs.simd_into(self.simd));
4254 }
4255}
4256impl<S: Simd> core::ops::BitOr<mask64x4<S>> for i64 {
4257 type Output = mask64x4<S>;
4258 #[inline(always)]
4259 fn bitor(self, rhs: mask64x4<S>) -> Self::Output {
4260 rhs.simd.or_mask64x4(self.simd_into(rhs.simd), rhs)
4261 }
4262}
4263impl<S: Simd> core::ops::BitXor for mask64x4<S> {
4264 type Output = Self;
4265 #[inline(always)]
4266 fn bitxor(self, rhs: Self) -> Self::Output {
4267 self.simd.xor_mask64x4(self, rhs)
4268 }
4269}
4270impl<S: Simd> core::ops::BitXorAssign for mask64x4<S> {
4271 #[inline(always)]
4272 fn bitxor_assign(&mut self, rhs: Self) {
4273 *self = self.simd.xor_mask64x4(*self, rhs);
4274 }
4275}
4276impl<S: Simd> core::ops::BitXor<i64> for mask64x4<S> {
4277 type Output = Self;
4278 #[inline(always)]
4279 fn bitxor(self, rhs: i64) -> Self::Output {
4280 self.simd.xor_mask64x4(self, rhs.simd_into(self.simd))
4281 }
4282}
4283impl<S: Simd> core::ops::BitXorAssign<i64> for mask64x4<S> {
4284 #[inline(always)]
4285 fn bitxor_assign(&mut self, rhs: i64) {
4286 *self = self.simd.xor_mask64x4(*self, rhs.simd_into(self.simd));
4287 }
4288}
4289impl<S: Simd> core::ops::BitXor<mask64x4<S>> for i64 {
4290 type Output = mask64x4<S>;
4291 #[inline(always)]
4292 fn bitxor(self, rhs: mask64x4<S>) -> Self::Output {
4293 rhs.simd.xor_mask64x4(self.simd_into(rhs.simd), rhs)
4294 }
4295}
4296impl<S: Simd> core::ops::Not for mask64x4<S> {
4297 type Output = Self;
4298 #[inline(always)]
4299 fn not(self) -> Self::Output {
4300 self.simd.not_mask64x4(self)
4301 }
4302}
4303impl<S: Simd> core::ops::Neg for f32x16<S> {
4304 type Output = Self;
4305 #[inline(always)]
4306 fn neg(self) -> Self::Output {
4307 self.simd.neg_f32x16(self)
4308 }
4309}
4310impl<S: Simd> core::ops::Add for f32x16<S> {
4311 type Output = Self;
4312 #[inline(always)]
4313 fn add(self, rhs: Self) -> Self::Output {
4314 self.simd.add_f32x16(self, rhs)
4315 }
4316}
4317impl<S: Simd> core::ops::AddAssign for f32x16<S> {
4318 #[inline(always)]
4319 fn add_assign(&mut self, rhs: Self) {
4320 *self = self.simd.add_f32x16(*self, rhs);
4321 }
4322}
4323impl<S: Simd> core::ops::Add<f32> for f32x16<S> {
4324 type Output = Self;
4325 #[inline(always)]
4326 fn add(self, rhs: f32) -> Self::Output {
4327 self.simd.add_f32x16(self, rhs.simd_into(self.simd))
4328 }
4329}
4330impl<S: Simd> core::ops::AddAssign<f32> for f32x16<S> {
4331 #[inline(always)]
4332 fn add_assign(&mut self, rhs: f32) {
4333 *self = self.simd.add_f32x16(*self, rhs.simd_into(self.simd));
4334 }
4335}
4336impl<S: Simd> core::ops::Add<f32x16<S>> for f32 {
4337 type Output = f32x16<S>;
4338 #[inline(always)]
4339 fn add(self, rhs: f32x16<S>) -> Self::Output {
4340 rhs.simd.add_f32x16(self.simd_into(rhs.simd), rhs)
4341 }
4342}
4343impl<S: Simd> core::ops::Sub for f32x16<S> {
4344 type Output = Self;
4345 #[inline(always)]
4346 fn sub(self, rhs: Self) -> Self::Output {
4347 self.simd.sub_f32x16(self, rhs)
4348 }
4349}
4350impl<S: Simd> core::ops::SubAssign for f32x16<S> {
4351 #[inline(always)]
4352 fn sub_assign(&mut self, rhs: Self) {
4353 *self = self.simd.sub_f32x16(*self, rhs);
4354 }
4355}
4356impl<S: Simd> core::ops::Sub<f32> for f32x16<S> {
4357 type Output = Self;
4358 #[inline(always)]
4359 fn sub(self, rhs: f32) -> Self::Output {
4360 self.simd.sub_f32x16(self, rhs.simd_into(self.simd))
4361 }
4362}
4363impl<S: Simd> core::ops::SubAssign<f32> for f32x16<S> {
4364 #[inline(always)]
4365 fn sub_assign(&mut self, rhs: f32) {
4366 *self = self.simd.sub_f32x16(*self, rhs.simd_into(self.simd));
4367 }
4368}
4369impl<S: Simd> core::ops::Sub<f32x16<S>> for f32 {
4370 type Output = f32x16<S>;
4371 #[inline(always)]
4372 fn sub(self, rhs: f32x16<S>) -> Self::Output {
4373 rhs.simd.sub_f32x16(self.simd_into(rhs.simd), rhs)
4374 }
4375}
4376impl<S: Simd> core::ops::Mul for f32x16<S> {
4377 type Output = Self;
4378 #[inline(always)]
4379 fn mul(self, rhs: Self) -> Self::Output {
4380 self.simd.mul_f32x16(self, rhs)
4381 }
4382}
4383impl<S: Simd> core::ops::MulAssign for f32x16<S> {
4384 #[inline(always)]
4385 fn mul_assign(&mut self, rhs: Self) {
4386 *self = self.simd.mul_f32x16(*self, rhs);
4387 }
4388}
4389impl<S: Simd> core::ops::Mul<f32> for f32x16<S> {
4390 type Output = Self;
4391 #[inline(always)]
4392 fn mul(self, rhs: f32) -> Self::Output {
4393 self.simd.mul_f32x16(self, rhs.simd_into(self.simd))
4394 }
4395}
4396impl<S: Simd> core::ops::MulAssign<f32> for f32x16<S> {
4397 #[inline(always)]
4398 fn mul_assign(&mut self, rhs: f32) {
4399 *self = self.simd.mul_f32x16(*self, rhs.simd_into(self.simd));
4400 }
4401}
4402impl<S: Simd> core::ops::Mul<f32x16<S>> for f32 {
4403 type Output = f32x16<S>;
4404 #[inline(always)]
4405 fn mul(self, rhs: f32x16<S>) -> Self::Output {
4406 rhs.simd.mul_f32x16(self.simd_into(rhs.simd), rhs)
4407 }
4408}
4409impl<S: Simd> core::ops::Div for f32x16<S> {
4410 type Output = Self;
4411 #[inline(always)]
4412 fn div(self, rhs: Self) -> Self::Output {
4413 self.simd.div_f32x16(self, rhs)
4414 }
4415}
4416impl<S: Simd> core::ops::DivAssign for f32x16<S> {
4417 #[inline(always)]
4418 fn div_assign(&mut self, rhs: Self) {
4419 *self = self.simd.div_f32x16(*self, rhs);
4420 }
4421}
4422impl<S: Simd> core::ops::Div<f32> for f32x16<S> {
4423 type Output = Self;
4424 #[inline(always)]
4425 fn div(self, rhs: f32) -> Self::Output {
4426 self.simd.div_f32x16(self, rhs.simd_into(self.simd))
4427 }
4428}
4429impl<S: Simd> core::ops::DivAssign<f32> for f32x16<S> {
4430 #[inline(always)]
4431 fn div_assign(&mut self, rhs: f32) {
4432 *self = self.simd.div_f32x16(*self, rhs.simd_into(self.simd));
4433 }
4434}
4435impl<S: Simd> core::ops::Div<f32x16<S>> for f32 {
4436 type Output = f32x16<S>;
4437 #[inline(always)]
4438 fn div(self, rhs: f32x16<S>) -> Self::Output {
4439 rhs.simd.div_f32x16(self.simd_into(rhs.simd), rhs)
4440 }
4441}
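// Because the same operator surface is generated for every vector width, code
// written against these impls stays generic over the backend `S`. A minimal
// sketch of an "a*x + y" helper for the 16-lane f32 type that needs no
// backend-specific calls, only the operators defined in this file:
//
//     fn axpy<S: Simd>(a: f32, x: f32x16<S>, y: f32x16<S>) -> f32x16<S> {
//         a * x + y
//     }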
4442impl<S: Simd> core::ops::Neg for i8x64<S> {
4443 type Output = Self;
4444 #[inline(always)]
4445 fn neg(self) -> Self::Output {
4446 self.simd.neg_i8x64(self)
4447 }
4448}
4449impl<S: Simd> core::ops::Add for i8x64<S> {
4450 type Output = Self;
4451 #[inline(always)]
4452 fn add(self, rhs: Self) -> Self::Output {
4453 self.simd.add_i8x64(self, rhs)
4454 }
4455}
4456impl<S: Simd> core::ops::AddAssign for i8x64<S> {
4457 #[inline(always)]
4458 fn add_assign(&mut self, rhs: Self) {
4459 *self = self.simd.add_i8x64(*self, rhs);
4460 }
4461}
4462impl<S: Simd> core::ops::Add<i8> for i8x64<S> {
4463 type Output = Self;
4464 #[inline(always)]
4465 fn add(self, rhs: i8) -> Self::Output {
4466 self.simd.add_i8x64(self, rhs.simd_into(self.simd))
4467 }
4468}
4469impl<S: Simd> core::ops::AddAssign<i8> for i8x64<S> {
4470 #[inline(always)]
4471 fn add_assign(&mut self, rhs: i8) {
4472 *self = self.simd.add_i8x64(*self, rhs.simd_into(self.simd));
4473 }
4474}
4475impl<S: Simd> core::ops::Add<i8x64<S>> for i8 {
4476 type Output = i8x64<S>;
4477 #[inline(always)]
4478 fn add(self, rhs: i8x64<S>) -> Self::Output {
4479 rhs.simd.add_i8x64(self.simd_into(rhs.simd), rhs)
4480 }
4481}
4482impl<S: Simd> core::ops::Sub for i8x64<S> {
4483 type Output = Self;
4484 #[inline(always)]
4485 fn sub(self, rhs: Self) -> Self::Output {
4486 self.simd.sub_i8x64(self, rhs)
4487 }
4488}
4489impl<S: Simd> core::ops::SubAssign for i8x64<S> {
4490 #[inline(always)]
4491 fn sub_assign(&mut self, rhs: Self) {
4492 *self = self.simd.sub_i8x64(*self, rhs);
4493 }
4494}
4495impl<S: Simd> core::ops::Sub<i8> for i8x64<S> {
4496 type Output = Self;
4497 #[inline(always)]
4498 fn sub(self, rhs: i8) -> Self::Output {
4499 self.simd.sub_i8x64(self, rhs.simd_into(self.simd))
4500 }
4501}
4502impl<S: Simd> core::ops::SubAssign<i8> for i8x64<S> {
4503 #[inline(always)]
4504 fn sub_assign(&mut self, rhs: i8) {
4505 *self = self.simd.sub_i8x64(*self, rhs.simd_into(self.simd));
4506 }
4507}
4508impl<S: Simd> core::ops::Sub<i8x64<S>> for i8 {
4509 type Output = i8x64<S>;
4510 #[inline(always)]
4511 fn sub(self, rhs: i8x64<S>) -> Self::Output {
4512 rhs.simd.sub_i8x64(self.simd_into(rhs.simd), rhs)
4513 }
4514}
4515impl<S: Simd> core::ops::Mul for i8x64<S> {
4516 type Output = Self;
4517 #[inline(always)]
4518 fn mul(self, rhs: Self) -> Self::Output {
4519 self.simd.mul_i8x64(self, rhs)
4520 }
4521}
4522impl<S: Simd> core::ops::MulAssign for i8x64<S> {
4523 #[inline(always)]
4524 fn mul_assign(&mut self, rhs: Self) {
4525 *self = self.simd.mul_i8x64(*self, rhs);
4526 }
4527}
4528impl<S: Simd> core::ops::Mul<i8> for i8x64<S> {
4529 type Output = Self;
4530 #[inline(always)]
4531 fn mul(self, rhs: i8) -> Self::Output {
4532 self.simd.mul_i8x64(self, rhs.simd_into(self.simd))
4533 }
4534}
4535impl<S: Simd> core::ops::MulAssign<i8> for i8x64<S> {
4536 #[inline(always)]
4537 fn mul_assign(&mut self, rhs: i8) {
4538 *self = self.simd.mul_i8x64(*self, rhs.simd_into(self.simd));
4539 }
4540}
4541impl<S: Simd> core::ops::Mul<i8x64<S>> for i8 {
4542 type Output = i8x64<S>;
4543 #[inline(always)]
4544 fn mul(self, rhs: i8x64<S>) -> Self::Output {
4545 rhs.simd.mul_i8x64(self.simd_into(rhs.simd), rhs)
4546 }
4547}
4548impl<S: Simd> core::ops::BitAnd for i8x64<S> {
4549 type Output = Self;
4550 #[inline(always)]
4551 fn bitand(self, rhs: Self) -> Self::Output {
4552 self.simd.and_i8x64(self, rhs)
4553 }
4554}
4555impl<S: Simd> core::ops::BitAndAssign for i8x64<S> {
4556 #[inline(always)]
4557 fn bitand_assign(&mut self, rhs: Self) {
4558 *self = self.simd.and_i8x64(*self, rhs);
4559 }
4560}
4561impl<S: Simd> core::ops::BitAnd<i8> for i8x64<S> {
4562 type Output = Self;
4563 #[inline(always)]
4564 fn bitand(self, rhs: i8) -> Self::Output {
4565 self.simd.and_i8x64(self, rhs.simd_into(self.simd))
4566 }
4567}
4568impl<S: Simd> core::ops::BitAndAssign<i8> for i8x64<S> {
4569 #[inline(always)]
4570 fn bitand_assign(&mut self, rhs: i8) {
4571 *self = self.simd.and_i8x64(*self, rhs.simd_into(self.simd));
4572 }
4573}
4574impl<S: Simd> core::ops::BitAnd<i8x64<S>> for i8 {
4575 type Output = i8x64<S>;
4576 #[inline(always)]
4577 fn bitand(self, rhs: i8x64<S>) -> Self::Output {
4578 rhs.simd.and_i8x64(self.simd_into(rhs.simd), rhs)
4579 }
4580}
4581impl<S: Simd> core::ops::BitOr for i8x64<S> {
4582 type Output = Self;
4583 #[inline(always)]
4584 fn bitor(self, rhs: Self) -> Self::Output {
4585 self.simd.or_i8x64(self, rhs)
4586 }
4587}
4588impl<S: Simd> core::ops::BitOrAssign for i8x64<S> {
4589 #[inline(always)]
4590 fn bitor_assign(&mut self, rhs: Self) {
4591 *self = self.simd.or_i8x64(*self, rhs);
4592 }
4593}
4594impl<S: Simd> core::ops::BitOr<i8> for i8x64<S> {
4595 type Output = Self;
4596 #[inline(always)]
4597 fn bitor(self, rhs: i8) -> Self::Output {
4598 self.simd.or_i8x64(self, rhs.simd_into(self.simd))
4599 }
4600}
4601impl<S: Simd> core::ops::BitOrAssign<i8> for i8x64<S> {
4602 #[inline(always)]
4603 fn bitor_assign(&mut self, rhs: i8) {
4604 *self = self.simd.or_i8x64(*self, rhs.simd_into(self.simd));
4605 }
4606}
4607impl<S: Simd> core::ops::BitOr<i8x64<S>> for i8 {
4608 type Output = i8x64<S>;
4609 #[inline(always)]
4610 fn bitor(self, rhs: i8x64<S>) -> Self::Output {
4611 rhs.simd.or_i8x64(self.simd_into(rhs.simd), rhs)
4612 }
4613}
4614impl<S: Simd> core::ops::BitXor for i8x64<S> {
4615 type Output = Self;
4616 #[inline(always)]
4617 fn bitxor(self, rhs: Self) -> Self::Output {
4618 self.simd.xor_i8x64(self, rhs)
4619 }
4620}
4621impl<S: Simd> core::ops::BitXorAssign for i8x64<S> {
4622 #[inline(always)]
4623 fn bitxor_assign(&mut self, rhs: Self) {
4624 *self = self.simd.xor_i8x64(*self, rhs);
4625 }
4626}
4627impl<S: Simd> core::ops::BitXor<i8> for i8x64<S> {
4628 type Output = Self;
4629 #[inline(always)]
4630 fn bitxor(self, rhs: i8) -> Self::Output {
4631 self.simd.xor_i8x64(self, rhs.simd_into(self.simd))
4632 }
4633}
4634impl<S: Simd> core::ops::BitXorAssign<i8> for i8x64<S> {
4635 #[inline(always)]
4636 fn bitxor_assign(&mut self, rhs: i8) {
4637 *self = self.simd.xor_i8x64(*self, rhs.simd_into(self.simd));
4638 }
4639}
4640impl<S: Simd> core::ops::BitXor<i8x64<S>> for i8 {
4641 type Output = i8x64<S>;
4642 #[inline(always)]
4643 fn bitxor(self, rhs: i8x64<S>) -> Self::Output {
4644 rhs.simd.xor_i8x64(self.simd_into(rhs.simd), rhs)
4645 }
4646}
4647impl<S: Simd> core::ops::Shl<u32> for i8x64<S> {
4648 type Output = Self;
4649 #[inline(always)]
4650 fn shl(self, rhs: u32) -> Self::Output {
4651 self.simd.shl_i8x64(self, rhs)
4652 }
4653}
4654impl<S: Simd> core::ops::Shr<u32> for i8x64<S> {
4655 type Output = Self;
4656 #[inline(always)]
4657 fn shr(self, rhs: u32) -> Self::Output {
4658 self.simd.shr_i8x64(self, rhs)
4659 }
4660}
4661impl<S: Simd> core::ops::ShlAssign<u32> for i8x64<S> {
4662 #[inline(always)]
4663 fn shl_assign(&mut self, rhs: u32) {
4664 *self = self.simd.shl_i8x64(*self, rhs);
4665 }
4666}
4667impl<S: Simd> core::ops::ShrAssign<u32> for i8x64<S> {
4668 #[inline(always)]
4669 fn shr_assign(&mut self, rhs: u32) {
4670 *self = self.simd.shr_i8x64(*self, rhs);
4671 }
4672}
4673impl<S: Simd> core::ops::Shr for i8x64<S> {
4674 type Output = Self;
4675 #[inline(always)]
4676 fn shr(self, rhs: Self) -> Self::Output {
4677 self.simd.shrv_i8x64(self, rhs)
4678 }
4679}
4680impl<S: Simd> core::ops::ShrAssign for i8x64<S> {
4681 #[inline(always)]
4682 fn shr_assign(&mut self, rhs: Self) {
4683 *self = self.simd.shrv_i8x64(*self, rhs);
4684 }
4685}
4686impl<S: Simd> core::ops::Add for u8x64<S> {
4687 type Output = Self;
4688 #[inline(always)]
4689 fn add(self, rhs: Self) -> Self::Output {
4690 self.simd.add_u8x64(self, rhs)
4691 }
4692}
4693impl<S: Simd> core::ops::AddAssign for u8x64<S> {
4694 #[inline(always)]
4695 fn add_assign(&mut self, rhs: Self) {
4696 *self = self.simd.add_u8x64(*self, rhs);
4697 }
4698}
4699impl<S: Simd> core::ops::Add<u8> for u8x64<S> {
4700 type Output = Self;
4701 #[inline(always)]
4702 fn add(self, rhs: u8) -> Self::Output {
4703 self.simd.add_u8x64(self, rhs.simd_into(self.simd))
4704 }
4705}
4706impl<S: Simd> core::ops::AddAssign<u8> for u8x64<S> {
4707 #[inline(always)]
4708 fn add_assign(&mut self, rhs: u8) {
4709 *self = self.simd.add_u8x64(*self, rhs.simd_into(self.simd));
4710 }
4711}
4712impl<S: Simd> core::ops::Add<u8x64<S>> for u8 {
4713 type Output = u8x64<S>;
4714 #[inline(always)]
4715 fn add(self, rhs: u8x64<S>) -> Self::Output {
4716 rhs.simd.add_u8x64(self.simd_into(rhs.simd), rhs)
4717 }
4718}
4719impl<S: Simd> core::ops::Sub for u8x64<S> {
4720 type Output = Self;
4721 #[inline(always)]
4722 fn sub(self, rhs: Self) -> Self::Output {
4723 self.simd.sub_u8x64(self, rhs)
4724 }
4725}
4726impl<S: Simd> core::ops::SubAssign for u8x64<S> {
4727 #[inline(always)]
4728 fn sub_assign(&mut self, rhs: Self) {
4729 *self = self.simd.sub_u8x64(*self, rhs);
4730 }
4731}
4732impl<S: Simd> core::ops::Sub<u8> for u8x64<S> {
4733 type Output = Self;
4734 #[inline(always)]
4735 fn sub(self, rhs: u8) -> Self::Output {
4736 self.simd.sub_u8x64(self, rhs.simd_into(self.simd))
4737 }
4738}
4739impl<S: Simd> core::ops::SubAssign<u8> for u8x64<S> {
4740 #[inline(always)]
4741 fn sub_assign(&mut self, rhs: u8) {
4742 *self = self.simd.sub_u8x64(*self, rhs.simd_into(self.simd));
4743 }
4744}
4745impl<S: Simd> core::ops::Sub<u8x64<S>> for u8 {
4746 type Output = u8x64<S>;
4747 #[inline(always)]
4748 fn sub(self, rhs: u8x64<S>) -> Self::Output {
4749 rhs.simd.sub_u8x64(self.simd_into(rhs.simd), rhs)
4750 }
4751}
4752impl<S: Simd> core::ops::Mul for u8x64<S> {
4753 type Output = Self;
4754 #[inline(always)]
4755 fn mul(self, rhs: Self) -> Self::Output {
4756 self.simd.mul_u8x64(self, rhs)
4757 }
4758}
4759impl<S: Simd> core::ops::MulAssign for u8x64<S> {
4760 #[inline(always)]
4761 fn mul_assign(&mut self, rhs: Self) {
4762 *self = self.simd.mul_u8x64(*self, rhs);
4763 }
4764}
4765impl<S: Simd> core::ops::Mul<u8> for u8x64<S> {
4766 type Output = Self;
4767 #[inline(always)]
4768 fn mul(self, rhs: u8) -> Self::Output {
4769 self.simd.mul_u8x64(self, rhs.simd_into(self.simd))
4770 }
4771}
4772impl<S: Simd> core::ops::MulAssign<u8> for u8x64<S> {
4773 #[inline(always)]
4774 fn mul_assign(&mut self, rhs: u8) {
4775 *self = self.simd.mul_u8x64(*self, rhs.simd_into(self.simd));
4776 }
4777}
4778impl<S: Simd> core::ops::Mul<u8x64<S>> for u8 {
4779 type Output = u8x64<S>;
4780 #[inline(always)]
4781 fn mul(self, rhs: u8x64<S>) -> Self::Output {
4782 rhs.simd.mul_u8x64(self.simd_into(rhs.simd), rhs)
4783 }
4784}
4785impl<S: Simd> core::ops::BitAnd for u8x64<S> {
4786 type Output = Self;
4787 #[inline(always)]
4788 fn bitand(self, rhs: Self) -> Self::Output {
4789 self.simd.and_u8x64(self, rhs)
4790 }
4791}
4792impl<S: Simd> core::ops::BitAndAssign for u8x64<S> {
4793 #[inline(always)]
4794 fn bitand_assign(&mut self, rhs: Self) {
4795 *self = self.simd.and_u8x64(*self, rhs);
4796 }
4797}
4798impl<S: Simd> core::ops::BitAnd<u8> for u8x64<S> {
4799 type Output = Self;
4800 #[inline(always)]
4801 fn bitand(self, rhs: u8) -> Self::Output {
4802 self.simd.and_u8x64(self, rhs.simd_into(self.simd))
4803 }
4804}
4805impl<S: Simd> core::ops::BitAndAssign<u8> for u8x64<S> {
4806 #[inline(always)]
4807 fn bitand_assign(&mut self, rhs: u8) {
4808 *self = self.simd.and_u8x64(*self, rhs.simd_into(self.simd));
4809 }
4810}
4811impl<S: Simd> core::ops::BitAnd<u8x64<S>> for u8 {
4812 type Output = u8x64<S>;
4813 #[inline(always)]
4814 fn bitand(self, rhs: u8x64<S>) -> Self::Output {
4815 rhs.simd.and_u8x64(self.simd_into(rhs.simd), rhs)
4816 }
4817}
4818impl<S: Simd> core::ops::BitOr for u8x64<S> {
4819 type Output = Self;
4820 #[inline(always)]
4821 fn bitor(self, rhs: Self) -> Self::Output {
4822 self.simd.or_u8x64(self, rhs)
4823 }
4824}
4825impl<S: Simd> core::ops::BitOrAssign for u8x64<S> {
4826 #[inline(always)]
4827 fn bitor_assign(&mut self, rhs: Self) {
4828 *self = self.simd.or_u8x64(*self, rhs);
4829 }
4830}
4831impl<S: Simd> core::ops::BitOr<u8> for u8x64<S> {
4832 type Output = Self;
4833 #[inline(always)]
4834 fn bitor(self, rhs: u8) -> Self::Output {
4835 self.simd.or_u8x64(self, rhs.simd_into(self.simd))
4836 }
4837}
4838impl<S: Simd> core::ops::BitOrAssign<u8> for u8x64<S> {
4839 #[inline(always)]
4840 fn bitor_assign(&mut self, rhs: u8) {
4841 *self = self.simd.or_u8x64(*self, rhs.simd_into(self.simd));
4842 }
4843}
4844impl<S: Simd> core::ops::BitOr<u8x64<S>> for u8 {
4845 type Output = u8x64<S>;
4846 #[inline(always)]
4847 fn bitor(self, rhs: u8x64<S>) -> Self::Output {
4848 rhs.simd.or_u8x64(self.simd_into(rhs.simd), rhs)
4849 }
4850}
4851impl<S: Simd> core::ops::BitXor for u8x64<S> {
4852 type Output = Self;
4853 #[inline(always)]
4854 fn bitxor(self, rhs: Self) -> Self::Output {
4855 self.simd.xor_u8x64(self, rhs)
4856 }
4857}
4858impl<S: Simd> core::ops::BitXorAssign for u8x64<S> {
4859 #[inline(always)]
4860 fn bitxor_assign(&mut self, rhs: Self) {
4861 *self = self.simd.xor_u8x64(*self, rhs);
4862 }
4863}
4864impl<S: Simd> core::ops::BitXor<u8> for u8x64<S> {
4865 type Output = Self;
4866 #[inline(always)]
4867 fn bitxor(self, rhs: u8) -> Self::Output {
4868 self.simd.xor_u8x64(self, rhs.simd_into(self.simd))
4869 }
4870}
4871impl<S: Simd> core::ops::BitXorAssign<u8> for u8x64<S> {
4872 #[inline(always)]
4873 fn bitxor_assign(&mut self, rhs: u8) {
4874 *self = self.simd.xor_u8x64(*self, rhs.simd_into(self.simd));
4875 }
4876}
4877impl<S: Simd> core::ops::BitXor<u8x64<S>> for u8 {
4878 type Output = u8x64<S>;
4879 #[inline(always)]
4880 fn bitxor(self, rhs: u8x64<S>) -> Self::Output {
4881 rhs.simd.xor_u8x64(self.simd_into(rhs.simd), rhs)
4882 }
4883}
4884impl<S: Simd> core::ops::Shl<u32> for u8x64<S> {
4885 type Output = Self;
4886 #[inline(always)]
4887 fn shl(self, rhs: u32) -> Self::Output {
4888 self.simd.shl_u8x64(self, rhs)
4889 }
4890}
4891impl<S: Simd> core::ops::Shr<u32> for u8x64<S> {
4892 type Output = Self;
4893 #[inline(always)]
4894 fn shr(self, rhs: u32) -> Self::Output {
4895 self.simd.shr_u8x64(self, rhs)
4896 }
4897}
4898impl<S: Simd> core::ops::ShlAssign<u32> for u8x64<S> {
4899 #[inline(always)]
4900 fn shl_assign(&mut self, rhs: u32) {
4901 *self = self.simd.shl_u8x64(*self, rhs);
4902 }
4903}
4904impl<S: Simd> core::ops::ShrAssign<u32> for u8x64<S> {
4905 #[inline(always)]
4906 fn shr_assign(&mut self, rhs: u32) {
4907 *self = self.simd.shr_u8x64(*self, rhs);
4908 }
4909}
4910impl<S: Simd> core::ops::Shr for u8x64<S> {
4911 type Output = Self;
4912 #[inline(always)]
4913 fn shr(self, rhs: Self) -> Self::Output {
4914 self.simd.shrv_u8x64(self, rhs)
4915 }
4916}
4917impl<S: Simd> core::ops::ShrAssign for u8x64<S> {
4918 #[inline(always)]
4919 fn shr_assign(&mut self, rhs: Self) {
4920 *self = self.simd.shrv_u8x64(*self, rhs);
4921 }
4922}
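// Every compound-assignment impl above is the read-modify-write form
// `*self = op(*self, rhs)`, so `+=`, `&=`, `<<=`, and friends behave exactly like
// the corresponding binary operator followed by a store back into the variable.
// A minimal sketch, assuming `acc` and `delta` are existing `u8x64<S>` values:
//
//     acc += delta;      // same as: acc = acc + delta;
//     acc &= 0x0Fu8;     // scalar rhs is splat across all 64 lanes
//     acc <<= 1u32;      // uniform shift applied to every lane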
4923impl<S: Simd> core::ops::BitAnd for mask8x64<S> {
4924 type Output = Self;
4925 #[inline(always)]
4926 fn bitand(self, rhs: Self) -> Self::Output {
4927 self.simd.and_mask8x64(self, rhs)
4928 }
4929}
4930impl<S: Simd> core::ops::BitAndAssign for mask8x64<S> {
4931 #[inline(always)]
4932 fn bitand_assign(&mut self, rhs: Self) {
4933 *self = self.simd.and_mask8x64(*self, rhs);
4934 }
4935}
4936impl<S: Simd> core::ops::BitAnd<i8> for mask8x64<S> {
4937 type Output = Self;
4938 #[inline(always)]
4939 fn bitand(self, rhs: i8) -> Self::Output {
4940 self.simd.and_mask8x64(self, rhs.simd_into(self.simd))
4941 }
4942}
4943impl<S: Simd> core::ops::BitAndAssign<i8> for mask8x64<S> {
4944 #[inline(always)]
4945 fn bitand_assign(&mut self, rhs: i8) {
4946 *self = self.simd.and_mask8x64(*self, rhs.simd_into(self.simd));
4947 }
4948}
4949impl<S: Simd> core::ops::BitAnd<mask8x64<S>> for i8 {
4950 type Output = mask8x64<S>;
4951 #[inline(always)]
4952 fn bitand(self, rhs: mask8x64<S>) -> Self::Output {
4953 rhs.simd.and_mask8x64(self.simd_into(rhs.simd), rhs)
4954 }
4955}
4956impl<S: Simd> core::ops::BitOr for mask8x64<S> {
4957 type Output = Self;
4958 #[inline(always)]
4959 fn bitor(self, rhs: Self) -> Self::Output {
4960 self.simd.or_mask8x64(self, rhs)
4961 }
4962}
4963impl<S: Simd> core::ops::BitOrAssign for mask8x64<S> {
4964 #[inline(always)]
4965 fn bitor_assign(&mut self, rhs: Self) {
4966 *self = self.simd.or_mask8x64(*self, rhs);
4967 }
4968}
4969impl<S: Simd> core::ops::BitOr<i8> for mask8x64<S> {
4970 type Output = Self;
4971 #[inline(always)]
4972 fn bitor(self, rhs: i8) -> Self::Output {
4973 self.simd.or_mask8x64(self, rhs.simd_into(self.simd))
4974 }
4975}
4976impl<S: Simd> core::ops::BitOrAssign<i8> for mask8x64<S> {
4977 #[inline(always)]
4978 fn bitor_assign(&mut self, rhs: i8) {
4979 *self = self.simd.or_mask8x64(*self, rhs.simd_into(self.simd));
4980 }
4981}
4982impl<S: Simd> core::ops::BitOr<mask8x64<S>> for i8 {
4983 type Output = mask8x64<S>;
4984 #[inline(always)]
4985 fn bitor(self, rhs: mask8x64<S>) -> Self::Output {
4986 rhs.simd.or_mask8x64(self.simd_into(rhs.simd), rhs)
4987 }
4988}
4989impl<S: Simd> core::ops::BitXor for mask8x64<S> {
4990 type Output = Self;
4991 #[inline(always)]
4992 fn bitxor(self, rhs: Self) -> Self::Output {
4993 self.simd.xor_mask8x64(self, rhs)
4994 }
4995}
4996impl<S: Simd> core::ops::BitXorAssign for mask8x64<S> {
4997 #[inline(always)]
4998 fn bitxor_assign(&mut self, rhs: Self) {
4999 *self = self.simd.xor_mask8x64(*self, rhs);
5000 }
5001}
5002impl<S: Simd> core::ops::BitXor<i8> for mask8x64<S> {
5003 type Output = Self;
5004 #[inline(always)]
5005 fn bitxor(self, rhs: i8) -> Self::Output {
5006 self.simd.xor_mask8x64(self, rhs.simd_into(self.simd))
5007 }
5008}
5009impl<S: Simd> core::ops::BitXorAssign<i8> for mask8x64<S> {
5010 #[inline(always)]
5011 fn bitxor_assign(&mut self, rhs: i8) {
5012 *self = self.simd.xor_mask8x64(*self, rhs.simd_into(self.simd));
5013 }
5014}
5015impl<S: Simd> core::ops::BitXor<mask8x64<S>> for i8 {
5016 type Output = mask8x64<S>;
5017 #[inline(always)]
5018 fn bitxor(self, rhs: mask8x64<S>) -> Self::Output {
5019 rhs.simd.xor_mask8x64(self.simd_into(rhs.simd), rhs)
5020 }
5021}
5022impl<S: Simd> core::ops::Not for mask8x64<S> {
5023 type Output = Self;
5024 #[inline(always)]
5025 fn not(self) -> Self::Output {
5026 self.simd.not_mask8x64(self)
5027 }
5028}
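// All of these operator impls are thin `#[inline(always)]` wrappers that forward
// to the matching method on the backend handle stored in the vector's `simd`
// field, so the operator form costs the same as calling the backend directly.
// A minimal sketch, assuming `m` and `n` are existing `mask8x64<S>` values:
//
//     let x = m ^ n;                      // operator form
//     let y = m.simd.xor_mask8x64(m, n);  // equivalent direct backend call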
5029impl<S: Simd> core::ops::Neg for i16x32<S> {
5030 type Output = Self;
5031 #[inline(always)]
5032 fn neg(self) -> Self::Output {
5033 self.simd.neg_i16x32(self)
5034 }
5035}
5036impl<S: Simd> core::ops::Add for i16x32<S> {
5037 type Output = Self;
5038 #[inline(always)]
5039 fn add(self, rhs: Self) -> Self::Output {
5040 self.simd.add_i16x32(self, rhs)
5041 }
5042}
5043impl<S: Simd> core::ops::AddAssign for i16x32<S> {
5044 #[inline(always)]
5045 fn add_assign(&mut self, rhs: Self) {
5046 *self = self.simd.add_i16x32(*self, rhs);
5047 }
5048}
5049impl<S: Simd> core::ops::Add<i16> for i16x32<S> {
5050 type Output = Self;
5051 #[inline(always)]
5052 fn add(self, rhs: i16) -> Self::Output {
5053 self.simd.add_i16x32(self, rhs.simd_into(self.simd))
5054 }
5055}
5056impl<S: Simd> core::ops::AddAssign<i16> for i16x32<S> {
5057 #[inline(always)]
5058 fn add_assign(&mut self, rhs: i16) {
5059 *self = self.simd.add_i16x32(*self, rhs.simd_into(self.simd));
5060 }
5061}
5062impl<S: Simd> core::ops::Add<i16x32<S>> for i16 {
5063 type Output = i16x32<S>;
5064 #[inline(always)]
5065 fn add(self, rhs: i16x32<S>) -> Self::Output {
5066 rhs.simd.add_i16x32(self.simd_into(rhs.simd), rhs)
5067 }
5068}
5069impl<S: Simd> core::ops::Sub for i16x32<S> {
5070 type Output = Self;
5071 #[inline(always)]
5072 fn sub(self, rhs: Self) -> Self::Output {
5073 self.simd.sub_i16x32(self, rhs)
5074 }
5075}
5076impl<S: Simd> core::ops::SubAssign for i16x32<S> {
5077 #[inline(always)]
5078 fn sub_assign(&mut self, rhs: Self) {
5079 *self = self.simd.sub_i16x32(*self, rhs);
5080 }
5081}
5082impl<S: Simd> core::ops::Sub<i16> for i16x32<S> {
5083 type Output = Self;
5084 #[inline(always)]
5085 fn sub(self, rhs: i16) -> Self::Output {
5086 self.simd.sub_i16x32(self, rhs.simd_into(self.simd))
5087 }
5088}
5089impl<S: Simd> core::ops::SubAssign<i16> for i16x32<S> {
5090 #[inline(always)]
5091 fn sub_assign(&mut self, rhs: i16) {
5092 *self = self.simd.sub_i16x32(*self, rhs.simd_into(self.simd));
5093 }
5094}
5095impl<S: Simd> core::ops::Sub<i16x32<S>> for i16 {
5096 type Output = i16x32<S>;
5097 #[inline(always)]
5098 fn sub(self, rhs: i16x32<S>) -> Self::Output {
5099 rhs.simd.sub_i16x32(self.simd_into(rhs.simd), rhs)
5100 }
5101}
5102impl<S: Simd> core::ops::Mul for i16x32<S> {
5103 type Output = Self;
5104 #[inline(always)]
5105 fn mul(self, rhs: Self) -> Self::Output {
5106 self.simd.mul_i16x32(self, rhs)
5107 }
5108}
5109impl<S: Simd> core::ops::MulAssign for i16x32<S> {
5110 #[inline(always)]
5111 fn mul_assign(&mut self, rhs: Self) {
5112 *self = self.simd.mul_i16x32(*self, rhs);
5113 }
5114}
5115impl<S: Simd> core::ops::Mul<i16> for i16x32<S> {
5116 type Output = Self;
5117 #[inline(always)]
5118 fn mul(self, rhs: i16) -> Self::Output {
5119 self.simd.mul_i16x32(self, rhs.simd_into(self.simd))
5120 }
5121}
5122impl<S: Simd> core::ops::MulAssign<i16> for i16x32<S> {
5123 #[inline(always)]
5124 fn mul_assign(&mut self, rhs: i16) {
5125 *self = self.simd.mul_i16x32(*self, rhs.simd_into(self.simd));
5126 }
5127}
5128impl<S: Simd> core::ops::Mul<i16x32<S>> for i16 {
5129 type Output = i16x32<S>;
5130 #[inline(always)]
5131 fn mul(self, rhs: i16x32<S>) -> Self::Output {
5132 rhs.simd.mul_i16x32(self.simd_into(rhs.simd), rhs)
5133 }
5134}
5135impl<S: Simd> core::ops::BitAnd for i16x32<S> {
5136 type Output = Self;
5137 #[inline(always)]
5138 fn bitand(self, rhs: Self) -> Self::Output {
5139 self.simd.and_i16x32(self, rhs)
5140 }
5141}
5142impl<S: Simd> core::ops::BitAndAssign for i16x32<S> {
5143 #[inline(always)]
5144 fn bitand_assign(&mut self, rhs: Self) {
5145 *self = self.simd.and_i16x32(*self, rhs);
5146 }
5147}
5148impl<S: Simd> core::ops::BitAnd<i16> for i16x32<S> {
5149 type Output = Self;
5150 #[inline(always)]
5151 fn bitand(self, rhs: i16) -> Self::Output {
5152 self.simd.and_i16x32(self, rhs.simd_into(self.simd))
5153 }
5154}
5155impl<S: Simd> core::ops::BitAndAssign<i16> for i16x32<S> {
5156 #[inline(always)]
5157 fn bitand_assign(&mut self, rhs: i16) {
5158 *self = self.simd.and_i16x32(*self, rhs.simd_into(self.simd));
5159 }
5160}
5161impl<S: Simd> core::ops::BitAnd<i16x32<S>> for i16 {
5162 type Output = i16x32<S>;
5163 #[inline(always)]
5164 fn bitand(self, rhs: i16x32<S>) -> Self::Output {
5165 rhs.simd.and_i16x32(self.simd_into(rhs.simd), rhs)
5166 }
5167}
5168impl<S: Simd> core::ops::BitOr for i16x32<S> {
5169 type Output = Self;
5170 #[inline(always)]
5171 fn bitor(self, rhs: Self) -> Self::Output {
5172 self.simd.or_i16x32(self, rhs)
5173 }
5174}
5175impl<S: Simd> core::ops::BitOrAssign for i16x32<S> {
5176 #[inline(always)]
5177 fn bitor_assign(&mut self, rhs: Self) {
5178 *self = self.simd.or_i16x32(*self, rhs);
5179 }
5180}
5181impl<S: Simd> core::ops::BitOr<i16> for i16x32<S> {
5182 type Output = Self;
5183 #[inline(always)]
5184 fn bitor(self, rhs: i16) -> Self::Output {
5185 self.simd.or_i16x32(self, rhs.simd_into(self.simd))
5186 }
5187}
5188impl<S: Simd> core::ops::BitOrAssign<i16> for i16x32<S> {
5189 #[inline(always)]
5190 fn bitor_assign(&mut self, rhs: i16) {
5191 *self = self.simd.or_i16x32(*self, rhs.simd_into(self.simd));
5192 }
5193}
5194impl<S: Simd> core::ops::BitOr<i16x32<S>> for i16 {
5195 type Output = i16x32<S>;
5196 #[inline(always)]
5197 fn bitor(self, rhs: i16x32<S>) -> Self::Output {
5198 rhs.simd.or_i16x32(self.simd_into(rhs.simd), rhs)
5199 }
5200}
5201impl<S: Simd> core::ops::BitXor for i16x32<S> {
5202 type Output = Self;
5203 #[inline(always)]
5204 fn bitxor(self, rhs: Self) -> Self::Output {
5205 self.simd.xor_i16x32(self, rhs)
5206 }
5207}
5208impl<S: Simd> core::ops::BitXorAssign for i16x32<S> {
5209 #[inline(always)]
5210 fn bitxor_assign(&mut self, rhs: Self) {
5211 *self = self.simd.xor_i16x32(*self, rhs);
5212 }
5213}
5214impl<S: Simd> core::ops::BitXor<i16> for i16x32<S> {
5215 type Output = Self;
5216 #[inline(always)]
5217 fn bitxor(self, rhs: i16) -> Self::Output {
5218 self.simd.xor_i16x32(self, rhs.simd_into(self.simd))
5219 }
5220}
5221impl<S: Simd> core::ops::BitXorAssign<i16> for i16x32<S> {
5222 #[inline(always)]
5223 fn bitxor_assign(&mut self, rhs: i16) {
5224 *self = self.simd.xor_i16x32(*self, rhs.simd_into(self.simd));
5225 }
5226}
5227impl<S: Simd> core::ops::BitXor<i16x32<S>> for i16 {
5228 type Output = i16x32<S>;
5229 #[inline(always)]
5230 fn bitxor(self, rhs: i16x32<S>) -> Self::Output {
5231 rhs.simd.xor_i16x32(self.simd_into(rhs.simd), rhs)
5232 }
5233}
5234impl<S: Simd> core::ops::Shl<u32> for i16x32<S> {
5235 type Output = Self;
5236 #[inline(always)]
5237 fn shl(self, rhs: u32) -> Self::Output {
5238 self.simd.shl_i16x32(self, rhs)
5239 }
5240}
5241impl<S: Simd> core::ops::Shr<u32> for i16x32<S> {
5242 type Output = Self;
5243 #[inline(always)]
5244 fn shr(self, rhs: u32) -> Self::Output {
5245 self.simd.shr_i16x32(self, rhs)
5246 }
5247}
5248impl<S: Simd> core::ops::ShlAssign<u32> for i16x32<S> {
5249 #[inline(always)]
5250 fn shl_assign(&mut self, rhs: u32) {
5251 *self = self.simd.shl_i16x32(*self, rhs);
5252 }
5253}
5254impl<S: Simd> core::ops::ShrAssign<u32> for i16x32<S> {
5255 #[inline(always)]
5256 fn shr_assign(&mut self, rhs: u32) {
5257 *self = self.simd.shr_i16x32(*self, rhs);
5258 }
5259}
5260impl<S: Simd> core::ops::Shr for i16x32<S> {
5261 type Output = Self;
5262 #[inline(always)]
5263 fn shr(self, rhs: Self) -> Self::Output {
5264 self.simd.shrv_i16x32(self, rhs)
5265 }
5266}
5267impl<S: Simd> core::ops::ShrAssign for i16x32<S> {
5268 #[inline(always)]
5269 fn shr_assign(&mut self, rhs: Self) {
5270 *self = self.simd.shrv_i16x32(*self, rhs);
5271 }
5272}
5273impl<S: Simd> core::ops::Add for u16x32<S> {
5274 type Output = Self;
5275 #[inline(always)]
5276 fn add(self, rhs: Self) -> Self::Output {
5277 self.simd.add_u16x32(self, rhs)
5278 }
5279}
5280impl<S: Simd> core::ops::AddAssign for u16x32<S> {
5281 #[inline(always)]
5282 fn add_assign(&mut self, rhs: Self) {
5283 *self = self.simd.add_u16x32(*self, rhs);
5284 }
5285}
5286impl<S: Simd> core::ops::Add<u16> for u16x32<S> {
5287 type Output = Self;
5288 #[inline(always)]
5289 fn add(self, rhs: u16) -> Self::Output {
5290 self.simd.add_u16x32(self, rhs.simd_into(self.simd))
5291 }
5292}
5293impl<S: Simd> core::ops::AddAssign<u16> for u16x32<S> {
5294 #[inline(always)]
5295 fn add_assign(&mut self, rhs: u16) {
5296 *self = self.simd.add_u16x32(*self, rhs.simd_into(self.simd));
5297 }
5298}
5299impl<S: Simd> core::ops::Add<u16x32<S>> for u16 {
5300 type Output = u16x32<S>;
5301 #[inline(always)]
5302 fn add(self, rhs: u16x32<S>) -> Self::Output {
5303 rhs.simd.add_u16x32(self.simd_into(rhs.simd), rhs)
5304 }
5305}
5306impl<S: Simd> core::ops::Sub for u16x32<S> {
5307 type Output = Self;
5308 #[inline(always)]
5309 fn sub(self, rhs: Self) -> Self::Output {
5310 self.simd.sub_u16x32(self, rhs)
5311 }
5312}
5313impl<S: Simd> core::ops::SubAssign for u16x32<S> {
5314 #[inline(always)]
5315 fn sub_assign(&mut self, rhs: Self) {
5316 *self = self.simd.sub_u16x32(*self, rhs);
5317 }
5318}
5319impl<S: Simd> core::ops::Sub<u16> for u16x32<S> {
5320 type Output = Self;
5321 #[inline(always)]
5322 fn sub(self, rhs: u16) -> Self::Output {
5323 self.simd.sub_u16x32(self, rhs.simd_into(self.simd))
5324 }
5325}
5326impl<S: Simd> core::ops::SubAssign<u16> for u16x32<S> {
5327 #[inline(always)]
5328 fn sub_assign(&mut self, rhs: u16) {
5329 *self = self.simd.sub_u16x32(*self, rhs.simd_into(self.simd));
5330 }
5331}
5332impl<S: Simd> core::ops::Sub<u16x32<S>> for u16 {
5333 type Output = u16x32<S>;
5334 #[inline(always)]
5335 fn sub(self, rhs: u16x32<S>) -> Self::Output {
5336 rhs.simd.sub_u16x32(self.simd_into(rhs.simd), rhs)
5337 }
5338}
5339impl<S: Simd> core::ops::Mul for u16x32<S> {
5340 type Output = Self;
5341 #[inline(always)]
5342 fn mul(self, rhs: Self) -> Self::Output {
5343 self.simd.mul_u16x32(self, rhs)
5344 }
5345}
5346impl<S: Simd> core::ops::MulAssign for u16x32<S> {
5347 #[inline(always)]
5348 fn mul_assign(&mut self, rhs: Self) {
5349 *self = self.simd.mul_u16x32(*self, rhs);
5350 }
5351}
5352impl<S: Simd> core::ops::Mul<u16> for u16x32<S> {
5353 type Output = Self;
5354 #[inline(always)]
5355 fn mul(self, rhs: u16) -> Self::Output {
5356 self.simd.mul_u16x32(self, rhs.simd_into(self.simd))
5357 }
5358}
5359impl<S: Simd> core::ops::MulAssign<u16> for u16x32<S> {
5360 #[inline(always)]
5361 fn mul_assign(&mut self, rhs: u16) {
5362 *self = self.simd.mul_u16x32(*self, rhs.simd_into(self.simd));
5363 }
5364}
5365impl<S: Simd> core::ops::Mul<u16x32<S>> for u16 {
5366 type Output = u16x32<S>;
5367 #[inline(always)]
5368 fn mul(self, rhs: u16x32<S>) -> Self::Output {
5369 rhs.simd.mul_u16x32(self.simd_into(rhs.simd), rhs)
5370 }
5371}
5372impl<S: Simd> core::ops::BitAnd for u16x32<S> {
5373 type Output = Self;
5374 #[inline(always)]
5375 fn bitand(self, rhs: Self) -> Self::Output {
5376 self.simd.and_u16x32(self, rhs)
5377 }
5378}
5379impl<S: Simd> core::ops::BitAndAssign for u16x32<S> {
5380 #[inline(always)]
5381 fn bitand_assign(&mut self, rhs: Self) {
5382 *self = self.simd.and_u16x32(*self, rhs);
5383 }
5384}
5385impl<S: Simd> core::ops::BitAnd<u16> for u16x32<S> {
5386 type Output = Self;
5387 #[inline(always)]
5388 fn bitand(self, rhs: u16) -> Self::Output {
5389 self.simd.and_u16x32(self, rhs.simd_into(self.simd))
5390 }
5391}
5392impl<S: Simd> core::ops::BitAndAssign<u16> for u16x32<S> {
5393 #[inline(always)]
5394 fn bitand_assign(&mut self, rhs: u16) {
5395 *self = self.simd.and_u16x32(*self, rhs.simd_into(self.simd));
5396 }
5397}
5398impl<S: Simd> core::ops::BitAnd<u16x32<S>> for u16 {
5399 type Output = u16x32<S>;
5400 #[inline(always)]
5401 fn bitand(self, rhs: u16x32<S>) -> Self::Output {
5402 rhs.simd.and_u16x32(self.simd_into(rhs.simd), rhs)
5403 }
5404}
5405impl<S: Simd> core::ops::BitOr for u16x32<S> {
5406 type Output = Self;
5407 #[inline(always)]
5408 fn bitor(self, rhs: Self) -> Self::Output {
5409 self.simd.or_u16x32(self, rhs)
5410 }
5411}
5412impl<S: Simd> core::ops::BitOrAssign for u16x32<S> {
5413 #[inline(always)]
5414 fn bitor_assign(&mut self, rhs: Self) {
5415 *self = self.simd.or_u16x32(*self, rhs);
5416 }
5417}
5418impl<S: Simd> core::ops::BitOr<u16> for u16x32<S> {
5419 type Output = Self;
5420 #[inline(always)]
5421 fn bitor(self, rhs: u16) -> Self::Output {
5422 self.simd.or_u16x32(self, rhs.simd_into(self.simd))
5423 }
5424}
5425impl<S: Simd> core::ops::BitOrAssign<u16> for u16x32<S> {
5426 #[inline(always)]
5427 fn bitor_assign(&mut self, rhs: u16) {
5428 *self = self.simd.or_u16x32(*self, rhs.simd_into(self.simd));
5429 }
5430}
5431impl<S: Simd> core::ops::BitOr<u16x32<S>> for u16 {
5432 type Output = u16x32<S>;
5433 #[inline(always)]
5434 fn bitor(self, rhs: u16x32<S>) -> Self::Output {
5435 rhs.simd.or_u16x32(self.simd_into(rhs.simd), rhs)
5436 }
5437}
5438impl<S: Simd> core::ops::BitXor for u16x32<S> {
5439 type Output = Self;
5440 #[inline(always)]
5441 fn bitxor(self, rhs: Self) -> Self::Output {
5442 self.simd.xor_u16x32(self, rhs)
5443 }
5444}
5445impl<S: Simd> core::ops::BitXorAssign for u16x32<S> {
5446 #[inline(always)]
5447 fn bitxor_assign(&mut self, rhs: Self) {
5448 *self = self.simd.xor_u16x32(*self, rhs);
5449 }
5450}
5451impl<S: Simd> core::ops::BitXor<u16> for u16x32<S> {
5452 type Output = Self;
5453 #[inline(always)]
5454 fn bitxor(self, rhs: u16) -> Self::Output {
5455 self.simd.xor_u16x32(self, rhs.simd_into(self.simd))
5456 }
5457}
5458impl<S: Simd> core::ops::BitXorAssign<u16> for u16x32<S> {
5459 #[inline(always)]
5460 fn bitxor_assign(&mut self, rhs: u16) {
5461 *self = self.simd.xor_u16x32(*self, rhs.simd_into(self.simd));
5462 }
5463}
5464impl<S: Simd> core::ops::BitXor<u16x32<S>> for u16 {
5465 type Output = u16x32<S>;
5466 #[inline(always)]
5467 fn bitxor(self, rhs: u16x32<S>) -> Self::Output {
5468 rhs.simd.xor_u16x32(self.simd_into(rhs.simd), rhs)
5469 }
5470}
5471impl<S: Simd> core::ops::Shl<u32> for u16x32<S> {
5472 type Output = Self;
5473 #[inline(always)]
5474 fn shl(self, rhs: u32) -> Self::Output {
5475 self.simd.shl_u16x32(self, rhs)
5476 }
5477}
5478impl<S: Simd> core::ops::Shr<u32> for u16x32<S> {
5479 type Output = Self;
5480 #[inline(always)]
5481 fn shr(self, rhs: u32) -> Self::Output {
5482 self.simd.shr_u16x32(self, rhs)
5483 }
5484}
5485impl<S: Simd> core::ops::ShlAssign<u32> for u16x32<S> {
5486 #[inline(always)]
5487 fn shl_assign(&mut self, rhs: u32) {
5488 *self = self.simd.shl_u16x32(*self, rhs);
5489 }
5490}
5491impl<S: Simd> core::ops::ShrAssign<u32> for u16x32<S> {
5492 #[inline(always)]
5493 fn shr_assign(&mut self, rhs: u32) {
5494 *self = self.simd.shr_u16x32(*self, rhs);
5495 }
5496}
5497impl<S: Simd> core::ops::Shr for u16x32<S> {
5498 type Output = Self;
5499 #[inline(always)]
5500 fn shr(self, rhs: Self) -> Self::Output {
5501 self.simd.shrv_u16x32(self, rhs)
5502 }
5503}
5504impl<S: Simd> core::ops::ShrAssign for u16x32<S> {
5505 #[inline(always)]
5506 fn shr_assign(&mut self, rhs: Self) {
5507 *self = self.simd.shrv_u16x32(*self, rhs);
5508 }
5509}
impl<S: Simd> core::ops::BitAnd for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_mask16x32(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for mask16x32<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_mask16x32(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i16> for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i16) -> Self::Output {
        self.simd.and_mask16x32(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i16> for mask16x32<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i16) {
        *self = self.simd.and_mask16x32(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<mask16x32<S>> for i16 {
    type Output = mask16x32<S>;
    #[inline(always)]
    fn bitand(self, rhs: mask16x32<S>) -> Self::Output {
        rhs.simd.and_mask16x32(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_mask16x32(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for mask16x32<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_mask16x32(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i16> for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i16) -> Self::Output {
        self.simd.or_mask16x32(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i16> for mask16x32<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i16) {
        *self = self.simd.or_mask16x32(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<mask16x32<S>> for i16 {
    type Output = mask16x32<S>;
    #[inline(always)]
    fn bitor(self, rhs: mask16x32<S>) -> Self::Output {
        rhs.simd.or_mask16x32(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_mask16x32(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for mask16x32<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_mask16x32(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i16> for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i16) -> Self::Output {
        self.simd.xor_mask16x32(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i16> for mask16x32<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i16) {
        *self = self.simd.xor_mask16x32(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<mask16x32<S>> for i16 {
    type Output = mask16x32<S>;
    #[inline(always)]
    fn bitxor(self, rhs: mask16x32<S>) -> Self::Output {
        rhs.simd.xor_mask16x32(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Not for mask16x32<S> {
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        self.simd.not_mask16x32(self)
    }
}
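// Arithmetic, bitwise, and shift operators for `i32x16`, all forwarded to the
// `Simd` backend. Usage sketch (`a` and `b` are hypothetical `i32x16<S>`
// values, shown only to illustrate the operator surface defined below):
//
//     let c = (a + b) * 2 - 1;    // scalar operands are splatted via `SimdInto`
//     let d = (c & 0xFF) >> 3u32; // one shift count applied to every lane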
impl<S: Simd> core::ops::Neg for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn neg(self) -> Self::Output {
        self.simd.neg_i32x16(self)
    }
}
impl<S: Simd> core::ops::Add for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for i32x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<i32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: i32) -> Self::Output {
        self.simd.add_i32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<i32> for i32x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: i32) {
        *self = self.simd.add_i32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<i32x16<S>> for i32 {
    type Output = i32x16<S>;
    #[inline(always)]
    fn add(self, rhs: i32x16<S>) -> Self::Output {
        rhs.simd.add_i32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for i32x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<i32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: i32) -> Self::Output {
        self.simd.sub_i32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<i32> for i32x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: i32) {
        *self = self.simd.sub_i32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<i32x16<S>> for i32 {
    type Output = i32x16<S>;
    #[inline(always)]
    fn sub(self, rhs: i32x16<S>) -> Self::Output {
        rhs.simd.sub_i32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for i32x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<i32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: i32) -> Self::Output {
        self.simd.mul_i32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<i32> for i32x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: i32) {
        *self = self.simd.mul_i32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<i32x16<S>> for i32 {
    type Output = i32x16<S>;
    #[inline(always)]
    fn mul(self, rhs: i32x16<S>) -> Self::Output {
        rhs.simd.mul_i32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for i32x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i32) -> Self::Output {
        self.simd.and_i32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i32> for i32x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i32) {
        *self = self.simd.and_i32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<i32x16<S>> for i32 {
    type Output = i32x16<S>;
    #[inline(always)]
    fn bitand(self, rhs: i32x16<S>) -> Self::Output {
        rhs.simd.and_i32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for i32x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i32) -> Self::Output {
        self.simd.or_i32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i32> for i32x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i32) {
        *self = self.simd.or_i32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<i32x16<S>> for i32 {
    type Output = i32x16<S>;
    #[inline(always)]
    fn bitor(self, rhs: i32x16<S>) -> Self::Output {
        rhs.simd.or_i32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for i32x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i32) -> Self::Output {
        self.simd.xor_i32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i32> for i32x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i32) {
        *self = self.simd.xor_i32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<i32x16<S>> for i32 {
    type Output = i32x16<S>;
    #[inline(always)]
    fn bitxor(self, rhs: i32x16<S>) -> Self::Output {
        rhs.simd.xor_i32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for i32x16<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for i32x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_i32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for i32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_i32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for i32x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_i32x16(*self, rhs);
    }
}
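// The same operator set for `u32x16`. Unsigned vectors get no `Neg` impl;
// otherwise the pattern matches the signed case above, including scalar-splat
// variants on both sides and the `shrv_*` shift-by-vector forwarding.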
impl<S: Simd> core::ops::Add for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for u32x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: u32) -> Self::Output {
        self.simd.add_u32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: u32) {
        *self = self.simd.add_u32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<u32x16<S>> for u32 {
    type Output = u32x16<S>;
    #[inline(always)]
    fn add(self, rhs: u32x16<S>) -> Self::Output {
        rhs.simd.add_u32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for u32x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: u32) -> Self::Output {
        self.simd.sub_u32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: u32) {
        *self = self.simd.sub_u32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<u32x16<S>> for u32 {
    type Output = u32x16<S>;
    #[inline(always)]
    fn sub(self, rhs: u32x16<S>) -> Self::Output {
        rhs.simd.sub_u32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for u32x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: u32) -> Self::Output {
        self.simd.mul_u32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: u32) {
        *self = self.simd.mul_u32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<u32x16<S>> for u32 {
    type Output = u32x16<S>;
    #[inline(always)]
    fn mul(self, rhs: u32x16<S>) -> Self::Output {
        rhs.simd.mul_u32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitAnd for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for u32x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: u32) -> Self::Output {
        self.simd.and_u32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: u32) {
        *self = self.simd.and_u32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<u32x16<S>> for u32 {
    type Output = u32x16<S>;
    #[inline(always)]
    fn bitand(self, rhs: u32x16<S>) -> Self::Output {
        rhs.simd.and_u32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for u32x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: u32) -> Self::Output {
        self.simd.or_u32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: u32) {
        *self = self.simd.or_u32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<u32x16<S>> for u32 {
    type Output = u32x16<S>;
    #[inline(always)]
    fn bitor(self, rhs: u32x16<S>) -> Self::Output {
        rhs.simd.or_u32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for u32x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: u32) -> Self::Output {
        self.simd.xor_u32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: u32) {
        *self = self.simd.xor_u32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<u32x16<S>> for u32 {
    type Output = u32x16<S>;
    #[inline(always)]
    fn bitxor(self, rhs: u32x16<S>) -> Self::Output {
        rhs.simd.xor_u32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Shl<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: u32) -> Self::Output {
        self.simd.shl_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::Shr<u32> for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self::Output {
        self.simd.shr_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShlAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u32) {
        *self = self.simd.shl_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::ShrAssign<u32> for u32x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: u32) {
        *self = self.simd.shr_u32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::Shr for u32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.simd.shrv_u32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::ShrAssign for u32x16<S> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = self.simd.shrv_u32x16(*self, rhs);
    }
}
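// Bitwise operators and `Not` for `mask32x16`, mirroring the narrower mask
// types; the scalar operand type is `i32` here, matching the 32-bit lane width.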
impl<S: Simd> core::ops::BitAnd for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_mask32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for mask32x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_mask32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i32> for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i32) -> Self::Output {
        self.simd.and_mask32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i32> for mask32x16<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i32) {
        *self = self.simd.and_mask32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<mask32x16<S>> for i32 {
    type Output = mask32x16<S>;
    #[inline(always)]
    fn bitand(self, rhs: mask32x16<S>) -> Self::Output {
        rhs.simd.and_mask32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_mask32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for mask32x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_mask32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i32> for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i32) -> Self::Output {
        self.simd.or_mask32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i32> for mask32x16<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i32) {
        *self = self.simd.or_mask32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<mask32x16<S>> for i32 {
    type Output = mask32x16<S>;
    #[inline(always)]
    fn bitor(self, rhs: mask32x16<S>) -> Self::Output {
        rhs.simd.or_mask32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_mask32x16(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for mask32x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_mask32x16(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i32> for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i32) -> Self::Output {
        self.simd.xor_mask32x16(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i32> for mask32x16<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i32) {
        *self = self.simd.xor_mask32x16(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<mask32x16<S>> for i32 {
    type Output = mask32x16<S>;
    #[inline(always)]
    fn bitxor(self, rhs: mask32x16<S>) -> Self::Output {
        rhs.simd.xor_mask32x16(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Not for mask32x16<S> {
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        self.simd.not_mask32x16(self)
    }
}
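// Arithmetic operators for `f64x8`. The float vectors additionally implement
// `Div`/`DivAssign`, which the integer vectors above do not; only `Neg`, the
// four arithmetic operators, and their assign and scalar-splat forms appear in
// this group.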
impl<S: Simd> core::ops::Neg for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn neg(self) -> Self::Output {
        self.simd.neg_f64x8(self)
    }
}
impl<S: Simd> core::ops::Add for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: Self) -> Self::Output {
        self.simd.add_f64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::AddAssign for f64x8<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        *self = self.simd.add_f64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Add<f64> for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn add(self, rhs: f64) -> Self::Output {
        self.simd.add_f64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::AddAssign<f64> for f64x8<S> {
    #[inline(always)]
    fn add_assign(&mut self, rhs: f64) {
        *self = self.simd.add_f64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Add<f64x8<S>> for f64 {
    type Output = f64x8<S>;
    #[inline(always)]
    fn add(self, rhs: f64x8<S>) -> Self::Output {
        rhs.simd.add_f64x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Sub for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: Self) -> Self::Output {
        self.simd.sub_f64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::SubAssign for f64x8<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        *self = self.simd.sub_f64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Sub<f64> for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, rhs: f64) -> Self::Output {
        self.simd.sub_f64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::SubAssign<f64> for f64x8<S> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: f64) {
        *self = self.simd.sub_f64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Sub<f64x8<S>> for f64 {
    type Output = f64x8<S>;
    #[inline(always)]
    fn sub(self, rhs: f64x8<S>) -> Self::Output {
        rhs.simd.sub_f64x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Mul for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: Self) -> Self::Output {
        self.simd.mul_f64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::MulAssign for f64x8<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: Self) {
        *self = self.simd.mul_f64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Mul<f64> for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn mul(self, rhs: f64) -> Self::Output {
        self.simd.mul_f64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::MulAssign<f64> for f64x8<S> {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: f64) {
        *self = self.simd.mul_f64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Mul<f64x8<S>> for f64 {
    type Output = f64x8<S>;
    #[inline(always)]
    fn mul(self, rhs: f64x8<S>) -> Self::Output {
        rhs.simd.mul_f64x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Div for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn div(self, rhs: Self) -> Self::Output {
        self.simd.div_f64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::DivAssign for f64x8<S> {
    #[inline(always)]
    fn div_assign(&mut self, rhs: Self) {
        *self = self.simd.div_f64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::Div<f64> for f64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn div(self, rhs: f64) -> Self::Output {
        self.simd.div_f64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::DivAssign<f64> for f64x8<S> {
    #[inline(always)]
    fn div_assign(&mut self, rhs: f64) {
        *self = self.simd.div_f64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::Div<f64x8<S>> for f64 {
    type Output = f64x8<S>;
    #[inline(always)]
    fn div(self, rhs: f64x8<S>) -> Self::Output {
        rhs.simd.div_f64x8(self.simd_into(rhs.simd), rhs)
    }
}
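// Bitwise operators and `Not` for `mask64x8`; scalar operands are `i64`,
// matching the 64-bit lane width.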
impl<S: Simd> core::ops::BitAnd for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: Self) -> Self::Output {
        self.simd.and_mask64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitAndAssign for mask64x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.simd.and_mask64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitAnd<i64> for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, rhs: i64) -> Self::Output {
        self.simd.and_mask64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitAndAssign<i64> for mask64x8<S> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: i64) {
        *self = self.simd.and_mask64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitAnd<mask64x8<S>> for i64 {
    type Output = mask64x8<S>;
    #[inline(always)]
    fn bitand(self, rhs: mask64x8<S>) -> Self::Output {
        rhs.simd.and_mask64x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitOr for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: Self) -> Self::Output {
        self.simd.or_mask64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitOrAssign for mask64x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.simd.or_mask64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitOr<i64> for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, rhs: i64) -> Self::Output {
        self.simd.or_mask64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitOrAssign<i64> for mask64x8<S> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: i64) {
        *self = self.simd.or_mask64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitOr<mask64x8<S>> for i64 {
    type Output = mask64x8<S>;
    #[inline(always)]
    fn bitor(self, rhs: mask64x8<S>) -> Self::Output {
        rhs.simd.or_mask64x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::BitXor for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.simd.xor_mask64x8(self, rhs)
    }
}
impl<S: Simd> core::ops::BitXorAssign for mask64x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: Self) {
        *self = self.simd.xor_mask64x8(*self, rhs);
    }
}
impl<S: Simd> core::ops::BitXor<i64> for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, rhs: i64) -> Self::Output {
        self.simd.xor_mask64x8(self, rhs.simd_into(self.simd))
    }
}
impl<S: Simd> core::ops::BitXorAssign<i64> for mask64x8<S> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: i64) {
        *self = self.simd.xor_mask64x8(*self, rhs.simd_into(self.simd));
    }
}
impl<S: Simd> core::ops::BitXor<mask64x8<S>> for i64 {
    type Output = mask64x8<S>;
    #[inline(always)]
    fn bitxor(self, rhs: mask64x8<S>) -> Self::Output {
        rhs.simd.xor_mask64x8(self.simd_into(rhs.simd), rhs)
    }
}
impl<S: Simd> core::ops::Not for mask64x8<S> {
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        self.simd.not_mask64x8(self)
    }
}