rayon/iter/mod.rs
1//! Traits for writing parallel programs using an iterator-style interface
2//!
3//! You will rarely need to interact with this module directly unless you have
4//! need to name one of the iterator types.
5//!
6//! Parallel iterators make it easy to write iterator-like chains that
7//! execute in parallel: typically all you have to do is convert the
8//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10//! example, to compute the sum of the squares of a sequence of
11//! integers, one might write:
12//!
13//! ```rust
14//! use rayon::prelude::*;
15//! fn sum_of_squares(input: &[i32]) -> i32 {
16//! input.par_iter()
17//! .map(|i| i * i)
18//! .sum()
19//! }
20//! ```
21//!
22//! Or, to increment all the integers in a slice, you could write:
23//!
24//! ```rust
25//! use rayon::prelude::*;
26//! fn increment_all(input: &mut [i32]) {
27//! input.par_iter_mut()
28//! .for_each(|p| *p += 1);
29//! }
30//! ```
31//!
32//! To use parallel iterators, first import the traits by adding
33//! something like `use rayon::prelude::*` to your module. You can
34//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35//! parallel iterator. Like a [regular iterator][], parallel
36//! iterators work by first constructing a computation and then
37//! executing it.
38//!
39//! In addition to `par_iter()` and friends, some types offer other
40//! ways to create (or consume) parallel iterators:
41//!
42//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43//! `par_windows`, as well as various parallel sorting
44//! operations. See [the `ParallelSlice` trait] for the full list.
45//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46//! See [the `ParallelString` trait] for the full list.
47//! - Various collections offer [`par_extend`], which grows a
48//! collection given a parallel iterator. (If you don't have a
49//! collection to extend, you can use [`collect()`] to create a new
50//! one from scratch.)
51//!
52//! [the `ParallelSlice` trait]: crate::slice::ParallelSlice
53//! [the `ParallelString` trait]: crate::str::ParallelString
54//! [`par_extend`]: ParallelExtend
55//! [`collect()`]: ParallelIterator::collect()
56//!
57//! To see the full range of methods available on parallel iterators,
58//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59//! traits.
60//!
61//! If you'd like to build a custom parallel iterator, or to write your own
62//! combinator, then check out the [split] function and the [plumbing] module.
63//!
64//! [regular iterator]: Iterator
65//! [split]: split()
66//! [plumbing]: plumbing
67//!
68//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
69//! has been deliberately obscured from the public API. This trait is intended
70//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
71//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
72//! `None`/`Err` values will exit early.
73//!
74//! A note about
//! [dyn compatibility](https://doc.rust-lang.org/reference/items/traits.html#dyn-compatibility):
76//! It is currently _not_ possible to wrap a `ParallelIterator` (or any trait
77//! that depends on it) using a `Box<dyn ParallelIterator>` or other kind of
78//! dynamic allocation, because `ParallelIterator` is **not dyn-compatible**.
79//! (This keeps the implementation simpler and allows extra optimizations.)
80
81use self::plumbing::*;
82use self::private::Try;
83pub use either::Either;
84use std::cmp::Ordering;
85use std::collections::LinkedList;
86use std::iter::{Product, Sum};
87use std::ops::{Fn, RangeBounds};
88
89pub mod plumbing;
90
91#[cfg(test)]
92mod test;
93
94// There is a method to the madness here:
95//
96// - These modules are private but expose certain types to the end-user
97// (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
98// public API surface of the `ParallelIterator` traits.
99// - In **this** module, those public types are always used unprefixed, which forces
100// us to add a `pub use` and helps identify if we missed anything.
101// - In contrast, items that appear **only** in the body of a method,
102// e.g. `find::find()`, are always used **prefixed**, so that they
103// can be readily distinguished.
104
105mod blocks;
106mod chain;
107mod chunks;
108mod cloned;
109mod collect;
110mod copied;
111mod empty;
112mod enumerate;
113mod extend;
114mod filter;
115mod filter_map;
116mod find;
117mod find_first_last;
118mod flat_map;
119mod flat_map_iter;
120mod flatten;
121mod flatten_iter;
122mod fold;
123mod fold_chunks;
124mod fold_chunks_with;
125mod for_each;
126mod from_par_iter;
127mod inspect;
128mod interleave;
129mod interleave_shortest;
130mod intersperse;
131mod len;
132mod map;
133mod map_with;
134mod multizip;
135mod noop;
136mod once;
137mod panic_fuse;
138mod par_bridge;
139mod positions;
140mod product;
141mod reduce;
142mod repeat;
143mod rev;
144mod skip;
145mod skip_any;
146mod skip_any_while;
147mod splitter;
148mod step_by;
149mod sum;
150mod take;
151mod take_any;
152mod take_any_while;
153mod try_fold;
154mod try_reduce;
155mod try_reduce_with;
156mod unzip;
157mod update;
158mod walk_tree;
159mod while_some;
160mod zip;
161mod zip_eq;
162
163pub use self::{
164 blocks::{ExponentialBlocks, UniformBlocks},
165 chain::Chain,
166 chunks::Chunks,
167 cloned::Cloned,
168 copied::Copied,
169 empty::{empty, Empty},
170 enumerate::Enumerate,
171 filter::Filter,
172 filter_map::FilterMap,
173 flat_map::FlatMap,
174 flat_map_iter::FlatMapIter,
175 flatten::Flatten,
176 flatten_iter::FlattenIter,
177 fold::{Fold, FoldWith},
178 fold_chunks::FoldChunks,
179 fold_chunks_with::FoldChunksWith,
180 inspect::Inspect,
181 interleave::Interleave,
182 interleave_shortest::InterleaveShortest,
183 intersperse::Intersperse,
184 len::{MaxLen, MinLen},
185 map::Map,
186 map_with::{MapInit, MapWith},
187 multizip::MultiZip,
188 once::{once, Once},
189 panic_fuse::PanicFuse,
190 par_bridge::{IterBridge, ParallelBridge},
191 positions::Positions,
192 repeat::{repeat, repeat_n, Repeat, RepeatN},
193 rev::Rev,
194 skip::Skip,
195 skip_any::SkipAny,
196 skip_any_while::SkipAnyWhile,
197 splitter::{split, Split},
198 step_by::StepBy,
199 take::Take,
200 take_any::TakeAny,
201 take_any_while::TakeAnyWhile,
202 try_fold::{TryFold, TryFoldWith},
203 update::Update,
204 walk_tree::{
205 walk_tree, walk_tree_postfix, walk_tree_prefix, WalkTree, WalkTreePostfix, WalkTreePrefix,
206 },
207 while_some::WhileSome,
208 zip::Zip,
209 zip_eq::ZipEq,
210};
211
212#[allow(deprecated)]
213pub use repeat::repeatn;
214
215/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
216///
/// By implementing `IntoParallelIterator` for a type, you define how it will be
/// transformed into an iterator. This is a parallel version of the standard
219/// library's [`std::iter::IntoIterator`] trait.
pub trait IntoParallelIterator {
    /// The parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// It must be `Send` so that items can be moved between worker threads.
    type Item: Send;

    /// Converts `self` into a parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// println!("counting in parallel:");
    /// (0..100).into_par_iter()
    ///     .for_each(|i| println!("{}", i));
    /// ```
    ///
    /// This conversion is often implicit for arguments to methods like [`zip`].
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
    /// ```
    ///
    /// [`zip`]: IndexedParallelIterator::zip()
    fn into_par_iter(self) -> Self::Iter;
}
251
252/// `IntoParallelRefIterator` implements the conversion to a
253/// [`ParallelIterator`], providing shared references to the data.
254///
255/// This is a parallel version of the `iter()` method
256/// defined by various collections.
257///
258/// This trait is automatically implemented
259/// `for I where &I: IntoParallelIterator`. In most cases, users
260/// will want to implement [`IntoParallelIterator`] rather than implement
261/// this trait directly.
pub trait IntoParallelRefIterator<'data> {
    /// The type of the parallel iterator that will be returned.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This will typically be an `&'data T` reference type.
    /// The `'data` lifetime ties each item to the borrow of `self`
    /// taken by [`par_iter`](Self::par_iter).
    type Item: Send + 'data;

    /// Converts `self` into a parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let v: Vec<_> = (0..100).collect();
    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
    ///
    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
    /// // producing the exact same references.
    /// assert!(v.par_iter().zip(&v)
    ///         .all(|(a, b)| std::ptr::eq(a, b)));
    /// ```
    fn par_iter(&'data self) -> Self::Iter;
}
287
// Blanket implementation: any `I` whose shared reference `&'data I` is
// parallel-iterable automatically gets a `par_iter()` method.
impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
where
    &'data I: IntoParallelIterator,
{
    // Reuse the iterator and item types of the `&'data I` conversion.
    type Iter = <&'data I as IntoParallelIterator>::Iter;
    type Item = <&'data I as IntoParallelIterator>::Item;

    fn par_iter(&'data self) -> Self::Iter {
        // `self` is `&'data I` here, which is itself `IntoParallelIterator`.
        self.into_par_iter()
    }
}
299
300/// `IntoParallelRefMutIterator` implements the conversion to a
301/// [`ParallelIterator`], providing mutable references to the data.
302///
303/// This is a parallel version of the `iter_mut()` method
304/// defined by various collections.
305///
306/// This trait is automatically implemented
307/// `for I where &mut I: IntoParallelIterator`. In most cases, users
308/// will want to implement [`IntoParallelIterator`] rather than implement
309/// this trait directly.
pub trait IntoParallelRefMutIterator<'data> {
    /// The type of iterator that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that will be produced; this is typically an
    /// `&'data mut T` reference.
    /// The `'data` lifetime ties each item to the mutable borrow of `self`
    /// taken by [`par_iter_mut`](Self::par_iter_mut).
    type Item: Send + 'data;

    /// Creates the parallel iterator from `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = vec![0usize; 5];
    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
    /// assert_eq!(v, [0, 1, 2, 3, 4]);
    /// ```
    fn par_iter_mut(&'data mut self) -> Self::Iter;
}
331
// Blanket implementation: any `I` whose mutable reference `&'data mut I` is
// parallel-iterable automatically gets a `par_iter_mut()` method.
impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
where
    &'data mut I: IntoParallelIterator,
{
    // Reuse the iterator and item types of the `&'data mut I` conversion.
    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
    type Item = <&'data mut I as IntoParallelIterator>::Item;

    fn par_iter_mut(&'data mut self) -> Self::Iter {
        // `self` is `&'data mut I` here, which is itself `IntoParallelIterator`.
        self.into_par_iter()
    }
}
343
344/// Parallel version of the standard iterator trait.
345///
346/// The combinators on this trait are available on **all** parallel
347/// iterators. Additional methods can be found on the
348/// [`IndexedParallelIterator`] trait: those methods are only
349/// available for parallel iterators where the number of items is
350/// known in advance (so, e.g., after invoking `filter`, those methods
351/// become unavailable).
352///
353/// For examples of using parallel iterators, see [the docs on the
354/// `iter` module][iter].
355///
356/// [iter]: self
357pub trait ParallelIterator: Sized + Send {
358 /// The type of item that this parallel iterator produces.
359 /// For example, if you use the [`for_each`] method, this is the type of
360 /// item that your closure will be invoked with.
361 ///
362 /// [`for_each`]: #method.for_each
363 type Item: Send;
364
365 /// Executes `OP` on each item produced by the iterator, in parallel.
366 ///
367 /// # Examples
368 ///
369 /// ```
370 /// use rayon::prelude::*;
371 ///
372 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
373 /// ```
    fn for_each<OP>(self, op: OP)
    where
        OP: Fn(Self::Item) + Sync + Send,
    {
        // `OP: Sync`, so a shared reference to the one closure can be
        // handed to every parallel job.
        for_each::for_each(self, &op)
    }
380
381 /// Executes `OP` on the given `init` value with each item produced by
382 /// the iterator, in parallel.
383 ///
384 /// The `init` value will be cloned only as needed to be paired with
385 /// the group of items in each rayon job. It does not require the type
386 /// to be `Sync`.
387 ///
388 /// # Examples
389 ///
390 /// ```
391 /// use std::sync::mpsc::channel;
392 /// use rayon::prelude::*;
393 ///
394 /// let (sender, receiver) = channel();
395 ///
396 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
397 ///
398 /// let mut res: Vec<_> = receiver.iter().collect();
399 ///
400 /// res.sort();
401 ///
402 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
403 /// ```
    fn for_each_with<OP, T>(self, init: T, op: OP)
    where
        OP: Fn(&mut T, Self::Item) + Sync + Send,
        T: Send + Clone,
    {
        // `op` returns nothing, so the mapped iterator yields `()` items;
        // collecting into `()` simply drives it to completion for the
        // side effects.
        self.map_with(init, op).collect()
    }
411
412 /// Executes `OP` on a value returned by `init` with each item produced by
413 /// the iterator, in parallel.
414 ///
415 /// The `init` function will be called only as needed for a value to be
416 /// paired with the group of items in each rayon job. There is no
417 /// constraint on that returned type at all!
418 ///
419 /// # Examples
420 ///
421 /// ```
422 /// use rand::Rng;
423 /// use rayon::prelude::*;
424 ///
425 /// let mut v = vec![0u8; 1_000_000];
426 ///
427 /// v.par_chunks_mut(1000)
428 /// .for_each_init(
429 /// || rand::rng(),
430 /// |rng, chunk| rng.fill(chunk),
431 /// );
432 ///
433 /// // There's a remote chance that this will fail...
434 /// for i in 0u8..=255 {
435 /// assert!(v.contains(&i));
436 /// }
437 /// ```
    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
    where
        OP: Fn(&mut T, Self::Item) + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
    {
        // As in `for_each_with`: the mapped iterator yields `()` items, and
        // collecting into `()` just runs it for the side effects.
        self.map_init(init, op).collect()
    }
445
446 /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
447 ///
448 /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
449 /// stop processing the rest of the items in the iterator as soon as
450 /// possible, and we will return that terminating value. Otherwise, we will
451 /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are
452 /// multiple errors in parallel, it is not specified which will be returned.
453 ///
454 /// # Examples
455 ///
456 /// ```
457 /// use rayon::prelude::*;
458 /// use std::io::{self, Write};
459 ///
460 /// // This will stop iteration early if there's any write error, like
461 /// // having piped output get closed on the other end.
462 /// (0..100).into_par_iter()
463 /// .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
464 /// .expect("expected no write errors");
465 /// ```
466 fn try_for_each<OP, R>(self, op: OP) -> R
467 where
468 OP: Fn(Self::Item) -> R + Sync + Send,
469 R: Try<Output = ()> + Send,
470 {
471 fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
472 R::from_output(())
473 }
474
475 self.map(op).try_reduce(<()>::default, ok)
476 }
477
478 /// Executes a fallible `OP` on the given `init` value with each item
479 /// produced by the iterator, in parallel.
480 ///
481 /// This combines the `init` semantics of [`for_each_with()`] and the
482 /// failure semantics of [`try_for_each()`].
483 ///
484 /// [`for_each_with()`]: #method.for_each_with
485 /// [`try_for_each()`]: #method.try_for_each
486 ///
487 /// # Examples
488 ///
489 /// ```
490 /// use std::sync::mpsc::channel;
491 /// use rayon::prelude::*;
492 ///
493 /// let (sender, receiver) = channel();
494 ///
495 /// (0..5).into_par_iter()
496 /// .try_for_each_with(sender, |s, x| s.send(x))
497 /// .expect("expected no send errors");
498 ///
499 /// let mut res: Vec<_> = receiver.iter().collect();
500 ///
501 /// res.sort();
502 ///
503 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
504 /// ```
    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
    where
        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
        T: Send + Clone,
        R: Try<Output = ()> + Send,
    {
        // Combines two unit successes back into `R`'s success value.
        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
            R::from_output(())
        }

        self.map_with(init, op).try_reduce(<()>::default, ok)
    }
517
518 /// Executes a fallible `OP` on a value returned by `init` with each item
519 /// produced by the iterator, in parallel.
520 ///
521 /// This combines the `init` semantics of [`for_each_init()`] and the
522 /// failure semantics of [`try_for_each()`].
523 ///
524 /// [`for_each_init()`]: #method.for_each_init
525 /// [`try_for_each()`]: #method.try_for_each
526 ///
527 /// # Examples
528 ///
529 /// ```
530 /// use rand::{Rng, TryRngCore};
531 /// use rayon::prelude::*;
532 ///
533 /// let mut v = vec![0u8; 1_000_000];
534 ///
535 /// v.par_chunks_mut(1000)
536 /// .try_for_each_init(
537 /// || rand::rng(),
538 /// |rng, chunk| rng.try_fill_bytes(chunk),
539 /// )
540 /// .expect("expected no rand errors");
541 ///
542 /// // There's a remote chance that this will fail...
543 /// for i in 0u8..=255 {
544 /// assert!(v.contains(&i));
545 /// }
546 /// ```
    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
    where
        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
        R: Try<Output = ()> + Send,
    {
        // Combines two unit successes back into `R`'s success value.
        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
            R::from_output(())
        }

        self.map_init(init, op).try_reduce(<()>::default, ok)
    }
559
560 /// Counts the number of items in this parallel iterator.
561 ///
562 /// # Examples
563 ///
564 /// ```
565 /// use rayon::prelude::*;
566 ///
567 /// let count = (0..100).into_par_iter().count();
568 ///
569 /// assert_eq!(count, 100);
570 /// ```
571 fn count(self) -> usize {
572 fn one<T>(_: T) -> usize {
573 1
574 }
575
576 self.map(one).sum()
577 }
578
579 /// Applies `map_op` to each item of this iterator, producing a new
580 /// iterator with the results.
581 ///
582 /// # Examples
583 ///
584 /// ```
585 /// use rayon::prelude::*;
586 ///
587 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
588 ///
589 /// let doubles: Vec<_> = par_iter.collect();
590 ///
591 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
592 /// ```
    fn map<F, R>(self, map_op: F) -> Map<Self, F>
    where
        F: Fn(Self::Item) -> R + Sync + Send,
        R: Send,
    {
        // Lazy adaptor: `map_op` only runs once the iterator is driven.
        Map::new(self, map_op)
    }
600
601 /// Applies `map_op` to the given `init` value with each item of this
602 /// iterator, producing a new iterator with the results.
603 ///
604 /// The `init` value will be cloned only as needed to be paired with
605 /// the group of items in each rayon job. It does not require the type
606 /// to be `Sync`.
607 ///
608 /// # Examples
609 ///
610 /// ```
611 /// use std::sync::mpsc::channel;
612 /// use rayon::prelude::*;
613 ///
614 /// let (sender, receiver) = channel();
615 ///
616 /// let a: Vec<_> = (0..5)
617 /// .into_par_iter() // iterating over i32
618 /// .map_with(sender, |s, x| {
619 /// s.send(x).unwrap(); // sending i32 values through the channel
620 /// x // returning i32
621 /// })
622 /// .collect(); // collecting the returned values into a vector
623 ///
624 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel
625 /// .collect(); // and collecting them
626 /// b.sort();
627 ///
628 /// assert_eq!(a, b);
629 /// ```
    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        T: Send + Clone,
        R: Send,
    {
        // Lazy adaptor; `init` is cloned per rayon job as needed.
        MapWith::new(self, init, map_op)
    }
638
639 /// Applies `map_op` to a value returned by `init` with each item of this
640 /// iterator, producing a new iterator with the results.
641 ///
642 /// The `init` function will be called only as needed for a value to be
643 /// paired with the group of items in each rayon job. There is no
644 /// constraint on that returned type at all!
645 ///
646 /// # Examples
647 ///
648 /// ```
649 /// use rand::Rng;
650 /// use rayon::prelude::*;
651 ///
652 /// let a: Vec<_> = (1i32..1_000_000)
653 /// .into_par_iter()
654 /// .map_init(
655 /// || rand::rng(), // get the thread-local RNG
656 /// |rng, x| if rng.random() { // randomly negate items
657 /// -x
658 /// } else {
659 /// x
660 /// },
661 /// ).collect();
662 ///
663 /// // There's a remote chance that this will fail...
664 /// assert!(a.iter().any(|&x| x < 0));
665 /// assert!(a.iter().any(|&x| x > 0));
666 /// ```
    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
        R: Send,
    {
        // Lazy adaptor; `init` is invoked per rayon job to create a value.
        MapInit::new(self, init, map_op)
    }
675
676 /// Creates an iterator which clones all of its elements. This may be
677 /// useful when you have an iterator over `&T`, but you need `T`, and
678 /// that type implements `Clone`. See also [`copied()`].
679 ///
680 /// [`copied()`]: #method.copied
681 ///
682 /// # Examples
683 ///
684 /// ```
685 /// use rayon::prelude::*;
686 ///
687 /// let a = [1, 2, 3];
688 ///
689 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
690 ///
691 /// // cloned is the same as .map(|&x| x), for integers
692 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
693 ///
694 /// assert_eq!(v_cloned, vec![1, 2, 3]);
695 /// assert_eq!(v_map, vec![1, 2, 3]);
696 /// ```
    fn cloned<'a, T>(self) -> Cloned<Self>
    where
        T: 'a + Clone + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        // Adaptor that clones each `&T` item into an owned `T`.
        Cloned::new(self)
    }
704
705 /// Creates an iterator which copies all of its elements. This may be
706 /// useful when you have an iterator over `&T`, but you need `T`, and
707 /// that type implements `Copy`. See also [`cloned()`].
708 ///
709 /// [`cloned()`]: #method.cloned
710 ///
711 /// # Examples
712 ///
713 /// ```
714 /// use rayon::prelude::*;
715 ///
716 /// let a = [1, 2, 3];
717 ///
718 /// let v_copied: Vec<_> = a.par_iter().copied().collect();
719 ///
720 /// // copied is the same as .map(|&x| x), for integers
721 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
722 ///
723 /// assert_eq!(v_copied, vec![1, 2, 3]);
724 /// assert_eq!(v_map, vec![1, 2, 3]);
725 /// ```
    fn copied<'a, T>(self) -> Copied<Self>
    where
        T: 'a + Copy + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        // Adaptor that copies each `&T` item into an owned `T`.
        Copied::new(self)
    }
733
734 /// Applies `inspect_op` to a reference to each item of this iterator,
735 /// producing a new iterator passing through the original items. This is
736 /// often useful for debugging to see what's happening in iterator stages.
737 ///
738 /// # Examples
739 ///
740 /// ```
741 /// use rayon::prelude::*;
742 ///
743 /// let a = [1, 4, 2, 3];
744 ///
745 /// // this iterator sequence is complex.
746 /// let sum = a.par_iter()
747 /// .cloned()
748 /// .filter(|&x| x % 2 == 0)
749 /// .reduce(|| 0, |sum, i| sum + i);
750 ///
751 /// println!("{}", sum);
752 ///
753 /// // let's add some inspect() calls to investigate what's happening
754 /// let sum = a.par_iter()
755 /// .cloned()
756 /// .inspect(|x| println!("about to filter: {}", x))
757 /// .filter(|&x| x % 2 == 0)
758 /// .inspect(|x| println!("made it through filter: {}", x))
759 /// .reduce(|| 0, |sum, i| sum + i);
760 ///
761 /// println!("{}", sum);
762 /// ```
    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
    where
        OP: Fn(&Self::Item) + Sync + Send,
    {
        // Pass-through adaptor; `inspect_op` observes each item by reference.
        Inspect::new(self, inspect_op)
    }
769
770 /// Mutates each item of this iterator before yielding it.
771 ///
772 /// # Examples
773 ///
774 /// ```
775 /// use rayon::prelude::*;
776 ///
777 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
778 ///
779 /// let doubles: Vec<_> = par_iter.collect();
780 ///
781 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
782 /// ```
    fn update<F>(self, update_op: F) -> Update<Self, F>
    where
        F: Fn(&mut Self::Item) + Sync + Send,
    {
        // Adaptor that applies `update_op` to each item before yielding it.
        Update::new(self, update_op)
    }
789
790 /// Applies `filter_op` to each item of this iterator, producing a new
791 /// iterator with only the items that gave `true` results.
792 ///
793 /// # Examples
794 ///
795 /// ```
796 /// use rayon::prelude::*;
797 ///
798 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
799 ///
800 /// let even_numbers: Vec<_> = par_iter.collect();
801 ///
802 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
803 /// ```
    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Adaptor that keeps only items for which `filter_op` returns `true`.
        Filter::new(self, filter_op)
    }
810
811 /// Applies `filter_op` to each item of this iterator to get an `Option`,
812 /// producing a new iterator with only the items from `Some` results.
813 ///
814 /// # Examples
815 ///
816 /// ```
817 /// use rayon::prelude::*;
818 ///
819 /// let mut par_iter = (0..10).into_par_iter()
820 /// .filter_map(|x| {
821 /// if x % 2 == 0 { Some(x * 3) }
822 /// else { None }
823 /// });
824 ///
825 /// let even_numbers: Vec<_> = par_iter.collect();
826 ///
827 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
828 /// ```
    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // Adaptor that yields only the `Some` payloads produced by `filter_op`.
        FilterMap::new(self, filter_op)
    }
836
837 /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
838 /// producing a new parallel iterator that flattens these back into one.
839 ///
840 /// See also [`flat_map_iter`](#method.flat_map_iter).
841 ///
842 /// # Examples
843 ///
844 /// ```
845 /// use rayon::prelude::*;
846 ///
847 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
848 ///
849 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
850 ///
851 /// let vec: Vec<_> = par_iter.collect();
852 ///
853 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
854 /// ```
    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
    where
        F: Fn(Self::Item) -> PI + Sync + Send,
        PI: IntoParallelIterator,
    {
        // Adaptor that flattens the nested *parallel* iterators produced
        // by `map_op`.
        FlatMap::new(self, map_op)
    }
862
863 /// Applies `map_op` to each item of this iterator to get nested serial iterators,
864 /// producing a new parallel iterator that flattens these back into one.
865 ///
866 /// # `flat_map_iter` versus `flat_map`
867 ///
868 /// These two methods are similar but behave slightly differently. With [`flat_map`],
869 /// each of the nested iterators must be a parallel iterator, and they will be further
870 /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
871 /// sequential `Iterator`, and we only parallelize _between_ them, while the items
872 /// produced by each nested iterator are processed sequentially.
873 ///
874 /// When choosing between these methods, consider whether nested parallelism suits the
875 /// potential iterators at hand. If there's little computation involved, or its length
876 /// is much less than the outer parallel iterator, then it may perform better to avoid
877 /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
878 /// If there is a lot of computation, potentially outweighing the outer parallel
879 /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
880 ///
881 /// [`flat_map`]: #method.flat_map
882 ///
883 /// # Examples
884 ///
885 /// ```
886 /// use rayon::prelude::*;
887 /// use std::cell::RefCell;
888 ///
889 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
890 ///
891 /// let par_iter = a.par_iter().flat_map_iter(|a| {
892 /// // The serial iterator doesn't have to be thread-safe, just its items.
893 /// let cell_iter = RefCell::new(a.iter().cloned());
894 /// std::iter::from_fn(move || cell_iter.borrow_mut().next())
895 /// });
896 ///
897 /// let vec: Vec<_> = par_iter.collect();
898 ///
899 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
900 /// ```
    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
    where
        F: Fn(Self::Item) -> SI + Sync + Send,
        SI: IntoIterator<Item: Send>,
    {
        // Adaptor that flattens the nested *serial* iterators produced by
        // `map_op`; parallelism applies only between nested iterators.
        FlatMapIter::new(self, map_op)
    }
908
909 /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
910 ///
911 /// See also [`flatten_iter`](#method.flatten_iter).
912 ///
913 /// # Examples
914 ///
915 /// ```
916 /// use rayon::prelude::*;
917 ///
918 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
919 /// let y: Vec<_> = x.into_par_iter().flatten().collect();
920 ///
921 /// assert_eq!(y, vec![1, 2, 3, 4]);
922 /// ```
    fn flatten(self) -> Flatten<Self>
    where
        Self::Item: IntoParallelIterator,
    {
        // Adaptor that flattens items which are themselves parallel-iterable.
        Flatten::new(self)
    }
929
930 /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
931 ///
932 /// See also [`flatten`](#method.flatten) and the analogous comparison of
933 /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
934 ///
935 /// # Examples
936 ///
937 /// ```
938 /// use rayon::prelude::*;
939 ///
940 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
941 /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
942 /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
943 ///
944 /// assert_eq!(y, vec![1, 2, 3, 4]);
945 /// ```
    fn flatten_iter(self) -> FlattenIter<Self>
    where
        Self::Item: IntoIterator<Item: Send>,
    {
        // Adaptor that flattens items which are serially iterable.
        FlattenIter::new(self)
    }
952
953 /// Reduces the items in the iterator into one item using `op`.
954 /// The argument `identity` should be a closure that can produce
955 /// "identity" value which may be inserted into the sequence as
956 /// needed to create opportunities for parallel execution. So, for
957 /// example, if you are doing a summation, then `identity()` ought
958 /// to produce something that represents the zero for your type
959 /// (but consider just calling `sum()` in that case).
960 ///
961 /// # Examples
962 ///
963 /// ```
964 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
965 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
966 /// // where the first/second elements are summed separately.
967 /// use rayon::prelude::*;
968 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
969 /// .par_iter() // iterating over &(i32, i32)
970 /// .cloned() // iterating over (i32, i32)
971 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns
972 /// |a, b| (a.0 + b.0, a.1 + b.1));
973 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
974 /// ```
975 ///
976 /// **Note:** unlike a sequential `fold` operation, the order in
977 /// which `op` will be applied to reduce the result is not fully
978 /// specified. So `op` should be [associative] or else the results
979 /// will be non-deterministic. And of course `identity()` should
980 /// produce a true identity.
981 ///
982 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
        ID: Fn() -> Self::Item + Sync + Send,
    {
        // Eager consumer: `identity()` seeds parallel jobs as needed, and
        // `op` merges the partial results.
        reduce::reduce(self, identity, op)
    }
990
991 /// Reduces the items in the iterator into one item using `op`.
992 /// If the iterator is empty, `None` is returned; otherwise,
993 /// `Some` is returned.
994 ///
995 /// This version of `reduce` is simple but somewhat less
996 /// efficient. If possible, it is better to call `reduce()`, which
997 /// requires an identity element.
998 ///
999 /// # Examples
1000 ///
1001 /// ```
1002 /// use rayon::prelude::*;
1003 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1004 /// .par_iter() // iterating over &(i32, i32)
1005 /// .cloned() // iterating over (i32, i32)
1006 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1007 /// .unwrap();
1008 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1009 /// ```
1010 ///
1011 /// **Note:** unlike a sequential `fold` operation, the order in
1012 /// which `op` will be applied to reduce the result is not fully
1013 /// specified. So `op` should be [associative] or else the results
1014 /// will be non-deterministic.
1015 ///
1016 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1017 fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1018 where
1019 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1020 {
1021 fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1022 move |opt_a, b| match opt_a {
1023 Some(a) => Some(op(a, b)),
1024 None => Some(b),
1025 }
1026 }
1027
1028 fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1029 move |opt_a, opt_b| match (opt_a, opt_b) {
1030 (Some(a), Some(b)) => Some(op(a, b)),
1031 (Some(v), None) | (None, Some(v)) => Some(v),
1032 (None, None) => None,
1033 }
1034 }
1035
1036 self.fold(<_>::default, opt_fold(&op))
1037 .reduce(<_>::default, opt_reduce(&op))
1038 }
1039
1040 /// Reduces the items in the iterator into one item using a fallible `op`.
1041 /// The `identity` argument is used the same way as in [`reduce()`].
1042 ///
1043 /// [`reduce()`]: #method.reduce
1044 ///
1045 /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1046 /// to one, we will attempt to stop processing the rest of the items in the
1047 /// iterator as soon as possible, and we will return that terminating value.
1048 /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1049 /// `Option::Some(T)`. If there are multiple errors in parallel, it is not
1050 /// specified which will be returned.
1051 ///
1052 /// # Examples
1053 ///
1054 /// ```
1055 /// use rayon::prelude::*;
1056 ///
1057 /// // Compute the sum of squares, being careful about overflow.
1058 /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1059 /// iter.into_par_iter()
1060 /// .map(|i| i.checked_mul(i)) // square each item,
1061 /// .try_reduce(|| 0, i32::checked_add) // and add them up!
1062 /// }
1063 /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1064 ///
1065 /// // The sum might overflow
1066 /// assert_eq!(sum_squares(0..10_000), None);
1067 ///
1068 /// // Or the squares might overflow before it even reaches `try_reduce`
1069 /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1070 /// ```
    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(T, T) -> Self::Item + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        Self::Item: Try<Output = T>,
    {
        // Delegate to the `try_reduce` module, which performs the reduction
        // and short-circuits on the first `Err`/`None` it encounters.
        try_reduce::try_reduce(self, identity, op)
    }
1079
1080 /// Reduces the items in the iterator into one item using a fallible `op`.
1081 ///
1082 /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1083 /// otherwise, `Some` is returned. Beyond that, it behaves like
1084 /// [`try_reduce()`] for handling `Err`/`None`.
1085 ///
1086 /// [`reduce_with()`]: #method.reduce_with
1087 /// [`try_reduce()`]: #method.try_reduce
1088 ///
1089 /// For instance, with `Option` items, the return value may be:
1090 /// - `None`, the iterator was empty
1091 /// - `Some(None)`, we stopped after encountering `None`.
1092 /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1093 ///
1094 /// With `Result` items, the nesting is more obvious:
1095 /// - `None`, the iterator was empty
1096 /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1097 /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1098 ///
1099 /// # Examples
1100 ///
1101 /// ```
1102 /// use rayon::prelude::*;
1103 ///
1104 /// let files = ["/dev/null", "/does/not/exist"];
1105 ///
1106 /// // Find the biggest file
1107 /// files.into_par_iter()
1108 /// .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1109 /// .try_reduce_with(|a, b| {
1110 /// Ok(if a.1 >= b.1 { a } else { b })
1111 /// })
1112 /// .expect("Some value, since the iterator is not empty")
1113 /// .expect_err("not found");
1114 /// ```
    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
    where
        OP: Fn(T, T) -> Self::Item + Sync + Send,
        Self::Item: Try<Output = T>,
    {
        // Delegate to the `try_reduce_with` module; like `reduce_with`, an
        // empty iterator yields `None` rather than requiring an identity.
        try_reduce_with::try_reduce_with(self, op)
    }
1122
1123 /// Parallel fold is similar to sequential fold except that the
1124 /// sequence of items may be subdivided before it is
1125 /// folded. Consider a list of numbers like `22 3 77 89 46`. If
1126 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`,
1127 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1128 /// 77, and so forth. The **parallel fold** works similarly except
1129 /// that it first breaks up your list into sublists, and hence
1130 /// instead of yielding up a single sum at the end, it yields up
1131 /// multiple sums. The number of results is nondeterministic, as
1132 /// is the point where the breaks occur.
1133 ///
1134 /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1135 /// our example list, we might wind up with a sequence of two numbers,
1136 /// like so:
1137 ///
1138 /// ```notrust
1139 /// 22 3 77 89 46
1140 /// | |
1141 /// 102 135
1142 /// ```
1143 ///
1144 /// Or perhaps these three numbers:
1145 ///
1146 /// ```notrust
1147 /// 22 3 77 89 46
1148 /// | | |
1149 /// 102 89 46
1150 /// ```
1151 ///
1152 /// In general, Rayon will attempt to find good breaking points
1153 /// that keep all of your cores busy.
1154 ///
1155 /// ### Fold versus reduce
1156 ///
1157 /// The `fold()` and `reduce()` methods each take an identity element
1158 /// and a combining function, but they operate rather differently.
1159 ///
1160 /// `reduce()` requires that the identity function has the same
1161 /// type as the things you are iterating over, and it fully
1162 /// reduces the list of items into a single item. So, for example,
1163 /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1164 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1165 /// u8| a + b)`, we would get an overflow. This is because `0`,
1166 /// `a`, and `b` here are all bytes, just like the numbers in the
1167 /// list (I wrote the types explicitly above, but those are the
1168 /// only types you can use). To avoid the overflow, we would need
1169 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1170 /// b| a + b)`, in which case our result would be `256`.
1171 ///
1172 /// In contrast, with `fold()`, the identity function does not
1173 /// have to have the same type as the things you are iterating
1174 /// over, and you potentially get back many results. So, if we
1175 /// continue with the `bytes` example from the previous paragraph,
1176 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1177 /// convert our bytes into `u32`. And of course we might not get
1178 /// back a single sum.
1179 ///
1180 /// There is a more subtle distinction as well, though it's
1181 /// actually implied by the above points. When you use `reduce()`,
1182 /// your reduction function is sometimes called with values that
1183 /// were never part of your original parallel iterator (for
1184 /// example, both the left and right might be a partial sum). With
1185 /// `fold()`, in contrast, the left value in the fold function is
1186 /// always the accumulator, and the right value is always from
1187 /// your original sequence.
1188 ///
1189 /// ### Fold vs Map/Reduce
1190 ///
1191 /// Fold makes sense if you have some operation where it is
1192 /// cheaper to create groups of elements at a time. For example,
1193 /// imagine collecting characters into a string. If you were going
1194 /// to use map/reduce, you might try this:
1195 ///
1196 /// ```
1197 /// use rayon::prelude::*;
1198 ///
1199 /// let s =
1200 /// ['a', 'b', 'c', 'd', 'e']
1201 /// .par_iter()
1202 /// .map(|c: &char| format!("{}", c))
1203 /// .reduce(|| String::new(),
1204 /// |mut a: String, b: String| { a.push_str(&b); a });
1205 ///
1206 /// assert_eq!(s, "abcde");
1207 /// ```
1208 ///
1209 /// Because reduce produces the same type of element as its input,
1210 /// you have to first map each character into a string, and then
1211 /// you can reduce them. This means we create one string per
1212 /// element in our iterator -- not so great. Using `fold`, we can
1213 /// do this instead:
1214 ///
1215 /// ```
1216 /// use rayon::prelude::*;
1217 ///
1218 /// let s =
1219 /// ['a', 'b', 'c', 'd', 'e']
1220 /// .par_iter()
1221 /// .fold(|| String::new(),
1222 /// |mut s: String, c: &char| { s.push(*c); s })
1223 /// .reduce(|| String::new(),
1224 /// |mut a: String, b: String| { a.push_str(&b); a });
1225 ///
1226 /// assert_eq!(s, "abcde");
1227 /// ```
1228 ///
1229 /// Now `fold` will process groups of our characters at a time,
1230 /// and we only make one string per group. We should wind up with
1231 /// some small-ish number of strings roughly proportional to the
1232 /// number of CPUs you have (it will ultimately depend on how busy
1233 /// your processors are). Note that we still need to do a reduce
1234 /// afterwards to combine those groups of strings into a single
1235 /// string.
1236 ///
1237 /// You could use a similar trick to save partial results (e.g., a
1238 /// cache) or something similar.
1239 ///
1240 /// ### Combining fold with other operations
1241 ///
1242 /// You can combine `fold` with `reduce` if you want to produce a
1243 /// single value. This is then roughly equivalent to a map/reduce
1244 /// combination in effect:
1245 ///
1246 /// ```
1247 /// use rayon::prelude::*;
1248 ///
1249 /// let bytes = 0..22_u8;
1250 /// let sum = bytes.into_par_iter()
1251 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1252 /// .sum::<u32>();
1253 ///
1254 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1255 /// ```
    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        T: Send,
    {
        // Lazy adaptor: this only wraps `self`; the folding work happens
        // when the returned `Fold` iterator is itself consumed.
        Fold::new(self, identity, fold_op)
    }
1264
1265 /// Applies `fold_op` to the given `init` value with each item of this
1266 /// iterator, finally producing the value for further use.
1267 ///
1268 /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1269 /// it doesn't require the `init` type to be `Sync`, nor any other form
1270 /// of added synchronization.
1271 ///
1272 /// # Examples
1273 ///
1274 /// ```
1275 /// use rayon::prelude::*;
1276 ///
1277 /// let bytes = 0..22_u8;
1278 /// let sum = bytes.into_par_iter()
1279 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1280 /// .sum::<u32>();
1281 ///
1282 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1283 /// ```
    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        T: Send + Clone,
    {
        // Lazy adaptor: stores `init` (cloned per subdivision, hence the
        // `Clone` bound) and `fold_op` until the iterator is consumed.
        FoldWith::new(self, init, fold_op)
    }
1291
1292 /// Performs a fallible parallel fold.
1293 ///
1294 /// This is a variation of [`fold()`] for operations which can fail with
1295 /// `Option::None` or `Result::Err`. The first such failure stops
1296 /// processing the local set of items, without affecting other folds in the
1297 /// iterator's subdivisions.
1298 ///
1299 /// Often, `try_fold()` will be followed by [`try_reduce()`]
1300 /// for a final reduction and global short-circuiting effect.
1301 ///
1302 /// [`fold()`]: #method.fold
1303 /// [`try_reduce()`]: #method.try_reduce
1304 ///
1305 /// # Examples
1306 ///
1307 /// ```
1308 /// use rayon::prelude::*;
1309 ///
1310 /// let bytes = 0..22_u8;
1311 /// let sum = bytes.into_par_iter()
1312 /// .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1313 /// .try_reduce(|| 0, u32::checked_add);
1314 ///
1315 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1316 /// ```
    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        R: Try<Output = T> + Send,
    {
        // Lazy adaptor: like `fold`, but each step yields an `R: Try`, so a
        // failure stops the local fold when the iterator is consumed.
        TryFold::new(self, identity, fold_op)
    }
1325
1326 /// Performs a fallible parallel fold with a cloneable `init` value.
1327 ///
1328 /// This combines the `init` semantics of [`fold_with()`] and the failure
1329 /// semantics of [`try_fold()`].
1330 ///
1331 /// [`fold_with()`]: #method.fold_with
1332 /// [`try_fold()`]: #method.try_fold
1333 ///
1334 /// ```
1335 /// use rayon::prelude::*;
1336 ///
1337 /// let bytes = 0..22_u8;
1338 /// let sum = bytes.into_par_iter()
1339 /// .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1340 /// .try_reduce(|| 0, u32::checked_add);
1341 ///
1342 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1343 /// ```
    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        R: Try<Output = T> + Send,
        T: Clone + Send,
    {
        // Lazy adaptor combining `fold_with`'s cloneable seed with
        // `try_fold`'s fallible accumulator.
        TryFoldWith::new(self, init, fold_op)
    }
1352
1353 /// Sums up the items in the iterator.
1354 ///
    /// Note that the order in which items will be reduced is not specified,
1356 /// so if the `+` operator is not truly [associative] \(as is the
1357 /// case for floating point numbers), then the results are not
1358 /// fully deterministic.
1359 ///
1360 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1361 ///
1362 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1363 /// except that the type of `0` and the `+` operation may vary
1364 /// depending on the type of value being produced.
1365 ///
1366 /// # Examples
1367 ///
1368 /// ```
1369 /// use rayon::prelude::*;
1370 ///
1371 /// let a = [1, 5, 7];
1372 ///
1373 /// let sum: i32 = a.par_iter().sum();
1374 ///
1375 /// assert_eq!(sum, 13);
1376 /// ```
    fn sum<S>(self) -> S
    where
        S: Send + Sum<Self::Item> + Sum<S>,
    {
        // `Sum<Self::Item>` folds the items themselves; `Sum<S>` combines
        // the partial sums produced by separate subdivisions.
        sum::sum(self)
    }
1383
1384 /// Multiplies all the items in the iterator.
1385 ///
    /// Note that the order in which items will be reduced is not specified,
1387 /// so if the `*` operator is not truly [associative] \(as is the
1388 /// case for floating point numbers), then the results are not
1389 /// fully deterministic.
1390 ///
1391 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1392 ///
1393 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1394 /// except that the type of `1` and the `*` operation may vary
1395 /// depending on the type of value being produced.
1396 ///
1397 /// # Examples
1398 ///
1399 /// ```
1400 /// use rayon::prelude::*;
1401 ///
1402 /// fn factorial(n: u32) -> u32 {
1403 /// (1..n+1).into_par_iter().product()
1404 /// }
1405 ///
1406 /// assert_eq!(factorial(0), 1);
1407 /// assert_eq!(factorial(1), 1);
1408 /// assert_eq!(factorial(5), 120);
1409 /// ```
    fn product<P>(self) -> P
    where
        P: Send + Product<Self::Item> + Product<P>,
    {
        // `Product<Self::Item>` multiplies the items themselves;
        // `Product<P>` combines partial products from separate subdivisions.
        product::product(self)
    }
1416
1417 /// Computes the minimum of all the items in the iterator. If the
1418 /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1419 /// is returned.
1420 ///
1421 /// Note that the order in which the items will be reduced is not
1422 /// specified, so if the `Ord` impl is not truly associative, then
1423 /// the results are not deterministic.
1424 ///
1425 /// Basically equivalent to `self.reduce_with(|a, b| Ord::min(a, b))`.
1426 ///
1427 /// # Examples
1428 ///
1429 /// ```
1430 /// use rayon::prelude::*;
1431 ///
1432 /// let a = [45, 74, 32];
1433 ///
1434 /// assert_eq!(a.par_iter().min(), Some(&32));
1435 ///
1436 /// let b: [i32; 0] = [];
1437 ///
1438 /// assert_eq!(b.par_iter().min(), None);
1439 /// ```
1440 fn min(self) -> Option<Self::Item>
1441 where
1442 Self::Item: Ord,
1443 {
1444 self.reduce_with(Ord::min)
1445 }
1446
1447 /// Computes the minimum of all the items in the iterator with respect to
1448 /// the given comparison function. If the iterator is empty, `None` is
1449 /// returned; otherwise, `Some(min)` is returned.
1450 ///
1451 /// Note that the order in which the items will be reduced is not
1452 /// specified, so if the comparison function is not associative, then
1453 /// the results are not deterministic.
1454 ///
1455 /// # Examples
1456 ///
1457 /// ```
1458 /// use rayon::prelude::*;
1459 ///
1460 /// let a = [-3_i32, 77, 53, 240, -1];
1461 ///
1462 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1463 /// ```
1464 fn min_by<F>(self, f: F) -> Option<Self::Item>
1465 where
1466 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1467 {
1468 fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1469 move |a, b| match f(&a, &b) {
1470 Ordering::Greater => b,
1471 _ => a,
1472 }
1473 }
1474
1475 self.reduce_with(min(f))
1476 }
1477
1478 /// Computes the item that yields the minimum value for the given
1479 /// function. If the iterator is empty, `None` is returned;
1480 /// otherwise, `Some(item)` is returned.
1481 ///
1482 /// Note that the order in which the items will be reduced is not
1483 /// specified, so if the `Ord` impl is not truly associative, then
1484 /// the results are not deterministic.
1485 ///
1486 /// # Examples
1487 ///
1488 /// ```
1489 /// use rayon::prelude::*;
1490 ///
1491 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1492 ///
1493 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1494 /// ```
1495 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1496 where
1497 K: Ord + Send,
1498 F: Sync + Send + Fn(&Self::Item) -> K,
1499 {
1500 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1501 move |x| (f(&x), x)
1502 }
1503
1504 fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1505 match (a.0).cmp(&b.0) {
1506 Ordering::Greater => b,
1507 _ => a,
1508 }
1509 }
1510
1511 let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1512 Some(x)
1513 }
1514
1515 /// Computes the maximum of all the items in the iterator. If the
1516 /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1517 /// is returned.
1518 ///
1519 /// Note that the order in which the items will be reduced is not
1520 /// specified, so if the `Ord` impl is not truly associative, then
1521 /// the results are not deterministic.
1522 ///
1523 /// Basically equivalent to `self.reduce_with(|a, b| Ord::max(a, b))`.
1524 ///
1525 /// # Examples
1526 ///
1527 /// ```
1528 /// use rayon::prelude::*;
1529 ///
1530 /// let a = [45, 74, 32];
1531 ///
1532 /// assert_eq!(a.par_iter().max(), Some(&74));
1533 ///
1534 /// let b: [i32; 0] = [];
1535 ///
1536 /// assert_eq!(b.par_iter().max(), None);
1537 /// ```
1538 fn max(self) -> Option<Self::Item>
1539 where
1540 Self::Item: Ord,
1541 {
1542 self.reduce_with(Ord::max)
1543 }
1544
1545 /// Computes the maximum of all the items in the iterator with respect to
1546 /// the given comparison function. If the iterator is empty, `None` is
1547 /// returned; otherwise, `Some(max)` is returned.
1548 ///
1549 /// Note that the order in which the items will be reduced is not
1550 /// specified, so if the comparison function is not associative, then
1551 /// the results are not deterministic.
1552 ///
1553 /// # Examples
1554 ///
1555 /// ```
1556 /// use rayon::prelude::*;
1557 ///
1558 /// let a = [-3_i32, 77, 53, 240, -1];
1559 ///
1560 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1561 /// ```
1562 fn max_by<F>(self, f: F) -> Option<Self::Item>
1563 where
1564 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1565 {
1566 fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1567 move |a, b| match f(&a, &b) {
1568 Ordering::Greater => a,
1569 _ => b,
1570 }
1571 }
1572
1573 self.reduce_with(max(f))
1574 }
1575
1576 /// Computes the item that yields the maximum value for the given
1577 /// function. If the iterator is empty, `None` is returned;
1578 /// otherwise, `Some(item)` is returned.
1579 ///
1580 /// Note that the order in which the items will be reduced is not
1581 /// specified, so if the `Ord` impl is not truly associative, then
1582 /// the results are not deterministic.
1583 ///
1584 /// # Examples
1585 ///
1586 /// ```
1587 /// use rayon::prelude::*;
1588 ///
1589 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1590 ///
1591 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1592 /// ```
1593 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1594 where
1595 K: Ord + Send,
1596 F: Sync + Send + Fn(&Self::Item) -> K,
1597 {
1598 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1599 move |x| (f(&x), x)
1600 }
1601
1602 fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1603 match (a.0).cmp(&b.0) {
1604 Ordering::Greater => a,
1605 _ => b,
1606 }
1607 }
1608
1609 let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1610 Some(x)
1611 }
1612
1613 /// Takes two iterators and creates a new iterator over both.
1614 ///
1615 /// # Examples
1616 ///
1617 /// ```
1618 /// use rayon::prelude::*;
1619 ///
1620 /// let a = [0, 1, 2];
1621 /// let b = [9, 8, 7];
1622 ///
1623 /// let par_iter = a.par_iter().chain(b.par_iter());
1624 ///
1625 /// let chained: Vec<_> = par_iter.cloned().collect();
1626 ///
1627 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1628 /// ```
    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
    where
        C: IntoParallelIterator<Item = Self::Item>,
    {
        // Convert the argument into a parallel iterator and pair it with
        // `self` in a lazy `Chain` adaptor.
        Chain::new(self, chain.into_par_iter())
    }
1635
1636 /// Searches for **some** item in the parallel iterator that
1637 /// matches the given predicate and returns it. This operation
1638 /// is similar to [`find` on sequential iterators][find] but
1639 /// the item returned may not be the **first** one in the parallel
1640 /// sequence which matches, since we search the entire sequence in parallel.
1641 ///
1642 /// Once a match is found, we will attempt to stop processing
1643 /// the rest of the items in the iterator as soon as possible
1644 /// (just as `find` stops iterating once a match is found).
1645 ///
1646 /// [find]: Iterator::find()
1647 ///
1648 /// # Examples
1649 ///
1650 /// ```
1651 /// use rayon::prelude::*;
1652 ///
1653 /// let a = [1, 2, 3, 3];
1654 ///
1655 /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1656 ///
1657 /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1658 /// ```
    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Delegate to the `find` module, which stops remaining work once
        // any thread produces a match.
        find::find(self, predicate)
    }
1665
1666 /// Searches for the sequentially **first** item in the parallel iterator
1667 /// that matches the given predicate and returns it.
1668 ///
1669 /// Once a match is found, all attempts to the right of the match
1670 /// will be stopped, while attempts to the left must continue in case
1671 /// an earlier match is found.
1672 ///
1673 /// For added performance, you might consider using `find_first` in conjunction with
1674 /// [`by_exponential_blocks()`][IndexedParallelIterator::by_exponential_blocks].
1675 ///
1676 /// Note that not all parallel iterators have a useful order, much like
1677 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
    /// just want the first match discovered anywhere in the iterator,
1679 /// `find_any` is a better choice.
1680 ///
1681 /// # Examples
1682 ///
1683 /// ```
1684 /// use rayon::prelude::*;
1685 ///
1686 /// let a = [1, 2, 3, 3];
1687 ///
1688 /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1689 ///
1690 /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1691 /// ```
    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Delegate to the `find_first_last` module in "first" mode: a match
        // cancels work to its right but lets work to its left continue.
        find_first_last::find_first(self, predicate)
    }
1698
1699 /// Searches for the sequentially **last** item in the parallel iterator
1700 /// that matches the given predicate and returns it.
1701 ///
1702 /// Once a match is found, all attempts to the left of the match
1703 /// will be stopped, while attempts to the right must continue in case
1704 /// a later match is found.
1705 ///
1706 /// Note that not all parallel iterators have a useful order, much like
1707 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
1708 /// order doesn't actually matter to you, `find_any` is a better choice.
1709 ///
1710 /// # Examples
1711 ///
1712 /// ```
1713 /// use rayon::prelude::*;
1714 ///
1715 /// let a = [1, 2, 3, 3];
1716 ///
1717 /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1718 ///
1719 /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1720 /// ```
    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Delegate to the `find_first_last` module in "last" mode: a match
        // cancels work to its left but lets work to its right continue.
        find_first_last::find_last(self, predicate)
    }
1727
1728 /// Applies the given predicate to the items in the parallel iterator
1729 /// and returns **any** non-None result of the map operation.
1730 ///
1731 /// Once a non-None value is produced from the map operation, we will
1732 /// attempt to stop processing the rest of the items in the iterator
1733 /// as soon as possible.
1734 ///
1735 /// Note that this method only returns **some** item in the parallel
1736 /// iterator that is not None from the map predicate. The item returned
1737 /// may not be the **first** non-None value produced in the parallel
1738 /// sequence, since the entire sequence is mapped over in parallel.
1739 ///
1740 /// # Examples
1741 ///
1742 /// ```
1743 /// use rayon::prelude::*;
1744 ///
1745 /// let c = ["lol", "NaN", "5", "5"];
1746 ///
1747 /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1748 ///
1749 /// assert_eq!(found_number, Some(5));
1750 /// ```
1751 fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1752 where
1753 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1754 R: Send,
1755 {
1756 fn yes<T>(_: &T) -> bool {
1757 true
1758 }
1759 self.filter_map(predicate).find_any(yes)
1760 }
1761
1762 /// Applies the given predicate to the items in the parallel iterator and
1763 /// returns the sequentially **first** non-None result of the map operation.
1764 ///
1765 /// Once a non-None value is produced from the map operation, all attempts
1766 /// to the right of the match will be stopped, while attempts to the left
1767 /// must continue in case an earlier match is found.
1768 ///
1769 /// Note that not all parallel iterators have a useful order, much like
1770 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1771 /// just want the first non-None value discovered anywhere in the iterator,
1772 /// `find_map_any` is a better choice.
1773 ///
1774 /// # Examples
1775 ///
1776 /// ```
1777 /// use rayon::prelude::*;
1778 ///
1779 /// let c = ["lol", "NaN", "2", "5"];
1780 ///
1781 /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1782 ///
1783 /// assert_eq!(first_number, Some(2));
1784 /// ```
1785 fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1786 where
1787 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1788 R: Send,
1789 {
1790 fn yes<T>(_: &T) -> bool {
1791 true
1792 }
1793 self.filter_map(predicate).find_first(yes)
1794 }
1795
1796 /// Applies the given predicate to the items in the parallel iterator and
1797 /// returns the sequentially **last** non-None result of the map operation.
1798 ///
1799 /// Once a non-None value is produced from the map operation, all attempts
1800 /// to the left of the match will be stopped, while attempts to the right
1801 /// must continue in case a later match is found.
1802 ///
1803 /// Note that not all parallel iterators have a useful order, much like
1804 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1805 /// just want the first non-None value discovered anywhere in the iterator,
1806 /// `find_map_any` is a better choice.
1807 ///
1808 /// # Examples
1809 ///
1810 /// ```
1811 /// use rayon::prelude::*;
1812 ///
1813 /// let c = ["lol", "NaN", "2", "5"];
1814 ///
1815 /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1816 ///
1817 /// assert_eq!(last_number, Some(5));
1818 /// ```
1819 fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1820 where
1821 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1822 R: Send,
1823 {
1824 fn yes<T>(_: &T) -> bool {
1825 true
1826 }
1827 self.filter_map(predicate).find_last(yes)
1828 }
1829
1830 #[doc(hidden)]
1831 #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\
1832 `find_first`, or `find_last`")]
    fn find<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Kept only for backwards compatibility; `find_any` has identical
        // semantics under a more accurate name.
        self.find_any(predicate)
    }
1839
1840 /// Searches for **some** item in the parallel iterator that
1841 /// matches the given predicate, and if so returns true. Once
    /// a match is found, we'll attempt to stop processing the rest
1843 /// of the items. Proving that there's no match, returning false,
1844 /// does require visiting every item.
1845 ///
1846 /// # Examples
1847 ///
1848 /// ```
1849 /// use rayon::prelude::*;
1850 ///
1851 /// let a = [0, 12, 3, 4, 0, 23, 0];
1852 ///
1853 /// let is_valid = a.par_iter().any(|&x| x > 10);
1854 ///
1855 /// assert!(is_valid);
1856 /// ```
1857 fn any<P>(self, predicate: P) -> bool
1858 where
1859 P: Fn(Self::Item) -> bool + Sync + Send,
1860 {
1861 self.map(predicate).find_any(bool::clone).is_some()
1862 }
1863
1864 /// Tests that every item in the parallel iterator matches the given
1865 /// predicate, and if so returns true. If a counter-example is found,
1866 /// we'll attempt to stop processing more items, then return false.
1867 ///
1868 /// # Examples
1869 ///
1870 /// ```
1871 /// use rayon::prelude::*;
1872 ///
1873 /// let a = [0, 12, 3, 4, 0, 23, 0];
1874 ///
1875 /// let is_valid = a.par_iter().all(|&x| x > 10);
1876 ///
1877 /// assert!(!is_valid);
1878 /// ```
1879 fn all<P>(self, predicate: P) -> bool
1880 where
1881 P: Fn(Self::Item) -> bool + Sync + Send,
1882 {
1883 #[inline]
1884 fn is_false(x: &bool) -> bool {
1885 !x
1886 }
1887
1888 self.map(predicate).find_any(is_false).is_none()
1889 }
1890
1891 /// Creates an iterator over the `Some` items of this iterator, halting
1892 /// as soon as any `None` is found.
1893 ///
1894 /// # Examples
1895 ///
1896 /// ```
1897 /// use rayon::prelude::*;
1898 /// use std::sync::atomic::{AtomicUsize, Ordering};
1899 ///
1900 /// let counter = AtomicUsize::new(0);
1901 /// let value = (0_i32..2048)
1902 /// .into_par_iter()
1903 /// .map(|x| {
1904 /// counter.fetch_add(1, Ordering::SeqCst);
1905 /// if x < 1024 { Some(x) } else { None }
1906 /// })
1907 /// .while_some()
1908 /// .max();
1909 ///
1910 /// assert!(value < Some(1024));
1911 /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1912 /// ```
    fn while_some<T>(self) -> WhileSome<Self>
    where
        Self: ParallelIterator<Item = Option<T>>,
        T: Send,
    {
        // Lazy adaptor: the halt-on-`None` behavior takes effect when the
        // returned iterator is consumed.
        WhileSome::new(self)
    }
1920
1921 /// Wraps an iterator with a fuse in case of panics, to halt all threads
1922 /// as soon as possible.
1923 ///
1924 /// Panics within parallel iterators are always propagated to the caller,
1925 /// but they don't always halt the rest of the iterator right away, due to
1926 /// the internal semantics of [`join`]. This adaptor makes a greater effort
1927 /// to stop processing other items sooner, with the cost of additional
1928 /// synchronization overhead, which may also inhibit some optimizations.
1929 ///
1930 /// [`join`]: crate::join()#panics
1931 ///
1932 /// # Examples
1933 ///
1934 /// If this code didn't use `panic_fuse()`, it would continue processing
1935 /// many more items in other threads (with long sleep delays) before the
1936 /// panic is finally propagated.
1937 ///
1938 /// ```should_panic
1939 /// use rayon::prelude::*;
1940 /// use std::{thread, time};
1941 ///
1942 /// (0..1_000_000)
1943 /// .into_par_iter()
1944 /// .panic_fuse()
1945 /// .for_each(|i| {
1946 /// // simulate some work
1947 /// thread::sleep(time::Duration::from_secs(1));
1948 /// assert!(i > 0); // oops!
1949 /// });
1950 /// ```
    fn panic_fuse(self) -> PanicFuse<Self> {
        // Thin constructor for the `PanicFuse` adaptor; the early-halt
        // behavior on panic is implemented by that type.
        PanicFuse::new(self)
    }
1954
1955 /// Creates a fresh collection containing all the elements produced
1956 /// by this parallel iterator.
1957 ///
1958 /// You may prefer [`collect_into_vec()`] implemented on
1959 /// [`IndexedParallelIterator`], if your underlying iterator also implements
1960 /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1961 /// of how many elements the iterator contains, and even allows you to reuse
1962 /// an existing vector's backing store rather than allocating a fresh vector.
1963 ///
1964 /// See also [`collect_vec_list()`] for collecting into a
1965 /// `LinkedList<Vec<T>>`.
1966 ///
1967 /// [`collect_into_vec()`]: IndexedParallelIterator::collect_into_vec()
1968 /// [`collect_vec_list()`]: Self::collect_vec_list()
1969 ///
1970 /// # Examples
1971 ///
1972 /// ```
1973 /// use rayon::prelude::*;
1974 ///
1975 /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1976 ///
1977 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1978 ///
1979 /// assert_eq!(sync_vec, async_vec);
1980 /// ```
1981 ///
1982 /// You can collect a pair of collections like [`unzip`](#method.unzip)
1983 /// for paired items:
1984 ///
1985 /// ```
1986 /// use rayon::prelude::*;
1987 ///
1988 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1989 /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
1990 ///
1991 /// assert_eq!(first, [0, 1, 2, 3]);
1992 /// assert_eq!(second, [1, 2, 3, 4]);
1993 /// ```
1994 ///
1995 /// Or like [`partition_map`](#method.partition_map) for `Either` items:
1996 ///
1997 /// ```
1998 /// use rayon::prelude::*;
1999 /// use rayon::iter::Either;
2000 ///
2001 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
2002 /// if x % 2 == 0 {
2003 /// Either::Left(x * 4)
2004 /// } else {
2005 /// Either::Right(x * 3)
2006 /// }
2007 /// }).collect();
2008 ///
2009 /// assert_eq!(left, [0, 8, 16, 24]);
2010 /// assert_eq!(right, [3, 9, 15, 21]);
2011 /// ```
2012 ///
2013 /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2014 ///
2015 /// ```
2016 /// use rayon::prelude::*;
2017 /// use rayon::iter::Either;
2018 ///
2019 /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2020 /// = (0..8).into_par_iter().map(|x| {
2021 /// if x % 2 == 0 {
2022 /// (x, Either::Left(x * 4))
2023 /// } else {
2024 /// (-x, Either::Right(x * 3))
2025 /// }
2026 /// }).collect();
2027 ///
2028 /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2029 /// assert_eq!(left, [0, 8, 16, 24]);
2030 /// assert_eq!(right, [3, 9, 15, 21]);
2031 /// ```
2032 ///
2033 /// All of that can _also_ be combined with short-circuiting collection of
2034 /// `Result` or `Option` types:
2035 ///
2036 /// ```
2037 /// use rayon::prelude::*;
2038 /// use rayon::iter::Either;
2039 ///
2040 /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2041 /// = (0..8).into_par_iter().map(|x| {
2042 /// if x > 5 {
2043 /// Err(x)
2044 /// } else if x % 2 == 0 {
2045 /// Ok((x, Either::Left(x * 4)))
2046 /// } else {
2047 /// Ok((-x, Either::Right(x * 3)))
2048 /// }
2049 /// }).collect();
2050 ///
2051 /// let error = result.unwrap_err();
2052 /// assert!(error == 6 || error == 7);
2053 /// ```
    fn collect<C>(self) -> C
    where
        C: FromParallelIterator<Self::Item>,
    {
        // Delegates to the target collection's `FromParallelIterator` impl,
        // mirroring how `Iterator::collect` defers to `FromIterator`.
        C::from_par_iter(self)
    }
2060
2061 /// Unzips the items of a parallel iterator into a pair of arbitrary
2062 /// `ParallelExtend` containers.
2063 ///
2064 /// You may prefer to use `unzip_into_vecs()`, which allocates more
2065 /// efficiently with precise knowledge of how many elements the
2066 /// iterator contains, and even allows you to reuse existing
2067 /// vectors' backing stores rather than allocating fresh vectors.
2068 ///
2069 /// # Examples
2070 ///
2071 /// ```
2072 /// use rayon::prelude::*;
2073 ///
2074 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2075 ///
2076 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2077 ///
2078 /// assert_eq!(left, [0, 1, 2, 3]);
2079 /// assert_eq!(right, [1, 2, 3, 4]);
2080 /// ```
2081 ///
2082 /// Nested pairs can be unzipped too.
2083 ///
2084 /// ```
2085 /// use rayon::prelude::*;
2086 ///
2087 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2088 /// .map(|i| (i, (i * i, i * i * i)))
2089 /// .unzip();
2090 ///
2091 /// assert_eq!(values, [0, 1, 2, 3]);
2092 /// assert_eq!(squares, [0, 1, 4, 9]);
2093 /// assert_eq!(cubes, [0, 1, 8, 27]);
2094 /// ```
    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
    where
        Self: ParallelIterator<Item = (A, B)>,
        FromA: Default + Send + ParallelExtend<A>,
        FromB: Default + Send + ParallelExtend<B>,
        A: Send,
        B: Send,
    {
        // Delegates to the shared implementation in the `unzip` module.
        unzip::unzip(self)
    }
2105
2106 /// Partitions the items of a parallel iterator into a pair of arbitrary
2107 /// `ParallelExtend` containers. Items for which the `predicate` returns
2108 /// true go into the first container, and the rest go into the second.
2109 ///
2110 /// Note: unlike the standard `Iterator::partition`, this allows distinct
2111 /// collection types for the left and right items. This is more flexible,
2112 /// but may require new type annotations when converting sequential code
2113 /// that used type inference assuming the two were the same.
2114 ///
2115 /// # Examples
2116 ///
2117 /// ```
2118 /// use rayon::prelude::*;
2119 ///
2120 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2121 ///
2122 /// assert_eq!(left, [0, 2, 4, 6]);
2123 /// assert_eq!(right, [1, 3, 5, 7]);
2124 /// ```
    fn partition<A, B, P>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<Self::Item>,
        B: Default + Send + ParallelExtend<Self::Item>,
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Routing items by predicate shares machinery with `unzip`, so the
        // implementation lives in the `unzip` module.
        unzip::partition(self, predicate)
    }
2133
2134 /// Partitions and maps the items of a parallel iterator into a pair of
2135 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into
2136 /// the first container, and `Either::Right` items go into the second.
2137 ///
2138 /// # Examples
2139 ///
2140 /// ```
2141 /// use rayon::prelude::*;
2142 /// use rayon::iter::Either;
2143 ///
2144 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2145 /// .partition_map(|x| {
2146 /// if x % 2 == 0 {
2147 /// Either::Left(x * 4)
2148 /// } else {
2149 /// Either::Right(x * 3)
2150 /// }
2151 /// });
2152 ///
2153 /// assert_eq!(left, [0, 8, 16, 24]);
2154 /// assert_eq!(right, [3, 9, 15, 21]);
2155 /// ```
2156 ///
2157 /// Nested `Either` enums can be split as well.
2158 ///
2159 /// ```
2160 /// use rayon::prelude::*;
2161 /// use rayon::iter::Either::*;
2162 ///
2163 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2164 /// .into_par_iter()
2165 /// .partition_map(|x| match (x % 3, x % 5) {
2166 /// (0, 0) => Left(Left(x)),
2167 /// (0, _) => Left(Right(x)),
2168 /// (_, 0) => Right(Left(x)),
2169 /// (_, _) => Right(Right(x)),
2170 /// });
2171 ///
2172 /// assert_eq!(fizzbuzz, [15]);
2173 /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2174 /// assert_eq!(buzz, [5, 10]);
2175 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2176 /// ```
    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<L>,
        B: Default + Send + ParallelExtend<R>,
        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
        L: Send,
        R: Send,
    {
        // Like `partition`, this shares machinery with `unzip`; `Either`
        // selects which container each mapped item is extended into.
        unzip::partition_map(self, predicate)
    }
2187
2188 /// Intersperses clones of an element between items of this iterator.
2189 ///
2190 /// # Examples
2191 ///
2192 /// ```
2193 /// use rayon::prelude::*;
2194 ///
2195 /// let x = vec![1, 2, 3];
2196 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2197 ///
2198 /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2199 /// ```
    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
    where
        Self::Item: Clone,
    {
        // Thin constructor; `element` is cloned between items by the adaptor.
        Intersperse::new(self, element)
    }
2206
2207 /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
2208 ///
2209 /// This is similar to [`IndexedParallelIterator::take`] without being
2210 /// constrained to the "first" `n` of the original iterator order. The
2211 /// taken items will still maintain their relative order where that is
2212 /// visible in `collect`, `reduce`, and similar outputs.
2213 ///
2214 /// # Examples
2215 ///
2216 /// ```
2217 /// use rayon::prelude::*;
2218 ///
2219 /// let result: Vec<_> = (0..100)
2220 /// .into_par_iter()
2221 /// .filter(|&x| x % 2 == 0)
2222 /// .take_any(5)
2223 /// .collect();
2224 ///
2225 /// assert_eq!(result.len(), 5);
2226 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2227 /// ```
    fn take_any(self, n: usize) -> TakeAny<Self> {
        // Thin constructor for the `TakeAny` adaptor, which enforces the
        // `n`-item budget while the iterator is driven.
        TakeAny::new(self, n)
    }
2231
2232 /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
2233 ///
2234 /// This is similar to [`IndexedParallelIterator::skip`] without being
2235 /// constrained to the "first" `n` of the original iterator order. The
2236 /// remaining items will still maintain their relative order where that is
2237 /// visible in `collect`, `reduce`, and similar outputs.
2238 ///
2239 /// # Examples
2240 ///
2241 /// ```
2242 /// use rayon::prelude::*;
2243 ///
2244 /// let result: Vec<_> = (0..100)
2245 /// .into_par_iter()
2246 /// .filter(|&x| x % 2 == 0)
2247 /// .skip_any(5)
2248 /// .collect();
2249 ///
2250 /// assert_eq!(result.len(), 45);
2251 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2252 /// ```
    fn skip_any(self, n: usize) -> SkipAny<Self> {
        // Thin constructor for the `SkipAny` adaptor, the dual of `take_any`.
        SkipAny::new(self, n)
    }
2256
2257 /// Creates an iterator that takes elements from *anywhere* in the original iterator
2258 /// until the given `predicate` returns `false`.
2259 ///
2260 /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2261 /// global condition unrelated to the item itself, or some combination thereof.
2262 ///
2263 /// If parallel calls to the `predicate` race and give different results, then the
2264 /// `true` results will still take those particular items, while respecting the `false`
2265 /// result from elsewhere to skip any further items.
2266 ///
2267 /// This is similar to [`Iterator::take_while`] without being constrained to the original
2268 /// iterator order. The taken items will still maintain their relative order where that is
2269 /// visible in `collect`, `reduce`, and similar outputs.
2270 ///
2271 /// # Examples
2272 ///
2273 /// ```
2274 /// use rayon::prelude::*;
2275 ///
2276 /// let result: Vec<_> = (0..100)
2277 /// .into_par_iter()
2278 /// .take_any_while(|x| *x < 50)
2279 /// .collect();
2280 ///
2281 /// assert!(result.len() <= 50);
2282 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2283 /// ```
2284 ///
2285 /// ```
2286 /// use rayon::prelude::*;
2287 /// use std::sync::atomic::AtomicUsize;
2288 /// use std::sync::atomic::Ordering::Relaxed;
2289 ///
2290 /// // Collect any group of items that sum <= 1000
2291 /// let quota = AtomicUsize::new(1000);
2292 /// let result: Vec<_> = (0_usize..100)
2293 /// .into_par_iter()
2294 /// .take_any_while(|&x| {
2295 /// quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
2296 /// .is_ok()
2297 /// })
2298 /// .collect();
2299 ///
2300 /// let sum = result.iter().sum::<usize>();
2301 /// assert!(matches!(sum, 902..=1000));
2302 /// ```
    fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Thin constructor; the racy-but-consistent stopping semantics
        // documented above are implemented by the adaptor type.
        TakeAnyWhile::new(self, predicate)
    }
2309
2310 /// Creates an iterator that skips elements from *anywhere* in the original iterator
2311 /// until the given `predicate` returns `false`.
2312 ///
2313 /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2314 /// global condition unrelated to the item itself, or some combination thereof.
2315 ///
2316 /// If parallel calls to the `predicate` race and give different results, then the
2317 /// `true` results will still skip those particular items, while respecting the `false`
2318 /// result from elsewhere to skip any further items.
2319 ///
2320 /// This is similar to [`Iterator::skip_while`] without being constrained to the original
2321 /// iterator order. The remaining items will still maintain their relative order where that is
2322 /// visible in `collect`, `reduce`, and similar outputs.
2323 ///
2324 /// # Examples
2325 ///
2326 /// ```
2327 /// use rayon::prelude::*;
2328 ///
2329 /// let result: Vec<_> = (0..100)
2330 /// .into_par_iter()
2331 /// .skip_any_while(|x| *x < 50)
2332 /// .collect();
2333 ///
2334 /// assert!(result.len() >= 50);
2335 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2336 /// ```
    fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Thin constructor, the dual of `take_any_while`.
        SkipAnyWhile::new(self, predicate)
    }
2343
2344 /// Collects this iterator into a linked list of vectors.
2345 ///
2346 /// This is useful when you need to condense a parallel iterator into a collection,
2347 /// but have no specific requirements for what that collection should be. If you
2348 /// plan to store the collection longer-term, `Vec<T>` is, as always, likely the
2349 /// best default choice, despite the overhead that comes from concatenating each
2350 /// vector. Or, if this is an `IndexedParallelIterator`, you should also prefer to
2351 /// just collect to a `Vec<T>`.
2352 ///
2353 /// Internally, most [`FromParallelIterator`]/[`ParallelExtend`] implementations
2354 /// use this strategy; each job collecting their chunk of the iterator to a `Vec<T>`
2355 /// and those chunks getting merged into a `LinkedList`, before then extending the
2356 /// collection with each vector. This is a very efficient way to collect an
2357 /// unindexed parallel iterator, without much intermediate data movement.
2358 ///
2359 /// # Examples
2360 ///
2361 /// ```
2362 /// # use std::collections::LinkedList;
2363 /// use rayon::prelude::*;
2364 ///
2365 /// let result: LinkedList<Vec<_>> = (0..=100)
2366 /// .into_par_iter()
2367 /// .filter(|x| x % 2 == 0)
2368 /// .flat_map(|x| 0..x)
2369 /// .collect_vec_list();
2370 ///
2371 /// // `par_iter.collect_vec_list().into_iter().flatten()` turns
2372 /// // a parallel iterator into a serial one
2373 /// let total_len = result.into_iter().flatten().count();
2374 /// assert_eq!(total_len, 2550);
2375 /// ```
2376 fn collect_vec_list(self) -> LinkedList<Vec<Self::Item>> {
2377 match extend::fast_collect(self) {
2378 Either::Left(vec) => {
2379 let mut list = LinkedList::new();
2380 if !vec.is_empty() {
2381 list.push_back(vec);
2382 }
2383 list
2384 }
2385 Either::Right(list) => list,
2386 }
2387 }
2388
2389 /// Internal method used to define the behavior of this parallel
2390 /// iterator. You should not need to call this directly.
2391 ///
2392 /// This method causes the iterator `self` to start producing
2393 /// items and to feed them to the consumer `consumer` one by one.
2394 /// It may split the consumer before doing so to create the
2395 /// opportunity to produce in parallel.
2396 ///
2397 /// See the [README] for more details on the internals of parallel
2398 /// iterators.
2399 ///
2400 /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    // Required method: each concrete iterator defines how items are produced
    // and fed into (a possibly split copy of) the given consumer.
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>;
2404
2405 /// Internal method used to define the behavior of this parallel
2406 /// iterator. You should not need to call this directly.
2407 ///
2408 /// Returns the number of items produced by this iterator, if known
2409 /// statically. This can be used by consumers to trigger special fast
2410 /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2411 /// use the (indexed) `Consumer` methods when driving a consumer, such
2412 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2413 /// other `UnindexedConsumer` methods -- or returning an inaccurate
2414 /// value -- may result in panics.
2415 ///
2416 /// This method is currently used to optimize `collect` for want
2417 /// of true Rust specialization; it may be removed when
2418 /// specialization is stable.
    fn opt_len(&self) -> Option<usize> {
        // Unindexed by default; indexed iterators override this to report
        // their exact length and unlock the fast paths described above.
        None
    }
2422}
2423
// Every parallel iterator trivially converts into itself, so `par_iter()`
// call sites can accept `IntoParallelIterator` and still take iterators.
impl<T: ParallelIterator> IntoParallelIterator for T {
    type Iter = T;
    type Item = T::Item;

    fn into_par_iter(self) -> T {
        self
    }
}
2432
2433/// An iterator that supports "random access" to its data, meaning
2434/// that you can split it at arbitrary indices and draw data from
2435/// those points.
2436///
2437/// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2438// Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2439#[allow(clippy::len_without_is_empty)]
2440pub trait IndexedParallelIterator: ParallelIterator {
2441 /// Divides an iterator into sequential blocks of exponentially-increasing size.
2442 ///
2443 /// Normally, parallel iterators are recursively divided into tasks in parallel.
2444 /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2445 /// of parallel iterators of increasing sizes.
2446 /// Sizes grow exponentially in order to avoid creating
2447 /// too many blocks. This also allows to balance the current block with all previous ones.
2448 ///
2449 /// This can have many applications but the most notable ones are:
2450 /// - better performance with [`find_first()`][ParallelIterator::find_first]
2451 /// - more predictable performance with [`find_any()`][ParallelIterator::find_any]
2452 /// or any interruptible computation
2453 ///
2454 /// # Examples
2455 ///
2456 /// ```
2457 /// use rayon::prelude::*;
2458 /// assert_eq!((0..10_000).into_par_iter()
2459 /// .by_exponential_blocks()
2460 /// .find_first(|&e| e==4_999), Some(4_999))
2461 /// ```
2462 ///
2463 /// In this example, without blocks, rayon will split the initial range into two but all work
2464 /// on the right hand side (from 5,000 onwards) is **useless** since the sequential algorithm
2465 /// never goes there. This means that if two threads are used there will be **no** speedup **at
2466 /// all**.
2467 ///
2468 /// `by_exponential_blocks` on the other hand will start with the leftmost range from 0
2469 /// to `p` (threads number), continue with p to 3p, the 3p to 7p...
2470 ///
2471 /// Each subrange is treated in parallel, while all subranges are treated sequentially.
2472 /// We therefore ensure a logarithmic number of blocks (and overhead) while guaranteeing
2473 /// we stop at the first block containing the searched data.
    fn by_exponential_blocks(self) -> ExponentialBlocks<Self> {
        // Thin constructor; the doubling block schedule described above is
        // implemented by the `ExponentialBlocks` adaptor.
        ExponentialBlocks::new(self)
    }
2477
2478 /// Divides an iterator into sequential blocks of the given size.
2479 ///
2480 /// Normally, parallel iterators are recursively divided into tasks in parallel.
2481 /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2482 /// of parallel iterators of given `block_size`.
2483 /// The main application is to obtain better
2484 /// memory locality (especially if the reduce operation re-use folded data).
2485 ///
2486 /// **Panics** if `block_size` is 0.
2487 ///
2488 /// # Example
2489 /// ```
2490 /// use rayon::prelude::*;
2491 /// // during most reductions v1 and v2 fit the cache
2492 /// let v = (0u32..10_000_000)
2493 /// .into_par_iter()
2494 /// .by_uniform_blocks(1_000_000)
2495 /// .fold(Vec::new, |mut v, e| { v.push(e); v})
2496 /// .reduce(Vec::new, |mut v1, mut v2| { v1.append(&mut v2); v1});
2497 /// assert_eq!(v, (0u32..10_000_000).collect::<Vec<u32>>());
2498 /// ```
    #[track_caller]
    fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self> {
        // Validate eagerly; `track_caller` attributes the panic to the caller.
        assert!(block_size != 0, "block_size must not be zero");
        UniformBlocks::new(self, block_size)
    }
2504
2505 /// Collects the results of the iterator into the specified
2506 /// vector. The vector is always cleared before execution
2507 /// begins. If possible, reusing the vector across calls can lead
2508 /// to better performance since it reuses the same backing buffer.
2509 ///
2510 /// # Examples
2511 ///
2512 /// ```
2513 /// use rayon::prelude::*;
2514 ///
2515 /// // any prior data will be cleared
2516 /// let mut vec = vec![-1, -2, -3];
2517 ///
2518 /// (0..5).into_par_iter()
2519 /// .collect_into_vec(&mut vec);
2520 ///
2521 /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2522 /// ```
    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
        // Delegates to the `collect` module, which can size `target` exactly
        // because indexed iterators know their length.
        collect::collect_into_vec(self, target);
    }
2526
2527 /// Unzips the results of the iterator into the specified
2528 /// vectors. The vectors are always cleared before execution
2529 /// begins. If possible, reusing the vectors across calls can lead
2530 /// to better performance since they reuse the same backing buffer.
2531 ///
2532 /// # Examples
2533 ///
2534 /// ```
2535 /// use rayon::prelude::*;
2536 ///
2537 /// // any prior data will be cleared
2538 /// let mut left = vec![42; 10];
2539 /// let mut right = vec![-1; 10];
2540 ///
2541 /// (10..15).into_par_iter()
2542 /// .enumerate()
2543 /// .unzip_into_vecs(&mut left, &mut right);
2544 ///
2545 /// assert_eq!(left, [0, 1, 2, 3, 4]);
2546 /// assert_eq!(right, [10, 11, 12, 13, 14]);
2547 /// ```
    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
    where
        Self: IndexedParallelIterator<Item = (A, B)>,
        A: Send,
        B: Send,
    {
        // Delegates to the `collect` module, writing both halves of each pair
        // into the caller-provided vectors.
        collect::unzip_into_vecs(self, left, right);
    }
2556
2557 /// Iterates over tuples `(A, B)`, where the items `A` are from
2558 /// this iterator and `B` are from the iterator given as argument.
2559 /// Like the `zip` method on ordinary iterators, if the two
2560 /// iterators are of unequal length, you only get the items they
2561 /// have in common.
2562 ///
2563 /// # Examples
2564 ///
2565 /// ```
2566 /// use rayon::prelude::*;
2567 ///
2568 /// let result: Vec<_> = (1..4)
2569 /// .into_par_iter()
2570 /// .zip(vec!['a', 'b', 'c'])
2571 /// .collect();
2572 ///
2573 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2574 /// ```
    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
    where
        Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
    {
        // Thin constructor; truncation to the shorter side is handled by
        // the `Zip` adaptor.
        Zip::new(self, zip_op.into_par_iter())
    }
2581
2582 /// The same as `Zip`, but requires that both iterators have the same length.
2583 ///
2584 /// # Panics
2585 /// Will panic if `self` and `zip_op` are not the same length.
2586 ///
2587 /// ```should_panic
2588 /// use rayon::prelude::*;
2589 ///
2590 /// let one = [1u8];
2591 /// let two = [2u8, 2];
2592 /// let one_iter = one.par_iter();
2593 /// let two_iter = two.par_iter();
2594 ///
2595 /// // this will panic
2596 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2597 ///
2598 /// // we should never get here
2599 /// assert_eq!(1, zipped.len());
2600 /// ```
2601 #[track_caller]
2602 fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2603 where
2604 Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
2605 {
2606 let zip_op_iter = zip_op.into_par_iter();
2607 assert_eq!(
2608 self.len(),
2609 zip_op_iter.len(),
2610 "iterators must have the same length"
2611 );
2612 ZipEq::new(self, zip_op_iter)
2613 }
2614
2615 /// Interleaves elements of this iterator and the other given
2616 /// iterator. Alternately yields elements from this iterator and
2617 /// the given iterator, until both are exhausted. If one iterator
2618 /// is exhausted before the other, the last elements are provided
2619 /// from the other.
2620 ///
2621 /// # Examples
2622 ///
2623 /// ```
2624 /// use rayon::prelude::*;
2625 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2626 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2627 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2628 /// ```
    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
    where
        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
    {
        // Thin constructor; alternation and tail handling live in the adaptor.
        Interleave::new(self, other.into_par_iter())
    }
2635
2636 /// Interleaves elements of this iterator and the other given
2637 /// iterator, until one is exhausted.
2638 ///
2639 /// # Examples
2640 ///
2641 /// ```
2642 /// use rayon::prelude::*;
2643 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2644 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2645 /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2646 /// ```
    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
    where
        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
    {
        // Thin constructor; stops as soon as either input is exhausted.
        InterleaveShortest::new(self, other.into_par_iter())
    }
2653
2654 /// Splits an iterator up into fixed-size chunks.
2655 ///
2656 /// Returns an iterator that returns `Vec`s of the given number of elements.
2657 /// If the number of elements in the iterator is not divisible by `chunk_size`,
2658 /// the last chunk may be shorter than `chunk_size`.
2659 ///
2660 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2661 /// slices, without having to allocate intermediate `Vec`s for the chunks.
2662 ///
2663 /// [`par_chunks()`]: crate::slice::ParallelSlice::par_chunks()
2664 /// [`par_chunks_mut()`]: crate::slice::ParallelSliceMut::par_chunks_mut()
2665 ///
2666 /// **Panics** if `chunk_size` is 0.
2667 ///
2668 /// # Examples
2669 ///
2670 /// ```
2671 /// use rayon::prelude::*;
2672 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2673 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2674 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2675 /// ```
    #[track_caller]
    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
        // Validate eagerly; `track_caller` attributes the panic to the caller.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        Chunks::new(self, chunk_size)
    }
2681
2682 /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2683 /// each chunk.
2684 ///
2685 /// Returns an iterator that produces a folded result for each chunk of items
2686 /// produced by this iterator.
2687 ///
2688 /// This works essentially like:
2689 ///
2690 /// ```text
2691 /// iter.chunks(chunk_size)
2692 /// .map(|chunk|
2693 /// chunk.into_iter()
2694 /// .fold(identity, fold_op)
2695 /// )
2696 /// ```
2697 ///
2698 /// except there is no per-chunk allocation overhead.
2699 ///
2700 /// [`fold()`]: std::iter::Iterator#method.fold
2701 ///
2702 /// **Panics** if `chunk_size` is 0.
2703 ///
2704 /// # Examples
2705 ///
2706 /// ```
2707 /// use rayon::prelude::*;
2708 /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2709 /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2710 /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2711 /// ```
    #[track_caller]
    fn fold_chunks<T, ID, F>(
        self,
        chunk_size: usize,
        identity: ID,
        fold_op: F,
    ) -> FoldChunks<Self, ID, F>
    where
        ID: Fn() -> T + Send + Sync,
        F: Fn(T, Self::Item) -> T + Send + Sync,
        T: Send,
    {
        // Validate eagerly; `track_caller` attributes the panic to the caller.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        FoldChunks::new(self, chunk_size, identity, fold_op)
    }
2727
2728 /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2729 /// each chunk.
2730 ///
2731 /// Returns an iterator that produces a folded result for each chunk of items
2732 /// produced by this iterator.
2733 ///
2734 /// This works essentially like `fold_chunks(chunk_size, || init.clone(), fold_op)`,
2735 /// except it doesn't require the `init` type to be `Sync`, nor any other form of
2736 /// added synchronization.
2737 ///
2738 /// [`fold()`]: std::iter::Iterator#method.fold
2739 ///
2740 /// **Panics** if `chunk_size` is 0.
2741 ///
2742 /// # Examples
2743 ///
2744 /// ```
2745 /// use rayon::prelude::*;
2746 /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2747 /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2748 /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2749 /// ```
    #[track_caller]
    fn fold_chunks_with<T, F>(
        self,
        chunk_size: usize,
        init: T,
        fold_op: F,
    ) -> FoldChunksWith<Self, T, F>
    where
        T: Send + Clone,
        F: Fn(T, Self::Item) -> T + Send + Sync,
    {
        // Validate eagerly; `track_caller` attributes the panic to the caller.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        FoldChunksWith::new(self, chunk_size, init, fold_op)
    }
2764
2765 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2766 /// another.
2767 ///
2768 /// # Examples
2769 ///
2770 /// ```
2771 /// use rayon::prelude::*;
2772 /// use std::cmp::Ordering::*;
2773 ///
2774 /// let x = vec![1, 2, 3];
2775 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2776 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2777 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2778 /// ```
2779 fn cmp<I>(self, other: I) -> Ordering
2780 where
2781 I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2782 Self::Item: Ord,
2783 {
2784 #[inline]
2785 fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2786 Ord::cmp(&x, &y)
2787 }
2788
2789 #[inline]
2790 fn inequal(&ord: &Ordering) -> bool {
2791 ord != Ordering::Equal
2792 }
2793
2794 let other = other.into_par_iter();
2795 let ord_len = self.len().cmp(&other.len());
2796 self.zip(other)
2797 .map(ordering)
2798 .find_first(inequal)
2799 .unwrap_or(ord_len)
2800 }
2801
2802 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2803 /// another.
2804 ///
2805 /// # Examples
2806 ///
2807 /// ```
2808 /// use rayon::prelude::*;
2809 /// use std::cmp::Ordering::*;
2810 ///
2811 /// let x = vec![1.0, 2.0, 3.0];
2812 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2813 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2814 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2815 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, f64::NAN]), None);
2816 /// ```
2817 fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2818 where
2819 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2820 Self::Item: PartialOrd<I::Item>,
2821 {
2822 #[inline]
2823 fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2824 PartialOrd::partial_cmp(&x, &y)
2825 }
2826
2827 #[inline]
2828 fn inequal(&ord: &Option<Ordering>) -> bool {
2829 ord != Some(Ordering::Equal)
2830 }
2831
2832 let other = other.into_par_iter();
2833 let ord_len = self.len().cmp(&other.len());
2834 self.zip(other)
2835 .map(ordering)
2836 .find_first(inequal)
2837 .unwrap_or(Some(ord_len))
2838 }
2839
2840 /// Determines if the elements of this `ParallelIterator`
2841 /// are equal to those of another
2842 fn eq<I>(self, other: I) -> bool
2843 where
2844 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2845 Self::Item: PartialEq<I::Item>,
2846 {
2847 #[inline]
2848 fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2849 PartialEq::eq(&x, &y)
2850 }
2851
2852 let other = other.into_par_iter();
2853 self.len() == other.len() && self.zip(other).all(eq)
2854 }
2855
2856 /// Determines if the elements of this `ParallelIterator`
2857 /// are unequal to those of another
2858 fn ne<I>(self, other: I) -> bool
2859 where
2860 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2861 Self::Item: PartialEq<I::Item>,
2862 {
2863 !self.eq(other)
2864 }
2865
2866 /// Determines if the elements of this `ParallelIterator`
2867 /// are lexicographically less than those of another.
2868 fn lt<I>(self, other: I) -> bool
2869 where
2870 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2871 Self::Item: PartialOrd<I::Item>,
2872 {
2873 self.partial_cmp(other) == Some(Ordering::Less)
2874 }
2875
2876 /// Determines if the elements of this `ParallelIterator`
2877 /// are less than or equal to those of another.
2878 fn le<I>(self, other: I) -> bool
2879 where
2880 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2881 Self::Item: PartialOrd<I::Item>,
2882 {
2883 let ord = self.partial_cmp(other);
2884 ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2885 }
2886
2887 /// Determines if the elements of this `ParallelIterator`
2888 /// are lexicographically greater than those of another.
2889 fn gt<I>(self, other: I) -> bool
2890 where
2891 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2892 Self::Item: PartialOrd<I::Item>,
2893 {
2894 self.partial_cmp(other) == Some(Ordering::Greater)
2895 }
2896
2897 /// Determines if the elements of this `ParallelIterator`
2898 /// are greater than or equal to those of another.
2899 fn ge<I>(self, other: I) -> bool
2900 where
2901 I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2902 Self::Item: PartialOrd<I::Item>,
2903 {
2904 let ord = self.partial_cmp(other);
2905 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2906 }
2907
    /// Yields an index along with each item.
    ///
    /// Indices count up from zero, following the iterator's original
    /// order, as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let chars = vec!['a', 'b', 'c'];
    /// let result: Vec<_> = chars
    ///     .into_par_iter()
    ///     .enumerate()
    ///     .collect();
    ///
    /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
    /// ```
    fn enumerate(self) -> Enumerate<Self> {
        Enumerate::new(self)
    }
2926
    /// Creates an iterator that steps by the given amount.
    ///
    /// The first element is always yielded, then every `step`-th element
    /// after it, as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let range = 3..10;
    /// let result: Vec<i32> = range
    ///     .into_par_iter()
    ///     .step_by(3)
    ///     .collect();
    ///
    /// assert_eq!(result, [3, 6, 9])
    /// ```
    fn step_by(self, step: usize) -> StepBy<Self> {
        StepBy::new(self, step)
    }
2945
    /// Creates an iterator that skips the first `n` elements.
    ///
    /// The remaining elements are yielded in their original order,
    /// as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let result: Vec<_> = (0..100)
    ///     .into_par_iter()
    ///     .skip(95)
    ///     .collect();
    ///
    /// assert_eq!(result, [95, 96, 97, 98, 99]);
    /// ```
    fn skip(self, n: usize) -> Skip<Self> {
        Skip::new(self, n)
    }
2963
    /// Creates an iterator that yields the first `n` elements.
    ///
    /// The elements are yielded in their original order,
    /// as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let result: Vec<_> = (0..100)
    ///     .into_par_iter()
    ///     .take(5)
    ///     .collect();
    ///
    /// assert_eq!(result, [0, 1, 2, 3, 4]);
    /// ```
    fn take(self, n: usize) -> Take<Self> {
        Take::new(self, n)
    }
2981
2982 /// Searches for **some** item in the parallel iterator that
2983 /// matches the given predicate, and returns its index. Like
2984 /// `ParallelIterator::find_any`, the parallel search will not
2985 /// necessarily find the **first** match, and once a match is
2986 /// found we'll attempt to stop processing any more.
2987 ///
2988 /// # Examples
2989 ///
2990 /// ```
2991 /// use rayon::prelude::*;
2992 ///
2993 /// let a = [1, 2, 3, 3];
2994 ///
2995 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2996 /// assert!(i == 2 || i == 3);
2997 ///
2998 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2999 /// ```
3000 fn position_any<P>(self, predicate: P) -> Option<usize>
3001 where
3002 P: Fn(Self::Item) -> bool + Sync + Send,
3003 {
3004 #[inline]
3005 fn check(&(_, p): &(usize, bool)) -> bool {
3006 p
3007 }
3008
3009 let (i, _) = self.map(predicate).enumerate().find_any(check)?;
3010 Some(i)
3011 }
3012
3013 /// Searches for the sequentially **first** item in the parallel iterator
3014 /// that matches the given predicate, and returns its index.
3015 ///
3016 /// Like `ParallelIterator::find_first`, once a match is found,
3017 /// all attempts to the right of the match will be stopped, while
3018 /// attempts to the left must continue in case an earlier match
3019 /// is found.
3020 ///
3021 /// Note that not all parallel iterators have a useful order, much like
3022 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
3023 /// just want the first match that discovered anywhere in the iterator,
3024 /// `position_any` is a better choice.
3025 ///
3026 /// # Examples
3027 ///
3028 /// ```
3029 /// use rayon::prelude::*;
3030 ///
3031 /// let a = [1, 2, 3, 3];
3032 ///
3033 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
3034 ///
3035 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
3036 /// ```
3037 fn position_first<P>(self, predicate: P) -> Option<usize>
3038 where
3039 P: Fn(Self::Item) -> bool + Sync + Send,
3040 {
3041 #[inline]
3042 fn check(&(_, p): &(usize, bool)) -> bool {
3043 p
3044 }
3045
3046 let (i, _) = self.map(predicate).enumerate().find_first(check)?;
3047 Some(i)
3048 }
3049
3050 /// Searches for the sequentially **last** item in the parallel iterator
3051 /// that matches the given predicate, and returns its index.
3052 ///
3053 /// Like `ParallelIterator::find_last`, once a match is found,
3054 /// all attempts to the left of the match will be stopped, while
3055 /// attempts to the right must continue in case a later match
3056 /// is found.
3057 ///
3058 /// Note that not all parallel iterators have a useful order, much like
3059 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
3060 /// order doesn't actually matter to you, `position_any` is a better
3061 /// choice.
3062 ///
3063 /// # Examples
3064 ///
3065 /// ```
3066 /// use rayon::prelude::*;
3067 ///
3068 /// let a = [1, 2, 3, 3];
3069 ///
3070 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
3071 ///
3072 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
3073 /// ```
3074 fn position_last<P>(self, predicate: P) -> Option<usize>
3075 where
3076 P: Fn(Self::Item) -> bool + Sync + Send,
3077 {
3078 #[inline]
3079 fn check(&(_, p): &(usize, bool)) -> bool {
3080 p
3081 }
3082
3083 let (i, _) = self.map(predicate).enumerate().find_last(check)?;
3084 Some(i)
3085 }
3086
3087 #[doc(hidden)]
3088 #[deprecated(
3089 note = "parallel `position` does not search in order -- use `position_any`, \\
3090 `position_first`, or `position_last`"
3091 )]
3092 fn position<P>(self, predicate: P) -> Option<usize>
3093 where
3094 P: Fn(Self::Item) -> bool + Sync + Send,
3095 {
3096 self.position_any(predicate)
3097 }
3098
    /// Searches for items in the parallel iterator that match the given
    /// predicate, and returns their indices.
    ///
    /// Unlike the single-result `position_*` methods, this yields every
    /// matching index; the examples below show them collected in
    /// ascending order.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
    ///
    /// // Find the positions of primes congruent to 1 modulo 6
    /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
    /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
    ///
    /// // Find the positions of primes congruent to 5 modulo 6
    /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
    /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
    /// ```
    fn positions<P>(self, predicate: P) -> Positions<Self, P>
    where
        P: Fn(Self::Item) -> bool + Sync + Send,
    {
        Positions::new(self, predicate)
    }
3123
    /// Produces a new iterator with the elements of this iterator in
    /// reverse order.
    ///
    /// The first element of the original iterator becomes the last,
    /// as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let result: Vec<_> = (0..5)
    ///     .into_par_iter()
    ///     .rev()
    ///     .collect();
    ///
    /// assert_eq!(result, [4, 3, 2, 1, 0]);
    /// ```
    fn rev(self) -> Rev<Self> {
        Rev::new(self)
    }
3142
    /// Sets the minimum length of iterators desired to process in each
    /// rayon job. Rayon will not split any smaller than this length, but
    /// of course an iterator could already be smaller to begin with.
    ///
    /// Producers like `zip` and `interleave` will use the greater of the two
    /// minimums.
    /// Chained iterators and iterators inside `flat_map` may each use
    /// their own minimum length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let min = (0..1_000_000)
    ///     .into_par_iter()
    ///     .with_min_len(1234)
    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    ///     .min().unwrap();
    ///
    /// assert!(min >= 1234);
    /// ```
    fn with_min_len(self, min: usize) -> MinLen<Self> {
        MinLen::new(self, min)
    }
3168
    /// Sets the maximum length of iterators desired to process in each
    /// rayon job. Rayon will try to split at least below this length,
    /// unless that would put it below the length from `with_min_len()`.
    /// For example, given min=10 and max=15, a length of 16 will not be
    /// split any further.
    ///
    /// Producers like `zip` and `interleave` will use the lesser of the two
    /// maximums.
    /// Chained iterators and iterators inside `flat_map` may each use
    /// their own maximum length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let max = (0..1_000_000)
    ///     .into_par_iter()
    ///     .with_max_len(1234)
    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    ///     .max().unwrap();
    ///
    /// assert!(max <= 1234);
    /// ```
    fn with_max_len(self, max: usize) -> MaxLen<Self> {
        MaxLen::new(self, max)
    }
3196
    /// Produces an exact count of how many items this iterator will
    /// produce, presuming no panic occurs.
    ///
    /// This takes `&self`, so the iterator itself remains usable
    /// afterward, as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
    /// assert_eq!(par_iter.len(), 10);
    ///
    /// let vec: Vec<_> = par_iter.collect();
    /// assert_eq!(vec.len(), 10);
    /// ```
    fn len(&self) -> usize;
3212
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel. If a split does happen, it
    /// will inform the consumer of the index where the split should
    /// occur (unlike `ParallelIterator::drive_unindexed()`).
    ///
    /// Returns the consumer's final result (`C::Result`).
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3228
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method converts the iterator into a producer P and then
    /// invokes `callback.callback()` with P. Note that the type of
    /// this producer is not defined as part of the API, since
    /// `callback` must be defined generically for all producers. This
    /// allows the producer type to contain references; it also means
    /// that parallel iterators can adjust that type without causing a
    /// breaking change.
    ///
    /// Returns whatever the callback produces (`CB::Output`).
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3245}
3246
/// `FromParallelIterator` implements the creation of a collection
/// from a [`ParallelIterator`]. By implementing
/// `FromParallelIterator` for a given type, you define how it will be
/// created from an iterator.
///
/// This is the parallel counterpart of the standard [`FromIterator`] trait.
///
/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
///
/// [`collect()`]: ParallelIterator::collect()
///
/// # Examples
///
/// Implementing `FromParallelIterator` for your type:
///
/// ```
/// use rayon::prelude::*;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> FromParallelIterator<T> for BlackHole {
///     fn from_par_iter<I>(par_iter: I) -> Self
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         BlackHole {
///             mass: par_iter.count() * size_of::<T>(),
///         }
///     }
/// }
///
/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
/// assert_eq!(bh.mass, 4000);
/// ```
pub trait FromParallelIterator<T>
where
    T: Send,
{
    /// Creates an instance of the collection from the parallel iterator `par_iter`.
    ///
    /// If your collection is not naturally parallel, the easiest (and
    /// fastest) way to do this is often to collect `par_iter` into a
    /// [`LinkedList`] (via [`collect_vec_list`]) or another intermediate
    /// data structure and then sequentially extend your collection. However,
    /// a more 'native' technique is to use the [`par_iter.fold`] or
    /// [`par_iter.fold_with`] methods to create the collection.
    /// Alternatively, if your collection is 'natively' parallel, you
    /// can use [`par_iter.for_each`] to process each element in turn.
    ///
    /// [`LinkedList`]: std::collections::LinkedList
    /// [`collect_vec_list`]: ParallelIterator::collect_vec_list
    /// [`par_iter.fold`]: ParallelIterator::fold()
    /// [`par_iter.fold_with`]: ParallelIterator::fold_with()
    /// [`par_iter.for_each`]: ParallelIterator::for_each()
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = T>;
}
3305
/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
///
/// This is the parallel counterpart of the standard [`Extend`] trait.
///
/// # Examples
///
/// Implementing `ParallelExtend` for your type:
///
/// ```
/// use rayon::prelude::*;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> ParallelExtend<T> for BlackHole {
///     fn par_extend<I>(&mut self, par_iter: I)
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         self.mass += par_iter.count() * size_of::<T>();
///     }
/// }
///
/// let mut bh = BlackHole { mass: 0 };
/// bh.par_extend(0i32..1000);
/// assert_eq!(bh.mass, 4000);
/// bh.par_extend(0i64..10);
/// assert_eq!(bh.mass, 4080);
/// ```
pub trait ParallelExtend<T>
where
    T: Send,
{
    /// Extends an instance of the collection with the elements drawn
    /// from the parallel iterator `par_iter`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut vec = vec![];
    /// vec.par_extend(0..5);
    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
    /// ```
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = T>;
}
3355
/// `ParallelDrainFull` creates a parallel iterator that moves all items
/// from a collection while retaining the original capacity.
///
/// Types which are indexable typically implement [`ParallelDrainRange`]
/// instead, where you can drain fully with `par_drain(..)`.
pub trait ParallelDrainFull {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over an entire collection.
    ///
    /// When the iterator is dropped, all items are removed, even if the
    /// iterator was not fully consumed. If the iterator is leaked, for example
    /// using `std::mem::forget`, it is unspecified how many items are removed.
    ///
    /// Note that implementations are typically provided for `&mut Collection`,
    /// so the collection itself can be reused afterward; the example below
    /// continues to use `heap` after draining it.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::{BinaryHeap, HashSet};
    ///
    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
    /// assert_eq!(
    ///     // heaps are drained in arbitrary order
    ///     heap.par_drain()
    ///         .inspect(|x| assert!(squares.contains(x)))
    ///         .count(),
    ///     squares.len(),
    /// );
    /// assert!(heap.is_empty());
    /// assert!(heap.capacity() >= squares.len());
    /// ```
    fn par_drain(self) -> Self::Iter;
}
3396
/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
/// from a collection while retaining the original capacity.
///
/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
pub trait ParallelDrainRange<Idx = usize> {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over a range of the collection.
    ///
    /// When the iterator is dropped, all items in the range are removed, even
    /// if the iterator was not fully consumed. If the iterator is leaked, for
    /// example using `std::mem::forget`, it is unspecified how many items are
    /// removed.
    ///
    /// The examples below demonstrate draining with every supported kind of
    /// range bound.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// println!("RangeFull");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..)
    ///            .eq(squares.par_iter().copied()));
    /// assert!(vec.is_empty());
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeFrom");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(5..)
    ///            .eq(squares[5..].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[..5]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeTo");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..5)
    ///            .eq(squares[..5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[5..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeToInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..=5)
    ///            .eq(squares[..=5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[6..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("Range");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..7)
    ///            .eq(squares[3..7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[7..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..=7)
    ///            .eq(squares[3..=7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[8..]);
    /// assert!(vec.capacity() >= squares.len());
    /// ```
    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
}
3469
3470/// We hide the `Try` trait in a private module, as it's only meant to be a
3471/// stable clone of the standard library's `Try` trait, as yet unstable.
3472mod private {
3473 use std::convert::Infallible;
3474 use std::ops::ControlFlow::{self, Break, Continue};
3475 use std::task::Poll;
3476
3477 /// Clone of `std::ops::Try`.
3478 ///
3479 /// Implementing this trait is not permitted outside of `rayon`.
3480 pub trait Try {
3481 private_decl! {}
3482
3483 type Output;
3484 type Residual;
3485
3486 fn from_output(output: Self::Output) -> Self;
3487
3488 fn from_residual(residual: Self::Residual) -> Self;
3489
3490 fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
3491 }
3492
3493 impl<B, C> Try for ControlFlow<B, C> {
3494 private_impl! {}
3495
3496 type Output = C;
3497 type Residual = ControlFlow<B, Infallible>;
3498
3499 fn from_output(output: Self::Output) -> Self {
3500 Continue(output)
3501 }
3502
3503 fn from_residual(residual: Self::Residual) -> Self {
3504 match residual {
3505 Break(b) => Break(b),
3506 #[allow(unreachable_patterns)]
3507 Continue(_) => unreachable!(),
3508 }
3509 }
3510
3511 fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3512 match self {
3513 Continue(c) => Continue(c),
3514 Break(b) => Break(Break(b)),
3515 }
3516 }
3517 }
3518
3519 impl<T> Try for Option<T> {
3520 private_impl! {}
3521
3522 type Output = T;
3523 type Residual = Option<Infallible>;
3524
3525 fn from_output(output: Self::Output) -> Self {
3526 Some(output)
3527 }
3528
3529 fn from_residual(residual: Self::Residual) -> Self {
3530 match residual {
3531 None => None,
3532 #[allow(unreachable_patterns)]
3533 Some(_) => unreachable!(),
3534 }
3535 }
3536
3537 fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3538 match self {
3539 Some(c) => Continue(c),
3540 None => Break(None),
3541 }
3542 }
3543 }
3544
3545 impl<T, E> Try for Result<T, E> {
3546 private_impl! {}
3547
3548 type Output = T;
3549 type Residual = Result<Infallible, E>;
3550
3551 fn from_output(output: Self::Output) -> Self {
3552 Ok(output)
3553 }
3554
3555 fn from_residual(residual: Self::Residual) -> Self {
3556 match residual {
3557 Err(e) => Err(e),
3558 #[allow(unreachable_patterns)]
3559 Ok(_) => unreachable!(),
3560 }
3561 }
3562
3563 fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3564 match self {
3565 Ok(c) => Continue(c),
3566 Err(e) => Break(Err(e)),
3567 }
3568 }
3569 }
3570
3571 impl<T, E> Try for Poll<Result<T, E>> {
3572 private_impl! {}
3573
3574 type Output = Poll<T>;
3575 type Residual = Result<Infallible, E>;
3576
3577 fn from_output(output: Self::Output) -> Self {
3578 output.map(Ok)
3579 }
3580
3581 fn from_residual(residual: Self::Residual) -> Self {
3582 match residual {
3583 Err(e) => Poll::Ready(Err(e)),
3584 #[allow(unreachable_patterns)]
3585 Ok(_) => unreachable!(),
3586 }
3587 }
3588
3589 fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3590 match self {
3591 Poll::Pending => Continue(Poll::Pending),
3592 Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
3593 Poll::Ready(Err(e)) => Break(Err(e)),
3594 }
3595 }
3596 }
3597
3598 impl<T, E> Try for Poll<Option<Result<T, E>>> {
3599 private_impl! {}
3600
3601 type Output = Poll<Option<T>>;
3602 type Residual = Result<Infallible, E>;
3603
3604 fn from_output(output: Self::Output) -> Self {
3605 match output {
3606 Poll::Ready(o) => Poll::Ready(o.map(Ok)),
3607 Poll::Pending => Poll::Pending,
3608 }
3609 }
3610
3611 fn from_residual(residual: Self::Residual) -> Self {
3612 match residual {
3613 Err(e) => Poll::Ready(Some(Err(e))),
3614 #[allow(unreachable_patterns)]
3615 Ok(_) => unreachable!(),
3616 }
3617 }
3618
3619 fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3620 match self {
3621 Poll::Pending => Continue(Poll::Pending),
3622 Poll::Ready(None) => Continue(Poll::Ready(None)),
3623 Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
3624 Poll::Ready(Some(Err(e))) => Break(Err(e)),
3625 }
3626 }
3627 }
3628}