rayon/iter/mod.rs

1//! Traits for writing parallel programs using an iterator-style interface
2//!
3//! You will rarely need to interact with this module directly unless you have
4//! need to name one of the iterator types.
5//!
6//! Parallel iterators make it easy to write iterator-like chains that
7//! execute in parallel: typically all you have to do is convert the
8//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10//! example, to compute the sum of the squares of a sequence of
11//! integers, one might write:
12//!
13//! ```rust
14//! use rayon::prelude::*;
15//! fn sum_of_squares(input: &[i32]) -> i32 {
16//!     input.par_iter()
17//!          .map(|i| i * i)
18//!          .sum()
19//! }
20//! ```
21//!
22//! Or, to increment all the integers in a slice, you could write:
23//!
24//! ```rust
25//! use rayon::prelude::*;
26//! fn increment_all(input: &mut [i32]) {
27//!     input.par_iter_mut()
28//!          .for_each(|p| *p += 1);
29//! }
30//! ```
31//!
32//! To use parallel iterators, first import the traits by adding
33//! something like `use rayon::prelude::*` to your module. You can
34//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35//! parallel iterator. Like a [regular iterator][], parallel
36//! iterators work by first constructing a computation and then
37//! executing it.
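//!
//! For instance, a brief sketch of that two-step flow (adaptors such as
//! `map` only describe the work; a consuming method actually runs it):
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // Nothing runs yet: `squares` merely describes the computation.
//! let squares = (0..4).into_par_iter().map(|x| x * x);
//!
//! // A consuming call like `collect` actually executes it.
//! let v: Vec<_> = squares.collect();
//! assert_eq!(v, [0, 1, 4, 9]);
//! ```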
38//!
39//! In addition to `par_iter()` and friends, some types offer other
40//! ways to create (or consume) parallel iterators:
41//!
42//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43//!   `par_windows`, as well as various parallel sorting
44//!   operations. See [the `ParallelSlice` trait] for the full list.
45//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46//!   See [the `ParallelString` trait] for the full list.
47//! - Various collections offer [`par_extend`], which grows a
48//!   collection given a parallel iterator. (If you don't have a
49//!   collection to extend, you can use [`collect()`] to create a new
50//!   one from scratch.)
51//!
52//! [the `ParallelSlice` trait]: crate::slice::ParallelSlice
53//! [the `ParallelString` trait]: crate::str::ParallelString
54//! [`par_extend`]: ParallelExtend
55//! [`collect()`]: ParallelIterator::collect()
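//!
//! For instance, a brief sketch of two of these, `par_windows` and
//! `par_extend`:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // `par_windows` iterates over overlapping windows of a slice, in parallel.
//! let a: &[i32] = &[1, 2, 3, 4];
//! let sums: Vec<i32> = a.par_windows(2).map(|w| w[0] + w[1]).collect();
//! assert_eq!(sums, [3, 5, 7]);
//!
//! // `par_extend` grows an existing collection from a parallel iterator.
//! let mut evens = Vec::new();
//! evens.par_extend((0..10).into_par_iter().filter(|n| n % 2 == 0));
//! assert_eq!(evens, [0, 2, 4, 6, 8]);
//! ```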
56//!
57//! To see the full range of methods available on parallel iterators,
58//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59//! traits.
60//!
61//! If you'd like to build a custom parallel iterator, or to write your own
62//! combinator, then check out the [split] function and the [plumbing] module.
63//!
64//! [regular iterator]: Iterator
65//! [split]: split()
66//! [plumbing]: plumbing
67//!
68//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
69//! has been deliberately obscured from the public API.  This trait is intended
70//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
71//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
72//! `None`/`Err` values will exit early.
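//!
//! For example, a brief sketch of that early-exit behavior using `Result`:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // The single `Err` value becomes the overall result, and rayon tries to
//! // stop processing the remaining items as soon as it is observed.
//! let r = (0..1_000).into_par_iter()
//!     .try_for_each(|x| if x == 500 { Err(x) } else { Ok(()) });
//! assert_eq!(r, Err(500));
//! ```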
73//!
74//! A note about object safety: It is currently _not_ possible to wrap
75//! a `ParallelIterator` (or any trait that depends on it) using a
76//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
77//! because `ParallelIterator` is **not object-safe**.
78//! (This keeps the implementation simpler and allows extra optimizations.)
79
80use self::plumbing::*;
81use self::private::Try;
82pub use either::Either;
83use std::cmp::Ordering;
84use std::collections::LinkedList;
85use std::iter::{Product, Sum};
86use std::ops::{Fn, RangeBounds};
87
88pub mod plumbing;
89
90#[cfg(test)]
91mod test;
92
93// There is a method to the madness here:
94//
95// - These modules are private but expose certain types to the end-user
96//   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
97//   public API surface of the `ParallelIterator` traits.
98// - In **this** module, those public types are always used unprefixed, which forces
99//   us to add a `pub use` and helps identify if we missed anything.
100// - In contrast, items that appear **only** in the body of a method,
101//   e.g. `find::find()`, are always used **prefixed**, so that they
102//   can be readily distinguished.
103
104mod blocks;
105mod chain;
106mod chunks;
107mod cloned;
108mod collect;
109mod copied;
110mod empty;
111mod enumerate;
112mod extend;
113mod filter;
114mod filter_map;
115mod find;
116mod find_first_last;
117mod flat_map;
118mod flat_map_iter;
119mod flatten;
120mod flatten_iter;
121mod fold;
122mod fold_chunks;
123mod fold_chunks_with;
124mod for_each;
125mod from_par_iter;
126mod inspect;
127mod interleave;
128mod interleave_shortest;
129mod intersperse;
130mod len;
131mod map;
132mod map_with;
133mod multizip;
134mod noop;
135mod once;
136mod panic_fuse;
137mod par_bridge;
138mod positions;
139mod product;
140mod reduce;
141mod repeat;
142mod rev;
143mod skip;
144mod skip_any;
145mod skip_any_while;
146mod splitter;
147mod step_by;
148mod sum;
149mod take;
150mod take_any;
151mod take_any_while;
152mod try_fold;
153mod try_reduce;
154mod try_reduce_with;
155mod unzip;
156mod update;
157mod walk_tree;
158mod while_some;
159mod zip;
160mod zip_eq;
161
162pub use self::{
163    blocks::{ExponentialBlocks, UniformBlocks},
164    chain::Chain,
165    chunks::Chunks,
166    cloned::Cloned,
167    copied::Copied,
168    empty::{empty, Empty},
169    enumerate::Enumerate,
170    filter::Filter,
171    filter_map::FilterMap,
172    flat_map::FlatMap,
173    flat_map_iter::FlatMapIter,
174    flatten::Flatten,
175    flatten_iter::FlattenIter,
176    fold::{Fold, FoldWith},
177    fold_chunks::FoldChunks,
178    fold_chunks_with::FoldChunksWith,
179    inspect::Inspect,
180    interleave::Interleave,
181    interleave_shortest::InterleaveShortest,
182    intersperse::Intersperse,
183    len::{MaxLen, MinLen},
184    map::Map,
185    map_with::{MapInit, MapWith},
186    multizip::MultiZip,
187    once::{once, Once},
188    panic_fuse::PanicFuse,
189    par_bridge::{IterBridge, ParallelBridge},
190    positions::Positions,
191    repeat::{repeat, repeat_n, Repeat, RepeatN},
192    rev::Rev,
193    skip::Skip,
194    skip_any::SkipAny,
195    skip_any_while::SkipAnyWhile,
196    splitter::{split, Split},
197    step_by::StepBy,
198    take::Take,
199    take_any::TakeAny,
200    take_any_while::TakeAnyWhile,
201    try_fold::{TryFold, TryFoldWith},
202    update::Update,
203    walk_tree::{
204        walk_tree, walk_tree_postfix, walk_tree_prefix, WalkTree, WalkTreePostfix, WalkTreePrefix,
205    },
206    while_some::WhileSome,
207    zip::Zip,
208    zip_eq::ZipEq,
209};
210
211#[allow(deprecated)]
212pub use repeat::repeatn;
213
214/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
215///
216/// By implementing `IntoParallelIterator` for a type, you define how it will be
217/// transformed into an iterator. This is a parallel version of the standard
218/// library's [`std::iter::IntoIterator`] trait.
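///
/// A minimal sketch of a manual implementation, assuming a hypothetical
/// wrapper type that simply delegates to its inner `Vec`:
///
/// ```
/// use rayon::prelude::*;
///
/// // Hypothetical newtype used only for illustration.
/// struct Measurements(Vec<f64>);
///
/// impl IntoParallelIterator for Measurements {
///     // Reuse the parallel iterator that `Vec<f64>` already provides.
///     type Iter = rayon::vec::IntoIter<f64>;
///     type Item = f64;
///
///     fn into_par_iter(self) -> Self::Iter {
///         self.0.into_par_iter()
///     }
/// }
///
/// let total: f64 = Measurements(vec![1.0, 2.0, 3.0]).into_par_iter().sum();
/// assert_eq!(total, 6.0);
/// ```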
219pub trait IntoParallelIterator {
220    /// The parallel iterator type that will be created.
221    type Iter: ParallelIterator<Item = Self::Item>;
222
223    /// The type of item that the parallel iterator will produce.
224    type Item: Send;
225
226    /// Converts `self` into a parallel iterator.
227    ///
228    /// # Examples
229    ///
230    /// ```
231    /// use rayon::prelude::*;
232    ///
233    /// println!("counting in parallel:");
234    /// (0..100).into_par_iter()
235    ///     .for_each(|i| println!("{}", i));
236    /// ```
237    ///
238    /// This conversion is often implicit for arguments to methods like [`zip`].
239    ///
240    /// ```
241    /// use rayon::prelude::*;
242    ///
243    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
244    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
245    /// ```
246    ///
247    /// [`zip`]: IndexedParallelIterator::zip()
248    fn into_par_iter(self) -> Self::Iter;
249}
250
251/// `IntoParallelRefIterator` implements the conversion to a
252/// [`ParallelIterator`], providing shared references to the data.
253///
254/// This is a parallel version of the `iter()` method
255/// defined by various collections.
256///
257/// This trait is automatically implemented
258/// `for I where &I: IntoParallelIterator`. In most cases, users
259/// will want to implement [`IntoParallelIterator`] rather than implement
260/// this trait directly.
261pub trait IntoParallelRefIterator<'data> {
262    /// The type of the parallel iterator that will be returned.
263    type Iter: ParallelIterator<Item = Self::Item>;
264
265    /// The type of item that the parallel iterator will produce.
266    /// This will typically be an `&'data T` reference type.
267    type Item: Send + 'data;
268
269    /// Converts `self` into a parallel iterator.
270    ///
271    /// # Examples
272    ///
273    /// ```
274    /// use rayon::prelude::*;
275    ///
276    /// let v: Vec<_> = (0..100).collect();
277    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
278    ///
279    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
280    /// // producing the exact same references.
281    /// assert!(v.par_iter().zip(&v)
282    ///          .all(|(a, b)| std::ptr::eq(a, b)));
283    /// ```
284    fn par_iter(&'data self) -> Self::Iter;
285}
286
287impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
288where
289    &'data I: IntoParallelIterator,
290{
291    type Iter = <&'data I as IntoParallelIterator>::Iter;
292    type Item = <&'data I as IntoParallelIterator>::Item;
293
294    fn par_iter(&'data self) -> Self::Iter {
295        self.into_par_iter()
296    }
297}
298
299/// `IntoParallelRefMutIterator` implements the conversion to a
300/// [`ParallelIterator`], providing mutable references to the data.
301///
302/// This is a parallel version of the `iter_mut()` method
303/// defined by various collections.
304///
305/// This trait is automatically implemented
306/// `for I where &mut I: IntoParallelIterator`. In most cases, users
307/// will want to implement [`IntoParallelIterator`] rather than implement
308/// this trait directly.
309pub trait IntoParallelRefMutIterator<'data> {
310    /// The type of iterator that will be created.
311    type Iter: ParallelIterator<Item = Self::Item>;
312
313    /// The type of item that will be produced; this is typically an
314    /// `&'data mut T` reference.
315    type Item: Send + 'data;
316
317    /// Creates the parallel iterator from `self`.
318    ///
319    /// # Examples
320    ///
321    /// ```
322    /// use rayon::prelude::*;
323    ///
324    /// let mut v = vec![0usize; 5];
325    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
326    /// assert_eq!(v, [0, 1, 2, 3, 4]);
327    /// ```
328    fn par_iter_mut(&'data mut self) -> Self::Iter;
329}
330
331impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
332where
333    &'data mut I: IntoParallelIterator,
334{
335    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
336    type Item = <&'data mut I as IntoParallelIterator>::Item;
337
338    fn par_iter_mut(&'data mut self) -> Self::Iter {
339        self.into_par_iter()
340    }
341}
342
343/// Parallel version of the standard iterator trait.
344///
345/// The combinators on this trait are available on **all** parallel
346/// iterators.  Additional methods can be found on the
347/// [`IndexedParallelIterator`] trait: those methods are only
348/// available for parallel iterators where the number of items is
349/// known in advance (so, e.g., after invoking `filter`, those methods
350/// become unavailable).
351///
352/// For examples of using parallel iterators, see [the docs on the
353/// `iter` module][iter].
354///
355/// [iter]: self
356pub trait ParallelIterator: Sized + Send {
357    /// The type of item that this parallel iterator produces.
358    /// For example, if you use the [`for_each`] method, this is the type of
359    /// item that your closure will be invoked with.
360    ///
361    /// [`for_each`]: #method.for_each
362    type Item: Send;
363
364    /// Executes `OP` on each item produced by the iterator, in parallel.
365    ///
366    /// # Examples
367    ///
368    /// ```
369    /// use rayon::prelude::*;
370    ///
371    /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
372    /// ```
373    fn for_each<OP>(self, op: OP)
374    where
375        OP: Fn(Self::Item) + Sync + Send,
376    {
377        for_each::for_each(self, &op)
378    }
379
380    /// Executes `OP` on the given `init` value with each item produced by
381    /// the iterator, in parallel.
382    ///
383    /// The `init` value will be cloned only as needed to be paired with
384    /// the group of items in each rayon job.  It does not require the type
385    /// to be `Sync`.
386    ///
387    /// # Examples
388    ///
389    /// ```
390    /// use std::sync::mpsc::channel;
391    /// use rayon::prelude::*;
392    ///
393    /// let (sender, receiver) = channel();
394    ///
395    /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
396    ///
397    /// let mut res: Vec<_> = receiver.iter().collect();
398    ///
399    /// res.sort();
400    ///
401    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
402    /// ```
403    fn for_each_with<OP, T>(self, init: T, op: OP)
404    where
405        OP: Fn(&mut T, Self::Item) + Sync + Send,
406        T: Send + Clone,
407    {
408        self.map_with(init, op).collect()
409    }
410
411    /// Executes `OP` on a value returned by `init` with each item produced by
412    /// the iterator, in parallel.
413    ///
414    /// The `init` function will be called only as needed for a value to be
415    /// paired with the group of items in each rayon job.  There is no
416    /// constraint on that returned type at all!
417    ///
418    /// # Examples
419    ///
420    /// ```
421    /// use rand::Rng;
422    /// use rayon::prelude::*;
423    ///
424    /// let mut v = vec![0u8; 1_000_000];
425    ///
426    /// v.par_chunks_mut(1000)
427    ///     .for_each_init(
428    ///         || rand::rng(),
429    ///         |rng, chunk| rng.fill(chunk),
430    ///     );
431    ///
432    /// // There's a remote chance that this will fail...
433    /// for i in 0u8..=255 {
434    ///     assert!(v.contains(&i));
435    /// }
436    /// ```
437    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
438    where
439        OP: Fn(&mut T, Self::Item) + Sync + Send,
440        INIT: Fn() -> T + Sync + Send,
441    {
442        self.map_init(init, op).collect()
443    }
444
445    /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
446    ///
447    /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
448    /// stop processing the rest of the items in the iterator as soon as
449    /// possible, and we will return that terminating value.  Otherwise, we will
450    /// return an empty `Result::Ok(())` or `Option::Some(())`.  If there are
451    /// multiple errors in parallel, it is not specified which will be returned.
452    ///
453    /// # Examples
454    ///
455    /// ```
456    /// use rayon::prelude::*;
457    /// use std::io::{self, Write};
458    ///
459    /// // This will stop iteration early if there's any write error, like
460    /// // having piped output get closed on the other end.
461    /// (0..100).into_par_iter()
462    ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
463    ///     .expect("expected no write errors");
464    /// ```
465    fn try_for_each<OP, R>(self, op: OP) -> R
466    where
467        OP: Fn(Self::Item) -> R + Sync + Send,
468        R: Try<Output = ()> + Send,
469    {
470        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
471            R::from_output(())
472        }
473
474        self.map(op).try_reduce(<()>::default, ok)
475    }
476
477    /// Executes a fallible `OP` on the given `init` value with each item
478    /// produced by the iterator, in parallel.
479    ///
480    /// This combines the `init` semantics of [`for_each_with()`] and the
481    /// failure semantics of [`try_for_each()`].
482    ///
483    /// [`for_each_with()`]: #method.for_each_with
484    /// [`try_for_each()`]: #method.try_for_each
485    ///
486    /// # Examples
487    ///
488    /// ```
489    /// use std::sync::mpsc::channel;
490    /// use rayon::prelude::*;
491    ///
492    /// let (sender, receiver) = channel();
493    ///
494    /// (0..5).into_par_iter()
495    ///     .try_for_each_with(sender, |s, x| s.send(x))
496    ///     .expect("expected no send errors");
497    ///
498    /// let mut res: Vec<_> = receiver.iter().collect();
499    ///
500    /// res.sort();
501    ///
502    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
503    /// ```
504    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
505    where
506        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
507        T: Send + Clone,
508        R: Try<Output = ()> + Send,
509    {
510        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
511            R::from_output(())
512        }
513
514        self.map_with(init, op).try_reduce(<()>::default, ok)
515    }
516
517    /// Executes a fallible `OP` on a value returned by `init` with each item
518    /// produced by the iterator, in parallel.
519    ///
520    /// This combines the `init` semantics of [`for_each_init()`] and the
521    /// failure semantics of [`try_for_each()`].
522    ///
523    /// [`for_each_init()`]: #method.for_each_init
524    /// [`try_for_each()`]: #method.try_for_each
525    ///
526    /// # Examples
527    ///
528    /// ```
529    /// use rand::{Rng, TryRngCore};
530    /// use rayon::prelude::*;
531    ///
532    /// let mut v = vec![0u8; 1_000_000];
533    ///
534    /// v.par_chunks_mut(1000)
535    ///     .try_for_each_init(
536    ///         || rand::rng(),
537    ///         |rng, chunk| rng.try_fill_bytes(chunk),
538    ///     )
539    ///     .expect("expected no rand errors");
540    ///
541    /// // There's a remote chance that this will fail...
542    /// for i in 0u8..=255 {
543    ///     assert!(v.contains(&i));
544    /// }
545    /// ```
546    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
547    where
548        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
549        INIT: Fn() -> T + Sync + Send,
550        R: Try<Output = ()> + Send,
551    {
552        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
553            R::from_output(())
554        }
555
556        self.map_init(init, op).try_reduce(<()>::default, ok)
557    }
558
559    /// Counts the number of items in this parallel iterator.
560    ///
561    /// # Examples
562    ///
563    /// ```
564    /// use rayon::prelude::*;
565    ///
566    /// let count = (0..100).into_par_iter().count();
567    ///
568    /// assert_eq!(count, 100);
569    /// ```
570    fn count(self) -> usize {
571        fn one<T>(_: T) -> usize {
572            1
573        }
574
575        self.map(one).sum()
576    }
577
578    /// Applies `map_op` to each item of this iterator, producing a new
579    /// iterator with the results.
580    ///
581    /// # Examples
582    ///
583    /// ```
584    /// use rayon::prelude::*;
585    ///
586    /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
587    ///
588    /// let doubles: Vec<_> = par_iter.collect();
589    ///
590    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
591    /// ```
592    fn map<F, R>(self, map_op: F) -> Map<Self, F>
593    where
594        F: Fn(Self::Item) -> R + Sync + Send,
595        R: Send,
596    {
597        Map::new(self, map_op)
598    }
599
600    /// Applies `map_op` to the given `init` value with each item of this
601    /// iterator, producing a new iterator with the results.
602    ///
603    /// The `init` value will be cloned only as needed to be paired with
604    /// the group of items in each rayon job.  It does not require the type
605    /// to be `Sync`.
606    ///
607    /// # Examples
608    ///
609    /// ```
610    /// use std::sync::mpsc::channel;
611    /// use rayon::prelude::*;
612    ///
613    /// let (sender, receiver) = channel();
614    ///
615    /// let a: Vec<_> = (0..5)
616    ///                 .into_par_iter()            // iterating over i32
617    ///                 .map_with(sender, |s, x| {
618    ///                     s.send(x).unwrap();     // sending i32 values through the channel
619    ///                     x                       // returning i32
620    ///                 })
621    ///                 .collect();                 // collecting the returned values into a vector
622    ///
623    /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
624    ///                             .collect();     // and collecting them
625    /// b.sort();
626    ///
627    /// assert_eq!(a, b);
628    /// ```
629    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
630    where
631        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
632        T: Send + Clone,
633        R: Send,
634    {
635        MapWith::new(self, init, map_op)
636    }
637
638    /// Applies `map_op` to a value returned by `init` with each item of this
639    /// iterator, producing a new iterator with the results.
640    ///
641    /// The `init` function will be called only as needed for a value to be
642    /// paired with the group of items in each rayon job.  There is no
643    /// constraint on that returned type at all!
644    ///
645    /// # Examples
646    ///
647    /// ```
648    /// use rand::Rng;
649    /// use rayon::prelude::*;
650    ///
651    /// let a: Vec<_> = (1i32..1_000_000)
652    ///     .into_par_iter()
653    ///     .map_init(
654    ///         || rand::rng(),  // get the thread-local RNG
655    ///         |rng, x| if rng.random() { // randomly negate items
656    ///             -x
657    ///         } else {
658    ///             x
659    ///         },
660    ///     ).collect();
661    ///
662    /// // There's a remote chance that this will fail...
663    /// assert!(a.iter().any(|&x| x < 0));
664    /// assert!(a.iter().any(|&x| x > 0));
665    /// ```
666    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
667    where
668        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
669        INIT: Fn() -> T + Sync + Send,
670        R: Send,
671    {
672        MapInit::new(self, init, map_op)
673    }
674
675    /// Creates an iterator which clones all of its elements.  This may be
676    /// useful when you have an iterator over `&T`, but you need `T`, and
677    /// that type implements `Clone`. See also [`copied()`].
678    ///
679    /// [`copied()`]: #method.copied
680    ///
681    /// # Examples
682    ///
683    /// ```
684    /// use rayon::prelude::*;
685    ///
686    /// let a = [1, 2, 3];
687    ///
688    /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
689    ///
690    /// // cloned is the same as .map(|&x| x), for integers
691    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
692    ///
693    /// assert_eq!(v_cloned, vec![1, 2, 3]);
694    /// assert_eq!(v_map, vec![1, 2, 3]);
695    /// ```
696    fn cloned<'a, T>(self) -> Cloned<Self>
697    where
698        T: 'a + Clone + Send,
699        Self: ParallelIterator<Item = &'a T>,
700    {
701        Cloned::new(self)
702    }
703
704    /// Creates an iterator which copies all of its elements.  This may be
705    /// useful when you have an iterator over `&T`, but you need `T`, and
706    /// that type implements `Copy`. See also [`cloned()`].
707    ///
708    /// [`cloned()`]: #method.cloned
709    ///
710    /// # Examples
711    ///
712    /// ```
713    /// use rayon::prelude::*;
714    ///
715    /// let a = [1, 2, 3];
716    ///
717    /// let v_copied: Vec<_> = a.par_iter().copied().collect();
718    ///
719    /// // copied is the same as .map(|&x| x), for integers
720    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
721    ///
722    /// assert_eq!(v_copied, vec![1, 2, 3]);
723    /// assert_eq!(v_map, vec![1, 2, 3]);
724    /// ```
725    fn copied<'a, T>(self) -> Copied<Self>
726    where
727        T: 'a + Copy + Send,
728        Self: ParallelIterator<Item = &'a T>,
729    {
730        Copied::new(self)
731    }
732
733    /// Applies `inspect_op` to a reference to each item of this iterator,
734    /// producing a new iterator passing through the original items.  This is
735    /// often useful for debugging to see what's happening in iterator stages.
736    ///
737    /// # Examples
738    ///
739    /// ```
740    /// use rayon::prelude::*;
741    ///
742    /// let a = [1, 4, 2, 3];
743    ///
744    /// // this iterator sequence is complex.
745    /// let sum = a.par_iter()
746    ///             .cloned()
747    ///             .filter(|&x| x % 2 == 0)
748    ///             .reduce(|| 0, |sum, i| sum + i);
749    ///
750    /// println!("{}", sum);
751    ///
752    /// // let's add some inspect() calls to investigate what's happening
753    /// let sum = a.par_iter()
754    ///             .cloned()
755    ///             .inspect(|x| println!("about to filter: {}", x))
756    ///             .filter(|&x| x % 2 == 0)
757    ///             .inspect(|x| println!("made it through filter: {}", x))
758    ///             .reduce(|| 0, |sum, i| sum + i);
759    ///
760    /// println!("{}", sum);
761    /// ```
762    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
763    where
764        OP: Fn(&Self::Item) + Sync + Send,
765    {
766        Inspect::new(self, inspect_op)
767    }
768
769    /// Mutates each item of this iterator before yielding it.
770    ///
771    /// # Examples
772    ///
773    /// ```
774    /// use rayon::prelude::*;
775    ///
776    /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
777    ///
778    /// let doubles: Vec<_> = par_iter.collect();
779    ///
780    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
781    /// ```
782    fn update<F>(self, update_op: F) -> Update<Self, F>
783    where
784        F: Fn(&mut Self::Item) + Sync + Send,
785    {
786        Update::new(self, update_op)
787    }
788
789    /// Applies `filter_op` to each item of this iterator, producing a new
790    /// iterator with only the items that gave `true` results.
791    ///
792    /// # Examples
793    ///
794    /// ```
795    /// use rayon::prelude::*;
796    ///
797    /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
798    ///
799    /// let even_numbers: Vec<_> = par_iter.collect();
800    ///
801    /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
802    /// ```
803    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
804    where
805        P: Fn(&Self::Item) -> bool + Sync + Send,
806    {
807        Filter::new(self, filter_op)
808    }
809
810    /// Applies `filter_op` to each item of this iterator to get an `Option`,
811    /// producing a new iterator with only the items from `Some` results.
812    ///
813    /// # Examples
814    ///
815    /// ```
816    /// use rayon::prelude::*;
817    ///
818    /// let mut par_iter = (0..10).into_par_iter()
819    ///                         .filter_map(|x| {
820    ///                             if x % 2 == 0 { Some(x * 3) }
821    ///                             else { None }
822    ///                         });
823    ///
824    /// let even_numbers: Vec<_> = par_iter.collect();
825    ///
826    /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
827    /// ```
828    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
829    where
830        P: Fn(Self::Item) -> Option<R> + Sync + Send,
831        R: Send,
832    {
833        FilterMap::new(self, filter_op)
834    }
835
836    /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
837    /// producing a new parallel iterator that flattens these back into one.
838    ///
839    /// See also [`flat_map_iter`](#method.flat_map_iter).
840    ///
841    /// # Examples
842    ///
843    /// ```
844    /// use rayon::prelude::*;
845    ///
846    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
847    ///
848    /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
849    ///
850    /// let vec: Vec<_> = par_iter.collect();
851    ///
852    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
853    /// ```
854    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
855    where
856        F: Fn(Self::Item) -> PI + Sync + Send,
857        PI: IntoParallelIterator,
858    {
859        FlatMap::new(self, map_op)
860    }
861
862    /// Applies `map_op` to each item of this iterator to get nested serial iterators,
863    /// producing a new parallel iterator that flattens these back into one.
864    ///
865    /// # `flat_map_iter` versus `flat_map`
866    ///
867    /// These two methods are similar but behave slightly differently. With [`flat_map`],
868    /// each of the nested iterators must be a parallel iterator, and they will be further
869    /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
870    /// sequential `Iterator`, and we only parallelize _between_ them, while the items
871    /// produced by each nested iterator are processed sequentially.
872    ///
873    /// When choosing between these methods, consider whether nested parallelism suits the
874    /// potential iterators at hand. If there's little computation involved, or its length
875    /// is much less than the outer parallel iterator, then it may perform better to avoid
876    /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
877    /// If there is a lot of computation, potentially outweighing the outer parallel
878    /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
879    ///
880    /// [`flat_map`]: #method.flat_map
881    ///
882    /// # Examples
883    ///
884    /// ```
885    /// use rayon::prelude::*;
886    /// use std::cell::RefCell;
887    ///
888    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
889    ///
890    /// let par_iter = a.par_iter().flat_map_iter(|a| {
891    ///     // The serial iterator doesn't have to be thread-safe, just its items.
892    ///     let cell_iter = RefCell::new(a.iter().cloned());
893    ///     std::iter::from_fn(move || cell_iter.borrow_mut().next())
894    /// });
895    ///
896    /// let vec: Vec<_> = par_iter.collect();
897    ///
898    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
899    /// ```
900    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
901    where
902        F: Fn(Self::Item) -> SI + Sync + Send,
903        SI: IntoIterator<Item: Send>,
904    {
905        FlatMapIter::new(self, map_op)
906    }
907
908    /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
909    ///
910    /// See also [`flatten_iter`](#method.flatten_iter).
911    ///
912    /// # Examples
913    ///
914    /// ```
915    /// use rayon::prelude::*;
916    ///
917    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
918    /// let y: Vec<_> = x.into_par_iter().flatten().collect();
919    ///
920    /// assert_eq!(y, vec![1, 2, 3, 4]);
921    /// ```
922    fn flatten(self) -> Flatten<Self>
923    where
924        Self::Item: IntoParallelIterator,
925    {
926        Flatten::new(self)
927    }
928
929    /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
930    ///
931    /// See also [`flatten`](#method.flatten) and the analogous comparison of
932    /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
933    ///
934    /// # Examples
935    ///
936    /// ```
937    /// use rayon::prelude::*;
938    ///
939    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
940    /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
941    /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
942    ///
943    /// assert_eq!(y, vec![1, 2, 3, 4]);
944    /// ```
945    fn flatten_iter(self) -> FlattenIter<Self>
946    where
947        Self::Item: IntoIterator<Item: Send>,
948    {
949        FlattenIter::new(self)
950    }
951
952    /// Reduces the items in the iterator into one item using `op`.
953    /// The argument `identity` should be a closure that can produce an
954    /// "identity" value which may be inserted into the sequence as
955    /// needed to create opportunities for parallel execution. So, for
956    /// example, if you are doing a summation, then `identity()` ought
957    /// to produce something that represents the zero for your type
958    /// (but consider just calling `sum()` in that case).
959    ///
960    /// # Examples
961    ///
962    /// ```
963    /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
964    /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
965    /// // where the first/second elements are summed separately.
966    /// use rayon::prelude::*;
967    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
968    ///            .par_iter()        // iterating over &(i32, i32)
969    ///            .cloned()          // iterating over (i32, i32)
970    ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
971    ///                    |a, b| (a.0 + b.0, a.1 + b.1));
972    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
973    /// ```
974    ///
975    /// **Note:** unlike a sequential `fold` operation, the order in
976    /// which `op` will be applied to reduce the result is not fully
977    /// specified. So `op` should be [associative] or else the results
978    /// will be non-deterministic. And of course `identity()` should
979    /// produce a true identity.
980    ///
981    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
982    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
983    where
984        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
985        ID: Fn() -> Self::Item + Sync + Send,
986    {
987        reduce::reduce(self, identity, op)
988    }
989
990    /// Reduces the items in the iterator into one item using `op`.
991    /// If the iterator is empty, `None` is returned; otherwise,
992    /// `Some` is returned.
993    ///
994    /// This version of `reduce` is simple but somewhat less
995    /// efficient. If possible, it is better to call `reduce()`, which
996    /// requires an identity element.
997    ///
998    /// # Examples
999    ///
1000    /// ```
1001    /// use rayon::prelude::*;
1002    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1003    ///            .par_iter()        // iterating over &(i32, i32)
1004    ///            .cloned()          // iterating over (i32, i32)
1005    ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1006    ///            .unwrap();
1007    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1008    /// ```
1009    ///
1010    /// **Note:** unlike a sequential `fold` operation, the order in
1011    /// which `op` will be applied to reduce the result is not fully
1012    /// specified. So `op` should be [associative] or else the results
1013    /// will be non-deterministic.
1014    ///
1015    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1016    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1017    where
1018        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1019    {
1020        fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1021            move |opt_a, b| match opt_a {
1022                Some(a) => Some(op(a, b)),
1023                None => Some(b),
1024            }
1025        }
1026
1027        fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1028            move |opt_a, opt_b| match (opt_a, opt_b) {
1029                (Some(a), Some(b)) => Some(op(a, b)),
1030                (Some(v), None) | (None, Some(v)) => Some(v),
1031                (None, None) => None,
1032            }
1033        }
1034
1035        self.fold(<_>::default, opt_fold(&op))
1036            .reduce(<_>::default, opt_reduce(&op))
1037    }
1038
1039    /// Reduces the items in the iterator into one item using a fallible `op`.
1040    /// The `identity` argument is used the same way as in [`reduce()`].
1041    ///
1042    /// [`reduce()`]: #method.reduce
1043    ///
1044    /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1045    /// to one, we will attempt to stop processing the rest of the items in the
1046    /// iterator as soon as possible, and we will return that terminating value.
1047    /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1048    /// `Option::Some(T)`.  If there are multiple errors in parallel, it is not
1049    /// specified which will be returned.
1050    ///
1051    /// # Examples
1052    ///
1053    /// ```
1054    /// use rayon::prelude::*;
1055    ///
1056    /// // Compute the sum of squares, being careful about overflow.
1057    /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1058    ///     iter.into_par_iter()
1059    ///         .map(|i| i.checked_mul(i))            // square each item,
1060    ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
1061    /// }
1062    /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1063    ///
1064    /// // The sum might overflow
1065    /// assert_eq!(sum_squares(0..10_000), None);
1066    ///
1067    /// // Or the squares might overflow before it even reaches `try_reduce`
1068    /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1069    /// ```
1070    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
1071    where
1072        OP: Fn(T, T) -> Self::Item + Sync + Send,
1073        ID: Fn() -> T + Sync + Send,
1074        Self::Item: Try<Output = T>,
1075    {
1076        try_reduce::try_reduce(self, identity, op)
1077    }
1078
1079    /// Reduces the items in the iterator into one item using a fallible `op`.
1080    ///
1081    /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1082    /// otherwise, `Some` is returned.  Beyond that, it behaves like
1083    /// [`try_reduce()`] for handling `Err`/`None`.
1084    ///
1085    /// [`reduce_with()`]: #method.reduce_with
1086    /// [`try_reduce()`]: #method.try_reduce
1087    ///
1088    /// For instance, with `Option` items, the return value may be:
1089    /// - `None`, the iterator was empty
1090    /// - `Some(None)`, we stopped after encountering `None`.
1091    /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1092    ///
1093    /// With `Result` items, the nesting is more obvious:
1094    /// - `None`, the iterator was empty
1095    /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1096    /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1097    ///
1098    /// # Examples
1099    ///
1100    /// ```
1101    /// use rayon::prelude::*;
1102    ///
1103    /// let files = ["/dev/null", "/does/not/exist"];
1104    ///
1105    /// // Find the biggest file
1106    /// files.into_par_iter()
1107    ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1108    ///     .try_reduce_with(|a, b| {
1109    ///         Ok(if a.1 >= b.1 { a } else { b })
1110    ///     })
1111    ///     .expect("Some value, since the iterator is not empty")
1112    ///     .expect_err("not found");
1113    /// ```
1114    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
1115    where
1116        OP: Fn(T, T) -> Self::Item + Sync + Send,
1117        Self::Item: Try<Output = T>,
1118    {
1119        try_reduce_with::try_reduce_with(self, op)
1120    }
1121
1122    /// Parallel fold is similar to sequential fold except that the
1123    /// sequence of items may be subdivided before it is
1124    /// folded. Consider a list of numbers like `22 3 77 89 46`. If
1125    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
1126    /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1127    /// 77, and so forth. The **parallel fold** works similarly except
1128    /// that it first breaks up your list into sublists, and hence
1129    /// instead of yielding up a single sum at the end, it yields up
1130    /// multiple sums. The number of results is nondeterministic, as
1131    /// is the point where the breaks occur.
1132    ///
1133    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1134    /// our example list, we might wind up with a sequence of two numbers,
1135    /// like so:
1136    ///
1137    /// ```notrust
1138    /// 22 3 77 89 46
1139    ///       |     |
1140    ///     102   135
1141    /// ```
1142    ///
1143    /// Or perhaps these three numbers:
1144    ///
1145    /// ```notrust
1146    /// 22 3 77 89 46
1147    ///       |  |  |
1148    ///     102 89 46
1149    /// ```
1150    ///
1151    /// In general, Rayon will attempt to find good breaking points
1152    /// that keep all of your cores busy.
1153    ///
1154    /// ### Fold versus reduce
1155    ///
1156    /// The `fold()` and `reduce()` methods each take an identity element
1157    /// and a combining function, but they operate rather differently.
1158    ///
1159    /// `reduce()` requires that the identity function has the same
1160    /// type as the things you are iterating over, and it fully
1161    /// reduces the list of items into a single item. So, for example,
1162    /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1163    /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1164    /// u8| a + b)`, we would get an overflow. This is because `0`,
1165    /// `a`, and `b` here are all bytes, just like the numbers in the
1166    /// list (I wrote the types explicitly above, but those are the
1167    /// only types you can use). To avoid the overflow, we would need
1168    /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1169    /// b| a + b)`, in which case our result would be `256`.
1170    ///
1171    /// In contrast, with `fold()`, the identity function does not
1172    /// have to have the same type as the things you are iterating
1173    /// over, and you potentially get back many results. So, if we
1174    /// continue with the `bytes` example from the previous paragraph,
1175    /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1176    /// convert our bytes into `u32`. And of course we might not get
1177    /// back a single sum.
1178    ///
1179    /// There is a more subtle distinction as well, though it's
1180    /// actually implied by the above points. When you use `reduce()`,
1181    /// your reduction function is sometimes called with values that
1182    /// were never part of your original parallel iterator (for
1183    /// example, both the left and right might be a partial sum). With
1184    /// `fold()`, in contrast, the left value in the fold function is
1185    /// always the accumulator, and the right value is always from
1186    /// your original sequence.
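    ///
    /// For reference, a runnable sketch of the widening trick described
    /// above:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = [128_u8, 64_u8, 64_u8];
    /// // Mapping to `u32` first avoids the `u8` overflow that a plain
    /// // `reduce` over bytes would hit.
    /// let sum = bytes.par_iter().map(|&b| b as u32).reduce(|| 0, |a, b| a + b);
    /// assert_eq!(sum, 256);
    /// ```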
1187    ///
1188    /// ### Fold vs Map/Reduce
1189    ///
1190    /// Fold makes sense if you have some operation where it is
1191    /// cheaper to create groups of elements at a time. For example,
1192    /// imagine collecting characters into a string. If you were going
1193    /// to use map/reduce, you might try this:
1194    ///
1195    /// ```
1196    /// use rayon::prelude::*;
1197    ///
1198    /// let s =
1199    ///     ['a', 'b', 'c', 'd', 'e']
1200    ///     .par_iter()
1201    ///     .map(|c: &char| format!("{}", c))
1202    ///     .reduce(|| String::new(),
1203    ///             |mut a: String, b: String| { a.push_str(&b); a });
1204    ///
1205    /// assert_eq!(s, "abcde");
1206    /// ```
1207    ///
1208    /// Because reduce produces the same type of element as its input,
1209    /// you have to first map each character into a string, and then
1210    /// you can reduce them. This means we create one string per
1211    /// element in our iterator -- not so great. Using `fold`, we can
1212    /// do this instead:
1213    ///
1214    /// ```
1215    /// use rayon::prelude::*;
1216    ///
1217    /// let s =
1218    ///     ['a', 'b', 'c', 'd', 'e']
1219    ///     .par_iter()
1220    ///     .fold(|| String::new(),
1221    ///             |mut s: String, c: &char| { s.push(*c); s })
1222    ///     .reduce(|| String::new(),
1223    ///             |mut a: String, b: String| { a.push_str(&b); a });
1224    ///
1225    /// assert_eq!(s, "abcde");
1226    /// ```
1227    ///
1228    /// Now `fold` will process groups of our characters at a time,
1229    /// and we only make one string per group. We should wind up with
1230    /// some small-ish number of strings roughly proportional to the
1231    /// number of CPUs you have (it will ultimately depend on how busy
1232    /// your processors are). Note that we still need to do a reduce
1233    /// afterwards to combine those groups of strings into a single
1234    /// string.
1235    ///
1236    /// You could use a similar trick to save partial results (e.g., a
1237    /// cache) or something similar.
1238    ///
1239    /// ### Combining fold with other operations
1240    ///
1241    /// You can combine `fold` with `reduce` if you want to produce a
1242    /// single value. This is then roughly equivalent to a map/reduce
1243    /// combination in effect:
1244    ///
1245    /// ```
1246    /// use rayon::prelude::*;
1247    ///
1248    /// let bytes = 0..22_u8;
1249    /// let sum = bytes.into_par_iter()
1250    ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1251    ///                .sum::<u32>();
1252    ///
1253    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1254    /// ```
1255    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1256    where
1257        F: Fn(T, Self::Item) -> T + Sync + Send,
1258        ID: Fn() -> T + Sync + Send,
1259        T: Send,
1260    {
1261        Fold::new(self, identity, fold_op)
1262    }
1263
1264    /// Applies `fold_op` to the given `init` value with each item of this
1265    /// iterator, finally producing the value for further use.
1266    ///
1267    /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1268    /// it doesn't require the `init` type to be `Sync`, nor any other form
1269    /// of added synchronization.
1270    ///
1271    /// # Examples
1272    ///
1273    /// ```
1274    /// use rayon::prelude::*;
1275    ///
1276    /// let bytes = 0..22_u8;
1277    /// let sum = bytes.into_par_iter()
1278    ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1279    ///                .sum::<u32>();
1280    ///
1281    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1282    /// ```
1283    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1284    where
1285        F: Fn(T, Self::Item) -> T + Sync + Send,
1286        T: Send + Clone,
1287    {
1288        FoldWith::new(self, init, fold_op)
1289    }
1290
1291    /// Performs a fallible parallel fold.
1292    ///
1293    /// This is a variation of [`fold()`] for operations which can fail with
1294    /// `Option::None` or `Result::Err`.  The first such failure stops
1295    /// processing the local set of items, without affecting other folds in the
1296    /// iterator's subdivisions.
1297    ///
1298    /// Often, `try_fold()` will be followed by [`try_reduce()`]
1299    /// for a final reduction and global short-circuiting effect.
1300    ///
1301    /// [`fold()`]: #method.fold
1302    /// [`try_reduce()`]: #method.try_reduce
1303    ///
1304    /// # Examples
1305    ///
1306    /// ```
1307    /// use rayon::prelude::*;
1308    ///
1309    /// let bytes = 0..22_u8;
1310    /// let sum = bytes.into_par_iter()
1311    ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1312    ///                .try_reduce(|| 0, u32::checked_add);
1313    ///
1314    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1315    /// ```
1316    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1317    where
1318        F: Fn(T, Self::Item) -> R + Sync + Send,
1319        ID: Fn() -> T + Sync + Send,
1320        R: Try<Output = T> + Send,
1321    {
1322        TryFold::new(self, identity, fold_op)
1323    }
1324
1325    /// Performs a fallible parallel fold with a cloneable `init` value.
1326    ///
1327    /// This combines the `init` semantics of [`fold_with()`] and the failure
1328    /// semantics of [`try_fold()`].
1329    ///
1330    /// [`fold_with()`]: #method.fold_with
1331    /// [`try_fold()`]: #method.try_fold
1332    ///
1333    /// ```
1334    /// use rayon::prelude::*;
1335    ///
1336    /// let bytes = 0..22_u8;
1337    /// let sum = bytes.into_par_iter()
1338    ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1339    ///                .try_reduce(|| 0, u32::checked_add);
1340    ///
1341    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1342    /// ```
1343    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1344    where
1345        F: Fn(T, Self::Item) -> R + Sync + Send,
1346        R: Try<Output = T> + Send,
1347        T: Clone + Send,
1348    {
1349        TryFoldWith::new(self, init, fold_op)
1350    }
1351
1352    /// Sums up the items in the iterator.
1353    ///
1354    /// Note that the order in which items will be reduced is not specified,
1355    /// so if the `+` operator is not truly [associative] \(as is the
1356    /// case for floating point numbers), then the results are not
1357    /// fully deterministic.
1358    ///
1359    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1360    ///
1361    /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1362    /// except that the type of `0` and the `+` operation may vary
1363    /// depending on the type of value being produced.
1364    ///
1365    /// # Examples
1366    ///
1367    /// ```
1368    /// use rayon::prelude::*;
1369    ///
1370    /// let a = [1, 5, 7];
1371    ///
1372    /// let sum: i32 = a.par_iter().sum();
1373    ///
1374    /// assert_eq!(sum, 13);
1375    /// ```
1376    fn sum<S>(self) -> S
1377    where
1378        S: Send + Sum<Self::Item> + Sum<S>,
1379    {
1380        sum::sum(self)
1381    }
1382
1383    /// Multiplies all the items in the iterator.
1384    ///
1385    /// Note that the order in which items will be reduced is not specified,
1386    /// so if the `*` operator is not truly [associative] \(as is the
1387    /// case for floating point numbers), then the results are not
1388    /// fully deterministic.
1389    ///
1390    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1391    ///
1392    /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1393    /// except that the type of `1` and the `*` operation may vary
1394    /// depending on the type of value being produced.
1395    ///
1396    /// # Examples
1397    ///
1398    /// ```
1399    /// use rayon::prelude::*;
1400    ///
1401    /// fn factorial(n: u32) -> u32 {
1402    ///    (1..n+1).into_par_iter().product()
1403    /// }
1404    ///
1405    /// assert_eq!(factorial(0), 1);
1406    /// assert_eq!(factorial(1), 1);
1407    /// assert_eq!(factorial(5), 120);
1408    /// ```
1409    fn product<P>(self) -> P
1410    where
1411        P: Send + Product<Self::Item> + Product<P>,
1412    {
1413        product::product(self)
1414    }
1415
1416    /// Computes the minimum of all the items in the iterator. If the
1417    /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1418    /// is returned.
1419    ///
1420    /// Note that the order in which the items will be reduced is not
1421    /// specified, so if the `Ord` impl is not truly associative, then
1422    /// the results are not deterministic.
1423    ///
1424    /// Basically equivalent to `self.reduce_with(|a, b| Ord::min(a, b))`.
1425    ///
1426    /// # Examples
1427    ///
1428    /// ```
1429    /// use rayon::prelude::*;
1430    ///
1431    /// let a = [45, 74, 32];
1432    ///
1433    /// assert_eq!(a.par_iter().min(), Some(&32));
1434    ///
1435    /// let b: [i32; 0] = [];
1436    ///
1437    /// assert_eq!(b.par_iter().min(), None);
1438    /// ```
1439    fn min(self) -> Option<Self::Item>
1440    where
1441        Self::Item: Ord,
1442    {
1443        self.reduce_with(Ord::min)
1444    }
1445
1446    /// Computes the minimum of all the items in the iterator with respect to
1447    /// the given comparison function. If the iterator is empty, `None` is
1448    /// returned; otherwise, `Some(min)` is returned.
1449    ///
1450    /// Note that the order in which the items will be reduced is not
1451    /// specified, so if the comparison function is not associative, then
1452    /// the results are not deterministic.
1453    ///
1454    /// # Examples
1455    ///
1456    /// ```
1457    /// use rayon::prelude::*;
1458    ///
1459    /// let a = [-3_i32, 77, 53, 240, -1];
1460    ///
1461    /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1462    /// ```
1463    fn min_by<F>(self, f: F) -> Option<Self::Item>
1464    where
1465        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1466    {
1467        fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1468            move |a, b| match f(&a, &b) {
1469                Ordering::Greater => b,
1470                _ => a,
1471            }
1472        }
1473
1474        self.reduce_with(min(f))
1475    }
1476
1477    /// Computes the item that yields the minimum value for the given
1478    /// function. If the iterator is empty, `None` is returned;
1479    /// otherwise, `Some(item)` is returned.
1480    ///
1481    /// Note that the order in which the items will be reduced is not
1482    /// specified, so if the `Ord` impl is not truly associative, then
1483    /// the results are not deterministic.
1484    ///
1485    /// # Examples
1486    ///
1487    /// ```
1488    /// use rayon::prelude::*;
1489    ///
1490    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1491    ///
1492    /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1493    /// ```
1494    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1495    where
1496        K: Ord + Send,
1497        F: Sync + Send + Fn(&Self::Item) -> K,
1498    {
1499        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1500            move |x| (f(&x), x)
1501        }
1502
1503        fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1504            match (a.0).cmp(&b.0) {
1505                Ordering::Greater => b,
1506                _ => a,
1507            }
1508        }
1509
1510        let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1511        Some(x)
1512    }
1513
1514    /// Computes the maximum of all the items in the iterator. If the
1515    /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1516    /// is returned.
1517    ///
1518    /// Note that the order in which the items will be reduced is not
1519    /// specified, so if the `Ord` impl is not truly associative, then
1520    /// the results are not deterministic.
1521    ///
1522    /// Basically equivalent to `self.reduce_with(|a, b| Ord::max(a, b))`.
1523    ///
1524    /// # Examples
1525    ///
1526    /// ```
1527    /// use rayon::prelude::*;
1528    ///
1529    /// let a = [45, 74, 32];
1530    ///
1531    /// assert_eq!(a.par_iter().max(), Some(&74));
1532    ///
1533    /// let b: [i32; 0] = [];
1534    ///
1535    /// assert_eq!(b.par_iter().max(), None);
1536    /// ```
1537    fn max(self) -> Option<Self::Item>
1538    where
1539        Self::Item: Ord,
1540    {
1541        self.reduce_with(Ord::max)
1542    }
1543
1544    /// Computes the maximum of all the items in the iterator with respect to
1545    /// the given comparison function. If the iterator is empty, `None` is
1546    /// returned; otherwise, `Some(max)` is returned.
1547    ///
1548    /// Note that the order in which the items will be reduced is not
1549    /// specified, so if the comparison function is not associative, then
1550    /// the results are not deterministic.
1551    ///
1552    /// # Examples
1553    ///
1554    /// ```
1555    /// use rayon::prelude::*;
1556    ///
1557    /// let a = [-3_i32, 77, 53, 240, -1];
1558    ///
1559    /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1560    /// ```
1561    fn max_by<F>(self, f: F) -> Option<Self::Item>
1562    where
1563        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1564    {
1565        fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1566            move |a, b| match f(&a, &b) {
1567                Ordering::Greater => a,
1568                _ => b,
1569            }
1570        }
1571
1572        self.reduce_with(max(f))
1573    }
1574
1575    /// Computes the item that yields the maximum value for the given
1576    /// function. If the iterator is empty, `None` is returned;
1577    /// otherwise, `Some(item)` is returned.
1578    ///
1579    /// Note that the order in which the items will be reduced is not
1580    /// specified, so if the `Ord` impl is not truly associative, then
1581    /// the results are not deterministic.
1582    ///
1583    /// # Examples
1584    ///
1585    /// ```
1586    /// use rayon::prelude::*;
1587    ///
1588    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1589    ///
1590    /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1591    /// ```
1592    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1593    where
1594        K: Ord + Send,
1595        F: Sync + Send + Fn(&Self::Item) -> K,
1596    {
1597        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1598            move |x| (f(&x), x)
1599        }
1600
1601        fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1602            match (a.0).cmp(&b.0) {
1603                Ordering::Greater => a,
1604                _ => b,
1605            }
1606        }
1607
1608        let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1609        Some(x)
1610    }
1611
1612    /// Takes two iterators and creates a new iterator over both.
1613    ///
1614    /// # Examples
1615    ///
1616    /// ```
1617    /// use rayon::prelude::*;
1618    ///
1619    /// let a = [0, 1, 2];
1620    /// let b = [9, 8, 7];
1621    ///
1622    /// let par_iter = a.par_iter().chain(b.par_iter());
1623    ///
1624    /// let chained: Vec<_> = par_iter.cloned().collect();
1625    ///
1626    /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1627    /// ```
1628    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1629    where
1630        C: IntoParallelIterator<Item = Self::Item>,
1631    {
1632        Chain::new(self, chain.into_par_iter())
1633    }
1634
1635    /// Searches for **some** item in the parallel iterator that
1636    /// matches the given predicate and returns it. This operation
1637    /// is similar to [`find` on sequential iterators][find] but
1638    /// the item returned may not be the **first** one in the parallel
1639    /// sequence which matches, since we search the entire sequence in parallel.
1640    ///
1641    /// Once a match is found, we will attempt to stop processing
1642    /// the rest of the items in the iterator as soon as possible
1643    /// (just as `find` stops iterating once a match is found).
1644    ///
1645    /// [find]: Iterator::find()
1646    ///
1647    /// # Examples
1648    ///
1649    /// ```
1650    /// use rayon::prelude::*;
1651    ///
1652    /// let a = [1, 2, 3, 3];
1653    ///
1654    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1655    ///
1656    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1657    /// ```
1658    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1659    where
1660        P: Fn(&Self::Item) -> bool + Sync + Send,
1661    {
1662        find::find(self, predicate)
1663    }
1664
1665    /// Searches for the sequentially **first** item in the parallel iterator
1666    /// that matches the given predicate and returns it.
1667    ///
1668    /// Once a match is found, all attempts to the right of the match
1669    /// will be stopped, while attempts to the left must continue in case
1670    /// an earlier match is found.
1671    ///
1672    /// For added performance, you might consider using `find_first` in conjunction with
1673    /// [`by_exponential_blocks()`][IndexedParallelIterator::by_exponential_blocks].
1674    ///
1675    /// Note that not all parallel iterators have a useful order, much like
1676    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
1677    /// just want the first match discovered anywhere in the iterator,
1678    /// `find_any` is a better choice.
1679    ///
1680    /// # Examples
1681    ///
1682    /// ```
1683    /// use rayon::prelude::*;
1684    ///
1685    /// let a = [1, 2, 3, 3];
1686    ///
1687    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1688    ///
1689    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1690    /// ```
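    ///
    /// As a sketch of the pairing mentioned above, one might combine `find_first` with
    /// [`by_exponential_blocks()`][IndexedParallelIterator::by_exponential_blocks]
    /// to avoid wasted work to the right of the first match:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let first_multiple = (1..1_000_000)
    ///     .into_par_iter()
    ///     .by_exponential_blocks()
    ///     .find_first(|&x| x % 100 == 0);
    ///
    /// assert_eq!(first_multiple, Some(100));
    /// ```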
1691    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1692    where
1693        P: Fn(&Self::Item) -> bool + Sync + Send,
1694    {
1695        find_first_last::find_first(self, predicate)
1696    }
1697
1698    /// Searches for the sequentially **last** item in the parallel iterator
1699    /// that matches the given predicate and returns it.
1700    ///
1701    /// Once a match is found, all attempts to the left of the match
1702    /// will be stopped, while attempts to the right must continue in case
1703    /// a later match is found.
1704    ///
1705    /// Note that not all parallel iterators have a useful order, much like
1706    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
1707    /// order doesn't actually matter to you, `find_any` is a better choice.
1708    ///
1709    /// # Examples
1710    ///
1711    /// ```
1712    /// use rayon::prelude::*;
1713    ///
1714    /// let a = [1, 2, 3, 3];
1715    ///
1716    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1717    ///
1718    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1719    /// ```
1720    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1721    where
1722        P: Fn(&Self::Item) -> bool + Sync + Send,
1723    {
1724        find_first_last::find_last(self, predicate)
1725    }
1726
1727    /// Applies the given predicate to the items in the parallel iterator
1728    /// and returns **any** non-None result of the map operation.
1729    ///
1730    /// Once a non-None value is produced from the map operation, we will
1731    /// attempt to stop processing the rest of the items in the iterator
1732    /// as soon as possible.
1733    ///
1734    /// Note that this method returns **some** non-`None` result of the map
1735    /// operation, which may not be the **first** non-`None` value produced
1736    /// in the parallel sequence, since the entire sequence is mapped over
1737    /// in parallel.
1738    ///
1739    /// # Examples
1740    ///
1741    /// ```
1742    /// use rayon::prelude::*;
1743    ///
1744    /// let c = ["lol", "NaN", "5", "5"];
1745    ///
1746    /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1747    ///
1748    /// assert_eq!(found_number, Some(5));
1749    /// ```
1750    fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1751    where
1752        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1753        R: Send,
1754    {
1755        fn yes<T>(_: &T) -> bool {
1756            true
1757        }
1758        self.filter_map(predicate).find_any(yes)
1759    }
1760
1761    /// Applies the given predicate to the items in the parallel iterator and
1762    /// returns the sequentially **first** non-None result of the map operation.
1763    ///
1764    /// Once a non-None value is produced from the map operation, all attempts
1765    /// to the right of the match will be stopped, while attempts to the left
1766    /// must continue in case an earlier match is found.
1767    ///
1768    /// Note that not all parallel iterators have a useful order, much like
1769    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1770    /// just want the first non-None value discovered anywhere in the iterator,
1771    /// `find_map_any` is a better choice.
1772    ///
1773    /// # Examples
1774    ///
1775    /// ```
1776    /// use rayon::prelude::*;
1777    ///
1778    /// let c = ["lol", "NaN", "2", "5"];
1779    ///
1780    /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1781    ///
1782    /// assert_eq!(first_number, Some(2));
1783    /// ```
1784    fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1785    where
1786        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1787        R: Send,
1788    {
1789        fn yes<T>(_: &T) -> bool {
1790            true
1791        }
1792        self.filter_map(predicate).find_first(yes)
1793    }
1794
1795    /// Applies the given predicate to the items in the parallel iterator and
1796    /// returns the sequentially **last** non-None result of the map operation.
1797    ///
1798    /// Once a non-None value is produced from the map operation, all attempts
1799    /// to the left of the match will be stopped, while attempts to the right
1800    /// must continue in case a later match is found.
1801    ///
1802    /// Note that not all parallel iterators have a useful order, much like
1803    /// sequential `HashMap` iteration, so "last" may be nebulous. When the
1804    /// order doesn't actually matter to you, `find_map_any` is a better
1805    /// choice.
1806    ///
1807    /// # Examples
1808    ///
1809    /// ```
1810    /// use rayon::prelude::*;
1811    ///
1812    /// let c = ["lol", "NaN", "2", "5"];
1813    ///
1814    /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1815    ///
1816    /// assert_eq!(last_number, Some(5));
1817    /// ```
1818    fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1819    where
1820        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1821        R: Send,
1822    {
1823        fn yes<T>(_: &T) -> bool {
1824            true
1825        }
1826        self.filter_map(predicate).find_last(yes)
1827    }
1828
1829    #[doc(hidden)]
1830    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
1831                         `find_first`, or `find_last`")]
1832    fn find<P>(self, predicate: P) -> Option<Self::Item>
1833    where
1834        P: Fn(&Self::Item) -> bool + Sync + Send,
1835    {
1836        self.find_any(predicate)
1837    }
1838
1839    /// Searches for **some** item in the parallel iterator that
1840    /// matches the given predicate, and if so returns true.  Once
1841    /// a match is found, we'll attempt to stop processing the rest
1842    /// of the items.  Proving that there's no match, returning false,
1843    /// does require visiting every item.
1844    ///
1845    /// # Examples
1846    ///
1847    /// ```
1848    /// use rayon::prelude::*;
1849    ///
1850    /// let a = [0, 12, 3, 4, 0, 23, 0];
1851    ///
1852    /// let is_valid = a.par_iter().any(|&x| x > 10);
1853    ///
1854    /// assert!(is_valid);
1855    /// ```
1856    fn any<P>(self, predicate: P) -> bool
1857    where
1858        P: Fn(Self::Item) -> bool + Sync + Send,
1859    {
1860        self.map(predicate).find_any(bool::clone).is_some()
1861    }
1862
1863    /// Tests that every item in the parallel iterator matches the given
1864    /// predicate, and if so returns true.  If a counter-example is found,
1865    /// we'll attempt to stop processing more items, then return false.
1866    ///
1867    /// # Examples
1868    ///
1869    /// ```
1870    /// use rayon::prelude::*;
1871    ///
1872    /// let a = [0, 12, 3, 4, 0, 23, 0];
1873    ///
1874    /// let is_valid = a.par_iter().all(|&x| x > 10);
1875    ///
1876    /// assert!(!is_valid);
1877    /// ```
1878    fn all<P>(self, predicate: P) -> bool
1879    where
1880        P: Fn(Self::Item) -> bool + Sync + Send,
1881    {
1882        #[inline]
1883        fn is_false(x: &bool) -> bool {
1884            !x
1885        }
1886
1887        self.map(predicate).find_any(is_false).is_none()
1888    }
1889
1890    /// Creates an iterator over the `Some` items of this iterator, halting
1891    /// as soon as any `None` is found.
1892    ///
1893    /// # Examples
1894    ///
1895    /// ```
1896    /// use rayon::prelude::*;
1897    /// use std::sync::atomic::{AtomicUsize, Ordering};
1898    ///
1899    /// let counter = AtomicUsize::new(0);
1900    /// let value = (0_i32..2048)
1901    ///     .into_par_iter()
1902    ///     .map(|x| {
1903    ///              counter.fetch_add(1, Ordering::SeqCst);
1904    ///              if x < 1024 { Some(x) } else { None }
1905    ///          })
1906    ///     .while_some()
1907    ///     .max();
1908    ///
1909    /// assert!(value < Some(1024));
1910    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1911    /// ```
1912    fn while_some<T>(self) -> WhileSome<Self>
1913    where
1914        Self: ParallelIterator<Item = Option<T>>,
1915        T: Send,
1916    {
1917        WhileSome::new(self)
1918    }
1919
1920    /// Wraps an iterator with a fuse in case of panics, to halt all threads
1921    /// as soon as possible.
1922    ///
1923    /// Panics within parallel iterators are always propagated to the caller,
1924    /// but they don't always halt the rest of the iterator right away, due to
1925    /// the internal semantics of [`join`]. This adaptor makes a greater effort
1926    /// to stop processing other items sooner, with the cost of additional
1927    /// synchronization overhead, which may also inhibit some optimizations.
1928    ///
1929    /// [`join`]: crate::join()#panics
1930    ///
1931    /// # Examples
1932    ///
1933    /// If this code didn't use `panic_fuse()`, it would continue processing
1934    /// many more items in other threads (with long sleep delays) before the
1935    /// panic is finally propagated.
1936    ///
1937    /// ```should_panic
1938    /// use rayon::prelude::*;
1939    /// use std::{thread, time};
1940    ///
1941    /// (0..1_000_000)
1942    ///     .into_par_iter()
1943    ///     .panic_fuse()
1944    ///     .for_each(|i| {
1945    ///         // simulate some work
1946    ///         thread::sleep(time::Duration::from_secs(1));
1947    ///         assert!(i > 0); // oops!
1948    ///     });
1949    /// ```
1950    fn panic_fuse(self) -> PanicFuse<Self> {
1951        PanicFuse::new(self)
1952    }
1953
1954    /// Creates a fresh collection containing all the elements produced
1955    /// by this parallel iterator.
1956    ///
1957    /// You may prefer [`collect_into_vec()`] implemented on
1958    /// [`IndexedParallelIterator`], if your underlying iterator also implements
1959    /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1960    /// of how many elements the iterator contains, and even allows you to reuse
1961    /// an existing vector's backing store rather than allocating a fresh vector.
1962    ///
1963    /// See also [`collect_vec_list()`] for collecting into a
1964    /// `LinkedList<Vec<T>>`.
1965    ///
1966    /// [`collect_into_vec()`]: IndexedParallelIterator::collect_into_vec()
1967    /// [`collect_vec_list()`]: Self::collect_vec_list()
1968    ///
1969    /// # Examples
1970    ///
1971    /// ```
1972    /// use rayon::prelude::*;
1973    ///
1974    /// let seq_vec: Vec<_> = (0..100).into_iter().collect();
1975    ///
1976    /// let par_vec: Vec<_> = (0..100).into_par_iter().collect();
1977    ///
1978    /// assert_eq!(seq_vec, par_vec);
1979    /// ```
1980    ///
1981    /// You can collect a pair of collections like [`unzip`](#method.unzip)
1982    /// for paired items:
1983    ///
1984    /// ```
1985    /// use rayon::prelude::*;
1986    ///
1987    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1988    /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
1989    ///
1990    /// assert_eq!(first, [0, 1, 2, 3]);
1991    /// assert_eq!(second, [1, 2, 3, 4]);
1992    /// ```
1993    ///
1994    /// Or like [`partition_map`](#method.partition_map) for `Either` items:
1995    ///
1996    /// ```
1997    /// use rayon::prelude::*;
1998    /// use rayon::iter::Either;
1999    ///
2000    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
2001    ///     if x % 2 == 0 {
2002    ///         Either::Left(x * 4)
2003    ///     } else {
2004    ///         Either::Right(x * 3)
2005    ///     }
2006    /// }).collect();
2007    ///
2008    /// assert_eq!(left, [0, 8, 16, 24]);
2009    /// assert_eq!(right, [3, 9, 15, 21]);
2010    /// ```
2011    ///
2012    /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2013    ///
2014    /// ```
2015    /// use rayon::prelude::*;
2016    /// use rayon::iter::Either;
2017    ///
2018    /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2019    ///     = (0..8).into_par_iter().map(|x| {
2020    ///         if x % 2 == 0 {
2021    ///             (x, Either::Left(x * 4))
2022    ///         } else {
2023    ///             (-x, Either::Right(x * 3))
2024    ///         }
2025    ///     }).collect();
2026    ///
2027    /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2028    /// assert_eq!(left, [0, 8, 16, 24]);
2029    /// assert_eq!(right, [3, 9, 15, 21]);
2030    /// ```
2031    ///
2032    /// All of that can _also_ be combined with short-circuiting collection of
2033    /// `Result` or `Option` types:
2034    ///
2035    /// ```
2036    /// use rayon::prelude::*;
2037    /// use rayon::iter::Either;
2038    ///
2039    /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2040    ///     = (0..8).into_par_iter().map(|x| {
2041    ///         if x > 5 {
2042    ///             Err(x)
2043    ///         } else if x % 2 == 0 {
2044    ///             Ok((x, Either::Left(x * 4)))
2045    ///         } else {
2046    ///             Ok((-x, Either::Right(x * 3)))
2047    ///         }
2048    ///     }).collect();
2049    ///
2050    /// let error = result.unwrap_err();
2051    /// assert!(error == 6 || error == 7);
2052    /// ```
2053    fn collect<C>(self) -> C
2054    where
2055        C: FromParallelIterator<Self::Item>,
2056    {
2057        C::from_par_iter(self)
2058    }
2059
2060    /// Unzips the items of a parallel iterator into a pair of arbitrary
2061    /// `ParallelExtend` containers.
2062    ///
2063    /// You may prefer to use `unzip_into_vecs()`, which allocates more
2064    /// efficiently with precise knowledge of how many elements the
2065    /// iterator contains, and even allows you to reuse existing
2066    /// vectors' backing stores rather than allocating fresh vectors.
2067    ///
2068    /// # Examples
2069    ///
2070    /// ```
2071    /// use rayon::prelude::*;
2072    ///
2073    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2074    ///
2075    /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2076    ///
2077    /// assert_eq!(left, [0, 1, 2, 3]);
2078    /// assert_eq!(right, [1, 2, 3, 4]);
2079    /// ```
2080    ///
2081    /// Nested pairs can be unzipped too.
2082    ///
2083    /// ```
2084    /// use rayon::prelude::*;
2085    ///
2086    /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2087    ///     .map(|i| (i, (i * i, i * i * i)))
2088    ///     .unzip();
2089    ///
2090    /// assert_eq!(values, [0, 1, 2, 3]);
2091    /// assert_eq!(squares, [0, 1, 4, 9]);
2092    /// assert_eq!(cubes, [0, 1, 8, 27]);
2093    /// ```
2094    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
2095    where
2096        Self: ParallelIterator<Item = (A, B)>,
2097        FromA: Default + Send + ParallelExtend<A>,
2098        FromB: Default + Send + ParallelExtend<B>,
2099        A: Send,
2100        B: Send,
2101    {
2102        unzip::unzip(self)
2103    }
2104
2105    /// Partitions the items of a parallel iterator into a pair of arbitrary
2106    /// `ParallelExtend` containers.  Items for which the `predicate` returns
2107    /// true go into the first container, and the rest go into the second.
2108    ///
2109    /// Note: unlike the standard `Iterator::partition`, this allows distinct
2110    /// collection types for the left and right items.  This is more flexible,
2111    /// but may require new type annotations when converting sequential code
2112    /// that used type inference assuming the two were the same.
2113    ///
2114    /// # Examples
2115    ///
2116    /// ```
2117    /// use rayon::prelude::*;
2118    ///
2119    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2120    ///
2121    /// assert_eq!(left, [0, 2, 4, 6]);
2122    /// assert_eq!(right, [1, 3, 5, 7]);
2123    /// ```
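    ///
    /// Since the two sides may be different collection types, one might, for
    /// example, gather the matching items into a `HashSet` and the rest into a
    /// `Vec`:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::HashSet;
    ///
    /// let (evens, odds): (HashSet<i32>, Vec<i32>) =
    ///     (0..8).into_par_iter().partition(|x| x % 2 == 0);
    ///
    /// assert_eq!(evens.len(), 4);
    /// assert_eq!(odds, [1, 3, 5, 7]);
    /// ```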
2124    fn partition<A, B, P>(self, predicate: P) -> (A, B)
2125    where
2126        A: Default + Send + ParallelExtend<Self::Item>,
2127        B: Default + Send + ParallelExtend<Self::Item>,
2128        P: Fn(&Self::Item) -> bool + Sync + Send,
2129    {
2130        unzip::partition(self, predicate)
2131    }
2132
2133    /// Partitions and maps the items of a parallel iterator into a pair of
2134    /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
2135    /// the first container, and `Either::Right` items go into the second.
2136    ///
2137    /// # Examples
2138    ///
2139    /// ```
2140    /// use rayon::prelude::*;
2141    /// use rayon::iter::Either;
2142    ///
2143    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2144    ///     .partition_map(|x| {
2145    ///         if x % 2 == 0 {
2146    ///             Either::Left(x * 4)
2147    ///         } else {
2148    ///             Either::Right(x * 3)
2149    ///         }
2150    ///     });
2151    ///
2152    /// assert_eq!(left, [0, 8, 16, 24]);
2153    /// assert_eq!(right, [3, 9, 15, 21]);
2154    /// ```
2155    ///
2156    /// Nested `Either` enums can be split as well.
2157    ///
2158    /// ```
2159    /// use rayon::prelude::*;
2160    /// use rayon::iter::Either::*;
2161    ///
2162    /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2163    ///     .into_par_iter()
2164    ///     .partition_map(|x| match (x % 3, x % 5) {
2165    ///         (0, 0) => Left(Left(x)),
2166    ///         (0, _) => Left(Right(x)),
2167    ///         (_, 0) => Right(Left(x)),
2168    ///         (_, _) => Right(Right(x)),
2169    ///     });
2170    ///
2171    /// assert_eq!(fizzbuzz, [15]);
2172    /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2173    /// assert_eq!(buzz, [5, 10]);
2174    /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2175    /// ```
2176    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
2177    where
2178        A: Default + Send + ParallelExtend<L>,
2179        B: Default + Send + ParallelExtend<R>,
2180        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
2181        L: Send,
2182        R: Send,
2183    {
2184        unzip::partition_map(self, predicate)
2185    }
2186
2187    /// Intersperses clones of an element between items of this iterator.
2188    ///
2189    /// # Examples
2190    ///
2191    /// ```
2192    /// use rayon::prelude::*;
2193    ///
2194    /// let x = vec![1, 2, 3];
2195    /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2196    ///
2197    /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2198    /// ```
2199    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
2200    where
2201        Self::Item: Clone,
2202    {
2203        Intersperse::new(self, element)
2204    }
2205
2206    /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
2207    ///
2208    /// This is similar to [`IndexedParallelIterator::take`] without being
2209    /// constrained to the "first" `n` of the original iterator order. The
2210    /// taken items will still maintain their relative order where that is
2211    /// visible in `collect`, `reduce`, and similar outputs.
2212    ///
2213    /// # Examples
2214    ///
2215    /// ```
2216    /// use rayon::prelude::*;
2217    ///
2218    /// let result: Vec<_> = (0..100)
2219    ///     .into_par_iter()
2220    ///     .filter(|&x| x % 2 == 0)
2221    ///     .take_any(5)
2222    ///     .collect();
2223    ///
2224    /// assert_eq!(result.len(), 5);
2225    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2226    /// ```
2227    fn take_any(self, n: usize) -> TakeAny<Self> {
2228        TakeAny::new(self, n)
2229    }
2230
2231    /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
2232    ///
2233    /// This is similar to [`IndexedParallelIterator::skip`] without being
2234    /// constrained to the "first" `n` of the original iterator order. The
2235    /// remaining items will still maintain their relative order where that is
2236    /// visible in `collect`, `reduce`, and similar outputs.
2237    ///
2238    /// # Examples
2239    ///
2240    /// ```
2241    /// use rayon::prelude::*;
2242    ///
2243    /// let result: Vec<_> = (0..100)
2244    ///     .into_par_iter()
2245    ///     .filter(|&x| x % 2 == 0)
2246    ///     .skip_any(5)
2247    ///     .collect();
2248    ///
2249    /// assert_eq!(result.len(), 45);
2250    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2251    /// ```
2252    fn skip_any(self, n: usize) -> SkipAny<Self> {
2253        SkipAny::new(self, n)
2254    }
2255
2256    /// Creates an iterator that takes elements from *anywhere* in the original iterator
2257    /// until the given `predicate` returns `false`.
2258    ///
2259    /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2260    /// global condition unrelated to the item itself, or some combination thereof.
2261    ///
2262    /// If parallel calls to the `predicate` race and give different results, then the
2263    /// `true` results will still take those particular items, while respecting the `false`
2264    /// result from elsewhere to skip any further items.
2265    ///
2266    /// This is similar to [`Iterator::take_while`] without being constrained to the original
2267    /// iterator order. The taken items will still maintain their relative order where that is
2268    /// visible in `collect`, `reduce`, and similar outputs.
2269    ///
2270    /// # Examples
2271    ///
2272    /// ```
2273    /// use rayon::prelude::*;
2274    ///
2275    /// let result: Vec<_> = (0..100)
2276    ///     .into_par_iter()
2277    ///     .take_any_while(|x| *x < 50)
2278    ///     .collect();
2279    ///
2280    /// assert!(result.len() <= 50);
2281    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2282    /// ```
2283    ///
2284    /// ```
2285    /// use rayon::prelude::*;
2286    /// use std::sync::atomic::AtomicUsize;
2287    /// use std::sync::atomic::Ordering::Relaxed;
2288    ///
2289    /// // Collect any group of items that sum <= 1000
2290    /// let quota = AtomicUsize::new(1000);
2291    /// let result: Vec<_> = (0_usize..100)
2292    ///     .into_par_iter()
2293    ///     .take_any_while(|&x| {
2294    ///         quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
2295    ///             .is_ok()
2296    ///     })
2297    ///     .collect();
2298    ///
2299    /// let sum = result.iter().sum::<usize>();
2300    /// assert!(matches!(sum, 902..=1000));
2301    /// ```
2302    fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
2303    where
2304        P: Fn(&Self::Item) -> bool + Sync + Send,
2305    {
2306        TakeAnyWhile::new(self, predicate)
2307    }
2308
2309    /// Creates an iterator that skips elements from *anywhere* in the original iterator
2310    /// until the given `predicate` returns `false`.
2311    ///
2312    /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2313    /// global condition unrelated to the item itself, or some combination thereof.
2314    ///
2315    /// If parallel calls to the `predicate` race and give different results, then the
2316    /// `true` results will still skip those particular items, while respecting the `false`
2317    /// result from elsewhere to skip any further items.
2318    ///
2319    /// This is similar to [`Iterator::skip_while`] without being constrained to the original
2320    /// iterator order. The remaining items will still maintain their relative order where that is
2321    /// visible in `collect`, `reduce`, and similar outputs.
2322    ///
2323    /// # Examples
2324    ///
2325    /// ```
2326    /// use rayon::prelude::*;
2327    ///
2328    /// let result: Vec<_> = (0..100)
2329    ///     .into_par_iter()
2330    ///     .skip_any_while(|x| *x < 50)
2331    ///     .collect();
2332    ///
2333    /// assert!(result.len() >= 50);
2334    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2335    /// ```
2336    fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
2337    where
2338        P: Fn(&Self::Item) -> bool + Sync + Send,
2339    {
2340        SkipAnyWhile::new(self, predicate)
2341    }
2342
2343    /// Collects this iterator into a linked list of vectors.
2344    ///
2345    /// This is useful when you need to condense a parallel iterator into a collection,
2346    /// but have no specific requirements for what that collection should be. If you
2347    /// plan to store the collection longer-term, `Vec<T>` is, as always, likely the
2348    /// best default choice, despite the overhead that comes from concatenating each
2349    /// vector. Or, if this is an `IndexedParallelIterator`, you should also prefer to
2350    /// just collect to a `Vec<T>`.
2351    ///
2352    /// Internally, most [`FromParallelIterator`]/[`ParallelExtend`] implementations
2353    /// use this strategy: each job collects its chunk of the iterator into a `Vec<T>`,
2354    /// those chunks are merged into a `LinkedList`, and the collection is then
2355    /// extended with each vector. This is a very efficient way to collect an
2356    /// unindexed parallel iterator, without much intermediate data movement.
2357    ///
2358    /// # Examples
2359    ///
2360    /// ```
2361    /// # use std::collections::LinkedList;
2362    /// use rayon::prelude::*;
2363    ///
2364    /// let result: LinkedList<Vec<_>> = (0..=100)
2365    ///     .into_par_iter()
2366    ///     .filter(|x| x % 2 == 0)
2367    ///     .flat_map(|x| 0..x)
2368    ///     .collect_vec_list();
2369    ///
2370    /// // `par_iter.collect_vec_list().into_iter().flatten()` turns
2371    /// // a parallel iterator into a serial one
2372    /// let total_len = result.into_iter().flatten().count();
2373    /// assert_eq!(total_len, 2550);
2374    /// ```
2375    fn collect_vec_list(self) -> LinkedList<Vec<Self::Item>> {
2376        match extend::fast_collect(self) {
2377            Either::Left(vec) => {
2378                let mut list = LinkedList::new();
2379                if !vec.is_empty() {
2380                    list.push_back(vec);
2381                }
2382                list
2383            }
2384            Either::Right(list) => list,
2385        }
2386    }
2387
2388    /// Internal method used to define the behavior of this parallel
2389    /// iterator. You should not need to call this directly.
2390    ///
2391    /// This method causes the iterator `self` to start producing
2392    /// items and to feed them to the consumer `consumer` one by one.
2393    /// It may split the consumer before doing so to create the
2394    /// opportunity to produce in parallel.
2395    ///
2396    /// See the [README] for more details on the internals of parallel
2397    /// iterators.
2398    ///
2399    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
2400    fn drive_unindexed<C>(self, consumer: C) -> C::Result
2401    where
2402        C: UnindexedConsumer<Self::Item>;
2403
2404    /// Internal method used to define the behavior of this parallel
2405    /// iterator. You should not need to call this directly.
2406    ///
2407    /// Returns the number of items produced by this iterator, if known
2408    /// statically. This can be used by consumers to trigger special fast
2409    /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2410    /// use the (indexed) `Consumer` methods when driving a consumer, such
2411    /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2412    /// other `UnindexedConsumer` methods -- or returning an inaccurate
2413    /// value -- may result in panics.
2414    ///
2415    /// This method is currently used to optimize `collect` for want
2416    /// of true Rust specialization; it may be removed when
2417    /// specialization is stable.
2418    fn opt_len(&self) -> Option<usize> {
2419        None
2420    }
2421}
2422
2423impl<T: ParallelIterator> IntoParallelIterator for T {
2424    type Iter = T;
2425    type Item = T::Item;
2426
2427    fn into_par_iter(self) -> T {
2428        self
2429    }
2430}
2431
2432/// An iterator that supports "random access" to its data, meaning
2433/// that you can split it at arbitrary indices and draw data from
2434/// those points.
2435///
2436/// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2437// Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2438#[allow(clippy::len_without_is_empty)]
2439pub trait IndexedParallelIterator: ParallelIterator {
2440    /// Divides an iterator into sequential blocks of exponentially-increasing size.
2441    ///
2442    /// Normally, parallel iterators are recursively divided into tasks in parallel.
2443    /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2444    /// of parallel iterators of increasing sizes.
2445    /// Sizes grow exponentially in order to avoid creating
2446    /// too many blocks. This also allows balancing the current block with all previous ones.
2447    ///
2448    /// This can have many applications but the most notable ones are:
2449    /// - better performance with [`find_first()`][ParallelIterator::find_first]
2450    /// - more predictable performance with [`find_any()`][ParallelIterator::find_any]
2451    ///   or any interruptible computation
2452    ///
2453    /// # Examples
2454    ///
2455    /// ```
2456    /// use rayon::prelude::*;
2457    /// assert_eq!((0..10_000).into_par_iter()
2458    ///                       .by_exponential_blocks()
2459    ///                       .find_first(|&e| e==4_999), Some(4_999))
2460    /// ```
2461    ///
2462    /// In this example, without blocks, rayon will split the initial range into two, but all work
2463    /// on the right-hand side (from 5,000 onwards) is **useless** since the sequential algorithm
2464    /// never goes there. This means that if two threads are used, there will be **no** speedup **at
2465    /// all**.
2466    ///
2467    /// `by_exponential_blocks`, on the other hand, will start with the leftmost range from 0
2468    /// to `p` (the number of threads), continue with `p` to `3p`, then `3p` to `7p`, and so on.
2469    ///
2470    /// Each block is processed in parallel, while the blocks themselves are processed sequentially.
2471    /// We therefore keep the number of blocks (and their overhead) logarithmic while guaranteeing
2472    /// that we stop at the first block containing the searched-for data.
2473    fn by_exponential_blocks(self) -> ExponentialBlocks<Self> {
2474        ExponentialBlocks::new(self)
2475    }
2476
2477    /// Divides an iterator into sequential blocks of the given size.
2478    ///
2479    /// Normally, parallel iterators are recursively divided into tasks in parallel.
2480    /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2481    /// of parallel iterators of the given `block_size`.
2482    /// The main application is to obtain better
2483    /// memory locality (especially if the reduce operation re-uses folded data).
2484    ///
2485    /// **Panics** if `block_size` is 0.
2486    ///
2487    /// # Example
2488    /// ```
2489    /// use rayon::prelude::*;
2490    /// // during most reductions, v1 and v2 fit in the cache
2491    /// let v = (0u32..10_000_000)
2492    ///     .into_par_iter()
2493    ///     .by_uniform_blocks(1_000_000)
2494    ///     .fold(Vec::new, |mut v, e| { v.push(e); v})
2495    ///     .reduce(Vec::new, |mut v1, mut v2| { v1.append(&mut v2); v1});
2496    /// assert_eq!(v, (0u32..10_000_000).collect::<Vec<u32>>());
2497    /// ```
2498    #[track_caller]
2499    fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self> {
2500        assert!(block_size != 0, "block_size must not be zero");
2501        UniformBlocks::new(self, block_size)
2502    }
2503
2504    /// Collects the results of the iterator into the specified
2505    /// vector. The vector is always cleared before execution
2506    /// begins. If possible, reusing the vector across calls can lead
2507    /// to better performance since it reuses the same backing buffer.
2508    ///
2509    /// # Examples
2510    ///
2511    /// ```
2512    /// use rayon::prelude::*;
2513    ///
2514    /// // any prior data will be cleared
2515    /// let mut vec = vec![-1, -2, -3];
2516    ///
2517    /// (0..5).into_par_iter()
2518    ///     .collect_into_vec(&mut vec);
2519    ///
2520    /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2521    /// ```
2522    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
2523        collect::collect_into_vec(self, target);
2524    }
2525
2526    /// Unzips the results of the iterator into the specified
2527    /// vectors. The vectors are always cleared before execution
2528    /// begins. If possible, reusing the vectors across calls can lead
2529    /// to better performance since they reuse the same backing buffer.
2530    ///
2531    /// # Examples
2532    ///
2533    /// ```
2534    /// use rayon::prelude::*;
2535    ///
2536    /// // any prior data will be cleared
2537    /// let mut left = vec![42; 10];
2538    /// let mut right = vec![-1; 10];
2539    ///
2540    /// (10..15).into_par_iter()
2541    ///     .enumerate()
2542    ///     .unzip_into_vecs(&mut left, &mut right);
2543    ///
2544    /// assert_eq!(left, [0, 1, 2, 3, 4]);
2545    /// assert_eq!(right, [10, 11, 12, 13, 14]);
2546    /// ```
2547    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2548    where
2549        Self: IndexedParallelIterator<Item = (A, B)>,
2550        A: Send,
2551        B: Send,
2552    {
2553        collect::unzip_into_vecs(self, left, right);
2554    }
2555
2556    /// Iterates over tuples `(A, B)`, where the items `A` are from
2557    /// this iterator and `B` are from the iterator given as argument.
2558    /// Like the `zip` method on ordinary iterators, if the two
2559    /// iterators are of unequal length, you only get the items they
2560    /// have in common.
2561    ///
2562    /// # Examples
2563    ///
2564    /// ```
2565    /// use rayon::prelude::*;
2566    ///
2567    /// let result: Vec<_> = (1..4)
2568    ///     .into_par_iter()
2569    ///     .zip(vec!['a', 'b', 'c'])
2570    ///     .collect();
2571    ///
2572    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2573    /// ```
2574    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2575    where
2576        Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
2577    {
2578        Zip::new(self, zip_op.into_par_iter())
2579    }
2580
2581    /// The same as [`zip`](#method.zip), but requires that both iterators have the same length.
2582    ///
2583    /// # Panics
2584    /// Will panic if `self` and `zip_op` are not the same length.
2585    ///
2586    /// ```should_panic
2587    /// use rayon::prelude::*;
2588    ///
2589    /// let one = [1u8];
2590    /// let two = [2u8, 2];
2591    /// let one_iter = one.par_iter();
2592    /// let two_iter = two.par_iter();
2593    ///
2594    /// // this will panic
2595    /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2596    ///
2597    /// // we should never get here
2598    /// assert_eq!(1, zipped.len());
2599    /// ```
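    ///
    /// With iterators of matching length, it works just like `zip`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let result: Vec<_> = (1..4)
    ///     .into_par_iter()
    ///     .zip_eq(vec!['a', 'b', 'c'])
    ///     .collect();
    ///
    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
    /// ```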
2600    #[track_caller]
2601    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2602    where
2603        Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
2604    {
2605        let zip_op_iter = zip_op.into_par_iter();
2606        assert_eq!(
2607            self.len(),
2608            zip_op_iter.len(),
2609            "iterators must have the same length"
2610        );
2611        ZipEq::new(self, zip_op_iter)
2612    }
2613
2614    /// Interleaves elements of this iterator and the other given
2615    /// iterator. Alternately yields elements from this iterator and
2616    /// the given iterator, until both are exhausted. If one iterator
2617    /// is exhausted before the other, the remaining elements are taken
2618    /// from the other.
2619    ///
2620    /// # Examples
2621    ///
2622    /// ```
2623    /// use rayon::prelude::*;
2624    /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2625    /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2626    /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2627    /// ```
2628    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2629    where
2630        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2631    {
2632        Interleave::new(self, other.into_par_iter())
2633    }
2634
2635    /// Interleaves elements of this iterator and the other given
2636    /// iterator, until one is exhausted.
2637    ///
2638    /// # Examples
2639    ///
2640    /// ```
2641    /// use rayon::prelude::*;
2642    /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2643    /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2644    /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2645    /// ```
2646    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2647    where
2648        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2649    {
2650        InterleaveShortest::new(self, other.into_par_iter())
2651    }
2652
2653    /// Splits an iterator up into fixed-size chunks.
2654    ///
2655    /// Returns an iterator that returns `Vec`s of the given number of elements.
2656    /// If the number of elements in the iterator is not divisible by `chunk_size`,
2657    /// the last chunk may be shorter than `chunk_size`.
2658    ///
2659    /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2660    /// slices, without having to allocate intermediate `Vec`s for the chunks.
2661    ///
2662    /// [`par_chunks()`]: crate::slice::ParallelSlice::par_chunks()
2663    /// [`par_chunks_mut()`]: crate::slice::ParallelSliceMut::par_chunks_mut()
2664    ///
2665    /// **Panics** if `chunk_size` is 0.
2666    ///
2667    /// # Examples
2668    ///
2669    /// ```
2670    /// use rayon::prelude::*;
2671    /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2672    /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2673    /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2674    /// ```
2675    #[track_caller]
2676    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2677        assert!(chunk_size != 0, "chunk_size must not be zero");
2678        Chunks::new(self, chunk_size)
2679    }
2680
2681    /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2682    /// each chunk.
2683    ///
2684    /// Returns an iterator that produces a folded result for each chunk of items
2685    /// produced by this iterator.
2686    ///
2687    /// This works essentially like:
2688    ///
2689    /// ```text
2690    /// iter.chunks(chunk_size)
2691    ///     .map(|chunk|
2692    ///         chunk.into_iter()
2693    ///             .fold(identity, fold_op)
2694    ///     )
2695    /// ```
2696    ///
2697    /// except there is no per-chunk allocation overhead.
2698    ///
2699    /// [`fold()`]: std::iter::Iterator#method.fold
2700    ///
2701    /// **Panics** if `chunk_size` is 0.
2702    ///
2703    /// # Examples
2704    ///
2705    /// ```
2706    /// use rayon::prelude::*;
2707    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2708    /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2709    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2710    /// ```
2711    #[track_caller]
2712    fn fold_chunks<T, ID, F>(
2713        self,
2714        chunk_size: usize,
2715        identity: ID,
2716        fold_op: F,
2717    ) -> FoldChunks<Self, ID, F>
2718    where
2719        ID: Fn() -> T + Send + Sync,
2720        F: Fn(T, Self::Item) -> T + Send + Sync,
2721        T: Send,
2722    {
2723        assert!(chunk_size != 0, "chunk_size must not be zero");
2724        FoldChunks::new(self, chunk_size, identity, fold_op)
2725    }
2726
2727    /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2728    /// each chunk.
2729    ///
2730    /// Returns an iterator that produces a folded result for each chunk of items
2731    /// produced by this iterator.
2732    ///
2733    /// This works essentially like `fold_chunks(chunk_size, || init.clone(), fold_op)`,
2734    /// except it doesn't require the `init` type to be `Sync`, nor any other form of
2735    /// added synchronization.
2736    ///
2737    /// [`fold()`]: std::iter::Iterator#method.fold
2738    ///
2739    /// **Panics** if `chunk_size` is 0.
2740    ///
2741    /// # Examples
2742    ///
2743    /// ```
2744    /// use rayon::prelude::*;
2745    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2746    /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2747    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2748    /// ```
2749    #[track_caller]
2750    fn fold_chunks_with<T, F>(
2751        self,
2752        chunk_size: usize,
2753        init: T,
2754        fold_op: F,
2755    ) -> FoldChunksWith<Self, T, F>
2756    where
2757        T: Send + Clone,
2758        F: Fn(T, Self::Item) -> T + Send + Sync,
2759    {
2760        assert!(chunk_size != 0, "chunk_size must not be zero");
2761        FoldChunksWith::new(self, chunk_size, init, fold_op)
2762    }
2763
2764    /// Lexicographically compares the elements of this `ParallelIterator` with those of
2765    /// another.
2766    ///
2767    /// # Examples
2768    ///
2769    /// ```
2770    /// use rayon::prelude::*;
2771    /// use std::cmp::Ordering::*;
2772    ///
2773    /// let x = vec![1, 2, 3];
2774    /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2775    /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2776    /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2777    /// ```
2778    fn cmp<I>(self, other: I) -> Ordering
2779    where
2780        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2781        Self::Item: Ord,
2782    {
2783        #[inline]
2784        fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2785            Ord::cmp(&x, &y)
2786        }
2787
2788        #[inline]
2789        fn inequal(&ord: &Ordering) -> bool {
2790            ord != Ordering::Equal
2791        }
2792
2793        let other = other.into_par_iter();
2794        let ord_len = self.len().cmp(&other.len());
2795        self.zip(other)
2796            .map(ordering)
2797            .find_first(inequal)
2798            .unwrap_or(ord_len)
2799    }
2800
2801    /// Lexicographically compares the elements of this `ParallelIterator` with those of
2802    /// another.
2803    ///
2804    /// # Examples
2805    ///
2806    /// ```
2807    /// use rayon::prelude::*;
2808    /// use std::cmp::Ordering::*;
2809    ///
2810    /// let x = vec![1.0, 2.0, 3.0];
2811    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2812    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2813    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2814    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, f64::NAN]), None);
2815    /// ```
2816    fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2817    where
2818        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2819        Self::Item: PartialOrd<I::Item>,
2820    {
2821        #[inline]
2822        fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2823            PartialOrd::partial_cmp(&x, &y)
2824        }
2825
2826        #[inline]
2827        fn inequal(&ord: &Option<Ordering>) -> bool {
2828            ord != Some(Ordering::Equal)
2829        }
2830
2831        let other = other.into_par_iter();
2832        let ord_len = self.len().cmp(&other.len());
2833        self.zip(other)
2834            .map(ordering)
2835            .find_first(inequal)
2836            .unwrap_or(Some(ord_len))
2837    }
2838
2839    /// Determines if the elements of this `ParallelIterator`
2840    /// are equal to those of another.
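    ///
    /// # Examples
    ///
    /// For example, one might write:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().eq(&vec![1, 2, 3]));
    /// assert!(!x.par_iter().eq(&vec![1, 2]));
    /// ```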
2841    fn eq<I>(self, other: I) -> bool
2842    where
2843        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2844        Self::Item: PartialEq<I::Item>,
2845    {
2846        #[inline]
2847        fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2848            PartialEq::eq(&x, &y)
2849        }
2850
2851        let other = other.into_par_iter();
2852        self.len() == other.len() && self.zip(other).all(eq)
2853    }
2854
2855    /// Determines if the elements of this `ParallelIterator`
2856    /// are unequal to those of another.
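    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().ne(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().ne(&vec![1, 2, 3]));
    /// ```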
2857    fn ne<I>(self, other: I) -> bool
2858    where
2859        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2860        Self::Item: PartialEq<I::Item>,
2861    {
2862        !self.eq(other)
2863    }
2864
2865    /// Determines if the elements of this `ParallelIterator`
2866    /// are lexicographically less than those of another.
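    ///
    /// # Examples
    ///
    /// The comparison is lexicographic, so one might write:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().lt(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().lt(&vec![1, 2, 3]));
    /// ```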
2867    fn lt<I>(self, other: I) -> bool
2868    where
2869        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2870        Self::Item: PartialOrd<I::Item>,
2871    {
2872        self.partial_cmp(other) == Some(Ordering::Less)
2873    }
2874
2875    /// Determines if the elements of this `ParallelIterator`
2876    /// are less than or equal to those of another.
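    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().le(&vec![1, 2, 3]));
    /// assert!(!x.par_iter().le(&vec![1, 2]));
    /// ```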
2877    fn le<I>(self, other: I) -> bool
2878    where
2879        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2880        Self::Item: PartialOrd<I::Item>,
2881    {
2882        let ord = self.partial_cmp(other);
2883        ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2884    }
2885
2886    /// Determines if the elements of this `ParallelIterator`
2887    /// are lexicographically greater than those of another.
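    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().gt(&vec![1, 2]));
    /// assert!(!x.par_iter().gt(&vec![1, 2, 3]));
    /// ```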
2888    fn gt<I>(self, other: I) -> bool
2889    where
2890        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2891        Self::Item: PartialOrd<I::Item>,
2892    {
2893        self.partial_cmp(other) == Some(Ordering::Greater)
2894    }
2895
2896    /// Determines if the elements of this `ParallelIterator`
2897    /// are greater than or equal to those of another.
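    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().ge(&vec![1, 2, 3]));
    /// assert!(!x.par_iter().ge(&vec![2, 0, 0]));
    /// ```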
2898    fn ge<I>(self, other: I) -> bool
2899    where
2900        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2901        Self::Item: PartialOrd<I::Item>,
2902    {
2903        let ord = self.partial_cmp(other);
2904        ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2905    }
2906
2907    /// Yields an index along with each item.
2908    ///
2909    /// # Examples
2910    ///
2911    /// ```
2912    /// use rayon::prelude::*;
2913    ///
2914    /// let chars = vec!['a', 'b', 'c'];
2915    /// let result: Vec<_> = chars
2916    ///     .into_par_iter()
2917    ///     .enumerate()
2918    ///     .collect();
2919    ///
2920    /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2921    /// ```
2922    fn enumerate(self) -> Enumerate<Self> {
2923        Enumerate::new(self)
2924    }
2925
2926    /// Creates an iterator that steps by the given amount.
2927    ///
2928    /// # Examples
2929    ///
2930    /// ```
2931    /// use rayon::prelude::*;
2932    ///
2933    /// let range = 3..10;
2934    /// let result: Vec<i32> = range
2935    ///     .into_par_iter()
2936    ///     .step_by(3)
2937    ///     .collect();
2938    ///
2939    /// assert_eq!(result, [3, 6, 9]);
2940    /// ```
2941    fn step_by(self, step: usize) -> StepBy<Self> {
2942        StepBy::new(self, step)
2943    }
2944
2945    /// Creates an iterator that skips the first `n` elements.
2946    ///
2947    /// # Examples
2948    ///
2949    /// ```
2950    /// use rayon::prelude::*;
2951    ///
2952    /// let result: Vec<_> = (0..100)
2953    ///     .into_par_iter()
2954    ///     .skip(95)
2955    ///     .collect();
2956    ///
2957    /// assert_eq!(result, [95, 96, 97, 98, 99]);
2958    /// ```
2959    fn skip(self, n: usize) -> Skip<Self> {
2960        Skip::new(self, n)
2961    }
2962
2963    /// Creates an iterator that yields the first `n` elements.
2964    ///
2965    /// # Examples
2966    ///
2967    /// ```
2968    /// use rayon::prelude::*;
2969    ///
2970    /// let result: Vec<_> = (0..100)
2971    ///     .into_par_iter()
2972    ///     .take(5)
2973    ///     .collect();
2974    ///
2975    /// assert_eq!(result, [0, 1, 2, 3, 4]);
2976    /// ```
2977    fn take(self, n: usize) -> Take<Self> {
2978        Take::new(self, n)
2979    }
2980
2981    /// Searches for **some** item in the parallel iterator that
2982    /// matches the given predicate, and returns its index.  Like
2983    /// `ParallelIterator::find_any`, the parallel search will not
2984    /// necessarily find the **first** match, and once a match is
2985    /// found we'll attempt to stop processing any more.
2986    ///
2987    /// # Examples
2988    ///
2989    /// ```
2990    /// use rayon::prelude::*;
2991    ///
2992    /// let a = [1, 2, 3, 3];
2993    ///
2994    /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2995    /// assert!(i == 2 || i == 3);
2996    ///
2997    /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2998    /// ```
2999    fn position_any<P>(self, predicate: P) -> Option<usize>
3000    where
3001        P: Fn(Self::Item) -> bool + Sync + Send,
3002    {
3003        #[inline]
3004        fn check(&(_, p): &(usize, bool)) -> bool {
3005            p
3006        }
3007
3008        let (i, _) = self.map(predicate).enumerate().find_any(check)?;
3009        Some(i)
3010    }
3011
3012    /// Searches for the sequentially **first** item in the parallel iterator
3013    /// that matches the given predicate, and returns its index.
3014    ///
3015    /// Like `ParallelIterator::find_first`, once a match is found,
3016    /// all attempts to the right of the match will be stopped, while
3017    /// attempts to the left must continue in case an earlier match
3018    /// is found.
3019    ///
3020    /// Note that not all parallel iterators have a useful order, much like
3021    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
3022    /// just want the first match discovered anywhere in the iterator,
3023    /// `position_any` is a better choice.
3024    ///
3025    /// # Examples
3026    ///
3027    /// ```
3028    /// use rayon::prelude::*;
3029    ///
3030    /// let a = [1, 2, 3, 3];
3031    ///
3032    /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
3033    ///
3034    /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
3035    /// ```
3036    fn position_first<P>(self, predicate: P) -> Option<usize>
3037    where
3038        P: Fn(Self::Item) -> bool + Sync + Send,
3039    {
3040        #[inline]
3041        fn check(&(_, p): &(usize, bool)) -> bool {
3042            p
3043        }
3044
3045        let (i, _) = self.map(predicate).enumerate().find_first(check)?;
3046        Some(i)
3047    }
3048
3049    /// Searches for the sequentially **last** item in the parallel iterator
3050    /// that matches the given predicate, and returns its index.
3051    ///
3052    /// Like `ParallelIterator::find_last`, once a match is found,
3053    /// all attempts to the left of the match will be stopped, while
3054    /// attempts to the right must continue in case a later match
3055    /// is found.
3056    ///
3057    /// Note that not all parallel iterators have a useful order, much like
3058    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
3059    /// order doesn't actually matter to you, `position_any` is a better
3060    /// choice.
3061    ///
3062    /// # Examples
3063    ///
3064    /// ```
3065    /// use rayon::prelude::*;
3066    ///
3067    /// let a = [1, 2, 3, 3];
3068    ///
3069    /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
3070    ///
3071    /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
3072    /// ```
3073    fn position_last<P>(self, predicate: P) -> Option<usize>
3074    where
3075        P: Fn(Self::Item) -> bool + Sync + Send,
3076    {
3077        #[inline]
3078        fn check(&(_, p): &(usize, bool)) -> bool {
3079            p
3080        }
3081
3082        let (i, _) = self.map(predicate).enumerate().find_last(check)?;
3083        Some(i)
3084    }
3085
3086    #[doc(hidden)]
3087    #[deprecated(
3088        note = "parallel `position` does not search in order -- use `position_any`, \
3089                `position_first`, or `position_last`"
3090    )]
3091    fn position<P>(self, predicate: P) -> Option<usize>
3092    where
3093        P: Fn(Self::Item) -> bool + Sync + Send,
3094    {
3095        self.position_any(predicate)
3096    }
3097
3098    /// Searches for items in the parallel iterator that match the given
3099    /// predicate, and returns their indices.
3100    ///
3101    /// # Examples
3102    ///
3103    /// ```
3104    /// use rayon::prelude::*;
3105    ///
3106    /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
3107    ///
3108    /// // Find the positions of primes congruent to 1 modulo 6
3109    /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
3110    /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
3111    ///
3112    /// // Find the positions of primes congruent to 5 modulo 6
3113    /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
3114    /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
3115    /// ```
3116    fn positions<P>(self, predicate: P) -> Positions<Self, P>
3117    where
3118        P: Fn(Self::Item) -> bool + Sync + Send,
3119    {
3120        Positions::new(self, predicate)
3121    }
3122
3123    /// Produces a new iterator with the elements of this iterator in
3124    /// reverse order.
3125    ///
3126    /// # Examples
3127    ///
3128    /// ```
3129    /// use rayon::prelude::*;
3130    ///
3131    /// let result: Vec<_> = (0..5)
3132    ///     .into_par_iter()
3133    ///     .rev()
3134    ///     .collect();
3135    ///
3136    /// assert_eq!(result, [4, 3, 2, 1, 0]);
3137    /// ```
3138    fn rev(self) -> Rev<Self> {
3139        Rev::new(self)
3140    }
3141
3142    /// Sets the minimum length of iterators desired to process in each
3143    /// rayon job.  Rayon will not split any smaller than this length, but
3144    /// of course an iterator could already be smaller to begin with.
3145    ///
3146    /// Producers like `zip` and `interleave` will use the greater of the two
3147    /// minimums.
3148    /// Chained iterators and iterators inside `flat_map` may each use
3149    /// their own minimum length.
3150    ///
3151    /// # Examples
3152    ///
3153    /// ```
3154    /// use rayon::prelude::*;
3155    ///
3156    /// let min = (0..1_000_000)
3157    ///     .into_par_iter()
3158    ///     .with_min_len(1234)
3159    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3160    ///     .min().unwrap();
3161    ///
3162    /// assert!(min >= 1234);
3163    /// ```
3164    fn with_min_len(self, min: usize) -> MinLen<Self> {
3165        MinLen::new(self, min)
3166    }
3167
3168    /// Sets the maximum length of iterators desired to process in each
3169    /// rayon job.  Rayon will try to split at least below this length,
3170    /// unless that would put it below the length from `with_min_len()`.
3171    /// For example, given min=10 and max=15, a length of 16 will not be
3172    /// split any further.
3173    ///
3174    /// Producers like `zip` and `interleave` will use the lesser of the two
3175    /// maximums.
3176    /// Chained iterators and iterators inside `flat_map` may each use
3177    /// their own maximum length.
3178    ///
3179    /// # Examples
3180    ///
3181    /// ```
3182    /// use rayon::prelude::*;
3183    ///
3184    /// let max = (0..1_000_000)
3185    ///     .into_par_iter()
3186    ///     .with_max_len(1234)
3187    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3188    ///     .max().unwrap();
3189    ///
3190    /// assert!(max <= 1234);
3191    /// ```
3192    fn with_max_len(self, max: usize) -> MaxLen<Self> {
3193        MaxLen::new(self, max)
3194    }
3195
3196    /// Produces an exact count of how many items this iterator will
3197    /// produce, presuming no panic occurs.
3198    ///
3199    /// # Examples
3200    ///
3201    /// ```
3202    /// use rayon::prelude::*;
3203    ///
3204    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
3205    /// assert_eq!(par_iter.len(), 10);
3206    ///
3207    /// let vec: Vec<_> = par_iter.collect();
3208    /// assert_eq!(vec.len(), 10);
3209    /// ```
3210    fn len(&self) -> usize;
3211
3212    /// Internal method used to define the behavior of this parallel
3213    /// iterator. You should not need to call this directly.
3214    ///
3215    /// This method causes the iterator `self` to start producing
3216    /// items and to feed them to the consumer `consumer` one by one.
3217    /// It may split the consumer before doing so to create the
3218    /// opportunity to produce in parallel. If a split does happen, it
3219    /// will inform the consumer of the index where the split should
3220    /// occur (unlike `ParallelIterator::drive_unindexed()`).
3221    ///
3222    /// See the [README] for more details on the internals of parallel
3223    /// iterators.
3224    ///
3225    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
3226    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3227
3228    /// Internal method used to define the behavior of this parallel
3229    /// iterator. You should not need to call this directly.
3230    ///
3231    /// This method converts the iterator into a producer P and then
3232    /// invokes `callback.callback()` with P. Note that the type of
3233    /// this producer is not defined as part of the API, since
3234    /// `callback` must be defined generically for all producers. This
3235    /// allows the producer type to contain references; it also means
3236    /// that parallel iterators can adjust that type without causing a
3237    /// breaking change.
3238    ///
3239    /// See the [README] for more details on the internals of parallel
3240    /// iterators.
3241    ///
3242    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
3243    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3244}
3245
3246/// `FromParallelIterator` implements the creation of a collection
3247/// from a [`ParallelIterator`]. By implementing
3248/// `FromParallelIterator` for a given type, you define how it will be
3249/// created from an iterator.
3250///
3251/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
3252///
3253/// [`collect()`]: ParallelIterator::collect()
3254///
3255/// # Examples
3256///
3257/// Implementing `FromParallelIterator` for your type:
3258///
3259/// ```
3260/// use rayon::prelude::*;
3261///
3262/// struct BlackHole {
3263///     mass: usize,
3264/// }
3265///
3266/// impl<T: Send> FromParallelIterator<T> for BlackHole {
3267///     fn from_par_iter<I>(par_iter: I) -> Self
3268///         where I: IntoParallelIterator<Item = T>
3269///     {
3270///         let par_iter = par_iter.into_par_iter();
3271///         BlackHole {
3272///             mass: par_iter.count() * size_of::<T>(),
3273///         }
3274///     }
3275/// }
3276///
3277/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
3278/// assert_eq!(bh.mass, 4000);
3279/// ```
3280pub trait FromParallelIterator<T>
3281where
3282    T: Send,
3283{
3284    /// Creates an instance of the collection from the parallel iterator `par_iter`.
3285    ///
3286    /// If your collection is not naturally parallel, the easiest (and
3287    /// fastest) way to do this is often to collect `par_iter` into a
3288    /// [`LinkedList`] (via [`collect_vec_list`]) or another intermediate
3289    /// data structure and then sequentially extend your collection. However,
3290    /// a more 'native' technique is to use the [`par_iter.fold`] or
3291    /// [`par_iter.fold_with`] methods to create the collection.
3292    /// Alternatively, if your collection is 'natively' parallel, you
3293    /// can use [`par_iter.for_each`] to process each element in turn.
3294    ///
3295    /// [`LinkedList`]: std::collections::LinkedList
3296    /// [`collect_vec_list`]: ParallelIterator::collect_vec_list
3297    /// [`par_iter.fold`]: ParallelIterator::fold()
3298    /// [`par_iter.fold_with`]: ParallelIterator::fold_with()
3299    /// [`par_iter.for_each`]: ParallelIterator::for_each()
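    ///
    /// # Examples
    ///
    /// A minimal sketch of the "collect and then sequentially extend" technique
    /// described above, using a hypothetical `Sorted` newtype (not part of rayon)
    /// that gathers the chunks from [`collect_vec_list`] into one sorted `Vec`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// struct Sorted(Vec<i32>);
    ///
    /// impl FromParallelIterator<i32> for Sorted {
    ///     fn from_par_iter<I>(par_iter: I) -> Self
    ///         where I: IntoParallelIterator<Item = i32>
    ///     {
    ///         // Collect in parallel into a `LinkedList<Vec<i32>>`, then
    ///         // sequentially extend a single `Vec` with each chunk.
    ///         let list = par_iter.into_par_iter().collect_vec_list();
    ///         let mut vec = Vec::new();
    ///         for chunk in list {
    ///             vec.extend(chunk);
    ///         }
    ///         vec.sort_unstable();
    ///         Sorted(vec)
    ///     }
    /// }
    ///
    /// let sorted: Sorted = (0..10).into_par_iter().rev().collect();
    /// assert_eq!(sorted.0, (0..10).collect::<Vec<_>>());
    /// ```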
3300    fn from_par_iter<I>(par_iter: I) -> Self
3301    where
3302        I: IntoParallelIterator<Item = T>;
3303}
3304
3305/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
3306///
3307/// # Examples
3308///
3309/// Implementing `ParallelExtend` for your type:
3310///
3311/// ```
3312/// use rayon::prelude::*;
3313///
3314/// struct BlackHole {
3315///     mass: usize,
3316/// }
3317///
3318/// impl<T: Send> ParallelExtend<T> for BlackHole {
3319///     fn par_extend<I>(&mut self, par_iter: I)
3320///         where I: IntoParallelIterator<Item = T>
3321///     {
3322///         let par_iter = par_iter.into_par_iter();
3323///         self.mass += par_iter.count() * size_of::<T>();
3324///     }
3325/// }
3326///
3327/// let mut bh = BlackHole { mass: 0 };
3328/// bh.par_extend(0i32..1000);
3329/// assert_eq!(bh.mass, 4000);
3330/// bh.par_extend(0i64..10);
3331/// assert_eq!(bh.mass, 4080);
3332/// ```
3333pub trait ParallelExtend<T>
3334where
3335    T: Send,
3336{
3337    /// Extends an instance of the collection with the elements drawn
3338    /// from the parallel iterator `par_iter`.
3339    ///
3340    /// # Examples
3341    ///
3342    /// ```
3343    /// use rayon::prelude::*;
3344    ///
3345    /// let mut vec = vec![];
3346    /// vec.par_extend(0..5);
3347    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
3348    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
3349    /// ```
3350    fn par_extend<I>(&mut self, par_iter: I)
3351    where
3352        I: IntoParallelIterator<Item = T>;
3353}
3354
3355/// `ParallelDrainFull` creates a parallel iterator that moves all items
3356/// from a collection while retaining the original capacity.
3357///
3358/// Types which are indexable typically implement [`ParallelDrainRange`]
3359/// instead, where you can drain fully with `par_drain(..)`.
3360pub trait ParallelDrainFull {
3361    /// The draining parallel iterator type that will be created.
3362    type Iter: ParallelIterator<Item = Self::Item>;
3363
3364    /// The type of item that the parallel iterator will produce.
3365    /// This is usually the same as `IntoParallelIterator::Item`.
3366    type Item: Send;
3367
3368    /// Returns a draining parallel iterator over an entire collection.
3369    ///
3370    /// When the iterator is dropped, all items are removed, even if the
3371    /// iterator was not fully consumed. If the iterator is leaked, for example
3372    /// using `std::mem::forget`, it is unspecified how many items are removed.
3373    ///
3374    /// # Examples
3375    ///
3376    /// ```
3377    /// use rayon::prelude::*;
3378    /// use std::collections::{BinaryHeap, HashSet};
3379    ///
3380    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
3381    ///
3382    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
3383    /// assert_eq!(
3384    ///     // heaps are drained in arbitrary order
3385    ///     heap.par_drain()
3386    ///         .inspect(|x| assert!(squares.contains(x)))
3387    ///         .count(),
3388    ///     squares.len(),
3389    /// );
3390    /// assert!(heap.is_empty());
3391    /// assert!(heap.capacity() >= squares.len());
3392    /// ```
3393    fn par_drain(self) -> Self::Iter;
3394}
3395
3396/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
3397/// from a collection while retaining the original capacity.
3398///
3399/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
3400pub trait ParallelDrainRange<Idx = usize> {
3401    /// The draining parallel iterator type that will be created.
3402    type Iter: ParallelIterator<Item = Self::Item>;
3403
3404    /// The type of item that the parallel iterator will produce.
3405    /// This is usually the same as `IntoParallelIterator::Item`.
3406    type Item: Send;
3407
3408    /// Returns a draining parallel iterator over a range of the collection.
3409    ///
3410    /// When the iterator is dropped, all items in the range are removed, even
3411    /// if the iterator was not fully consumed. If the iterator is leaked, for
3412    /// example using `std::mem::forget`, it is unspecified how many items are
3413    /// removed.
3414    ///
3415    /// # Examples
3416    ///
3417    /// ```
3418    /// use rayon::prelude::*;
3419    ///
3420    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
3421    ///
3422    /// println!("RangeFull");
3423    /// let mut vec = squares.clone();
3424    /// assert!(vec.par_drain(..)
3425    ///            .eq(squares.par_iter().copied()));
3426    /// assert!(vec.is_empty());
3427    /// assert!(vec.capacity() >= squares.len());
3428    ///
3429    /// println!("RangeFrom");
3430    /// let mut vec = squares.clone();
3431    /// assert!(vec.par_drain(5..)
3432    ///            .eq(squares[5..].par_iter().copied()));
3433    /// assert_eq!(&vec[..], &squares[..5]);
3434    /// assert!(vec.capacity() >= squares.len());
3435    ///
3436    /// println!("RangeTo");
3437    /// let mut vec = squares.clone();
3438    /// assert!(vec.par_drain(..5)
3439    ///            .eq(squares[..5].par_iter().copied()));
3440    /// assert_eq!(&vec[..], &squares[5..]);
3441    /// assert!(vec.capacity() >= squares.len());
3442    ///
3443    /// println!("RangeToInclusive");
3444    /// let mut vec = squares.clone();
3445    /// assert!(vec.par_drain(..=5)
3446    ///            .eq(squares[..=5].par_iter().copied()));
3447    /// assert_eq!(&vec[..], &squares[6..]);
3448    /// assert!(vec.capacity() >= squares.len());
3449    ///
3450    /// println!("Range");
3451    /// let mut vec = squares.clone();
3452    /// assert!(vec.par_drain(3..7)
3453    ///            .eq(squares[3..7].par_iter().copied()));
3454    /// assert_eq!(&vec[..3], &squares[..3]);
3455    /// assert_eq!(&vec[3..], &squares[7..]);
3456    /// assert!(vec.capacity() >= squares.len());
3457    ///
3458    /// println!("RangeInclusive");
3459    /// let mut vec = squares.clone();
3460    /// assert!(vec.par_drain(3..=7)
3461    ///            .eq(squares[3..=7].par_iter().copied()));
3462    /// assert_eq!(&vec[..3], &squares[..3]);
3463    /// assert_eq!(&vec[3..], &squares[8..]);
3464    /// assert!(vec.capacity() >= squares.len());
3465    /// ```
3466    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
3467}
3468
3469/// We hide the `Try` trait in a private module, as it's only meant to be a
3470/// stable clone of the standard library's `Try` trait, as yet unstable.
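///
/// Although the trait itself is private, the `branch` impls below are what give
/// the fallible combinators their early-exit behavior. A minimal illustration,
/// using the public `try_for_each` combinator:
///
/// ```
/// use rayon::prelude::*;
///
/// // An `Err` residual ends the parallel loop early and is returned.
/// let res: Result<(), i32> = (0..1_000)
///     .into_par_iter()
///     .try_for_each(|x| if x == 42 { Err(x) } else { Ok(()) });
/// assert_eq!(res, Err(42));
///
/// // `None` plays the same role for `Option`.
/// let res = (0..1_000)
///     .into_par_iter()
///     .try_for_each(|x| if x == 42 { None } else { Some(()) });
/// assert_eq!(res, None);
/// ```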
3471mod private {
3472    use std::convert::Infallible;
3473    use std::ops::ControlFlow::{self, Break, Continue};
3474    use std::task::Poll;
3475
3476    /// Clone of `std::ops::Try`.
3477    ///
3478    /// Implementing this trait is not permitted outside of `rayon`.
3479    pub trait Try {
3480        private_decl! {}
3481
3482        type Output;
3483        type Residual;
3484
3485        fn from_output(output: Self::Output) -> Self;
3486
3487        fn from_residual(residual: Self::Residual) -> Self;
3488
3489        fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
3490    }
3491
3492    impl<B, C> Try for ControlFlow<B, C> {
3493        private_impl! {}
3494
3495        type Output = C;
3496        type Residual = ControlFlow<B, Infallible>;
3497
3498        fn from_output(output: Self::Output) -> Self {
3499            Continue(output)
3500        }
3501
3502        fn from_residual(residual: Self::Residual) -> Self {
3503            match residual {
3504                Break(b) => Break(b),
3505                #[allow(unreachable_patterns)]
3506                Continue(_) => unreachable!(),
3507            }
3508        }
3509
3510        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3511            match self {
3512                Continue(c) => Continue(c),
3513                Break(b) => Break(Break(b)),
3514            }
3515        }
3516    }
3517
3518    impl<T> Try for Option<T> {
3519        private_impl! {}
3520
3521        type Output = T;
3522        type Residual = Option<Infallible>;
3523
3524        fn from_output(output: Self::Output) -> Self {
3525            Some(output)
3526        }
3527
3528        fn from_residual(residual: Self::Residual) -> Self {
3529            match residual {
3530                None => None,
3531                #[allow(unreachable_patterns)]
3532                Some(_) => unreachable!(),
3533            }
3534        }
3535
3536        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3537            match self {
3538                Some(c) => Continue(c),
3539                None => Break(None),
3540            }
3541        }
3542    }
3543
3544    impl<T, E> Try for Result<T, E> {
3545        private_impl! {}
3546
3547        type Output = T;
3548        type Residual = Result<Infallible, E>;
3549
3550        fn from_output(output: Self::Output) -> Self {
3551            Ok(output)
3552        }
3553
3554        fn from_residual(residual: Self::Residual) -> Self {
3555            match residual {
3556                Err(e) => Err(e),
3557                #[allow(unreachable_patterns)]
3558                Ok(_) => unreachable!(),
3559            }
3560        }
3561
3562        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3563            match self {
3564                Ok(c) => Continue(c),
3565                Err(e) => Break(Err(e)),
3566            }
3567        }
3568    }
3569
3570    impl<T, E> Try for Poll<Result<T, E>> {
3571        private_impl! {}
3572
3573        type Output = Poll<T>;
3574        type Residual = Result<Infallible, E>;
3575
3576        fn from_output(output: Self::Output) -> Self {
3577            output.map(Ok)
3578        }
3579
3580        fn from_residual(residual: Self::Residual) -> Self {
3581            match residual {
3582                Err(e) => Poll::Ready(Err(e)),
3583                #[allow(unreachable_patterns)]
3584                Ok(_) => unreachable!(),
3585            }
3586        }
3587
3588        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3589            match self {
3590                Poll::Pending => Continue(Poll::Pending),
3591                Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
3592                Poll::Ready(Err(e)) => Break(Err(e)),
3593            }
3594        }
3595    }
3596
3597    impl<T, E> Try for Poll<Option<Result<T, E>>> {
3598        private_impl! {}
3599
3600        type Output = Poll<Option<T>>;
3601        type Residual = Result<Infallible, E>;
3602
3603        fn from_output(output: Self::Output) -> Self {
3604            match output {
3605                Poll::Ready(o) => Poll::Ready(o.map(Ok)),
3606                Poll::Pending => Poll::Pending,
3607            }
3608        }
3609
3610        fn from_residual(residual: Self::Residual) -> Self {
3611            match residual {
3612                Err(e) => Poll::Ready(Some(Err(e))),
3613                #[allow(unreachable_patterns)]
3614                Ok(_) => unreachable!(),
3615            }
3616        }
3617
3618        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3619            match self {
3620                Poll::Pending => Continue(Poll::Pending),
3621                Poll::Ready(None) => Continue(Poll::Ready(None)),
3622                Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
3623                Poll::Ready(Some(Err(e))) => Break(Err(e)),
3624            }
3625        }
3626    }
3627}