Trait rayon::par_iter::ParallelIterator
pub trait ParallelIterator: Sized {
    type Item: Send;

    fn weight(self, scale: f64) -> Weight<Self> { ... }
    fn weight_max(self) -> Weight<Self> { ... }
    fn for_each<OP>(self, op: OP) where OP: Fn(Self::Item) + Sync { ... }
    fn count(self) -> usize { ... }
    fn map<MAP_OP, R>(self, map_op: MAP_OP) -> Map<Self, MapFn<MAP_OP>>
        where MAP_OP: Fn(Self::Item) -> R + Sync { ... }
    fn cloned<'a, T>(self) -> Map<Self, MapCloned>
        where T: 'a + Clone, Self: ParallelIterator<Item=&'a T> { ... }
    fn inspect<INSPECT_OP>(self, inspect_op: INSPECT_OP) -> Map<Self, MapInspect<INSPECT_OP>>
        where INSPECT_OP: Fn(&Self::Item) + Sync { ... }
    fn filter<FILTER_OP>(self, filter_op: FILTER_OP) -> Filter<Self, FILTER_OP>
        where FILTER_OP: Fn(&Self::Item) -> bool + Sync { ... }
    fn filter_map<FILTER_OP, R>(self, filter_op: FILTER_OP) -> FilterMap<Self, FILTER_OP>
        where FILTER_OP: Fn(Self::Item) -> Option<R> + Sync { ... }
    fn flat_map<MAP_OP, PI>(self, map_op: MAP_OP) -> FlatMap<Self, MAP_OP>
        where MAP_OP: Fn(Self::Item) -> PI + Sync, PI: IntoParallelIterator { ... }
    fn reduce<OP, IDENTITY>(self, identity: IDENTITY, op: OP) -> Self::Item
        where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync,
              IDENTITY: Fn() -> Self::Item + Sync { ... }
    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
        where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync { ... }
    fn reduce_with_identity<OP>(self, identity: Self::Item, op: OP) -> Self::Item
        where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync,
              Self::Item: Clone + Sync { ... }
    fn fold<IDENTITY_ITEM, IDENTITY, FOLD_OP>(self, identity: IDENTITY, fold_op: FOLD_OP) -> Fold<Self, IDENTITY, FOLD_OP>
        where FOLD_OP: Fn(IDENTITY_ITEM, Self::Item) -> IDENTITY_ITEM + Sync,
              IDENTITY: Fn() -> IDENTITY_ITEM + Sync,
              IDENTITY_ITEM: Send { ... }
    fn sum(self) -> Self::Item where SumOp: ReduceOp<Self::Item> { ... }
    fn product(self) -> Self::Item where ProductOp: ReduceOp<Self::Item> { ... }
    fn mul(self) -> Self::Item where ProductOp: ReduceOp<Self::Item> { ... }
    fn min(self) -> Option<Self::Item> where Self::Item: Ord { ... }
    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
        where K: Ord + Send, F: Sync + Fn(&Self::Item) -> K { ... }
    fn max(self) -> Option<Self::Item> where Self::Item: Ord { ... }
    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
        where K: Ord + Send, F: Sync + Fn(&Self::Item) -> K { ... }
    fn chain<CHAIN>(self, chain: CHAIN) -> ChainIter<Self, CHAIN::Iter>
        where CHAIN: IntoParallelIterator<Item=Self::Item> { ... }
    fn find_any<FIND_OP>(self, predicate: FIND_OP) -> Option<Self::Item>
        where FIND_OP: Fn(&Self::Item) -> bool + Sync { ... }
    fn any<ANY_OP>(self, predicate: ANY_OP) -> bool
        where ANY_OP: Fn(Self::Item) -> bool + Sync { ... }
    fn all<ALL_OP>(self, predicate: ALL_OP) -> bool
        where ALL_OP: Fn(Self::Item) -> bool + Sync { ... }
    fn collect<C>(self) -> C where C: FromParallelIterator<Self::Item> { ... }
}
The ParallelIterator interface.
Associated Types
type Item: Send
Provided Methods
fn weight(self, scale: f64) -> Weight<Self>
Indicates the relative "weight" of producing each item in this parallel iterator. A higher weight will cause finer-grained parallel subtasks. 1.0 indicates something very cheap and uniform, like copying a value out of an array, or computing x + 1. If your tasks are either very expensive, or very unpredictable, you are better off with higher values. See also weight_max, which is a convenient shorthand to force the finest grained parallel execution possible. Tuning this value should not affect correctness but can improve (or hurt) performance.
fn weight_max(self) -> Weight<Self>
Shorthand for self.weight(f64::INFINITY). This forces the smallest granularity of parallel execution, which makes sense when your parallel tasks are (potentially) very expensive to execute.
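For illustration, a minimal sketch (not from the original docs) of requesting the finest splitting when each item is assumed to be expensive:

use rayon::prelude::*;
// Each item stands in for an expensive computation, so ask rayon
// for the finest-grained splitting it will do.
[10, 20, 30].par_iter()
    .weight_max()
    .for_each(|x| {
        // placeholder for the costly per-item work
        println!("processing {}", x);
    });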
fn for_each<OP>(self, op: OP) where OP: Fn(Self::Item) + Sync
Executes OP on each item produced by the iterator, in parallel.
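For example (an illustrative sketch, assuming the rayon prelude as in the other examples):

use rayon::prelude::*;
// Items are processed in parallel, so the print order is not deterministic.
[1, 2, 3, 4].par_iter()
    .for_each(|x| println!("got {}", x));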
fn count(self) -> usize
Counts the number of items in this parallel iterator.
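For example (illustrative sketch):

use rayon::prelude::*;
assert_eq!((0..100).into_par_iter().count(), 100);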
fn map<MAP_OP, R>(self, map_op: MAP_OP) -> Map<Self, MapFn<MAP_OP>> where MAP_OP: Fn(Self::Item) -> R + Sync
Applies map_op to each item of this iterator, producing a new iterator with the results.
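For example, an illustrative sketch that maps and then sums:

use rayon::prelude::*;
// Square each value in parallel, then sum the results.
let sum_of_squares: i32 = [1, 2, 3, 4].par_iter()
    .map(|&x| x * x)
    .sum();
assert_eq!(sum_of_squares, 1 + 4 + 9 + 16);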
fn cloned<'a, T>(self) -> Map<Self, MapCloned> where T: 'a + Clone, Self: ParallelIterator<Item=&'a T>
Creates an iterator which clones all of its elements. This may be useful when you have an iterator over &T, but you need T.
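For example (illustrative sketch):

use rayon::prelude::*;
let v = vec![1, 2, 3];
// `par_iter()` yields `&i32`; `cloned()` turns that into owned `i32`.
let total: i32 = v.par_iter().cloned().sum();
assert_eq!(total, 6);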
fn inspect<INSPECT_OP>(self, inspect_op: INSPECT_OP) -> Map<Self, MapInspect<INSPECT_OP>> where INSPECT_OP: Fn(&Self::Item) + Sync
Applies inspect_op to a reference to each item of this iterator, producing a new iterator passing through the original items. This is often useful for debugging to see what's happening in iterator stages.
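For example (illustrative sketch; the closure only observes each item):

use rayon::prelude::*;
let total: i32 = [1, 2, 3].par_iter()
    .cloned()
    .inspect(|x| println!("about to add: {}", x)) // side effect only
    .sum();
assert_eq!(total, 6);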
fn filter<FILTER_OP>(self, filter_op: FILTER_OP) -> Filter<Self, FILTER_OP> where FILTER_OP: Fn(&Self::Item) -> bool + Sync
Applies filter_op to each item of this iterator, producing a new iterator with only the items that gave true results.
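For example (illustrative sketch):

use rayon::prelude::*;
let even_sum: i32 = (0..10).into_par_iter()
    .filter(|&x| x % 2 == 0)
    .sum();
assert_eq!(even_sum, 0 + 2 + 4 + 6 + 8);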
fn filter_map<FILTER_OP, R>(self, filter_op: FILTER_OP) -> FilterMap<Self, FILTER_OP> where FILTER_OP: Fn(Self::Item) -> Option<R> + Sync
Applies filter_op to each item of this iterator to get an Option, producing a new iterator with only the items from Some results.
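For example, an illustrative sketch that keeps only the strings that parse as integers:

use rayon::prelude::*;
let parsed_sum: i32 = ["1", "two", "3"].par_iter()
    .filter_map(|s| s.parse::<i32>().ok())
    .sum();
assert_eq!(parsed_sum, 1 + 3);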
fn flat_map<MAP_OP, PI>(self, map_op: MAP_OP) -> FlatMap<Self, MAP_OP> where MAP_OP: Fn(Self::Item) -> PI + Sync, PI: IntoParallelIterator
Applies map_op to each item of this iterator to get nested iterators, producing a new iterator that flattens these back into one.
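For example (illustrative sketch; each inner Vec becomes a nested parallel iterator):

use rayon::prelude::*;
let groups = vec![vec![1, 2], vec![3, 4, 5]];
let total: i32 = groups.into_par_iter()
    .flat_map(|v| v) // Vec<i32> implements IntoParallelIterator
    .sum();
assert_eq!(total, 15);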
fn reduce<OP, IDENTITY>(self, identity: IDENTITY, op: OP) -> Self::Item where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync, IDENTITY: Fn() -> Self::Item + Sync
Reduces the items in the iterator into one item using op.
The argument identity should be a closure that can produce an "identity" value which may be inserted into the sequence as needed to create opportunities for parallel execution. So, for example, if you are doing a summation, then identity() ought to produce something that represents the zero for your type (but consider just calling sum() in that case).
Example:
// Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
// and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
// where the first/second elements are summed separately.
use rayon::prelude::*;
let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
           .par_iter()        // iterating over &(i32, i32)
           .cloned()          // iterating over (i32, i32)
           .reduce(|| (0, 0), // the "identity" is 0 in both columns
                   |a, b| (a.0 + b.0, a.1 + b.1));
assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
Note: unlike a sequential fold operation, the order in which op will be applied to reduce the result is not fully specified. So op should be associative or else the results will be non-deterministic. And of course identity() should produce a true identity.
fn reduce_with<OP>(self, op: OP) -> Option<Self::Item> where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync
Reduces the items in the iterator into one item using op. If the iterator is empty, None is returned; otherwise, Some is returned.
This version of reduce is simple but somewhat less efficient. If possible, it is better to call reduce(), which requires an identity element.
Note: unlike a sequential fold operation, the order in which op will be applied to reduce the result is not fully specified. So op should be associative or else the results will be non-deterministic.
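For example (illustrative sketch):

use rayon::prelude::*;
let max = vec![3, 1, 4, 1, 5].into_par_iter()
    .reduce_with(|a, b| if a > b { a } else { b });
assert_eq!(max, Some(5));

let empty: Vec<i32> = vec![];
assert_eq!(empty.into_par_iter().reduce_with(|a, b| a + b), None);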
fn reduce_with_identity<OP>(self, identity: Self::Item, op: OP) -> Self::Item where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync, Self::Item: Clone + Sync
Deprecated: call reduce() instead.
fn fold<IDENTITY_ITEM, IDENTITY, FOLD_OP>(self, identity: IDENTITY, fold_op: FOLD_OP) -> Fold<Self, IDENTITY, FOLD_OP> where FOLD_OP: Fn(IDENTITY_ITEM, Self::Item) -> IDENTITY_ITEM + Sync, IDENTITY: Fn() -> IDENTITY_ITEM + Sync, IDENTITY_ITEM: Send
Parallel fold is similar to sequential fold except that the sequence of items may be subdivided before it is folded. Consider a list of numbers like 22 3 77 89 46. If you used sequential fold to add them (fold(0, |a,b| a+b)), you would wind up first adding 0 + 22, then 22 + 3, then 25 + 77, and so forth. The parallel fold works similarly except that it first breaks up your list into sublists, and hence instead of yielding up a single sum at the end, it yields up multiple sums. The number of results is nondeterministic, as is the point where the breaks occur.
So if we did the same parallel fold (fold(0, |a,b| a+b)) on our example list, we might wind up with a sequence of two numbers, like so:
22 3 77 89 46
      |     |
     102   135
Or perhaps these three numbers:
22 3 77 89 46
      |  |  |
     102 89 46
In general, Rayon will attempt to find good breaking points that keep all of your cores busy.
Fold versus reduce
The fold() and reduce() methods each take an identity element and a combining function, but they operate rather differently.
reduce() requires that the identity function has the same type as the things you are iterating over, and it fully reduces the list of items into a single item. So, for example, imagine we are iterating over a list of bytes bytes: [128_u8, 64_u8, 64_u8]. If we used bytes.reduce(|| 0_u8, |a: u8, b: u8| a + b), we would get an overflow. This is because 0, a, and b here are all bytes, just like the numbers in the list (I wrote the types explicitly above, but those are the only types you can use). To avoid the overflow, we would need to do something like bytes.map(|b| b as u32).reduce(|| 0, |a, b| a + b), in which case our result would be 256.
In contrast, with fold(), the identity function does not have to have the same type as the things you are iterating over, and you potentially get back many results. So, if we continue with the bytes example from the previous paragraph, we could do bytes.fold(|| 0_u32, |a, b| a + (b as u32)) to convert our bytes into u32. And of course we might not get back a single sum.
There is a more subtle distinction as well, though it's actually implied by the above points. When you use reduce(), your reduction function is sometimes called with values that were never part of your original parallel iterator (for example, both the left and right might be a partial sum). With fold(), in contrast, the left value in the fold function is always the accumulator, and the right value is always from your original sequence.
Fold vs Map/Reduce
Fold makes sense if you have some operation where it is cheaper to process groups of elements at a time. For example, imagine collecting characters into a string. If you were going to use map/reduce, you might try this:
use rayon::prelude::*;
let s = ['a', 'b', 'c', 'd', 'e']
        .par_iter()
        .map(|c: &char| format!("{}", c))
        .reduce(|| String::new(),
                |mut a: String, b: String| { a.push_str(&b); a });
assert_eq!(s, "abcde");
Because reduce produces the same type of element as its input, you have to first map each character into a string, and then you can reduce them. This means we create one string per element in our iterator -- not so great. Using fold, we can do this instead:
use rayon::prelude::*;
let s = ['a', 'b', 'c', 'd', 'e']
        .par_iter()
        .fold(|| String::new(),
              |mut s: String, c: &char| { s.push(*c); s })
        .reduce(|| String::new(),
                |mut a: String, b: String| { a.push_str(&b); a });
assert_eq!(s, "abcde");
Now fold will process groups of our characters at a time, and we only make one string per group. We should wind up with some small-ish number of strings roughly proportional to the number of CPUs you have (it will ultimately depend on how busy your processors are). Note that we still need to do a reduce afterwards to combine those groups of strings into a single string.
You could use a similar trick to save partial results (e.g., a cache) or something similar.
Combining fold with other operations
You can combine fold with reduce if you want to produce a single value. This is then roughly equivalent to a map/reduce combination in effect:
use rayon::prelude::*;
let bytes = 0..22_u8;  // series of u8 bytes
let sum = bytes.into_par_iter()
               .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
               .sum();
assert_eq!(sum, (0..22).sum()); // compare to sequential
fn sum(self) -> Self::Item where SumOp: ReduceOp<Self::Item>
Sums up the items in the iterator.
Note that the order in which items will be reduced is not specified, so if the + operator is not truly associative, then the results are not fully deterministic.
Basically equivalent to self.reduce(|| 0, |a, b| a + b), except that the type of 0 and the + operation may vary depending on the type of value being produced.
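For example (illustrative sketch):

use rayon::prelude::*;
let total: i32 = (1..5).into_par_iter().sum();
assert_eq!(total, 1 + 2 + 3 + 4);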
fn product(self) -> Self::Item where ProductOp: ReduceOp<Self::Item>
Multiplies all the items in the iterator.
Note that the order in which items will be reduced is not specified, so if the * operator is not truly associative, then the results are not fully deterministic.
Basically equivalent to self.reduce(|| 1, |a, b| a * b), except that the type of 1 and the * operation may vary depending on the type of value being produced.
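For example (illustrative sketch):

use rayon::prelude::*;
let factorial: i32 = (1..5).into_par_iter().product();
assert_eq!(factorial, 1 * 2 * 3 * 4);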
fn mul(self) -> Self::Item where ProductOp: ReduceOp<Self::Item>
Deprecated: name changed to product() to match sequential iterators.
fn min(self) -> Option<Self::Item> where Self::Item: Ord
Computes the minimum of all the items in the iterator. If the iterator is empty, None is returned; otherwise, Some(min) is returned.
Note that the order in which the items will be reduced is not specified, so if the Ord impl is not truly associative, then the results are not deterministic.
Basically equivalent to self.reduce_with(|a, b| cmp::min(a, b)).
fn min_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Fn(&Self::Item) -> K
Computes the item that yields the minimum value for the given function. If the iterator is empty, None is returned; otherwise, Some(item) is returned.
Note that the order in which the items will be reduced is not specified, so if the Ord impl is not truly associative, then the results are not deterministic.
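For example, an illustrative sketch covering both min and min_by_key:

use rayon::prelude::*;
assert_eq!([3, 7, 2, 9].par_iter().cloned().min(), Some(2));

// The keys here are unique, so the result is deterministic.
let words = vec!["hello", "hi", "welcome"];
assert_eq!(words.par_iter().min_by_key(|w| w.len()), Some(&"hi"));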
fn max(self) -> Option<Self::Item> where Self::Item: Ord
Computes the maximum of all the items in the iterator. If the iterator is empty, None is returned; otherwise, Some(max) is returned.
Note that the order in which the items will be reduced is not specified, so if the Ord impl is not truly associative, then the results are not deterministic.
Basically equivalent to self.reduce_with(|a, b| cmp::max(a, b)).
fn max_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Fn(&Self::Item) -> K
Computes the item that yields the maximum value for the given function. If the iterator is empty, None is returned; otherwise, Some(item) is returned.
Note that the order in which the items will be reduced is not specified, so if the Ord impl is not truly associative, then the results are not deterministic.
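For example, an illustrative sketch covering both max and max_by_key:

use rayon::prelude::*;
assert_eq!([3, 7, 2, 9].par_iter().cloned().max(), Some(9));

let words = vec!["hello", "hi", "welcome"];
assert_eq!(words.par_iter().max_by_key(|w| w.len()), Some(&"welcome"));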
fn chain<CHAIN>(self, chain: CHAIN) -> ChainIter<Self, CHAIN::Iter> where CHAIN: IntoParallelIterator<Item=Self::Item>
Takes two iterators and creates a new iterator over both.
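For example (illustrative sketch; ranges implement IntoParallelIterator):

use rayon::prelude::*;
let total: i32 = (0..3).into_par_iter()
    .chain(3..6)
    .sum();
assert_eq!(total, 0 + 1 + 2 + 3 + 4 + 5);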
fn find_any<FIND_OP>(self, predicate: FIND_OP) -> Option<Self::Item> where FIND_OP: Fn(&Self::Item) -> bool + Sync
Searches for some item in the parallel iterator that matches the given predicate and returns it. This operation is similar to find on sequential iterators, but the item returned may not be the first one in the parallel sequence which matches, since we search the entire sequence in parallel.
Once a match is found, we will attempt to stop processing the rest of the items in the iterator as soon as possible (just as find stops iterating once a match is found).
fn any<ANY_OP>(self, predicate: ANY_OP) -> bool where ANY_OP: Fn(Self::Item) -> bool + Sync
Searches for some item in the parallel iterator that matches the given predicate, and if so returns true. Once a match is found, we'll attempt to stop processing the rest of the items. Proving that there's no match, returning false, does require visiting every item.
fn all<ALL_OP>(self, predicate: ALL_OP) -> bool where ALL_OP: Fn(Self::Item) -> bool + Sync
Tests that every item in the parallel iterator matches the given predicate, and if so returns true. If a counter-example is found, we'll attempt to stop processing more items, then return false.
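For example, an illustrative sketch covering both any and all:

use rayon::prelude::*;
assert!([1, 2, 3, 4].par_iter().any(|&x| x > 3));
assert!(![1, 2, 3, 4].par_iter().any(|&x| x > 10));
assert!([2, 4, 6].par_iter().all(|&x| x % 2 == 0));
assert!(![2, 4, 5].par_iter().all(|&x| x % 2 == 0));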
fn collect<C>(self) -> C where C: FromParallelIterator<Self::Item>
Create a fresh collection containing all the elements produced by this parallel iterator.
You may prefer to use collect_into(), which allocates more efficiently with precise knowledge of how many elements the iterator contains, and even allows you to reuse an existing vector's backing store rather than allocating a fresh vector.
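For example, an illustrative sketch collecting into a Vec (the assertions avoid relying on element order):

use rayon::prelude::*;
let doubled: Vec<i32> = (0..5).into_par_iter()
    .map(|x| x * 2)
    .collect();
assert_eq!(doubled.len(), 5);
assert_eq!(doubled.iter().sum::<i32>(), 0 + 2 + 4 + 6 + 8);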
Implementors
impl<A, B> ParallelIterator for ChainIter<A, B> where A: ParallelIterator, B: ParallelIterator<Item=A::Item>
impl<M> ParallelIterator for Enumerate<M> where M: IndexedParallelIterator
impl<M, FILTER_OP> ParallelIterator for Filter<M, FILTER_OP> where M: ParallelIterator, FILTER_OP: Fn(&M::Item) -> bool + Sync
impl<M, FILTER_OP, R> ParallelIterator for FilterMap<M, FILTER_OP> where M: ParallelIterator, FILTER_OP: Fn(M::Item) -> Option<R> + Sync, R: Send
impl<M, MAP_OP, PI> ParallelIterator for FlatMap<M, MAP_OP> where M: ParallelIterator, MAP_OP: Fn(M::Item) -> PI + Sync, PI: IntoParallelIterator
impl<U, BASE, IDENTITY, FOLD_OP> ParallelIterator for Fold<BASE, IDENTITY, FOLD_OP> where BASE: ParallelIterator, FOLD_OP: Fn(U, BASE::Item) -> U + Sync, IDENTITY: Fn() -> U + Sync, U: Send
impl<M> ParallelIterator for Skip<M> where M: IndexedParallelIterator
impl<M> ParallelIterator for Take<M> where M: IndexedParallelIterator
impl<'data, T: Sync + 'data> ParallelIterator for SliceIter<'data, T>
impl<'data, T: Sync + 'data> ParallelIterator for ChunksIter<'data, T>
impl<'data, T: Send + 'data> ParallelIterator for SliceIterMut<'data, T>
impl<'data, T: Send + 'data> ParallelIterator for ChunksMutIter<'data, T>
impl<'a> ParallelIterator for ParChars<'a>
impl<M, MAP_OP> ParallelIterator for Map<M, MAP_OP> where M: ParallelIterator, MAP_OP: MapOp<M::Item>
impl<M> ParallelIterator for Weight<M> where M: ParallelIterator
impl<A, B> ParallelIterator for ZipIter<A, B> where A: IndexedParallelIterator, B: IndexedParallelIterator
impl ParallelIterator for RangeIter<u8>
impl ParallelIterator for RangeIter<u16>
impl ParallelIterator for RangeIter<u32>
impl ParallelIterator for RangeIter<usize>
impl ParallelIterator for RangeIter<i8>
impl ParallelIterator for RangeIter<i16>
impl ParallelIterator for RangeIter<i32>
impl ParallelIterator for RangeIter<isize>
impl ParallelIterator for RangeIter<u64>
impl ParallelIterator for RangeIter<i64>
impl<T: Send> ParallelIterator for VecIter<T>
impl<T: Send> ParallelIterator for OptionIter<T>