author      Jeff Vander Stoep <jeffv@google.com>    2023-04-11 10:36:26 +0000
committer   Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>    2023-04-11 10:36:26 +0000
commit      03a89919eb6ff0452902f19c1499ccea7a5a60c9 (patch)
tree        1900296c4c4cc8dbc1dcc832ee0e0fcbdcceb88b
parent      7a5cb094831c27b3664716813327b0fbc234388f (diff)
parent      a00441b1ee7ea275114309268c00e3b701b118f7 (diff)
download    rayon-03a89919eb6ff0452902f19c1499ccea7a5a60c9.tar.gz
Upgrade rayon to 1.7.0 am: a452597528 am: 72c30a8931 am: 4746a47f1b am: 4b65af3a6c am: a00441b1ee
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/rayon/+/2520000

Change-Id: Ibddfe163348e8766d5c3ae3a804027a0e74d702f
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json          |   2
-rw-r--r--  Android.bp                    |   2
-rw-r--r--  Cargo.toml                    |   9
-rw-r--r--  Cargo.toml.orig               |   7
-rw-r--r--  METADATA                      |  10
-rw-r--r--  README.md                     |  33
-rw-r--r--  RELEASES.md                   |  19
-rw-r--r--  src/array.rs                  |   3
-rw-r--r--  src/iter/collect/test.rs      |   2
-rw-r--r--  src/iter/mod.rs               | 146
-rw-r--r--  src/iter/plumbing/README.md   |   4
-rw-r--r--  src/iter/skip_any.rs          | 144
-rw-r--r--  src/iter/skip_any_while.rs    | 166
-rw-r--r--  src/iter/take_any.rs          | 144
-rw-r--r--  src/iter/take_any_while.rs    | 166
-rw-r--r--  src/iter/test.rs              |   3
-rw-r--r--  src/lib.rs                    |   6
-rw-r--r--  src/vec.rs                    |   9
-rw-r--r--  tests/clones.rs               |  20
-rw-r--r--  tests/collect.rs              |   2
-rw-r--r--  tests/cross-pool.rs           |   1
-rw-r--r--  tests/debug.rs                |   4
-rw-r--r--  tests/iter_panic.rs           |   1
-rw-r--r--  tests/named-threads.rs        |   1
-rw-r--r--  tests/octillion.rs            |  32
-rw-r--r--  tests/par_bridge_recursion.rs |   1
-rw-r--r--  tests/sort-panic-safe.rs      |  10
27 files changed, 903 insertions, 44 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index e65faba..34dd103 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "d5e18e34ae9ace4fe723ba280f254ef01d540a82"
+ "sha1": "6236214d717694917e77aa1c16d91176b9bc2fff"
},
"path_in_vcs": ""
}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index d122ac2..1a7835a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -42,7 +42,7 @@ rust_library {
host_supported: true,
crate_name: "rayon",
cargo_env_compat: true,
- cargo_pkg_version: "1.6.1",
+ cargo_pkg_version: "1.7.0",
srcs: ["src/lib.rs"],
edition: "2021",
rustlibs: [
diff --git a/Cargo.toml b/Cargo.toml
index 4a208bc..9b0e7b6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,9 +11,9 @@
[package]
edition = "2021"
-rust-version = "1.56"
+rust-version = "1.59"
name = "rayon"
-version = "1.6.1"
+version = "1.7.0"
authors = [
"Niko Matsakis <niko@alum.mit.edu>",
"Josh Stone <cuviper@gmail.com>",
@@ -43,10 +43,7 @@ version = "1.0"
default-features = false
[dependencies.rayon-core]
-version = "1.10.0"
-
-[dev-dependencies.lazy_static]
-version = "1"
+version = "1.11.0"
[dev-dependencies.rand]
version = "0.8"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index f182242..a6ccc97 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,10 +1,10 @@
[package]
name = "rayon"
-version = "1.6.1"
+version = "1.7.0"
authors = ["Niko Matsakis <niko@alum.mit.edu>",
"Josh Stone <cuviper@gmail.com>"]
description = "Simple work-stealing parallelism for Rust"
-rust-version = "1.56"
+rust-version = "1.59"
edition = "2021"
license = "MIT OR Apache-2.0"
repository = "https://github.com/rayon-rs/rayon"
@@ -19,7 +19,7 @@ members = ["rayon-demo", "rayon-core"]
exclude = ["ci"]
[dependencies]
-rayon-core = { version = "1.10.0", path = "rayon-core" }
+rayon-core = { version = "1.11.0", path = "rayon-core" }
# This is a public dependency!
[dependencies.either]
@@ -27,6 +27,5 @@ version = "1.0"
default-features = false
[dev-dependencies]
-lazy_static = "1"
rand = "0.8"
rand_xorshift = "0.3"
diff --git a/METADATA b/METADATA
index fc05754..94b529f 100644
--- a/METADATA
+++ b/METADATA
@@ -11,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/rayon/rayon-1.6.1.crate"
+ value: "https://static.crates.io/crates/rayon/rayon-1.7.0.crate"
}
- version: "1.6.1"
+ version: "1.7.0"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 12
- day: 13
+ year: 2023
+ month: 4
+ day: 3
}
}
diff --git a/README.md b/README.md
index 51d92a6..7f925bc 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
[![Rayon crate](https://img.shields.io/crates/v/rayon.svg)](https://crates.io/crates/rayon)
[![Rayon documentation](https://docs.rs/rayon/badge.svg)](https://docs.rs/rayon)
-![minimum rustc 1.56](https://img.shields.io/badge/rustc-1.56+-red.svg)
+![minimum rustc 1.59](https://img.shields.io/badge/rustc-1.59+-red.svg)
[![build status](https://github.com/rayon-rs/rayon/workflows/master/badge.svg)](https://github.com/rayon-rs/rayon/actions)
[![Join the chat at https://gitter.im/rayon-rs/Lobby](https://badges.gitter.im/rayon-rs/Lobby.svg)](https://gitter.im/rayon-rs/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
@@ -13,7 +13,7 @@ enjoy [this blog post][blog] about Rayon, which gives more background
and details about how it works, or [this video][video], from the Rust
Belt Rust conference.) Rayon is
[available on crates.io](https://crates.io/crates/rayon), and
-[API Documentation is available on docs.rs](https://docs.rs/rayon/).
+[API documentation is available on docs.rs](https://docs.rs/rayon).
[blog]: https://smallcultfollowing.com/babysteps/blog/2015/12/18/rayon-data-parallelism-in-rust/
[video]: https://www.youtube.com/watch?v=gof_OEv71Aw
@@ -71,12 +71,12 @@ as:
```toml
[dependencies]
-rayon = "1.5"
+rayon = "1.7"
```
-To use the Parallel Iterator APIs, a number of traits have to be in
+To use the parallel iterator APIs, a number of traits have to be in
scope. The easiest way to bring those things into scope is to use the
-[Rayon prelude](https://docs.rs/rayon/*/rayon/prelude/index.html). In
+[Rayon prelude](https://docs.rs/rayon/*/rayon/prelude/index.html). In
each module where you would like to use the parallel iterator APIs,
just add:
@@ -84,26 +84,37 @@ just add:
use rayon::prelude::*;
```
-Rayon currently requires `rustc 1.56.0` or greater.
+Rayon currently requires `rustc 1.59.0` or greater.
### Usage with WebAssembly
-Rayon can work on the Web via WebAssembly, but requires an adapter
-and some project configuration to account for differences between
+Rayon can work on the Web via WebAssembly, but requires an adapter and
+some project configuration to account for differences between
WebAssembly threads and threads on the other platforms.
-Check out [wasm-bindgen-rayon](https://github.com/GoogleChromeLabs/wasm-bindgen-rayon)
+Check out the
+[wasm-bindgen-rayon](https://github.com/GoogleChromeLabs/wasm-bindgen-rayon)
docs for more details.
## Contribution
-Rayon is an open source project! If you'd like to contribute to Rayon, check out [the list of "help wanted" issues](https://github.com/rayon-rs/rayon/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). These are all (or should be) issues that are suitable for getting started, and they generally include a detailed set of instructions for what to do. Please ask questions if anything is unclear! Also, check out the [Guide to Development](https://github.com/rayon-rs/rayon/wiki/Guide-to-Development) page on the wiki. Note that all code submitted in PRs to Rayon is assumed to [be licensed under Rayon's dual MIT/Apache2 licensing](https://github.com/rayon-rs/rayon/blob/master/README.md#license).
+Rayon is an open source project! If you'd like to contribute to Rayon,
+check out
+[the list of "help wanted" issues](https://github.com/rayon-rs/rayon/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).
+These are all (or should be) issues that are suitable for getting
+started, and they generally include a detailed set of instructions for
+what to do. Please ask questions if anything is unclear! Also, check
+out the
+[Guide to Development](https://github.com/rayon-rs/rayon/wiki/Guide-to-Development)
+page on the wiki. Note that all code submitted in PRs to Rayon is
+assumed to
+[be licensed under Rayon's dual MIT/Apache 2.0 licensing](https://github.com/rayon-rs/rayon/blob/master/README.md#license).
## Quick demo
To see Rayon in action, check out the `rayon-demo` directory, which
includes a number of demos of code using Rayon. For example, run this
-command to get a visualization of an nbody simulation. To see the
+command to get a visualization of an N-body simulation. To see the
effect of using Rayon, press `s` to run sequentially and `p` to run in
parallel.
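
For orientation, the prelude usage described in this README hunk boils down to something like the following minimal sketch (the `sum_of_squares` function is only an illustrative name, not part of the crate):

```rust
use rayon::prelude::*;

// The prelude brings `par_iter()` and the parallel iterator traits into scope.
fn sum_of_squares(input: &[i32]) -> i32 {
    input.par_iter().map(|&i| i * i).sum()
}

fn main() {
    assert_eq!(sum_of_squares(&[1, 2, 3, 4]), 30);
}
```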
diff --git a/RELEASES.md b/RELEASES.md
index f675761..28b476d 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,3 +1,22 @@
+# Release rayon 1.7.0 / rayon-core 1.11.0 (2023-03-03)
+
+- The minimum supported `rustc` is now 1.59.
+- Added a fallback when threading is unsupported.
+- The new `ParallelIterator::take_any` and `skip_any` methods work like
+ unordered `IndexedParallelIterator::take` and `skip`, counting items in
+ whatever order they are visited in parallel.
+- The new `ParallelIterator::take_any_while` and `skip_any_while` methods work
+ like unordered `Iterator::take_while` and `skip_while`, which previously had
+ no parallel equivalent. The "while" condition may be satisfied from anywhere
+ in the parallel iterator, affecting all future items regardless of position.
+- The new `yield_now` and `yield_local` functions will cooperatively yield
+ execution to Rayon, either trying to execute pending work from the entire
+ pool or from just the local deques of the current thread, respectively.
+
+# Release rayon-core 1.10.2 (2023-01-22)
+
+- Fixed miri-reported UB for SharedReadOnly tags protected by a call.
+
# Release rayon 1.6.1 (2022-12-09)
- Simplified `par_bridge` to only pull one item at a time from the iterator,
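
The cooperative yielding added in 1.7.0 (see the `yield_now`/`yield_local` bullet above) can be used along these lines; this is only a sketch, and the `done` flag plus the work spawned into the scope are illustrative assumptions rather than code from this release:

```rust
use rayon::Yield;
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
    let done = AtomicBool::new(false);

    rayon::scope(|s| {
        // Background work queued into the same pool (illustrative).
        s.spawn(|_| {
            let sum: u64 = (0..1_000_000u64).sum();
            assert!(sum > 0);
            done.store(true, Ordering::Release);
        });

        // Wait for the flag, but help the pool instead of spinning idly:
        // `yield_now` tries to run pending work from anywhere in the pool,
        // while `yield_local` would only look at this thread's own deque.
        while !done.load(Ordering::Acquire) {
            match rayon::yield_now() {
                Some(Yield::Executed) => {}                           // ran a pending task
                Some(Yield::Idle) | None => std::thread::yield_now(), // nothing to run here
            }
        }
    });
}
```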
diff --git a/src/array.rs b/src/array.rs
index 937bebf..32a5fdd 100644
--- a/src/array.rs
+++ b/src/array.rs
@@ -78,7 +78,8 @@ impl<T: Send, const N: usize> IndexedParallelIterator for IntoIter<T, N> {
unsafe {
// Drain every item, and then the local array can just fall out of scope.
let mut array = ManuallyDrop::new(self.array);
- callback.callback(DrainProducer::new(&mut *array))
+ let producer = DrainProducer::new(array.as_mut_slice());
+ callback.callback(producer)
}
}
}
diff --git a/src/iter/collect/test.rs b/src/iter/collect/test.rs
index b5f676f..97bec3f 100644
--- a/src/iter/collect/test.rs
+++ b/src/iter/collect/test.rs
@@ -76,6 +76,7 @@ fn right_produces_items_with_no_complete() {
// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn produces_items_with_no_complete() {
let counter = DropCounter::default();
let mut v = vec![];
@@ -273,6 +274,7 @@ fn right_panics() {
// The left consumer produces fewer items while the right
// consumer produces correct number; check that created elements are dropped
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn left_produces_fewer_items_drops() {
let counter = DropCounter::default();
let mut v = vec![];
diff --git a/src/iter/mod.rs b/src/iter/mod.rs
index 98c9326..e60ea16 100644
--- a/src/iter/mod.rs
+++ b/src/iter/mod.rs
@@ -141,10 +141,14 @@ mod reduce;
mod repeat;
mod rev;
mod skip;
+mod skip_any;
+mod skip_any_while;
mod splitter;
mod step_by;
mod sum;
mod take;
+mod take_any;
+mod take_any_while;
mod try_fold;
mod try_reduce;
mod try_reduce_with;
@@ -185,9 +189,13 @@ pub use self::{
repeat::{repeat, repeatn, Repeat, RepeatN},
rev::Rev,
skip::Skip,
+ skip_any::SkipAny,
+ skip_any_while::SkipAnyWhile,
splitter::{split, Split},
step_by::StepBy,
take::Take,
+ take_any::TakeAny,
+ take_any_while::TakeAnyWhile,
try_fold::{TryFold, TryFoldWith},
update::Update,
while_some::WhileSome,
@@ -2194,6 +2202,143 @@ pub trait ParallelIterator: Sized + Send {
Intersperse::new(self, element)
}
+ /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
+ ///
+ /// This is similar to [`IndexedParallelIterator::take`] without being
+ /// constrained to the "first" `n` of the original iterator order. The
+ /// taken items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .filter(|&x| x % 2 == 0)
+ /// .take_any(5)
+ /// .collect();
+ ///
+ /// assert_eq!(result.len(), 5);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ fn take_any(self, n: usize) -> TakeAny<Self> {
+ TakeAny::new(self, n)
+ }
+
+ /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
+ ///
+ /// This is similar to [`IndexedParallelIterator::skip`] without being
+ /// constrained to the "first" `n` of the original iterator order. The
+ /// remaining items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .filter(|&x| x % 2 == 0)
+ /// .skip_any(5)
+ /// .collect();
+ ///
+ /// assert_eq!(result.len(), 45);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ fn skip_any(self, n: usize) -> SkipAny<Self> {
+ SkipAny::new(self, n)
+ }
+
+ /// Creates an iterator that takes elements from *anywhere* in the original iterator
+ /// until the given `predicate` returns `false`.
+ ///
+ /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
+ /// global condition unrelated to the item itself, or some combination thereof.
+ ///
+ /// If parallel calls to the `predicate` race and give different results, then the
+ /// `true` results will still take those particular items, while respecting the `false`
+ /// result from elsewhere to skip any further items.
+ ///
+ /// This is similar to [`Iterator::take_while`] without being constrained to the original
+ /// iterator order. The taken items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .take_any_while(|x| *x < 50)
+ /// .collect();
+ ///
+ /// assert!(result.len() <= 50);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// use std::sync::atomic::AtomicUsize;
+ /// use std::sync::atomic::Ordering::Relaxed;
+ ///
+ /// // Collect any group of items that sum <= 1000
+ /// let quota = AtomicUsize::new(1000);
+ /// let result: Vec<_> = (0_usize..100)
+ /// .into_par_iter()
+ /// .take_any_while(|&x| {
+ /// quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
+ /// .is_ok()
+ /// })
+ /// .collect();
+ ///
+ /// let sum = result.iter().sum::<usize>();
+ /// assert!(matches!(sum, 902..=1000));
+ /// ```
+ fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
+ where
+ P: Fn(&Self::Item) -> bool + Sync + Send,
+ {
+ TakeAnyWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that skips elements from *anywhere* in the original iterator
+ /// until the given `predicate` returns `false`.
+ ///
+ /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
+ /// global condition unrelated to the item itself, or some combination thereof.
+ ///
+ /// If parallel calls to the `predicate` race and give different results, then the
+ /// `true` results will still skip those particular items, while respecting the `false`
+ /// result from elsewhere to skip any further items.
+ ///
+ /// This is similar to [`Iterator::skip_while`] without being constrained to the original
+ /// iterator order. The remaining items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .skip_any_while(|x| *x < 50)
+ /// .collect();
+ ///
+ /// assert!(result.len() >= 50);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
+ where
+ P: Fn(&Self::Item) -> bool + Sync + Send,
+ {
+ SkipAnyWhile::new(self, predicate)
+ }
+
/// Internal method used to define the behavior of this parallel
/// iterator. You should not need to call this directly.
///
@@ -2419,6 +2564,7 @@ pub trait IndexedParallelIterator: ParallelIterator {
/// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
/// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
/// ```
+ #[track_caller]
fn chunks(self, chunk_size: usize) -> Chunks<Self> {
assert!(chunk_size != 0, "chunk_size must not be zero");
Chunks::new(self, chunk_size)
diff --git a/src/iter/plumbing/README.md b/src/iter/plumbing/README.md
index dbee36b..42d22ef 100644
--- a/src/iter/plumbing/README.md
+++ b/src/iter/plumbing/README.md
@@ -35,8 +35,8 @@ modes (which is why there are two):
more like a `for_each` call: each time a new item is produced, the
`consume` method is called with that item. (The traits themselves are
a bit more complex, as they support state that can be threaded
- through and ultimately reduced.) Unlike producers, there are two
- variants of consumers. The difference is how the split is performed:
+ through and ultimately reduced.) Like producers, there are two
+ variants of consumers which differ in how the split is performed:
- in the `Consumer` trait, splitting is done with `split_at`, which
accepts an index where the split should be performed. All
iterators can work in this mode. The resulting halves thus have an
diff --git a/src/iter/skip_any.rs b/src/iter/skip_any.rs
new file mode 100644
index 0000000..0660a56
--- /dev/null
+++ b/src/iter/skip_any.rs
@@ -0,0 +1,144 @@
+use super::plumbing::*;
+use super::*;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// `SkipAny` is an iterator that skips over `n` elements from anywhere in `I`.
+/// This struct is created by the [`skip_any()`] method on [`ParallelIterator`]
+///
+/// [`skip_any()`]: trait.ParallelIterator.html#method.skip_any
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct SkipAny<I: ParallelIterator> {
+ base: I,
+ count: usize,
+}
+
+impl<I> SkipAny<I>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `SkipAny` iterator.
+ pub(super) fn new(base: I, count: usize) -> Self {
+ SkipAny { base, count }
+ }
+}
+
+impl<I> ParallelIterator for SkipAny<I>
+where
+ I: ParallelIterator,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = SkipAnyConsumer {
+ base: consumer,
+ count: &AtomicUsize::new(self.count),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct SkipAnyConsumer<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+impl<'f, T, C> Consumer<T> for SkipAnyConsumer<'f, C>
+where
+ C: Consumer<T>,
+ T: Send,
+{
+ type Folder = SkipAnyFolder<'f, C::Folder>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ SkipAnyConsumer { base: left, ..self },
+ SkipAnyConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ SkipAnyFolder {
+ base: self.base.into_folder(),
+ count: self.count,
+ }
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
+
+impl<'f, T, C> UnindexedConsumer<T> for SkipAnyConsumer<'f, C>
+where
+ C: UnindexedConsumer<T>,
+ T: Send,
+{
+ fn split_off_left(&self) -> Self {
+ SkipAnyConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct SkipAnyFolder<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+fn checked_decrement(u: &AtomicUsize) -> bool {
+ u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
+ .is_ok()
+}
+
+impl<'f, T, C> Folder<T> for SkipAnyFolder<'f, C>
+where
+ C: Folder<T>,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if !checked_decrement(self.count) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .skip_while(move |_| checked_decrement(self.count)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
diff --git a/src/iter/skip_any_while.rs b/src/iter/skip_any_while.rs
new file mode 100644
index 0000000..28b9e59
--- /dev/null
+++ b/src/iter/skip_any_while.rs
@@ -0,0 +1,166 @@
+use super::plumbing::*;
+use super::*;
+use std::fmt;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// `SkipAnyWhile` is an iterator that skips over elements from anywhere in `I`
+/// until the callback returns `false`.
+/// This struct is created by the [`skip_any_while()`] method on [`ParallelIterator`]
+///
+/// [`skip_any_while()`]: trait.ParallelIterator.html#method.skip_any_while
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
+pub struct SkipAnyWhile<I: ParallelIterator, P> {
+ base: I,
+ predicate: P,
+}
+
+impl<I: ParallelIterator + fmt::Debug, P> fmt::Debug for SkipAnyWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SkipAnyWhile")
+ .field("base", &self.base)
+ .finish()
+ }
+}
+
+impl<I, P> SkipAnyWhile<I, P>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `SkipAnyWhile` iterator.
+ pub(super) fn new(base: I, predicate: P) -> Self {
+ SkipAnyWhile { base, predicate }
+ }
+}
+
+impl<I, P> ParallelIterator for SkipAnyWhile<I, P>
+where
+ I: ParallelIterator,
+ P: Fn(&I::Item) -> bool + Sync + Send,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = SkipAnyWhileConsumer {
+ base: consumer,
+ predicate: &self.predicate,
+ skipping: &AtomicBool::new(true),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct SkipAnyWhileConsumer<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ skipping: &'p AtomicBool,
+}
+
+impl<'p, T, C, P> Consumer<T> for SkipAnyWhileConsumer<'p, C, P>
+where
+ C: Consumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ type Folder = SkipAnyWhileFolder<'p, C::Folder, P>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ SkipAnyWhileConsumer { base: left, ..self },
+ SkipAnyWhileConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ SkipAnyWhileFolder {
+ base: self.base.into_folder(),
+ predicate: self.predicate,
+ skipping: self.skipping,
+ }
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
+
+impl<'p, T, C, P> UnindexedConsumer<T> for SkipAnyWhileConsumer<'p, C, P>
+where
+ C: UnindexedConsumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ fn split_off_left(&self) -> Self {
+ SkipAnyWhileConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct SkipAnyWhileFolder<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ skipping: &'p AtomicBool,
+}
+
+fn skip<T>(item: &T, skipping: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
+ if !skipping.load(Ordering::Relaxed) {
+ return false;
+ }
+ if predicate(item) {
+ return true;
+ }
+ skipping.store(false, Ordering::Relaxed);
+ false
+}
+
+impl<'p, T, C, P> Folder<T> for SkipAnyWhileFolder<'p, C, P>
+where
+ C: Folder<T>,
+ P: Fn(&T) -> bool + 'p,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if !skip(&item, self.skipping, self.predicate) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .skip_while(move |x| skip(x, self.skipping, self.predicate)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
diff --git a/src/iter/take_any.rs b/src/iter/take_any.rs
new file mode 100644
index 0000000..e3992b3
--- /dev/null
+++ b/src/iter/take_any.rs
@@ -0,0 +1,144 @@
+use super::plumbing::*;
+use super::*;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// `TakeAny` is an iterator that iterates over `n` elements from anywhere in `I`.
+/// This struct is created by the [`take_any()`] method on [`ParallelIterator`]
+///
+/// [`take_any()`]: trait.ParallelIterator.html#method.take_any
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct TakeAny<I: ParallelIterator> {
+ base: I,
+ count: usize,
+}
+
+impl<I> TakeAny<I>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `TakeAny` iterator.
+ pub(super) fn new(base: I, count: usize) -> Self {
+ TakeAny { base, count }
+ }
+}
+
+impl<I> ParallelIterator for TakeAny<I>
+where
+ I: ParallelIterator,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = TakeAnyConsumer {
+ base: consumer,
+ count: &AtomicUsize::new(self.count),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct TakeAnyConsumer<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+impl<'f, T, C> Consumer<T> for TakeAnyConsumer<'f, C>
+where
+ C: Consumer<T>,
+ T: Send,
+{
+ type Folder = TakeAnyFolder<'f, C::Folder>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ TakeAnyConsumer { base: left, ..self },
+ TakeAnyConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ TakeAnyFolder {
+ base: self.base.into_folder(),
+ count: self.count,
+ }
+ }
+
+ fn full(&self) -> bool {
+ self.count.load(Ordering::Relaxed) == 0 || self.base.full()
+ }
+}
+
+impl<'f, T, C> UnindexedConsumer<T> for TakeAnyConsumer<'f, C>
+where
+ C: UnindexedConsumer<T>,
+ T: Send,
+{
+ fn split_off_left(&self) -> Self {
+ TakeAnyConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct TakeAnyFolder<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+fn checked_decrement(u: &AtomicUsize) -> bool {
+ u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
+ .is_ok()
+}
+
+impl<'f, T, C> Folder<T> for TakeAnyFolder<'f, C>
+where
+ C: Folder<T>,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if checked_decrement(self.count) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .take_while(move |_| checked_decrement(self.count)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ self.count.load(Ordering::Relaxed) == 0 || self.base.full()
+ }
+}
diff --git a/src/iter/take_any_while.rs b/src/iter/take_any_while.rs
new file mode 100644
index 0000000..e6a91af
--- /dev/null
+++ b/src/iter/take_any_while.rs
@@ -0,0 +1,166 @@
+use super::plumbing::*;
+use super::*;
+use std::fmt;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// `TakeAnyWhile` is an iterator that iterates over elements from anywhere in `I`
+/// until the callback returns `false`.
+/// This struct is created by the [`take_any_while()`] method on [`ParallelIterator`]
+///
+/// [`take_any_while()`]: trait.ParallelIterator.html#method.take_any_while
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
+pub struct TakeAnyWhile<I: ParallelIterator, P> {
+ base: I,
+ predicate: P,
+}
+
+impl<I: ParallelIterator + fmt::Debug, P> fmt::Debug for TakeAnyWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TakeAnyWhile")
+ .field("base", &self.base)
+ .finish()
+ }
+}
+
+impl<I, P> TakeAnyWhile<I, P>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `TakeAnyWhile` iterator.
+ pub(super) fn new(base: I, predicate: P) -> Self {
+ TakeAnyWhile { base, predicate }
+ }
+}
+
+impl<I, P> ParallelIterator for TakeAnyWhile<I, P>
+where
+ I: ParallelIterator,
+ P: Fn(&I::Item) -> bool + Sync + Send,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = TakeAnyWhileConsumer {
+ base: consumer,
+ predicate: &self.predicate,
+ taking: &AtomicBool::new(true),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct TakeAnyWhileConsumer<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ taking: &'p AtomicBool,
+}
+
+impl<'p, T, C, P> Consumer<T> for TakeAnyWhileConsumer<'p, C, P>
+where
+ C: Consumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ type Folder = TakeAnyWhileFolder<'p, C::Folder, P>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ TakeAnyWhileConsumer { base: left, ..self },
+ TakeAnyWhileConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ TakeAnyWhileFolder {
+ base: self.base.into_folder(),
+ predicate: self.predicate,
+ taking: self.taking,
+ }
+ }
+
+ fn full(&self) -> bool {
+ !self.taking.load(Ordering::Relaxed) || self.base.full()
+ }
+}
+
+impl<'p, T, C, P> UnindexedConsumer<T> for TakeAnyWhileConsumer<'p, C, P>
+where
+ C: UnindexedConsumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ fn split_off_left(&self) -> Self {
+ TakeAnyWhileConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct TakeAnyWhileFolder<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ taking: &'p AtomicBool,
+}
+
+fn take<T>(item: &T, taking: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
+ if !taking.load(Ordering::Relaxed) {
+ return false;
+ }
+ if predicate(item) {
+ return true;
+ }
+ taking.store(false, Ordering::Relaxed);
+ false
+}
+
+impl<'p, T, C, P> Folder<T> for TakeAnyWhileFolder<'p, C, P>
+where
+ C: Folder<T>,
+ P: Fn(&T) -> bool + 'p,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if take(&item, self.taking, self.predicate) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .take_while(move |x| take(x, self.taking, self.predicate)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ !self.taking.load(Ordering::Relaxed) || self.base.full()
+ }
+}
diff --git a/src/iter/test.rs b/src/iter/test.rs
index 94323d7..c72068d 100644
--- a/src/iter/test.rs
+++ b/src/iter/test.rs
@@ -468,6 +468,7 @@ fn check_cmp_gt_to_seq() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn check_cmp_short_circuit() {
// We only use a single thread in order to make the short-circuit behavior deterministic.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
@@ -497,6 +498,7 @@ fn check_cmp_short_circuit() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn check_partial_cmp_short_circuit() {
// We only use a single thread to make the short-circuit behavior deterministic.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
@@ -526,6 +528,7 @@ fn check_partial_cmp_short_circuit() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn check_partial_cmp_nan_short_circuit() {
// We only use a single thread to make the short-circuit behavior deterministic.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
diff --git a/src/lib.rs b/src/lib.rs
index 25a5e16..86f997b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -76,6 +76,11 @@
//! [the `collections` from `std`]: https://doc.rust-lang.org/std/collections/index.html
//! [`std`]: https://doc.rust-lang.org/std/
//!
+//! # Targets without threading
+//!
+//! Rayon has limited support for targets without `std` threading implementations.
+//! See the [`rayon_core`] documentation for more information about its global fallback.
+//!
//! # Other questions?
//!
//! See [the Rayon FAQ][faq].
@@ -119,6 +124,7 @@ pub use rayon_core::{in_place_scope, scope, Scope};
pub use rayon_core::{in_place_scope_fifo, scope_fifo, ScopeFifo};
pub use rayon_core::{join, join_context};
pub use rayon_core::{spawn, spawn_fifo};
+pub use rayon_core::{yield_local, yield_now, Yield};
/// We need to transmit raw pointers across threads. It is possible to do this
/// without any unsafe code by converting pointers to usize or to AtomicPtr<T>
diff --git a/src/vec.rs b/src/vec.rs
index c804b0f..7892f53 100644
--- a/src/vec.rs
+++ b/src/vec.rs
@@ -225,8 +225,9 @@ impl<'data, T: 'data + Send> Producer for DrainProducer<'data, T> {
impl<'data, T: 'data + Send> Drop for DrainProducer<'data, T> {
fn drop(&mut self) {
- // use `Drop for [T]`
- unsafe { ptr::drop_in_place(self.slice) };
+ // extract the slice so we can use `Drop for [T]`
+ let slice_ptr: *mut [T] = mem::take::<&'data mut [T]>(&mut self.slice);
+ unsafe { ptr::drop_in_place::<[T]>(slice_ptr) };
}
}
@@ -276,7 +277,7 @@ impl<'data, T: 'data> iter::FusedIterator for SliceDrain<'data, T> {}
impl<'data, T: 'data> Drop for SliceDrain<'data, T> {
fn drop(&mut self) {
// extract the iterator so we can use `Drop for [T]`
- let iter = mem::replace(&mut self.iter, [].iter_mut());
- unsafe { ptr::drop_in_place(iter.into_slice()) };
+ let slice_ptr: *mut [T] = mem::replace(&mut self.iter, [].iter_mut()).into_slice();
+ unsafe { ptr::drop_in_place::<[T]>(slice_ptr) };
}
}
diff --git a/tests/clones.rs b/tests/clones.rs
index 2b78f09..0d6c864 100644
--- a/tests/clones.rs
+++ b/tests/clones.rs
@@ -10,6 +10,13 @@ where
assert_eq!(a, b);
}
+fn check_count<I>(iter: I)
+where
+ I: ParallelIterator + Clone,
+{
+ assert_eq!(iter.clone().count(), iter.count());
+}
+
#[test]
fn clone_binary_heap() {
use std::collections::BinaryHeap;
@@ -150,8 +157,10 @@ fn clone_adaptors() {
check(v.par_iter().panic_fuse());
check(v.par_iter().positions(|_| true));
check(v.par_iter().rev());
- check(v.par_iter().skip(1));
- check(v.par_iter().take(1));
+ check(v.par_iter().skip(42));
+ check(v.par_iter().skip_any_while(|_| false));
+ check(v.par_iter().take(42));
+ check(v.par_iter().take_any_while(|_| true));
check(v.par_iter().cloned().while_some());
check(v.par_iter().with_max_len(1));
check(v.par_iter().with_min_len(1));
@@ -161,6 +170,13 @@ fn clone_adaptors() {
}
#[test]
+fn clone_counted_adaptors() {
+ let v: Vec<_> = (0..1000).collect();
+ check_count(v.par_iter().skip_any(42));
+ check_count(v.par_iter().take_any(42));
+}
+
+#[test]
fn clone_empty() {
check(rayon::iter::empty::<i32>());
}
diff --git a/tests/collect.rs b/tests/collect.rs
index 48b80f6..bfb080c 100644
--- a/tests/collect.rs
+++ b/tests/collect.rs
@@ -6,6 +6,7 @@ use std::sync::atomic::Ordering;
use std::sync::Mutex;
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn collect_drop_on_unwind() {
struct Recorddrop<'a>(i64, &'a Mutex<Vec<i64>>);
@@ -61,6 +62,7 @@ fn collect_drop_on_unwind() {
}
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn collect_drop_on_unwind_zst() {
static INSERTS: AtomicUsize = AtomicUsize::new(0);
static DROPS: AtomicUsize = AtomicUsize::new(0);
diff --git a/tests/cross-pool.rs b/tests/cross-pool.rs
index f0a2128..835e2e2 100644
--- a/tests/cross-pool.rs
+++ b/tests/cross-pool.rs
@@ -2,6 +2,7 @@ use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn cross_pool_busy() {
let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
diff --git a/tests/debug.rs b/tests/debug.rs
index 1cbf4e6..14f3791 100644
--- a/tests/debug.rs
+++ b/tests/debug.rs
@@ -172,7 +172,11 @@ fn debug_adaptors() {
check(v.par_iter().positions(|_| true));
check(v.par_iter().rev());
check(v.par_iter().skip(1));
+ check(v.par_iter().skip_any(1));
+ check(v.par_iter().skip_any_while(|_| false));
check(v.par_iter().take(1));
+ check(v.par_iter().take_any(1));
+ check(v.par_iter().take_any_while(|_| true));
check(v.par_iter().map(Some).while_some());
check(v.par_iter().with_max_len(1));
check(v.par_iter().with_min_len(1));
diff --git a/tests/iter_panic.rs b/tests/iter_panic.rs
index 37d4d6a..c62a8db 100644
--- a/tests/iter_panic.rs
+++ b/tests/iter_panic.rs
@@ -20,6 +20,7 @@ fn iter_panic() {
}
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn iter_panic_fuse() {
// We only use a single thread in order to make the behavior
// of 'panic_fuse' deterministic
diff --git a/tests/named-threads.rs b/tests/named-threads.rs
index fd1b0be..dadb37b 100644
--- a/tests/named-threads.rs
+++ b/tests/named-threads.rs
@@ -4,6 +4,7 @@ use rayon::prelude::*;
use rayon::*;
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn named_threads() {
ThreadPoolBuilder::new()
.thread_name(|i| format!("hello-name-test-{}", i))
diff --git a/tests/octillion.rs b/tests/octillion.rs
index cff2b11..1af9ad8 100644
--- a/tests/octillion.rs
+++ b/tests/octillion.rs
@@ -68,7 +68,14 @@ fn two_threads<F: Send + FnOnce() -> R, R: Send>(f: F) -> R {
}
#[test]
-#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+#[cfg_attr(
+ any(
+ not(target_pointer_width = "64"),
+ target_os = "emscripten",
+ target_family = "wasm"
+ ),
+ ignore
+)]
fn find_last_octillion() {
// It would be nice if `find_last` could prioritize the later splits,
// basically flipping the `join` args, without needing indexed `rev`.
@@ -78,32 +85,49 @@ fn find_last_octillion() {
}
#[test]
-#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+#[cfg_attr(
+ any(
+ not(target_pointer_width = "64"),
+ target_os = "emscripten",
+ target_family = "wasm"
+ ),
+ ignore
+)]
fn find_last_octillion_inclusive() {
let x = two_threads(|| octillion_inclusive().find_last(|_| true));
assert_eq!(x, Some(OCTILLION));
}
#[test]
-#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+#[cfg_attr(
+ any(
+ not(target_pointer_width = "64"),
+ target_os = "emscripten",
+ target_family = "wasm"
+ ),
+ ignore
+)]
fn find_last_octillion_flat() {
let x = two_threads(|| octillion_flat().find_last(|_| true));
assert_eq!(x, Some(OCTILLION - 1));
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn find_any_octillion() {
let x = two_threads(|| octillion().find_any(|x| *x > OCTILLION / 2));
assert!(x.is_some());
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn find_any_octillion_flat() {
let x = two_threads(|| octillion_flat().find_any(|x| *x > OCTILLION / 2));
assert!(x.is_some());
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn filter_find_any_octillion() {
let x = two_threads(|| {
octillion()
@@ -114,6 +138,7 @@ fn filter_find_any_octillion() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn filter_find_any_octillion_flat() {
let x = two_threads(|| {
octillion_flat()
@@ -124,6 +149,7 @@ fn filter_find_any_octillion_flat() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn fold_find_any_octillion_flat() {
let x = two_threads(|| octillion_flat().fold(|| (), |_, _| ()).find_any(|_| true));
assert!(x.is_some());
diff --git a/tests/par_bridge_recursion.rs b/tests/par_bridge_recursion.rs
index 4def0a9..3a48ef1 100644
--- a/tests/par_bridge_recursion.rs
+++ b/tests/par_bridge_recursion.rs
@@ -4,6 +4,7 @@ use std::iter::once_with;
const N: usize = 100_000;
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn par_bridge_recursion() {
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(10)
diff --git a/tests/sort-panic-safe.rs b/tests/sort-panic-safe.rs
index 00a9731..95ef88d 100644
--- a/tests/sort-panic-safe.rs
+++ b/tests/sort-panic-safe.rs
@@ -8,11 +8,12 @@ use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::thread;
-static VERSIONS: AtomicUsize = AtomicUsize::new(0);
+const ZERO: AtomicUsize = AtomicUsize::new(0);
+const LEN: usize = 20_000;
-lazy_static::lazy_static! {
- static ref DROP_COUNTS: Vec<AtomicUsize> = (0..20_000).map(|_| AtomicUsize::new(0)).collect();
-}
+static VERSIONS: AtomicUsize = ZERO;
+
+static DROP_COUNTS: [AtomicUsize; LEN] = [ZERO; LEN];
#[derive(Clone, Eq)]
struct DropCounter {
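
The `lazy_static` removal in this hunk leans on a plain language rule: a `const` item can be the repeat operand of an array initializer even for a non-`Copy` type such as `AtomicUsize`, with each element getting its own fresh value. A standalone sketch of that pattern (the names here are illustrative, not taken from the test):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// `[ZERO; 8]` is allowed because `ZERO` is a `const` item, even though
// `AtomicUsize` is not `Copy`; every slot starts as an independent zero.
const ZERO: AtomicUsize = AtomicUsize::new(0);
static COUNTERS: [AtomicUsize; 8] = [ZERO; 8];

fn main() {
    COUNTERS[3].fetch_add(1, Ordering::Relaxed);
    assert_eq!(COUNTERS[3].load(Ordering::Relaxed), 1);
    assert_eq!(COUNTERS[0].load(Ordering::Relaxed), 0);
}
```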
@@ -117,6 +118,7 @@ macro_rules! test {
thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn sort_panic_safe() {
let prev = panic::take_hook();
panic::set_hook(Box::new(move |info| {