author    Chris Wailes <chriswailes@google.com>  2023-01-25 20:55:09 +0000
committer Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2023-01-25 20:55:09 +0000
commit    6d21583f4739e5708e505c0eda72d382149b77f3 (patch)
tree      5b03406c5682a43b22a65a82e3251ce28b307456
parent    7bcf6c4202362c863d314be4a9fa9dce3d62db42 (diff)
parent    110179d64c3d5e7274414b09fc9e8eeaafa0847e (diff)
download  atomic-6d21583f4739e5708e505c0eda72d382149b77f3.tar.gz
Initial checkin of atomic crate version 0.5.1 am: a620c92011 am: ec06baafd6 am: 110179d64c
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/atomic/+/2399163

Change-Id: Ie623453d443610c09d29c0960b7ab878ec253a4b
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
Diffstat:
 -rw-r--r--  Android.bp                34
 -rw-r--r--  Cargo.toml                29
 -rw-r--r--  Cargo.toml.orig           19
 l---------  LICENSE                    1
 -rw-r--r--  LICENSE-APACHE           201
 -rw-r--r--  LICENSE-MIT               25
 -rw-r--r--  METADATA                  20
 -rw-r--r--  MODULE_LICENSE_APACHE2     0
 -rw-r--r--  OWNERS                     1
 -rw-r--r--  README.md                 42
 -rw-r--r--  build.rs                  16
 -rw-r--r--  cargo2android.json        10
 -rw-r--r--  src/fallback.rs          196
 -rw-r--r--  src/lib.rs               711
 -rw-r--r--  src/ops.rs               296
15 files changed, 1601 insertions(+), 0 deletions(-)
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..7abf8de
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,34 @@
+// This file is generated by cargo2android.py --config cargo2android.json.
+// Do not modify this file as changes will be overridden on upgrade.
+
+
+
+rust_library {
+ name: "libatomic_rust",
+ stem: "libatomic",
+ // has rustc warnings
+ host_supported: true,
+ crate_name: "atomic",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.5.1",
+ srcs: ["src/lib.rs"],
+ edition: "2018",
+ features: [
+ "fallback",
+ "std",
+ ],
+ cfgs: [
+ "has_atomic_i16",
+ "has_atomic_i32",
+ "has_atomic_i64",
+ "has_atomic_i8",
+ "has_atomic_u16",
+ "has_atomic_u32",
+ "has_atomic_u64",
+ "has_atomic_u8",
+ ],
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.virt",
+ ],
+}
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..fedd21b
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,29 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "atomic"
+version = "0.5.1"
+authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
+description = "Generic Atomic<T> wrapper type"
+documentation = "https://amanieu.github.io/atomic-rs/atomic/index.html"
+readme = "README.md"
+keywords = ["atomic", "no_std"]
+license = "Apache-2.0/MIT"
+repository = "https://github.com/Amanieu/atomic-rs"
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["fallback"]
+fallback = []
+std = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..80b613b
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,19 @@
+[package]
+name = "atomic"
+version = "0.5.1"
+edition = "2018"
+authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
+description = "Generic Atomic<T> wrapper type"
+documentation = "https://amanieu.github.io/atomic-rs/atomic/index.html"
+license = "Apache-2.0/MIT"
+repository = "https://github.com/Amanieu/atomic-rs"
+readme = "README.md"
+keywords = ["atomic", "no_std"]
+
+[features]
+default = ["fallback"]
+std = []
+fallback = []
+
+[build-dependencies]
+autocfg = "1"
diff --git a/LICENSE b/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000..40b8817
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..ada5837
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,20 @@
+name: "atomic"
+description: "Generic Atomic<T> wrapper type"
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://crates.io/crates/atomic"
+ }
+ url {
+ type: ARCHIVE
+ value: "https://static.crates.io/crates/atomic/atomic-0.5.1.crate"
+ }
+ version: "0.5.1"
+ # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2023
+ month: 1
+ day: 17
+ }
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..45dc4dd
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1 @@
+include platform/prebuilts/rust:master:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..ea2c0cb
--- /dev/null
+++ b/README.md
@@ -0,0 +1,42 @@
+Generic `Atomic<T>` for Rust
+============================
+
+[![Build Status](https://travis-ci.org/Amanieu/atomic-rs.svg?branch=master)](https://travis-ci.org/Amanieu/atomic-rs) [![Crates.io](https://img.shields.io/crates/v/atomic.svg)](https://crates.io/crates/atomic)
+
+A Rust library that provides a generic `Atomic<T>` type for all `T: Copy` types, unlike the standard library, which only provides a few fixed atomic types (`AtomicBool`, `AtomicIsize`, `AtomicUsize`, `AtomicPtr`).
+
+This library will use native atomic instructions if possible, and will otherwise fall back to a lock-based mechanism. You can use the `Atomic::<T>::is_lock_free()` function to check whether native atomic operations are supported for a given type. Note that a type must have a power-of-2 size and alignment in order to be used by native atomic instructions.
+
+This crate uses `#![no_std]` and only depends on libcore.
+
+[Documentation](https://amanieu.github.io/atomic-rs/atomic/index.html)
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+atomic = "0.5"
+```
+
+and this to your crate root:
+
+```rust
+extern crate atomic;
+```
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
+additional terms or conditions.
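As a quick illustration of the README's claims (a sketch against the published `atomic = "0.5"` API, not shipped in this change; the `Vec2` type is hypothetical): `Atomic<T>` accepts any `Copy` type, and `is_lock_free()` reports whether the type's size *and* alignment map onto a native atomic.

```rust
use atomic::{Atomic, Ordering};

// An arbitrary 8-byte Copy type, for illustration only.
#[derive(Copy, Clone, PartialEq, Debug)]
struct Vec2 {
    x: f32,
    y: f32,
}

fn main() {
    let v = Atomic::new(Vec2 { x: 0.0, y: 0.0 });
    // Size 8 but alignment 4: fails the `align >= 8` check in src/ops.rs,
    // so this typically takes the spinlock fallback despite being 8 bytes.
    println!("Vec2 lock-free: {}", Atomic::<Vec2>::is_lock_free());
    // u64 has size 8 and, on typical 64-bit targets, alignment 8: native.
    println!("u64 lock-free: {}", Atomic::<u64>::is_lock_free());

    v.store(Vec2 { x: 1.0, y: 2.0 }, Ordering::SeqCst);
    assert_eq!(v.load(Ordering::SeqCst), Vec2 { x: 1.0, y: 2.0 });
}
```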
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..4cf4274
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,16 @@
+fn main() {
+ let ac = autocfg::new();
+
+ for root in &["core", "std"] {
+ for size in &[8, 16, 32, 64, 128] {
+ ac.emit_expression_cfg(
+ &format!("{}::sync::atomic::AtomicU{}::compare_exchange", root, size),
+ &format!("has_atomic_u{}", size),
+ );
+ ac.emit_expression_cfg(
+ &format!("{}::sync::atomic::AtomicI{}::compare_exchange", root, size),
+ &format!("has_atomic_i{}", size),
+ );
+ }
+ }
+}
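For context on how these probe results are used (an illustrative fragment, not part of the crate): `autocfg` tries to compile an expression naming, e.g., `core::sync::atomic::AtomicU32::compare_exchange`, and on success passes `--cfg has_atomic_u32` to rustc, which `src/ops.rs` then matches on.

```rust
// Illustrative consumer of the emitted cfgs (mirrors the pattern in src/ops.rs).
#[cfg(has_atomic_u32)]
fn native_u32_supported() -> bool {
    true // compiled only when the AtomicU32 probe succeeded
}

#[cfg(not(has_atomic_u32))]
fn native_u32_supported() -> bool {
    false // target lacks 32-bit atomics; the crate would use its fallback
}
```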
diff --git a/cargo2android.json b/cargo2android.json
new file mode 100644
index 0000000..5871db7
--- /dev/null
+++ b/cargo2android.json
@@ -0,0 +1,10 @@
+{
+ "apex-available": [
+ "//apex_available:platform",
+ "com.android.virt"
+ ],
+ "dependencies": true,
+ "device": true,
+ "features": "fallback,std",
+ "run": true
+}
diff --git a/src/fallback.rs b/src/fallback.rs
new file mode 100644
index 0000000..8b7e861
--- /dev/null
+++ b/src/fallback.rs
@@ -0,0 +1,196 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cmp;
+use core::mem;
+use core::num::Wrapping;
+use core::ops;
+use core::ptr;
+use core::slice;
+use core::sync::atomic::{self, AtomicUsize, Ordering};
+
+// We use an AtomicUsize instead of an AtomicBool because it performs better
+// on architectures that don't have byte-sized atomics.
+//
+// We give each spinlock its own cache line to avoid false sharing.
+#[repr(align(64))]
+struct SpinLock(AtomicUsize);
+
+impl SpinLock {
+ fn lock(&self) {
+ while self
+ .0
+ .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
+ .is_err()
+ {
+ while self.0.load(Ordering::Relaxed) != 0 {
+ // Note: deprecated in later Rust in favor of core::hint::spin_loop().
+ atomic::spin_loop_hint();
+ }
+ }
+ }
+
+ fn unlock(&self) {
+ self.0.store(0, Ordering::Release);
+ }
+}
+
+// A big array of spinlocks which we use to guard atomic accesses. A spinlock is
+// chosen based on a hash of the address of the atomic object, which helps to
+// reduce contention compared to a single global lock.
+macro_rules! array {
+ (@accum (0, $($_es:expr),*) -> ($($body:tt)*))
+ => {array!(@as_expr [$($body)*])};
+ (@accum (1, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))};
+ (@accum (2, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))};
+ (@accum (4, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (8, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (16, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (32, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (64, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))};
+
+ (@as_expr $e:expr) => {$e};
+
+ [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) };
+}
+static SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64];
+
+// Spinlock pointer hashing function from compiler-rt
+#[inline]
+fn lock_for_addr(addr: usize) -> &'static SpinLock {
+ // Disregard the lowest 4 bits. We want all values that may be part of the
+ // same memory operation to hash to the same value and therefore use the same
+ // lock.
+ let mut hash = addr >> 4;
+ // Use the next bits as the basis for the hash
+ let low = hash & (SPINLOCKS.len() - 1);
+ // Now use the high(er) set of bits to perturb the hash, so that we don't
+ // get collisions from atomic fields in a single object
+ hash >>= 16;
+ hash ^= low;
+ // Return a pointer to the lock to use
+ &SPINLOCKS[hash & (SPINLOCKS.len() - 1)]
+}
+
+#[inline]
+fn lock(addr: usize) -> LockGuard {
+ let lock = lock_for_addr(addr);
+ lock.lock();
+ LockGuard(lock)
+}
+
+struct LockGuard(&'static SpinLock);
+impl Drop for LockGuard {
+ #[inline]
+ fn drop(&mut self) {
+ self.0.unlock();
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_load<T>(dst: *mut T) -> T {
+ let _l = lock(dst as usize);
+ ptr::read(dst)
+}
+
+#[inline]
+pub unsafe fn atomic_store<T>(dst: *mut T, val: T) {
+ let _l = lock(dst as usize);
+ ptr::write(dst, val);
+}
+
+#[inline]
+pub unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ ptr::replace(dst, val)
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange<T>(dst: *mut T, current: T, new: T) -> Result<T, T> {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ // compare_exchange compares with memcmp instead of Eq
+ let a = slice::from_raw_parts(&result as *const _ as *const u8, mem::size_of_val(&result));
+ let b = slice::from_raw_parts(
+ &current as *const _ as *const u8,
+ mem::size_of_val(&current),
+ );
+ if a == b {
+ ptr::write(dst, new);
+ Ok(result)
+ } else {
+ Err(result)
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T) -> T
+where
+ Wrapping<T>: ops::Add<Output = Wrapping<T>>,
+{
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, (Wrapping(result) + Wrapping(val)).0);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T) -> T
+where
+ Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
+{
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, (Wrapping(result) - Wrapping(val)).0);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result & val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result | val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result ^ val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, cmp::min(result, val));
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, cmp::max(result, val));
+ result
+}
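To see this fallback in action through the public API (a hedged example assuming the default `fallback` feature; the `Big` type is invented for illustration): a `Copy` type larger than 16 bytes cannot match any native atomic width, so every operation on it routes through the address-hashed spinlocks above.

```rust
use atomic::{Atomic, Ordering};

// 32 bytes: no native atomic width matches, so src/fallback.rs handles it.
#[derive(Copy, Clone, PartialEq, Debug)]
struct Big([u64; 4]);

fn main() {
    let b = Atomic::new(Big([0; 4]));
    assert!(!Atomic::<Big>::is_lock_free());
    b.store(Big([1, 2, 3, 4]), Ordering::SeqCst);
    // Comparison is bytewise (memcmp), as noted in atomic_compare_exchange.
    let prev = b.compare_exchange(
        Big([1, 2, 3, 4]),
        Big([5, 5, 5, 5]),
        Ordering::SeqCst,
        Ordering::SeqCst,
    );
    assert_eq!(prev, Ok(Big([1, 2, 3, 4])));
}
```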
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..09ad879
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,711 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Generic `Atomic<T>` wrapper type
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent types.
+//!
+//! This library defines a generic atomic wrapper type `Atomic<T>` for all
+//! `T: Copy` types.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an `Ordering` which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as [LLVM atomic orderings][1].
+//!
+//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
+//!
+//! Atomic variables are safe to share between threads (they implement `Sync`)
+//! but they do not themselves provide the mechanism for sharing. The most
+//! common way to share an atomic variable is to put it into an `Arc` (an
+//! atomically-reference-counted shared pointer).
+//!
+//! Most atomic types may be stored in static variables, initialized using
+//! the `const fn` constructors. Atomic statics are often used for lazy global
+//! initialization.
+
+#![warn(missing_docs)]
+#![warn(rust_2018_idioms)]
+#![no_std]
+
+#[cfg(any(test, feature = "std"))]
+#[macro_use]
+extern crate std;
+
+// Re-export some useful definitions from libcore
+pub use core::sync::atomic::{fence, Ordering};
+
+use core::cell::UnsafeCell;
+use core::fmt;
+
+#[cfg(feature = "std")]
+use std::panic::RefUnwindSafe;
+
+#[cfg(feature = "fallback")]
+mod fallback;
+mod ops;
+
+/// A generic atomic wrapper type which allows an object to be safely shared
+/// between threads.
+#[repr(transparent)]
+pub struct Atomic<T> {
+ v: UnsafeCell<T>,
+}
+
+// Atomic<T> is only Sync if T is Send
+unsafe impl<T: Copy + Send> Sync for Atomic<T> {}
+
+// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is
+// RefUnwindSafe.
+//
+// This is trivially correct for native lock-free atomic types. For those whose
+// atomicity is emulated using a spinlock, it is still correct because the
+// `Atomic` API does not allow doing any panic-inducing operation after writing
+// to the target object.
+#[cfg(feature = "std")]
+impl<T: Copy + RefUnwindSafe> RefUnwindSafe for Atomic<T> {}
+
+impl<T: Copy + Default> Default for Atomic<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+impl<T: Copy + fmt::Debug> fmt::Debug for Atomic<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Atomic")
+ .field(&self.load(Ordering::SeqCst))
+ .finish()
+ }
+}
+
+impl<T> Atomic<T> {
+ /// Creates a new `Atomic`.
+ #[inline]
+ pub const fn new(v: T) -> Atomic<T> {
+ Atomic {
+ v: UnsafeCell::new(v),
+ }
+ }
+
+ /// Checks if `Atomic` objects of this type are lock-free.
+ ///
+ /// If an `Atomic` is not lock-free then it may be implemented using locks
+ /// internally, which makes it unsuitable for some situations (such as
+ /// communicating with a signal handler).
+ #[inline]
+ pub const fn is_lock_free() -> bool {
+ ops::atomic_is_lock_free::<T>()
+ }
+}
+
+impl<T: Copy> Atomic<T> {
+ /// Returns a mutable reference to the underlying type.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ #[inline]
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.v.get() }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ #[inline]
+ pub fn into_inner(self) -> T {
+ self.v.into_inner()
+ }
+
+ /// Loads a value from the `Atomic`.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ #[inline]
+ pub fn load(&self, order: Ordering) -> T {
+ unsafe { ops::atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the `Atomic`.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ pub fn store(&self, val: T, order: Ordering) {
+ unsafe {
+ ops::atomic_store(self.v.get(), val, order);
+ }
+ }
+
+ /// Stores a value into the `Atomic`, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ #[inline]
+ pub fn swap(&self, val: T, order: Ordering) -> T {
+ unsafe { ops::atomic_swap(self.v.get(), val, order) }
+ }
+
+ /// Stores a value into the `Atomic` if the current value is the same as the
+ /// `current` value.
+ ///
+ /// The return value is a result indicating whether the new value was
+ /// written and containing the previous value. On success this value is
+ /// guaranteed to be equal to `new`.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if
+ /// the operation succeeds while the second describes the required ordering
+ /// when the operation fails. The failure ordering can't be `Release` or
+ /// `AcqRel` and must be equivalent to or weaker than the success ordering.
+ #[inline]
+ pub fn compare_exchange(
+ &self,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<T, T> {
+ unsafe { ops::atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the `Atomic` if the current value is the same as the
+ /// `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail
+ /// even when the comparison succeeds, which can result in more efficient
+ /// code on some platforms. The return value is a result indicating whether
+ /// the new value was written and containing the previous value.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the
+ /// memory ordering of this operation. The first describes the required
+ /// ordering if the operation succeeds while the second describes the
+ /// required ordering when the operation fails. The failure ordering can't
+ /// be `Release` or `AcqRel` and must be equivalent to or weaker than the
+ /// success ordering.
+ #[inline]
+ pub fn compare_exchange_weak(
+ &self,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<T, T> {
+ unsafe { ops::atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Fetches the value, and applies a function to it that returns an optional
+ /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
+ /// `Err(previous_value)`.
+ ///
+ /// Note: This may call the function multiple times if the value has been changed from other threads in
+ /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
+ /// only once to the stored value.
+ ///
+ /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
+ /// The first describes the required ordering for when the operation finally succeeds while the second
+ /// describes the required ordering for loads. These correspond to the success and failure orderings of
+ /// [`compare_exchange`] respectively.
+ ///
+ /// Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
+ /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
+ /// and must be equivalent to or weaker than the success ordering.
+ ///
+ /// [`compare_exchange`]: #method.compare_exchange
+ /// [`Ordering`]: enum.Ordering.html
+ /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+ /// [`Release`]: enum.Ordering.html#variant.Release
+ /// [`Acquire`]: enum.Ordering.html#variant.Acquire
+ /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use atomic::{Atomic, Ordering};
+ ///
+ /// let x = Atomic::new(7);
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
+ /// assert_eq!(x.load(Ordering::SeqCst), 9);
+ /// ```
+ #[inline]
+ pub fn fetch_update<F>(
+ &self,
+ set_order: Ordering,
+ fetch_order: Ordering,
+ mut f: F,
+ ) -> Result<T, T>
+ where
+ F: FnMut(T) -> Option<T>,
+ {
+ let mut prev = self.load(fetch_order);
+ while let Some(next) = f(prev) {
+ match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+ x @ Ok(_) => return x,
+ Err(next_prev) => prev = next_prev,
+ }
+ }
+ Err(prev)
+ }
+}
+
+impl Atomic<bool> {
+ /// Logical "and" with a boolean value.
+ ///
+ /// Performs a logical "and" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Logical "or" with a boolean value.
+ ///
+ /// Performs a logical "or" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Logical "xor" with a boolean value.
+ ///
+ /// Performs a logical "xor" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_xor(self.v.get(), val, order) }
+ }
+}
+
+macro_rules! atomic_ops_common {
+ ($($t:ty)*) => ($(
+ impl Atomic<$t> {
+ /// Add to the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtract from the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_sub(self.v.get(), val, order) }
+ }
+
+ /// Bitwise and with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Bitwise or with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Bitwise xor with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_xor(self.v.get(), val, order) }
+ }
+ }
+ )*);
+}
+macro_rules! atomic_ops_signed {
+ ($($t:ty)*) => (
+ atomic_ops_common!{ $($t)* }
+ $(
+ impl Atomic<$t> {
+ /// Minimum with the current value.
+ #[inline]
+ pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_min(self.v.get(), val, order) }
+ }
+
+ /// Maximum with the current value.
+ #[inline]
+ pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_max(self.v.get(), val, order) }
+ }
+ }
+ )*
+ );
+}
+macro_rules! atomic_ops_unsigned {
+ ($($t:ty)*) => (
+ atomic_ops_common!{ $($t)* }
+ $(
+ impl Atomic<$t> {
+ /// Minimum with the current value.
+ #[inline]
+ pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_umin(self.v.get(), val, order) }
+ }
+
+ /// Maximum with the current value.
+ #[inline]
+ pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_umax(self.v.get(), val, order) }
+ }
+ }
+ )*
+ );
+}
+atomic_ops_signed! { i8 i16 i32 i64 isize i128 }
+atomic_ops_unsigned! { u8 u16 u32 u64 usize u128 }
+
+#[cfg(test)]
+mod tests {
+ use super::{Atomic, Ordering::*};
+ use core::mem;
+
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Foo(u8, u8);
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Bar(u64, u64);
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Quux(u32);
+
+ #[test]
+ fn atomic_bool() {
+ let a = Atomic::new(false);
+ assert_eq!(Atomic::<bool>::is_lock_free(), cfg!(has_atomic_u8));
+ assert_eq!(format!("{:?}", a), "Atomic(false)");
+ assert_eq!(a.load(SeqCst), false);
+ a.store(true, SeqCst);
+ assert_eq!(a.swap(false, SeqCst), true);
+ assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
+ assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
+ assert_eq!(a.fetch_and(false, SeqCst), true);
+ assert_eq!(a.fetch_or(true, SeqCst), false);
+ assert_eq!(a.fetch_xor(false, SeqCst), true);
+ assert_eq!(a.load(SeqCst), true);
+ }
+
+ #[test]
+ fn atomic_i8() {
+ let a = Atomic::new(0i8);
+ assert_eq!(Atomic::<i8>::is_lock_free(), cfg!(has_atomic_u8));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ // Make sure overflows are handled correctly
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), -74);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i16() {
+ let a = Atomic::new(0i16);
+ assert_eq!(Atomic::<i16>::is_lock_free(), cfg!(has_atomic_u16));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i32() {
+ let a = Atomic::new(0i32);
+ assert_eq!(Atomic::<i32>::is_lock_free(), cfg!(has_atomic_u32));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i64() {
+ let a = Atomic::new(0i64);
+ assert_eq!(
+ Atomic::<i64>::is_lock_free(),
+ cfg!(has_atomic_u64) && mem::align_of::<i64>() == 8
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i128() {
+ let a = Atomic::new(0i128);
+ assert_eq!(Atomic::<i128>::is_lock_free(), cfg!(has_atomic_u128));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_isize() {
+ let a = Atomic::new(0isize);
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u8() {
+ let a = Atomic::new(0u8);
+ assert_eq!(Atomic::<u8>::is_lock_free(), cfg!(has_atomic_u8));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u16() {
+ let a = Atomic::new(0u16);
+ assert_eq!(Atomic::<u16>::is_lock_free(), cfg!(has_atomic_u16));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u32() {
+ let a = Atomic::new(0u32);
+ assert_eq!(Atomic::<u32>::is_lock_free(), cfg!(has_atomic_u32));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u64() {
+ let a = Atomic::new(0u64);
+ assert_eq!(
+ Atomic::<u64>::is_lock_free(),
+ cfg!(has_atomic_u64) && mem::align_of::<u64>() == 8
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u128() {
+ let a = Atomic::new(0u128);
+ assert_eq!(Atomic::<u128>::is_lock_free(), cfg!(has_atomic_u128));
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_usize() {
+ let a = Atomic::new(0usize);
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_foo() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Foo>::is_lock_free(), false);
+ assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
+ assert_eq!(a.load(SeqCst), Foo(0, 0));
+ a.store(Foo(1, 1), SeqCst);
+ assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
+ assert_eq!(
+ a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
+ Err(Foo(2, 2))
+ );
+ assert_eq!(
+ a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
+ Ok(Foo(2, 2))
+ );
+ assert_eq!(a.load(SeqCst), Foo(3, 3));
+ }
+
+ #[test]
+ fn atomic_bar() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Bar>::is_lock_free(), false);
+ assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
+ assert_eq!(a.load(SeqCst), Bar(0, 0));
+ a.store(Bar(1, 1), SeqCst);
+ assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
+ assert_eq!(
+ a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
+ Err(Bar(2, 2))
+ );
+ assert_eq!(
+ a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
+ Ok(Bar(2, 2))
+ );
+ assert_eq!(a.load(SeqCst), Bar(3, 3));
+ }
+
+ #[test]
+ fn atomic_quux() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Quux>::is_lock_free(), cfg!(has_atomic_u32));
+ assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
+ assert_eq!(a.load(SeqCst), Quux(0));
+ a.store(Quux(1), SeqCst);
+ assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
+ assert_eq!(
+ a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
+ Err(Quux(2))
+ );
+ assert_eq!(
+ a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
+ Ok(Quux(2))
+ );
+ assert_eq!(a.load(SeqCst), Quux(3));
+ }
+}
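The module docs above note that the usual way to share an atomic is through an `Arc`. A minimal sketch of that pattern (illustrative, not part of this change), combining `Arc<Atomic<T>>` with the CAS-loop helper `fetch_update` defined earlier in this file:

```rust
use atomic::{Atomic, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let counter = Arc::new(Atomic::new(0u32));
    let handles: Vec<_> = (0..2)
        .map(|_| {
            let c = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1000 {
                    // Retries internally via compare_exchange_weak.
                    c.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |n| Some(n + 1))
                        .unwrap();
                }
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(counter.load(Ordering::SeqCst), 2000);
}
```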
diff --git a/src/ops.rs b/src/ops.rs
new file mode 100644
index 0000000..808335c
--- /dev/null
+++ b/src/ops.rs
@@ -0,0 +1,296 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#[cfg(feature = "fallback")]
+use crate::fallback;
+use core::cmp;
+use core::mem;
+use core::num::Wrapping;
+use core::ops;
+use core::sync::atomic::Ordering;
+
+macro_rules! match_atomic {
+ ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
+ match mem::size_of::<$type>() {
+ #[cfg(has_atomic_u8)]
+ 1 if mem::align_of::<$type>() >= 1 => {
+ type $atomic = core::sync::atomic::AtomicU8;
+
+ $impl
+ }
+ #[cfg(has_atomic_u16)]
+ 2 if mem::align_of::<$type>() >= 2 => {
+ type $atomic = core::sync::atomic::AtomicU16;
+
+ $impl
+ }
+ #[cfg(has_atomic_u32)]
+ 4 if mem::align_of::<$type>() >= 4 => {
+ type $atomic = core::sync::atomic::AtomicU32;
+
+ $impl
+ }
+ #[cfg(has_atomic_u64)]
+ 8 if mem::align_of::<$type>() >= 8 => {
+ type $atomic = core::sync::atomic::AtomicU64;
+
+ $impl
+ }
+ #[cfg(has_atomic_u128)]
+ 16 if mem::align_of::<$type>() >= 16 => {
+ type $atomic = core::sync::atomic::AtomicU128;
+
+ $impl
+ }
+ #[cfg(feature = "fallback")]
+ _ => $fallback_impl,
+ #[cfg(not(feature = "fallback"))]
+ _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
+ }
+ };
+}
+
+macro_rules! match_signed_atomic {
+ ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
+ match mem::size_of::<$type>() {
+ #[cfg(has_atomic_i8)]
+ 1 if mem::align_of::<$type>() >= 1 => {
+ type $atomic = core::sync::atomic::AtomicI8;
+
+ $impl
+ }
+ #[cfg(has_atomic_i16)]
+ 2 if mem::align_of::<$type>() >= 2 => {
+ type $atomic = core::sync::atomic::AtomicI16;
+
+ $impl
+ }
+ #[cfg(has_atomic_i32)]
+ 4 if mem::align_of::<$type>() >= 4 => {
+ type $atomic = core::sync::atomic::AtomicI32;
+
+ $impl
+ }
+ #[cfg(has_atomic_i64)]
+ 8 if mem::align_of::<$type>() >= 8 => {
+ type $atomic = core::sync::atomic::AtomicI64;
+
+ $impl
+ }
+ #[cfg(has_atomic_i128)]
+ 16 if mem::align_of::<$type>() >= 16 => {
+ type $atomic = core::sync::atomic::AtomicI128;
+
+ $impl
+ }
+ #[cfg(feature = "fallback")]
+ _ => $fallback_impl,
+ #[cfg(not(feature = "fallback"))]
+ _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
+ }
+ };
+}
+
+#[inline]
+pub const fn atomic_is_lock_free<T>() -> bool {
+ let size = mem::size_of::<T>();
+ let align = mem::align_of::<T>();
+
+ (cfg!(has_atomic_u8) & (size == 1) & (align >= 1))
+ | (cfg!(has_atomic_u16) & (size == 2) & (align >= 2))
+ | (cfg!(has_atomic_u32) & (size == 4) & (align >= 4))
+ | (cfg!(has_atomic_u64) & (size == 8) & (align >= 8))
+ | (cfg!(has_atomic_u128) & (size == 16) & (align >= 16))
+}
+
+#[inline]
+pub unsafe fn atomic_load<T>(dst: *mut T, order: Ordering) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).load(order)),
+ fallback::atomic_load(dst)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
+ match_atomic!(
+ T,
+ A,
+ (*(dst as *const A)).store(mem::transmute_copy(&val), order),
+ fallback::atomic_store(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).swap(mem::transmute_copy(&val), order)),
+ fallback::atomic_swap(dst, val)
+ )
+}
+
+#[inline]
+unsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {
+ match r {
+ Ok(x) => Ok(mem::transmute_copy(&x)),
+ Err(x) => Err(mem::transmute_copy(&x)),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange<T>(
+ dst: *mut T,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ match_atomic!(
+ T,
+ A,
+ map_result((*(dst as *const A)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ )),
+ fallback::atomic_compare_exchange(dst, current, new)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange_weak<T>(
+ dst: *mut T,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ match_atomic!(
+ T,
+ A,
+ map_result((*(dst as *const A)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ )),
+ fallback::atomic_compare_exchange(dst, current, new)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
+where
+ Wrapping<T>: ops::Add<Output = Wrapping<T>>,
+{
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_add(mem::transmute_copy(&val), order),),
+ fallback::atomic_add(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
+where
+ Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
+{
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_sub(mem::transmute_copy(&val), order),),
+ fallback::atomic_sub(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_and(mem::transmute_copy(&val), order),),
+ fallback::atomic_and(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_or(mem::transmute_copy(&val), order),),
+ fallback::atomic_or(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_xor(mem::transmute_copy(&val), order),),
+ fallback::atomic_xor(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ match_signed_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),
+ fallback::atomic_min(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ match_signed_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),
+ fallback::atomic_max(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_umin<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),
+ fallback::atomic_min(dst, val)
+ )
+}
+
+#[inline]
+pub unsafe fn atomic_umax<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ match_atomic!(
+ T,
+ A,
+ mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),
+ fallback::atomic_max(dst, val)
+ )
+}
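The dispatch above keys entirely on a type's size and alignment. A standalone restatement of the predicate behind `atomic_is_lock_free` (for illustration only; it ignores the cfg probes and the 16-byte case):

```rust
use std::mem;

// A native atomic is used only when the size is one of the probed widths
// and the alignment is at least that size.
fn maps_to_native<T>() -> bool {
    let (size, align) = (mem::size_of::<T>(), mem::align_of::<T>());
    matches!(size, 1 | 2 | 4 | 8) && align >= size
}

fn main() {
    assert!(maps_to_native::<u32>()); // size 4, align 4 -> AtomicU32 arm
    assert!(!maps_to_native::<[u16; 3]>()); // size 6: no matching width
    assert!(!maps_to_native::<[u8; 12]>()); // size 12: spinlock fallback
}
```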