author    Jeff Vander Stoep <jeffv@google.com>  2024-01-31 16:20:19 +0100
committer Jeff Vander Stoep <jeffv@google.com>  2024-01-31 16:20:20 +0100
commit    eb498abc8864e5fbcd1ae9b7e2e635be40b07439 (patch)
tree      b1d1e33a5481ce3514ad0c1223133157acc85d74
parent    3446d945fe88e43b0524d439a60edd75961afc9c (diff)
download  bytemuck-eb498abc8864e5fbcd1ae9b7e2e635be40b07439.tar.gz
Upgrade bytemuck to 1.14.1
This project was upgraded with external_updater.
Usage: tools/external_updater/updater.sh update external/rust/crates/bytemuck
For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md

Test: TreeHugger
Change-Id: Ic036007e2ee607a0100e422b07edd69266e86c66
-rw-r--r--  .cargo/config.toml          |   4
-rw-r--r--  .cargo_vcs_info.json        |   2
-rw-r--r--  .github/workflows/rust.yml  |   4
-rw-r--r--  Android.bp                  |  16
-rw-r--r--  Cargo.toml                  |   8
-rw-r--r--  Cargo.toml.orig             |  28
-rw-r--r--  METADATA                    |  26
-rw-r--r--  changelog.md                |  18
-rw-r--r--  src/allocation.rs           | 117
-rw-r--r--  src/anybitpattern.rs        |   1
-rw-r--r--  src/checked.rs              |  23
-rw-r--r--  src/internal.rs             |  32
-rw-r--r--  src/lib.rs                  | 150
-rw-r--r--  src/must.rs                 | 203
-rw-r--r--  src/pod.rs                  | 109
-rw-r--r--  src/zeroable.rs             |  96
-rw-r--r--  src/zeroable_in_option.rs   |   1
-rw-r--r--  tests/cast_slice_tests.rs   |   3
-rw-r--r--  tests/checked_tests.rs      |   3
-rw-r--r--  tests/std_tests.rs          |  60
20 files changed, 740 insertions(+), 164 deletions(-)
diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 0000000..71e0936
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,4 @@
+[alias]
+
+# The list of features should be the same as the one under `[package.metadata.docs.rs]`
+nightly_docs = "doc --no-deps -F nightly_docs,derive,extern_crate_alloc,extern_crate_std,zeroable_maybe_uninit,zeroable_atomics,min_const_generics,wasm_simd,must_cast"
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 499b132..3a8d028 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "b19f8abfe34d6ab660e748086874c01e0212b738"
+ "sha1": "1e1a22e5eac57e5b473e0c427dc187008228ad46"
},
"path_in_vcs": ""
}
\ No newline at end of file
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 0807319..0fb1358 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -46,8 +46,8 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- # note: the mips targets are here so that we have big-endian coverage (both 32bit and 64bit)
- target: [i686-unknown-linux-gnu, mips-unknown-linux-gnu, mips64-unknown-linux-gnuabi64]
+      # we once had mips runners for big-endian coverage, but those got demoted to tier 3.
+ target: [i686-unknown-linux-gnu]
steps:
- uses: hecrj/setup-rust-action@v1
with:
diff --git a/Android.bp b/Android.bp
index 47ab7d8..7943b6d 100644
--- a/Android.bp
+++ b/Android.bp
@@ -6,7 +6,7 @@ rust_test {
host_supported: true,
crate_name: "array_tests",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/array_tests.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -27,7 +27,7 @@ rust_test {
host_supported: true,
crate_name: "cast_slice_tests",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/cast_slice_tests.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -48,7 +48,7 @@ rust_test {
host_supported: true,
crate_name: "checked_tests",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/checked_tests.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -69,7 +69,7 @@ rust_test {
host_supported: true,
crate_name: "doc_tests",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/doc_tests.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -90,7 +90,7 @@ rust_test {
host_supported: true,
crate_name: "offset_of_tests",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/offset_of_tests.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -111,7 +111,7 @@ rust_test {
host_supported: true,
crate_name: "std_tests",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/std_tests.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -132,7 +132,7 @@ rust_test {
host_supported: true,
crate_name: "transparent",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["tests/transparent.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -153,7 +153,7 @@ rust_library {
host_supported: true,
crate_name: "bytemuck",
cargo_env_compat: true,
- cargo_pkg_version: "1.13.1",
+ cargo_pkg_version: "1.14.1",
srcs: ["src/lib.rs"],
edition: "2018",
features: [
diff --git a/Cargo.toml b/Cargo.toml
index b7d55e8..826afad 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "bytemuck"
-version = "1.13.1"
+version = "1.14.1"
authors = ["Lokathor <zefria@gmail.com>"]
exclude = ["/pedantic.bat"]
description = "A crate for mucking around with piles of bytes."
@@ -31,6 +31,7 @@ repository = "https://github.com/Lokathor/bytemuck"
[package.metadata.docs.rs]
features = [
+ "nightly_docs",
"derive",
"extern_crate_alloc",
"extern_crate_std",
@@ -38,6 +39,7 @@ features = [
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
+ "must_cast",
]
[package.metadata.playground]
@@ -49,6 +51,7 @@ features = [
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
+ "must_cast",
]
[dependencies.bytemuck_derive]
@@ -57,10 +60,13 @@ optional = true
[features]
aarch64_simd = []
+align_offset = []
derive = ["bytemuck_derive"]
extern_crate_alloc = []
extern_crate_std = ["extern_crate_alloc"]
min_const_generics = []
+must_cast = []
+nightly_docs = []
nightly_portable_simd = []
nightly_stdsimd = []
unsound_ptr_pod_impl = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 9d6be11..fdd9e06 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,7 +1,7 @@
[package]
name = "bytemuck"
description = "A crate for mucking around with piles of bytes."
-version = "1.13.1"
+version = "1.14.1"
authors = ["Lokathor <zefria@gmail.com>"]
repository = "https://github.com/Lokathor/bytemuck"
readme = "README.md"
@@ -18,23 +18,39 @@ extern_crate_alloc = []
extern_crate_std = ["extern_crate_alloc"]
zeroable_maybe_uninit = []
zeroable_atomics = []
-min_const_generics = []
-wasm_simd = [] # Until >= 1.54.0 is MSRV this is an off-by-default feature.
-aarch64_simd = [] # Until >= 1.59.0 is MSRV this is an off-by-default feature.
-# Do not use if you can avoid it, because this is unsound.
+# All MSRV notes below are GUIDELINES, and future versions may require an
+# even higher MSRV for any feature.
+
+# MSRV 1.36: Use the `align_offset` method instead of casting to `usize` to
+# check pointer alignment; this *may* improve codegen in some cases (but it
+# has never been formally benchmarked!)
+align_offset = []
+
+min_const_generics = [] # MSRV 1.51: support arrays via min_const_generics
+
+wasm_simd = [] # MSRV 1.54.0: support wasm simd types
+aarch64_simd = [] # MSRV 1.59.0: support aarch64 simd types
+
+must_cast = [] # MSRV 1.57.0: support the `must` module.
+
+# Do not use if you can avoid it, because this is **unsound**!!!!
unsound_ptr_pod_impl = []
# NOT SEMVER SUPPORTED! TEMPORARY ONLY!
nightly_portable_simd = []
nightly_stdsimd = []
+# Improved documentation using the nightly toolchain
+nightly_docs = []
+
[dependencies]
bytemuck_derive = { version = "1.4", path = "derive", optional = true }
[package.metadata.docs.rs]
# Note(Lokathor): Don't use all-features or it would use `unsound_ptr_pod_impl` too.
features = [
+ "nightly_docs",
"derive",
"extern_crate_alloc",
"extern_crate_std",
@@ -42,6 +58,7 @@ features = [
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
+ "must_cast",
]
[package.metadata.playground]
@@ -54,4 +71,5 @@ features = [
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
+ "must_cast",
]
diff --git a/METADATA b/METADATA
index bd36741..15a6d60 100644
--- a/METADATA
+++ b/METADATA
@@ -1,20 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update external/rust/crates/bytemuck
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
name: "bytemuck"
description: "A crate for mucking around with piles of bytes."
third_party {
- url {
- type: HOMEPAGE
- value: "https://crates.io/crates/bytemuck"
- }
- url {
- type: ARCHIVE
- value: "https://static.crates.io/crates/bytemuck/bytemuck-1.13.1.crate"
- }
- version: "1.13.1"
- # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
license_type: NOTICE
last_upgrade_date {
- year: 2023
- month: 7
- day: 10
+ year: 2024
+ month: 1
+ day: 31
+ }
+ homepage: "https://crates.io/crates/bytemuck"
+ identifier {
+ type: "Archive"
+ value: "https://static.crates.io/crates/bytemuck/bytemuck-1.14.1.crate"
+ version: "1.14.1"
}
}
diff --git a/changelog.md b/changelog.md
index 0dff0c2..e890764 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,5 +1,23 @@
# `bytemuck` changelog
+## 1.14.1
+
+* Docs clarifications.
+
+## 1.14
+
+* `write_zeroes` and `fill_zeroes` functions: write zero bytes over all bytes
+  covered by the provided reference, to a single value (`write_zeroes`) or to
+  every element of a slice (`fill_zeroes`). If your type has padding, this
+  will even zero out the padding bytes.
+* `align_offset` feature: causes pointer alignment checks to use the
+ `align_offset` pointer method rather than as-casting the pointer to `usize`.
+ This *may* improve codegen, if the compiler would have otherwise thought that
+ the pointer address escaped. No formal benchmarks have been done either way.
+* `must_cast` feature: Adds `must_*` family of functions. These functions will
+ fail to compile if the cast requested can't be statically known to succeed.
+ The error messages can be kinda bad when this happens, but eliminating the
+ possibility of a runtime error might be worth it to you.
+
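A minimal usage sketch of the new zeroing helpers (the `Padded` struct below is
hypothetical, chosen only to show that padding bytes are covered):

```rust
use bytemuck::{fill_zeroes, write_zeroes, Zeroable};

// Hypothetical #[repr(C)] struct with one padding byte between `a` and `b`.
#[derive(Clone, Copy)]
#[repr(C)]
struct Padded {
  a: u8,
  b: u16,
}
// SAFETY: all-zero bytes are a valid `Padded` value.
unsafe impl Zeroable for Padded {}

fn main() {
  let mut one = Padded { a: 1, b: 2 };
  write_zeroes(&mut one); // zeroes `a`, `b`, and the padding byte

  let mut many = [Padded { a: 1, b: 2 }; 4];
  fill_zeroes(&mut many); // zeroes every byte covered by the slice
  assert_eq!(many[3].b, 0);
}
```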
## 1.13.1
* Remove the requirement for the *source* data type to be `AnyBitPattern` on
diff --git a/src/allocation.rs b/src/allocation.rs
index 0ab6625..cda5e60 100644
--- a/src/allocation.rs
+++ b/src/allocation.rs
@@ -19,6 +19,7 @@ use alloc::{
vec,
vec::Vec,
};
+use core::ops::{Deref, DerefMut};
/// As [`try_cast_box`](try_cast_box), but unwraps for you.
#[inline]
@@ -685,4 +686,118 @@ pub trait TransparentWrapperAlloc<Inner: ?Sized>:
}
}
}
-impl<I: ?Sized, T: TransparentWrapper<I>> TransparentWrapperAlloc<I> for T {}
+
+impl<I: ?Sized, T: ?Sized + TransparentWrapper<I>> TransparentWrapperAlloc<I>
+ for T
+{
+}
+
+/// As `Box<[u8]>`, but remembers the original alignment.
+pub struct BoxBytes {
+ // SAFETY: `ptr` is owned, was allocated with `layout`, and points to
+ // `layout.size()` initialized bytes.
+ ptr: NonNull<u8>,
+ layout: Layout,
+}
+
+impl Deref for BoxBytes {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: See type invariant.
+ unsafe {
+ core::slice::from_raw_parts(self.ptr.as_ptr(), self.layout.size())
+ }
+ }
+}
+
+impl DerefMut for BoxBytes {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: See type invariant.
+ unsafe {
+ core::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.layout.size())
+ }
+ }
+}
+
+impl Drop for BoxBytes {
+ fn drop(&mut self) {
+ // SAFETY: See type invariant.
+ unsafe { alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout) };
+ }
+}
+
+impl<T: NoUninit> From<Box<T>> for BoxBytes {
+ fn from(value: Box<T>) -> Self {
+ let layout = Layout::new::<T>();
+ let ptr = Box::into_raw(value) as *mut u8;
+ // SAFETY: Box::into_raw() returns a non-null pointer.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+ BoxBytes { ptr, layout }
+ }
+}
+
+/// Re-interprets `Box<T>` as `BoxBytes`.
+#[inline]
+pub fn box_bytes_of<T: NoUninit>(input: Box<T>) -> BoxBytes {
+ input.into()
+}
+
+/// Re-interprets `BoxBytes` as `Box<T>`.
+///
+/// ## Panics
+///
+/// This is [`try_from_box_bytes`] but will panic on error and the input will be
+/// dropped.
+#[inline]
+pub fn from_box_bytes<T: AnyBitPattern>(input: BoxBytes) -> Box<T> {
+ try_from_box_bytes(input).map_err(|(error, _)| error).unwrap()
+}
+
+/// Re-interprets `BoxBytes` as `Box<T>`.
+///
+/// ## Failure
+///
+/// * If the input isn't aligned for the new type
+/// * If the input's length isn't exactly the size of the new type
+#[inline]
+pub fn try_from_box_bytes<T: AnyBitPattern>(
+ input: BoxBytes,
+) -> Result<Box<T>, (PodCastError, BoxBytes)> {
+ let layout = Layout::new::<T>();
+ if input.layout.align() != layout.align() {
+ return Err((PodCastError::AlignmentMismatch, input));
+ } else if input.layout.size() != layout.size() {
+ return Err((PodCastError::SizeMismatch, input));
+ } else {
+ let (ptr, _) = input.into_raw_parts();
+ // SAFETY: See type invariant.
+ Ok(unsafe { Box::from_raw(ptr.as_ptr() as *mut T) })
+ }
+}
+
+impl BoxBytes {
+ /// Constructs a `BoxBytes` from its raw parts.
+ ///
+ /// # Safety
+ ///
+  /// The pointer must be owned, must have been allocated with the provided
+  /// layout, and must point to `layout.size()` initialized bytes.
+ pub unsafe fn from_raw_parts(ptr: NonNull<u8>, layout: Layout) -> Self {
+ BoxBytes { ptr, layout }
+ }
+
+ /// Deconstructs a `BoxBytes` into its raw parts.
+ ///
+  /// The returned pointer is owned, was allocated with the returned layout,
+  /// and points to `layout.size()` initialized bytes.
+ pub fn into_raw_parts(self) -> (NonNull<u8>, Layout) {
+ let me = ManuallyDrop::new(self);
+ (me.ptr, me.layout)
+ }
+
+ /// Returns the original layout.
+ pub fn layout(&self) -> Layout {
+ self.layout
+ }
+}
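A short round-trip sketch for the `BoxBytes` API added above (requires the
`extern_crate_alloc` feature; the values are arbitrary):

```rust
use bytemuck::{box_bytes_of, from_box_bytes, try_from_box_bytes, BoxBytes};

fn main() {
  // Box<u32> -> BoxBytes remembers the 4-byte alignment of u32.
  let bytes: BoxBytes = box_bytes_of(Box::new(1000_u32));
  assert_eq!(bytes.len(), 4);

  // Same size and alignment, so the cast back succeeds.
  let value: Box<u32> = from_box_bytes(bytes);
  assert_eq!(*value, 1000);

  // On a layout mismatch, the error hands the BoxBytes back instead of
  // dropping it.
  let bytes = box_bytes_of(Box::new(0_u64)); // align 8 != align 4
  assert!(try_from_box_bytes::<u32>(bytes).is_err());
}
```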
diff --git a/src/anybitpattern.rs b/src/anybitpattern.rs
index 9332d61..1792eb2 100644
--- a/src/anybitpattern.rs
+++ b/src/anybitpattern.rs
@@ -56,5 +56,6 @@ pub unsafe trait AnyBitPattern:
unsafe impl<T: Pod> AnyBitPattern for T {}
#[cfg(feature = "zeroable_maybe_uninit")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "zeroable_maybe_uninit")))]
unsafe impl<T> AnyBitPattern for core::mem::MaybeUninit<T> where T: AnyBitPattern
{}
diff --git a/src/checked.rs b/src/checked.rs
index 2d97340..1105528 100644
--- a/src/checked.rs
+++ b/src/checked.rs
@@ -170,6 +170,7 @@ unsafe impl CheckedBitPattern for bool {
}
}
+// Rust 1.70.0 documents that NonZero[int] has the same layout as [int].
macro_rules! impl_checked_for_nonzero {
($($nonzero:ty: $primitive:ty),* $(,)?) => {
$(
@@ -178,14 +179,7 @@ macro_rules! impl_checked_for_nonzero {
#[inline]
fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
- // Note(zachs18): The size and alignment check are almost certainly
- // not necessary, but Rust currently doesn't explicitly document that
- // NonZero[int] has the same layout as [int], so we check it to be safe.
- // In a const to reduce debug-profile overhead.
- const LAYOUT_SAME: bool =
- core::mem::size_of::<$nonzero>() == core::mem::size_of::<$primitive>()
- && core::mem::align_of::<$nonzero>() == core::mem::align_of::<$primitive>();
- LAYOUT_SAME && *bits != 0
+ *bits != 0
}
}
)*
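The simplified check keeps the same observable behavior; a small sketch using
the crate's checked casts:

```rust
use bytemuck::checked::{self, CheckedCastError};
use core::num::NonZeroU32;

fn main() {
  // A nonzero bit pattern is accepted...
  let ok: NonZeroU32 = checked::cast(7_u32);
  assert_eq!(ok.get(), 7);

  // ...and zero is still rejected at runtime.
  let err = checked::try_cast::<u32, NonZeroU32>(0_u32);
  assert_eq!(err, Err(CheckedCastError::InvalidBitPattern));
}
```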
@@ -230,6 +224,7 @@ impl core::fmt::Display for CheckedCastError {
}
}
#[cfg(feature = "extern_crate_std")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_std")))]
impl std::error::Error for CheckedCastError {}
impl From<crate::PodCastError> for CheckedCastError {
@@ -249,7 +244,7 @@ impl From<crate::PodCastError> for CheckedCastError {
pub fn try_from_bytes<T: CheckedBitPattern>(
s: &[u8],
) -> Result<&T, CheckedCastError> {
- let pod = unsafe { internal::try_from_bytes(s) }?;
+ let pod = crate::try_from_bytes(s)?;
if <T as CheckedBitPattern>::is_valid_bit_pattern(pod) {
Ok(unsafe { &*(pod as *const <T as CheckedBitPattern>::Bits as *const T) })
@@ -287,7 +282,7 @@ pub fn try_from_bytes_mut<T: CheckedBitPattern + NoUninit>(
pub fn try_pod_read_unaligned<T: CheckedBitPattern>(
bytes: &[u8],
) -> Result<T, CheckedCastError> {
- let pod = unsafe { internal::try_pod_read_unaligned(bytes) }?;
+ let pod = crate::try_pod_read_unaligned(bytes)?;
if <T as CheckedBitPattern>::is_valid_bit_pattern(&pod) {
Ok(unsafe { transmute!(pod) })
@@ -311,7 +306,7 @@ pub fn try_pod_read_unaligned<T: CheckedBitPattern>(
pub fn try_cast<A: NoUninit, B: CheckedBitPattern>(
a: A,
) -> Result<B, CheckedCastError> {
- let pod = unsafe { internal::try_cast(a) }?;
+ let pod = crate::try_cast(a)?;
if <B as CheckedBitPattern>::is_valid_bit_pattern(&pod) {
Ok(unsafe { transmute!(pod) })
@@ -331,7 +326,7 @@ pub fn try_cast<A: NoUninit, B: CheckedBitPattern>(
pub fn try_cast_ref<A: NoUninit, B: CheckedBitPattern>(
a: &A,
) -> Result<&B, CheckedCastError> {
- let pod = unsafe { internal::try_cast_ref(a) }?;
+ let pod = crate::try_cast_ref(a)?;
if <B as CheckedBitPattern>::is_valid_bit_pattern(pod) {
Ok(unsafe { &*(pod as *const <B as CheckedBitPattern>::Bits as *const B) })
@@ -380,7 +375,7 @@ pub fn try_cast_mut<
pub fn try_cast_slice<A: NoUninit, B: CheckedBitPattern>(
a: &[A],
) -> Result<&[B], CheckedCastError> {
- let pod = unsafe { internal::try_cast_slice(a) }?;
+ let pod = crate::try_cast_slice(a)?;
if pod.iter().all(|pod| <B as CheckedBitPattern>::is_valid_bit_pattern(pod)) {
Ok(unsafe {
@@ -455,7 +450,7 @@ pub fn pod_read_unaligned<T: CheckedBitPattern>(bytes: &[u8]) -> T {
///
/// ## Panics
///
-/// * This is like [`try_cast`](try_cast), but will panic on a size mismatch.
+/// * This is like [`try_cast`], but will panic on a size mismatch.
#[inline]
pub fn cast<A: NoUninit, B: CheckedBitPattern>(a: A) -> B {
match try_cast(a) {
diff --git a/src/internal.rs b/src/internal.rs
index 6068d0e..9f36279 100644
--- a/src/internal.rs
+++ b/src/internal.rs
@@ -130,6 +130,26 @@ pub(crate) unsafe fn pod_read_unaligned<T: Copy>(bytes: &[u8]) -> T {
}
}
+/// Checks if `ptr` is aligned to an `align` memory boundary.
+///
+/// ## Panics
+/// * If `align` is not a power of two. This includes when `align` is zero.
+#[inline]
+pub(crate) fn is_aligned_to(ptr: *const (), align: usize) -> bool {
+ #[cfg(feature = "align_offset")]
+ {
+    // This is, in a way, better than `ptr as usize % align == 0`,
+ // because casting a pointer to an integer has the side effect that it
+ // exposes the pointer's provenance, which may theoretically inhibit
+ // some compiler optimizations.
+ ptr.align_offset(align) == 0
+ }
+ #[cfg(not(feature = "align_offset"))]
+ {
+ ((ptr as usize) % align) == 0
+ }
+}
+
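Both branches agree for any concrete pointer; a tiny sketch of the check the
feature toggles between (`is_aligned_to` itself is crate-private, so this uses
the underlying operations directly):

```rust
fn main() {
  let x = 0_u32;
  let p = &x as *const u32 as *const ();
  let align = core::mem::align_of::<u32>();
  // A pointer derived from `&u32` is always at least 4-aligned, so both
  // checks pass; they differ only in whether the address is exposed.
  assert!(p.align_offset(align) == 0);
  assert!((p as usize) % align == 0);
}
```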
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Failure
@@ -142,7 +162,7 @@ pub(crate) unsafe fn try_from_bytes<T: Copy>(
) -> Result<&T, PodCastError> {
if s.len() != size_of::<T>() {
Err(PodCastError::SizeMismatch)
- } else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
+ } else if !is_aligned_to(s.as_ptr() as *const (), align_of::<T>()) {
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { &*(s.as_ptr() as *const T) })
@@ -161,7 +181,7 @@ pub(crate) unsafe fn try_from_bytes_mut<T: Copy>(
) -> Result<&mut T, PodCastError> {
if s.len() != size_of::<T>() {
Err(PodCastError::SizeMismatch)
- } else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
+ } else if !is_aligned_to(s.as_ptr() as *const (), align_of::<T>()) {
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { &mut *(s.as_mut_ptr() as *mut T) })
@@ -284,7 +304,7 @@ pub(crate) unsafe fn try_cast_ref<A: Copy, B: Copy>(
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
- && (a as *const A as usize) % align_of::<B>() != 0
+ && !is_aligned_to(a as *const A as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
@@ -304,7 +324,7 @@ pub(crate) unsafe fn try_cast_mut<A: Copy, B: Copy>(
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
- && (a as *mut A as usize) % align_of::<B>() != 0
+ && !is_aligned_to(a as *const A as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
@@ -336,7 +356,7 @@ pub(crate) unsafe fn try_cast_slice<A: Copy, B: Copy>(
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
- && (a.as_ptr() as usize) % align_of::<B>() != 0
+ && !is_aligned_to(a.as_ptr() as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
@@ -362,7 +382,7 @@ pub(crate) unsafe fn try_cast_slice_mut<A: Copy, B: Copy>(
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
- && (a.as_mut_ptr() as usize) % align_of::<B>() != 0
+ && !is_aligned_to(a.as_ptr() as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
diff --git a/src/lib.rs b/src/lib.rs
index d1bbca4..aecc9c6 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,6 +2,7 @@
#![warn(missing_docs)]
#![allow(clippy::match_like_matches_macro)]
#![allow(clippy::uninlined_format_args)]
+#![cfg_attr(feature = "nightly_docs", feature(doc_cfg))]
#![cfg_attr(feature = "nightly_portable_simd", feature(portable_simd))]
#![cfg_attr(feature = "nightly_stdsimd", feature(stdsimd))]
@@ -18,28 +19,64 @@
//! * `&[T]` uses [`cast_slice`]
//! * `&mut [T]` uses [`cast_slice_mut`]
//!
-//! Some casts will never fail (eg: `cast::<u32, f32>` always works), other
-//! casts might fail (eg: `cast_ref::<[u8; 4], u32>` will fail if the reference
-//! isn't already aligned to 4). Each casting function has a "try" version which
-//! will return a `Result`, and the "normal" version which will simply panic on
-//! invalid input.
+//! Depending on the function, the [`NoUninit`] and/or [`AnyBitPattern`] traits
+//! are used to maintain memory safety.
+//!
+//! **Historical Note:** When the crate first started the [`Pod`] trait was used
+//! instead, and so you may hear people refer to that, but it has the strongest
+//! requirements and people eventually wanted the more fine-grained system, so
+//! here we are. All types that impl `Pod` have a blanket impl to also support
+//! `NoUninit` and `AnyBitPattern`. The traits unfortunately do not have a
+//! perfectly clean hierarchy for semver reasons.
+//!
+//! ## Failures
+//!
+//! Some casts will never fail, and other casts might fail.
+//!
+//! * `cast::<u32, f32>` always works (it's equivalent to [`f32::from_bits`]).
+//! * `cast_ref::<[u8; 4], u32>` might fail if the specific array reference
+//! given at runtime doesn't have alignment 4.
+//!
+//! In addition to the "normal" forms of each function, which will panic on
+//! invalid input, there are also `try_` versions which will return a `Result`.
+//!
+//! If you would like to statically ensure that a cast will work at runtime you
+//! can use the `must_cast` crate feature and the `must_` casting functions. A
+//! "must cast" that can't be statically known to be valid will cause a
+//! compilation error (and sometimes a very hard to read compilation error).
//!
//! ## Using Your Own Types
//!
-//! All the functions here are guarded by the [`Pod`] trait, which is a
+//! All the functions listed above are guarded by the [`Pod`] trait, which is a
//! sub-trait of the [`Zeroable`] trait.
//!
-//! If you're very sure that your type is eligible, you can implement those
-//! traits for your type and then they'll have full casting support. However,
-//! these traits are `unsafe`, and you should carefully read the requirements
-//! before adding the them to your own types.
+//! If you enable the crate's `derive` feature then these traits can be derived
+//! on your own types. The derive macros will perform the necessary checks on
+//! your type declaration, and trigger an error if your type does not qualify.
+//!
+//! The derive macros might not cover all edge cases, and sometimes they will
+//! error when actually everything is fine. As a last resort you can impl these
+//! traits manually. However, these traits are `unsafe`, and you should
+//! carefully read the requirements before using a manual implementation.
//!
-//! ## Features
+//! ## Cargo Features
//!
-//! * This crate is core only by default, but if you're using Rust 1.36 or later
-//! you can enable the `extern_crate_alloc` cargo feature for some additional
-//! methods related to `Box` and `Vec`. Note that the `docs.rs` documentation
-//! is always built with `extern_crate_alloc` cargo feature enabled.
+//! The crate supports Rust 1.34 when no features are enabled, and so there are
+//! cargo features for things that you might consider "obvious".
+//!
+//! The cargo features **do not** promise any particular MSRV, and they may
+//! increase their MSRV in new versions.
+//!
+//! * `derive`: Provide derive macros for the various traits.
+//! * `extern_crate_alloc`: Provide utilities for `alloc`-related types such as
+//!   `Box` and `Vec`.
+//! * `zeroable_maybe_uninit` and `zeroable_atomics`: Provide more [`Zeroable`]
+//! impls.
+//! * `wasm_simd` and `aarch64_simd`: Support more SIMD types.
+//! * `min_const_generics`: Provides appropriate impls for arrays of all lengths
+//! instead of just for a select list of array lengths.
+//! * `must_cast`: Provides the `must_` functions, which will compile error if
+//! the requested cast can't be statically verified.
#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
use core::arch::aarch64;
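A brief sketch of the three tiers described in the doc comment above
(infallible, fallible, and statically checked):

```rust
use bytemuck::{cast, try_cast, PodCastError};

fn main() {
  // Always works: same size, and any bit pattern is a valid f32.
  let f: f32 = cast(1_u32);
  assert_eq!(f, f32::from_bits(1));

  // Fallible: a size mismatch is reported as an Err instead of a panic.
  let r: Result<u16, PodCastError> = try_cast(1_u32);
  assert_eq!(r, Err(PodCastError::SizeMismatch));

  // With the `must_cast` feature, the same size mismatch would instead fail
  // to compile:
  // let bytes: [u8; 3] = bytemuck::must_cast(1_u16);
}
```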
@@ -78,10 +115,13 @@ macro_rules! transmute {
/// with relevant cargo features enabled.
#[allow(unused)]
macro_rules! impl_unsafe_marker_for_simd {
- (unsafe impl $trait:ident for $platform:ident :: {}) => {};
- (unsafe impl $trait:ident for $platform:ident :: { $first_type:ident $(, $types:ident)* $(,)? }) => {
+ ($(#[cfg($cfg_predicate:meta)])? unsafe impl $trait:ident for $platform:ident :: {}) => {};
+ ($(#[cfg($cfg_predicate:meta)])? unsafe impl $trait:ident for $platform:ident :: { $first_type:ident $(, $types:ident)* $(,)? }) => {
+ $( #[cfg($cfg_predicate)] )?
+ $( #[cfg_attr(feature = "nightly_docs", doc(cfg($cfg_predicate)))] )?
unsafe impl $trait for $platform::$first_type {}
- impl_unsafe_marker_for_simd!(unsafe impl $trait for $platform::{ $( $types ),* });
+ $( #[cfg($cfg_predicate)] )? // To prevent recursion errors if nothing is going to be expanded anyway.
+ impl_unsafe_marker_for_simd!($( #[cfg($cfg_predicate)] )? unsafe impl $trait for $platform::{ $( $types ),* });
};
}
@@ -91,6 +131,7 @@ extern crate std;
#[cfg(feature = "extern_crate_alloc")]
extern crate alloc;
#[cfg(feature = "extern_crate_alloc")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_alloc")))]
pub mod allocation;
#[cfg(feature = "extern_crate_alloc")]
pub use allocation::*;
@@ -113,6 +154,12 @@ pub use pod::*;
mod pod_in_option;
pub use pod_in_option::*;
+#[cfg(feature = "must_cast")]
+mod must;
+#[cfg(feature = "must_cast")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "must_cast")))]
+pub use must::*;
+
mod no_uninit;
pub use no_uninit::*;
@@ -120,12 +167,14 @@ mod contiguous;
pub use contiguous::*;
mod offset_of;
-pub use offset_of::*;
+// ^ no import, the module only has a macro_rules, which are cursed and don't
+// follow normal import/export rules.
mod transparent;
pub use transparent::*;
#[cfg(feature = "derive")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "derive")))]
pub use bytemuck_derive::{
AnyBitPattern, ByteEq, ByteHash, CheckedBitPattern, Contiguous, NoUninit,
Pod, TransparentWrapper, Zeroable,
@@ -160,6 +209,7 @@ impl core::fmt::Display for PodCastError {
}
}
#[cfg(feature = "extern_crate_std")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_std")))]
impl std::error::Error for PodCastError {}
/// Re-interprets `&T` as `&[u8]`.
@@ -184,7 +234,7 @@ pub fn bytes_of_mut<T: NoUninit + AnyBitPattern>(t: &mut T) -> &mut [u8] {
///
/// ## Panics
///
-/// This is [`try_from_bytes`] but will panic on error.
+/// This is like [`try_from_bytes`] but will panic on error.
#[inline]
pub fn from_bytes<T: AnyBitPattern>(s: &[u8]) -> &T {
unsafe { internal::from_bytes(s) }
@@ -194,7 +244,7 @@ pub fn from_bytes<T: AnyBitPattern>(s: &[u8]) -> &T {
///
/// ## Panics
///
-/// This is [`try_from_bytes_mut`] but will panic on error.
+/// This is like [`try_from_bytes_mut`] but will panic on error.
#[inline]
pub fn from_bytes_mut<T: NoUninit + AnyBitPattern>(s: &mut [u8]) -> &mut T {
unsafe { internal::from_bytes_mut(s) }
@@ -202,6 +252,9 @@ pub fn from_bytes_mut<T: NoUninit + AnyBitPattern>(s: &mut [u8]) -> &mut T {
/// Reads from the bytes as if they were a `T`.
///
+/// Unlike [`from_bytes`], the slice doesn't need to respect the alignment of
+/// `T`; only the sizes must match.
+///
/// ## Failure
/// * If the `bytes` length is not equal to `size_of::<T>()`.
#[inline]
@@ -213,6 +266,9 @@ pub fn try_pod_read_unaligned<T: AnyBitPattern>(
/// Reads the slice into a `T` value.
///
+/// Unlike [`from_bytes`], the slice doesn't need to respect the alignment of
+/// `T`; only the sizes must match.
+///
/// ## Panics
/// * This is like `try_pod_read_unaligned` but will panic on failure.
#[inline]
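A small sketch of the unaligned read (only the length of the slice has to
match; the starting offset may be anything):

```rust
use bytemuck::pod_read_unaligned;

fn main() {
  let bytes = [0_u8, 1, 2, 3, 4];
  // `&bytes[1..5]` is generally not 4-aligned, but that's fine here:
  // only `len == size_of::<u32>()` is required.
  let v: u32 = pod_read_unaligned(&bytes[1..5]);
  assert_eq!(v, u32::from_ne_bytes([1, 2, 3, 4]));
}
```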
@@ -248,7 +304,7 @@ pub fn try_from_bytes_mut<T: NoUninit + AnyBitPattern>(
///
/// ## Panics
///
-/// * This is like [`try_cast`](try_cast), but will panic on a size mismatch.
+/// * This is like [`try_cast`], but will panic on a size mismatch.
#[inline]
pub fn cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
unsafe { internal::cast(a) }
@@ -301,7 +357,8 @@ pub fn cast_slice_mut<
unsafe { internal::cast_slice_mut(a) }
}
-/// As `align_to`, but safe because of the [`Pod`] bound.
+/// As [`align_to`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to),
+/// but safe because of the [`Pod`] bound.
#[inline]
pub fn pod_align_to<T: NoUninit, U: AnyBitPattern>(
vals: &[T],
@@ -309,7 +366,8 @@ pub fn pod_align_to<T: NoUninit, U: AnyBitPattern>(
unsafe { vals.align_to::<U>() }
}
-/// As `align_to_mut`, but safe because of the [`Pod`] bound.
+/// As [`align_to_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to_mut),
+/// but safe because of the [`Pod`] bound.
#[inline]
pub fn pod_align_to_mut<
T: NoUninit + AnyBitPattern,
@@ -398,3 +456,47 @@ pub fn try_cast_slice_mut<
) -> Result<&mut [B], PodCastError> {
unsafe { internal::try_cast_slice_mut(a) }
}
+
+/// Fill all bytes of `target` with zeroes (see [`Zeroable`]).
+///
+/// This is similar to `*target = Zeroable::zeroed()`, but guarantees that any
+/// padding bytes in `target` are zeroed as well.
+///
+/// See also [`fill_zeroes`], if you have a slice rather than a single value.
+#[inline]
+pub fn write_zeroes<T: Zeroable>(target: &mut T) {
+ struct EnsureZeroWrite<T>(*mut T);
+ impl<T> Drop for EnsureZeroWrite<T> {
+ #[inline(always)]
+ fn drop(&mut self) {
+ unsafe {
+ core::ptr::write_bytes(self.0, 0u8, 1);
+ }
+ }
+ }
+ unsafe {
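+    // Drop the old value in place first; if that drop panics, the guard's
+    // own `Drop` impl still runs and zeroes every byte of `*target`.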
+ let guard = EnsureZeroWrite(target);
+ core::ptr::drop_in_place(guard.0);
+ drop(guard);
+ }
+}
+
+/// Fill all bytes of `slice` with zeroes (see [`Zeroable`]).
+///
+/// This is similar to `slice.fill(Zeroable::zeroed())`, but guarantees that any
+/// padding bytes in `slice` are zeroed as well.
+///
+/// See also [`write_zeroes`], which zeroes all bytes of a single value rather
+/// than a slice.
+#[inline]
+pub fn fill_zeroes<T: Zeroable>(slice: &mut [T]) {
+ if core::mem::needs_drop::<T>() {
+    // If `T` needs to be dropped then we have to do this one item at a time,
+    // in case one of the intermediate drops panics.
+ slice.iter_mut().for_each(write_zeroes);
+ } else {
+    // Otherwise we can be really fast and just fill everything with zeros.
+ let len = core::mem::size_of_val::<[T]>(slice);
+ unsafe { core::ptr::write_bytes(slice.as_mut_ptr() as *mut u8, 0u8, len) }
+ }
+}
diff --git a/src/must.rs b/src/must.rs
new file mode 100644
index 0000000..b3f1701
--- /dev/null
+++ b/src/must.rs
@@ -0,0 +1,203 @@
+#![allow(clippy::module_name_repetitions)]
+#![allow(clippy::let_unit_value)]
+#![allow(clippy::let_underscore_untyped)]
+#![allow(clippy::ptr_as_ptr)]
+
+use crate::{AnyBitPattern, NoUninit};
+use core::mem::{align_of, size_of};
+
+struct Cast<A, B>((A, B));
+impl<A, B> Cast<A, B> {
+ const ASSERT_ALIGN_GREATER_THAN_EQUAL: () =
+ assert!(align_of::<A>() >= align_of::<B>());
+ const ASSERT_SIZE_EQUAL: () = assert!(size_of::<A>() == size_of::<B>());
+ const ASSERT_SIZE_MULTIPLE_OF: () = assert!(
+ (size_of::<A>() == 0) == (size_of::<B>() == 0)
+ && (size_of::<A>() % size_of::<B>() == 0)
+ );
+}
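+
+// Note: naming one of these associated consts (as the `must_*` functions
+// below do via `let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;`) forces the
+// `assert!` to be evaluated during monomorphization, so a failed check
+// becomes a compile-time error rather than a runtime panic.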
+
+// Workaround for https://github.com/rust-lang/miri/issues/2423.
+// Miri currently doesn't see post-monomorphization errors until runtime,
+// so `compile_fail` tests relying on post-monomorphization errors don't
+// actually fail. Instead use `should_panic` under miri as a workaround.
+#[cfg(miri)]
+macro_rules! post_mono_compile_fail_doctest {
+ () => {
+ "```should_panic"
+ };
+}
+#[cfg(not(miri))]
+macro_rules! post_mono_compile_fail_doctest {
+ () => {
+ "```compile_fail,E0080"
+ };
+}
+
+/// Cast `A` into `B` if infallible, or fail to compile.
+///
+/// Note that for this particular type of cast, alignment isn't a factor. The
+/// input value is semantically copied into the function and then returned to a
+/// new memory location, which will have whatever alignment the output type
+/// requires.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails to compile.
+///
+/// ## Examples
+/// ```
+/// // compiles:
+/// let bytes: [u8; 2] = bytemuck::must_cast(12_u16);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// // fails to compile (size mismatch):
+/// let bytes : [u8; 3] = bytemuck::must_cast(12_u16);
+/// ```
+#[inline]
+pub fn must_cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
+ let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
+ unsafe { transmute!(a) }
+}
+
+/// Convert `&A` into `&B` if infallible, or fail to compile.
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement.
+/// * If the source type and target type aren't the same size.
+///
+/// ## Examples
+/// ```
+/// // compiles:
+/// let bytes: &[u8; 2] = bytemuck::must_cast_ref(&12_u16);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// // fails to compile (size mismatch):
+/// let bytes : &[u8; 3] = bytemuck::must_cast_ref(&12_u16);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// // fails to compile (alignment requirements increased):
+/// let bytes : &u16 = bytemuck::must_cast_ref(&[1u8, 2u8]);
+/// ```
+#[inline]
+pub fn must_cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
+ let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ unsafe { &*(a as *const A as *const B) }
+}
+
+/// Convert a `&mut A` into `&mut B` if infallible, or fail to compile.
+///
+/// As [`must_cast_ref`], but `mut`.
+///
+/// ## Examples
+/// ```
+/// let mut i = 12_u16;
+/// // compiles:
+/// let bytes: &mut [u8; 2] = bytemuck::must_cast_mut(&mut i);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut bytes: &mut [u8; 2] = &mut [1, 2];
+/// // fails to compile (alignment requirements increased):
+/// let i : &mut u16 = bytemuck::must_cast_mut(bytes);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut i = 12_u16;
+/// // fails to compile (size mismatch):
+/// let bytes : &mut [u8; 3] = bytemuck::must_cast_mut(&mut i);
+/// ```
+#[inline]
+pub fn must_cast_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut A,
+) -> &mut B {
+ let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ unsafe { &mut *(a as *mut A as *mut B) }
+}
+
+/// Convert `&[A]` into `&[B]` (possibly with a change in length) if
+/// infallible, or fail to compile.
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement.
+/// * If the target element type doesn't evenly fit into the current element
+/// type (eg: 3 `u16` values is 1.5 `u32` values, so that's a failure).
+/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
+///
+/// ## Examples
+/// ```
+/// let indices: &[u16] = &[1, 2, 3];
+/// // compiles:
+/// let bytes: &[u8] = bytemuck::must_cast_slice(indices);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let bytes : &[u8] = &[1, 0, 2, 0, 3, 0];
+/// // fails to compile (bytes.len() might not be a multiple of 2):
+/// let byte_pairs : &[[u8; 2]] = bytemuck::must_cast_slice(bytes);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let byte_pairs : &[[u8; 2]] = &[[1, 0], [2, 0], [3, 0]];
+/// // fails to compile (alignment requirements increased):
+/// let indices : &[u16] = bytemuck::must_cast_slice(byte_pairs);
+/// ```
+#[inline]
+pub fn must_cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
+ let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ let new_len = if size_of::<A>() == size_of::<B>() {
+ a.len()
+ } else {
+ a.len() * (size_of::<A>() / size_of::<B>())
+ };
+ unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) }
+}
+
+/// Convert `&mut [A]` into `&mut [B]` (possibly with a change in length) if
+/// infallible, or fail to compile.
+///
+/// As [`must_cast_slice`], but `&mut`.
+///
+/// ## Examples
+/// ```
+/// let mut indices = [1, 2, 3];
+/// let indices: &mut [u16] = &mut indices;
+/// // compiles:
+/// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(indices);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut bytes = [1, 0, 2, 0, 3, 0];
+/// # let bytes : &mut [u8] = &mut bytes[..];
+/// // fails to compile (bytes.len() might not be a multiple of 2):
+/// let byte_pairs : &mut [[u8; 2]] = bytemuck::must_cast_slice_mut(bytes);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut byte_pairs = [[1, 0], [2, 0], [3, 0]];
+/// # let byte_pairs : &mut [[u8; 2]] = &mut byte_pairs[..];
+/// // fails to compile (alignment requirements increased):
+/// let indices : &mut [u16] = bytemuck::must_cast_slice_mut(byte_pairs);
+/// ```
+#[inline]
+pub fn must_cast_slice_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut [A],
+) -> &mut [B] {
+ let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ let new_len = if size_of::<A>() == size_of::<B>() {
+ a.len()
+ } else {
+ a.len() * (size_of::<A>() / size_of::<B>())
+ };
+ unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) }
+}
diff --git a/src/pod.rs b/src/pod.rs
index 2ea7f4b..58b43f8 100644
--- a/src/pod.rs
+++ b/src/pod.rs
@@ -54,19 +54,32 @@ unsafe impl Pod for f64 {}
unsafe impl<T: Pod> Pod for Wrapping<T> {}
#[cfg(feature = "unsound_ptr_pod_impl")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "unsound_ptr_pod_impl"))
+)]
unsafe impl<T: 'static> Pod for *mut T {}
#[cfg(feature = "unsound_ptr_pod_impl")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "unsound_ptr_pod_impl"))
+)]
unsafe impl<T: 'static> Pod for *const T {}
#[cfg(feature = "unsound_ptr_pod_impl")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "unsound_ptr_pod_impl"))
+)]
unsafe impl<T: 'static> PodInOption for NonNull<T> {}
-unsafe impl<T: Pod> Pod for PhantomData<T> {}
+unsafe impl<T: ?Sized + 'static> Pod for PhantomData<T> {}
unsafe impl Pod for PhantomPinned {}
unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
// Note(Lokathor): MaybeUninit can NEVER be Pod.
#[cfg(feature = "min_const_generics")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "min_const_generics")))]
unsafe impl<T, const N: usize> Pod for [T; N] where T: Pod {}
#[cfg(not(feature = "min_const_generics"))]
@@ -76,54 +89,58 @@ impl_unsafe_marker_for_array!(
512, 1024, 2048, 4096
);
-#[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Pod for wasm32::{v128}
+ #[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
+ unsafe impl Pod for wasm32::{v128}
);
-#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Pod for aarch64::{
- float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
- float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
- float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
- float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
- int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
- int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
- int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
- int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
- int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
- poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
- poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
- poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
- poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
- uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
- uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
- uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
- uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
- uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
- uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
- uint8x8x3_t, uint8x8x4_t,
- }
+ #[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
+ unsafe impl Pod for aarch64::{
+ float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
+ float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
+ float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
+ float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
+ int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
+ int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
+ int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
+ int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
+ int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
+ poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
+ poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
+ poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
+ poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
+ uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
+ uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
+ uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
+ uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
+ uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
+ uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
+ uint8x8x3_t, uint8x8x4_t,
+ }
);
-#[cfg(target_arch = "x86")]
impl_unsafe_marker_for_simd!(
- unsafe impl Pod for x86::{
- __m128i, __m128, __m128d,
- __m256i, __m256, __m256d,
- }
+ #[cfg(target_arch = "x86")]
+ unsafe impl Pod for x86::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
);
-#[cfg(target_arch = "x86_64")]
impl_unsafe_marker_for_simd!(
- unsafe impl Pod for x86_64::{
- __m128i, __m128, __m128d,
- __m256i, __m256, __m256d,
- }
+ #[cfg(target_arch = "x86_64")]
+ unsafe impl Pod for x86_64::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
);
#[cfg(feature = "nightly_portable_simd")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "nightly_portable_simd"))
+)]
unsafe impl<T, const N: usize> Pod for core::simd::Simd<T, N>
where
T: core::simd::SimdElement + Pod,
@@ -131,18 +148,18 @@ where
{
}
-#[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Pod for x86::{
- __m128bh, __m256bh, __m512,
- __m512bh, __m512d, __m512i,
- }
+ #[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
+ unsafe impl Pod for x86::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
);
-#[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Pod for x86_64::{
- __m128bh, __m256bh, __m512,
- __m512bh, __m512d, __m512i,
- }
+ #[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
+ unsafe impl Pod for x86_64::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
);
diff --git a/src/zeroable.rs b/src/zeroable.rs
index 5f19b98..5a3272a 100644
--- a/src/zeroable.rs
+++ b/src/zeroable.rs
@@ -19,8 +19,8 @@ use super::*;
///
/// * `MaybeUninit<T>` was not available in 1.34.0, but is available under the
/// `zeroable_maybe_uninit` feature flag.
-/// * `Atomic*` types require Rust 1.60.0 or later to work on certain platforms, but is available
-/// under the `zeroable_atomics` feature flag.
+/// * `Atomic*` types require Rust 1.60.0 or later to work on certain platforms,
+///   but are available under the `zeroable_atomics` feature flag.
/// * `[T; N]` for arbitrary `N` requires the `min_const_generics` feature flag.
pub unsafe trait Zeroable: Sized {
/// Calls [`zeroed`](core::mem::zeroed).
@@ -71,6 +71,7 @@ unsafe impl<T: Zeroable> Zeroable for core::cell::UnsafeCell<T> {}
unsafe impl<T: Zeroable> Zeroable for core::cell::Cell<T> {}
#[cfg(feature = "zeroable_atomics")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "zeroable_atomics")))]
mod atomic_impls {
use super::Zeroable;
@@ -106,6 +107,10 @@ mod atomic_impls {
}
#[cfg(feature = "zeroable_maybe_uninit")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "zeroable_maybe_uninit"))
+)]
unsafe impl<T> Zeroable for core::mem::MaybeUninit<T> {}
unsafe impl<A: Zeroable> Zeroable for (A,) {}
@@ -154,6 +159,7 @@ unsafe impl<
}
#[cfg(feature = "min_const_generics")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "min_const_generics")))]
unsafe impl<T, const N: usize> Zeroable for [T; N] where T: Zeroable {}
#[cfg(not(feature = "min_const_generics"))]
@@ -163,47 +169,47 @@ impl_unsafe_marker_for_array!(
512, 1024, 2048, 4096
);
-#[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Zeroable for wasm32::{v128}
+ #[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
+ unsafe impl Zeroable for wasm32::{v128}
);
-#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Zeroable for aarch64::{
- float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
- float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
- float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
- float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
- int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
- int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
- int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
- int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
- int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
- poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
- poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
- poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
- poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
- uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
- uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
- uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
- uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
- uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
- uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
- uint8x8x3_t, uint8x8x4_t,
- }
+ #[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
+ unsafe impl Zeroable for aarch64::{
+ float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
+ float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
+ float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
+ float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
+ int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
+ int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
+ int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
+ int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
+ int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
+ poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
+ poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
+ poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
+ poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
+ uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
+ uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
+ uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
+ uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
+ uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
+ uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
+ uint8x8x3_t, uint8x8x4_t,
+ }
);
-#[cfg(target_arch = "x86")]
impl_unsafe_marker_for_simd!(
- unsafe impl Zeroable for x86::{
- __m128i, __m128, __m128d,
- __m256i, __m256, __m256d,
- }
+ #[cfg(target_arch = "x86")]
+ unsafe impl Zeroable for x86::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
);
-#[cfg(target_arch = "x86_64")]
impl_unsafe_marker_for_simd!(
+ #[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::{
__m128i, __m128, __m128d,
__m256i, __m256, __m256d,
@@ -211,6 +217,10 @@ impl_unsafe_marker_for_simd!(
);
#[cfg(feature = "nightly_portable_simd")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "nightly_portable_simd"))
+)]
unsafe impl<T, const N: usize> Zeroable for core::simd::Simd<T, N>
where
T: core::simd::SimdElement + Zeroable,
@@ -218,18 +228,18 @@ where
{
}
-#[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Zeroable for x86::{
- __m128bh, __m256bh, __m512,
- __m512bh, __m512d, __m512i,
- }
+ #[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
+ unsafe impl Zeroable for x86::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
);
-#[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
impl_unsafe_marker_for_simd!(
- unsafe impl Zeroable for x86_64::{
- __m128bh, __m256bh, __m512,
- __m512bh, __m512d, __m512i,
- }
+ #[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
+ unsafe impl Zeroable for x86_64::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
);
diff --git a/src/zeroable_in_option.rs b/src/zeroable_in_option.rs
index 1ee2c7b..13737f4 100644
--- a/src/zeroable_in_option.rs
+++ b/src/zeroable_in_option.rs
@@ -31,4 +31,5 @@ unsafe impl<T: ?Sized> ZeroableInOption for &'_ T {}
unsafe impl<T: ?Sized> ZeroableInOption for &'_ mut T {}
#[cfg(feature = "extern_crate_alloc")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_alloc")))]
unsafe impl<T: ?Sized> ZeroableInOption for alloc::boxed::Box<T> {}
diff --git a/tests/cast_slice_tests.rs b/tests/cast_slice_tests.rs
index 2838d46..0390f5c 100644
--- a/tests/cast_slice_tests.rs
+++ b/tests/cast_slice_tests.rs
@@ -1,3 +1,6 @@
+#![allow(clippy::unnecessary_cast)]
+#![allow(clippy::manual_slice_size_calculation)]
+
use core::mem::size_of;
use bytemuck::*;
diff --git a/tests/checked_tests.rs b/tests/checked_tests.rs
index 19e6348..4b14909 100644
--- a/tests/checked_tests.rs
+++ b/tests/checked_tests.rs
@@ -1,3 +1,6 @@
+#![allow(clippy::unnecessary_cast)]
+#![allow(clippy::manual_slice_size_calculation)]
+
use core::{
mem::size_of,
num::{NonZeroU32, NonZeroU8},
diff --git a/tests/std_tests.rs b/tests/std_tests.rs
index 79adcb5..947d8f2 100644
--- a/tests/std_tests.rs
+++ b/tests/std_tests.rs
@@ -3,6 +3,7 @@
//! depend on that can go here.
use bytemuck::*;
+use core::num::NonZeroU8;
#[test]
fn test_transparent_vtabled() {
@@ -44,3 +45,62 @@ fn test_zero_sized_box_alloc() {
unsafe impl Zeroable for Empty {}
let _: Box<Empty> = try_zeroed_box().unwrap();
}
+
+#[test]
+#[cfg(feature = "extern_crate_alloc")]
+fn test_try_from_box_bytes() {
+ // Different layout: target alignment is greater than source alignment.
+ assert_eq!(
+ try_from_box_bytes::<u32>(Box::new([0u8; 4]).into()).map_err(|(x, _)| x),
+ Err(PodCastError::AlignmentMismatch)
+ );
+
+ // Different layout: target alignment is less than source alignment.
+ assert_eq!(
+ try_from_box_bytes::<u32>(Box::new(0u64).into()).map_err(|(x, _)| x),
+ Err(PodCastError::AlignmentMismatch)
+ );
+
+ // Different layout: target size is greater than source size.
+ assert_eq!(
+ try_from_box_bytes::<[u32; 2]>(Box::new(0u32).into()).map_err(|(x, _)| x),
+ Err(PodCastError::SizeMismatch)
+ );
+
+ // Different layout: target size is less than source size.
+ assert_eq!(
+ try_from_box_bytes::<u32>(Box::new([0u32; 2]).into()).map_err(|(x, _)| x),
+ Err(PodCastError::SizeMismatch)
+ );
+
+ // Round trip: alignment is equal to size.
+ assert_eq!(*from_box_bytes::<u32>(Box::new(1000u32).into()), 1000u32);
+
+  // Round trip: alignment is a divisor of the size.
+ assert_eq!(&*from_box_bytes::<[u8; 5]>(Box::new(*b"hello").into()), b"hello");
+
+ // It's ok for T to have uninitialized bytes.
+ #[cfg(feature = "derive")]
+ {
+ #[derive(Debug, Copy, Clone, PartialEq, Eq, AnyBitPattern)]
+ struct Foo(u8, u16);
+ assert_eq!(
+ *from_box_bytes::<Foo>(Box::new([0xc5c5u16; 2]).into()),
+ Foo(0xc5u8, 0xc5c5u16)
+ );
+ }
+}
+
+#[test]
+#[cfg(feature = "extern_crate_alloc")]
+fn test_box_bytes_of() {
+ assert_eq!(&*box_bytes_of(Box::new(*b"hello")), b"hello");
+
+ #[cfg(target_endian = "big")]
+ assert_eq!(&*box_bytes_of(Box::new(0x12345678)), b"\x12\x34\x56\x78");
+ #[cfg(target_endian = "little")]
+ assert_eq!(&*box_bytes_of(Box::new(0x12345678)), b"\x78\x56\x34\x12");
+
+ // It's ok for T to have invalid bit patterns.
+ assert_eq!(&*box_bytes_of(Box::new(NonZeroU8::new(0xc5))), b"\xc5");
+}