author     Jeongik Cha <jeongik@google.com>  2023-09-27 08:11:50 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2023-09-27 08:11:50 +0000
commit     487bf1c1690474c5a467dc82a3552642eadd43eb (patch)
tree       a10860a3f9804450bf9ba3f0e5aa2070cce478c0
parent     9c50050e9e0b3f690e0730ba2aff693033a7dbbb (diff)
parent     443f019c5ba6d3a66f750bdae1350c828c1703cf (diff)
download   vm-memory-487bf1c1690474c5a467dc82a3552642eadd43eb.tar.gz

Import vm-memory am: 443f019c5b

Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/vm-memory/+/2754425
Change-Id: I77384c6f4337f020f318a6334eb00c84368dc21f
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .buildkite/custom-tests.json  61
-rw-r--r--  .buildkite/pipeline.windows.yml  79
-rw-r--r--  .cargo/audit.toml  14
-rw-r--r--  .cargo/config  2
-rw-r--r--  .cargo_vcs_info.json  6
-rw-r--r--  .github/dependabot.yml  7
-rw-r--r--  .gitignore  3
-rw-r--r--  Android.bp  25
-rw-r--r--  CHANGELOG.md  155
-rw-r--r--  CODEOWNERS  1
-rw-r--r--  Cargo.toml  79
-rw-r--r--  Cargo.toml.orig  46
-rw-r--r--  DESIGN.md  159
-rw-r--r--  LICENSE  232
-rw-r--r--  LICENSE-APACHE  202
-rw-r--r--  LICENSE-BSD-3-Clause  27
-rw-r--r--  METADATA  19
-rw-r--r--  MODULE_LICENSE_APACHE2  0
-rw-r--r--  MODULE_LICENSE_BSD_LIKE  0
-rw-r--r--  OWNERS  1
-rw-r--r--  README.md  94
-rw-r--r--  TODO.md  3
-rw-r--r--  benches/guest_memory.rs  35
-rw-r--r--  benches/main.rs  47
-rw-r--r--  benches/mmap/mod.rs  212
-rw-r--r--  benches/volatile.rs  48
-rw-r--r--  cargo2android.json  5
-rw-r--r--  coverage_config_aarch64.json  5
-rw-r--r--  coverage_config_x86_64.json  5
-rw-r--r--  src/address.rs  406
-rw-r--r--  src/atomic.rs  260
-rw-r--r--  src/atomic_integer.rs  105
-rw-r--r--  src/bitmap/backend/atomic_bitmap.rs  231
-rw-r--r--  src/bitmap/backend/atomic_bitmap_arc.rs  86
-rw-r--r--  src/bitmap/backend/mod.rs  10
-rw-r--r--  src/bitmap/backend/slice.rs  129
-rw-r--r--  src/bitmap/mod.rs  416
-rw-r--r--  src/bytes.rs  539
-rw-r--r--  src/endian.rs  158
-rw-r--r--  src/guest_memory.rs  1217
-rw-r--r--  src/lib.rs  68
-rw-r--r--  src/mmap.rs  1537
-rw-r--r--  src/mmap_unix.rs  669
-rw-r--r--  src/mmap_windows.rs  270
-rw-r--r--  src/mmap_xen.rs  1216
-rw-r--r--  src/volatile_memory.rs  2492
46 files changed, 11381 insertions, 0 deletions
diff --git a/.buildkite/custom-tests.json b/.buildkite/custom-tests.json
new file mode 100644
index 0000000..26960d5
--- /dev/null
+++ b/.buildkite/custom-tests.json
@@ -0,0 +1,61 @@
+{
+ "tests": [
+ {
+ "test_name": "build-gnu-mmap",
+ "command": "cargo build --release --features=xen",
+ "platform": ["x86_64", "aarch64"]
+ },
+ {
+ "test_name": "build-gnu-mmap-no-xen",
+ "command": "cargo build --release --features=backend-mmap",
+ "platform": ["x86_64", "aarch64"]
+ },
+ {
+ "test_name": "build-musl-mmap",
+ "command": "cargo build --release --features=xen --target {target_platform}-unknown-linux-musl",
+ "platform": ["x86_64", "aarch64"]
+ },
+ {
+ "test_name": "build-musl-mmap-no-xen",
+ "command": "cargo build --release --features=backend-mmap --target {target_platform}-unknown-linux-musl",
+ "platform": ["x86_64", "aarch64"]
+ },
+ {
+ "test_name": "miri",
+ "command": "RUST_BACKTRACE=1 MIRIFLAGS='-Zmiri-disable-isolation -Zmiri-panic-on-unsupported -Zmiri-backtrace=full' cargo +nightly miri test --features backend-mmap",
+ "platform": ["x86_64", "aarch64"]
+ },
+ {
+ "test_name": "unittests-gnu-no-xen",
+ "command": "cargo test --features 'backend-bitmap backend-mmap backend-atomic' --workspace",
+ "platform": [
+ "x86_64",
+ "aarch64"
+ ]
+ },
+ {
+ "test_name": "unittests-musl-no-xen",
+ "command": "cargo test --features 'backend-bitmap backend-mmap backend-atomic' --workspace --target {target_platform}-unknown-linux-musl",
+ "platform": [
+ "x86_64",
+ "aarch64"
+ ]
+ },
+ {
+ "test_name": "clippy-no-xen",
+ "command": "cargo clippy --workspace --bins --examples --benches --features 'backend-bitmap backend-mmap backend-atomic' --all-targets -- -D warnings -D clippy::undocumented_unsafe_blocks",
+ "platform": [
+ "x86_64",
+ "aarch64"
+ ]
+ },
+ {
+ "test_name": "check-warnings-no-xen",
+ "command": "RUSTFLAGS=\"-D warnings\" cargo check --all-targets --features 'backend-bitmap backend-mmap backend-atomic' --workspace",
+ "platform": [
+ "x86_64",
+ "aarch64"
+ ]
+ }
+ ]
+}
diff --git a/.buildkite/pipeline.windows.yml b/.buildkite/pipeline.windows.yml
new file mode 100644
index 0000000..ea41df1
--- /dev/null
+++ b/.buildkite/pipeline.windows.yml
@@ -0,0 +1,79 @@
+steps:
+ - label: "build-msvc-x86"
+ commands:
+ - cargo build --release
+ retry:
+ automatic: true
+ agents:
+ platform: x86_64
+ os: windows
+ plugins:
+ - docker#v3.7.0:
+ image: "lpetrut/rust_win_buildtools"
+ always-pull: true
+
+ - label: "build-msvc-x86-mmap"
+ commands:
+ - cargo build --release --features=backend-mmap
+ retry:
+ automatic: true
+ agents:
+ platform: x86_64
+ os: windows
+ plugins:
+ - docker#v3.7.0:
+ image: "lpetrut/rust_win_buildtools"
+ always-pull: true
+
+ - label: "style"
+ command: cargo fmt --all -- --check
+ retry:
+ automatic: true
+ agents:
+ platform: x86_64
+ os: windows
+ plugins:
+ - docker#v3.7.0:
+ image: "lpetrut/rust_win_buildtools"
+ always-pull: true
+
+ - label: "unittests-msvc-x86"
+ commands:
+ - cargo test --all-features
+ retry:
+ automatic: true
+ agents:
+ platform: x86_64
+ os: windows
+ plugins:
+ - docker#v3.7.0:
+ image: "lpetrut/rust_win_buildtools"
+ always-pull: true
+
+ - label: "clippy-x86"
+ commands:
+ - cargo clippy --all
+ retry:
+ automatic: true
+ agents:
+ platform: x86_64
+ os: windows
+ plugins:
+ - docker#v3.7.0:
+ image: "lpetrut/rust_win_buildtools"
+ always-pull: true
+
+ - label: "check-warnings-x86"
+ commands:
+ - cargo check --all-targets
+ retry:
+ automatic: true
+ agents:
+ platform: x86_64
+ os: windows
+ plugins:
+ - docker#v3.7.0:
+ image: "lpetrut/rust_win_buildtools"
+ always-pull: true
+ environment:
+ - "RUSTFLAGS=-D warnings"
diff --git a/.cargo/audit.toml b/.cargo/audit.toml
new file mode 100644
index 0000000..8bd8a87
--- /dev/null
+++ b/.cargo/audit.toml
@@ -0,0 +1,14 @@
+[advisories]
+ignore = [
+ # serde_cbor is an unmaintained dependency introduced by criterion.
+ # We are using criterion only for benchmarks, so we can ignore
+ # this vulnerability until criterion fixes it.
+ # See https://github.com/bheisler/criterion.rs/issues/534.
+ "RUSTSEC-2021-0127",
+ # atty is unmaintained (the unsound problem doesn't seem to impact us).
+ # We are ignoring this advisory because it's only used by criterion,
+ # and we are using criterion for benchmarks. This is not a problem for
+ # production use cases. Also, criterion did not update the dependency,
+ # so there is not much else we can do.
+ "RUSTSEC-2021-0145"
+ ]
diff --git a/.cargo/config b/.cargo/config
new file mode 100644
index 0000000..02cbaf3
--- /dev/null
+++ b/.cargo/config
@@ -0,0 +1,2 @@
+[target.aarch64-unknown-linux-musl]
+rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..226282c
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+ "git": {
+ "sha1": "aff1dd4a5259f7deba56692840f7a2d9ca34c9c8"
+ },
+ "path_in_vcs": ""
+} \ No newline at end of file
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..97b2020
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+version: 2
+updates:
+- package-ecosystem: gitsubmodule
+ directory: "/"
+ schedule:
+ interval: monthly
+ open-pull-requests-limit: 10
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6936990
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/target
+**/*.rs.bk
+Cargo.lock
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..0322aed
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,25 @@
+// This file is generated by cargo2android.py --config cargo2android.json.
+// Do not modify this file as changes will be overridden on upgrade.
+
+
+
+rust_library_host {
+ name: "libvm_memory_android",
+ crate_name: "vm_memory",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.12.2",
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ features: [
+ "arc-swap",
+ "backend-atomic",
+ "backend-bitmap",
+ "backend-mmap",
+ "default",
+ ],
+ rustlibs: [
+ "libarc_swap",
+ "liblibc",
+ "libthiserror",
+ ],
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..a70cb03
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,155 @@
+# Changelog
+
+## [v0.12.2]
+
+### Fixed
+- [[#251]](https://github.com/rust-vmm/vm-memory/pull/251): Inserted checks
+ that verify that the value returned by `VolatileMemory::get_slice` is of
+ the correct length.
+
+### Deprecated
+- [[#244]](https://github.com/rust-vmm/vm-memory/pull/244) Deprecate volatile
+ memory's `as_ptr()` interfaces. The new interfaces to be used instead are:
+ `ptr_guard()` and `ptr_guard_mut()`.
+
+## [v0.12.1]
+
+### Fixed
+- [[#245]](https://github.com/rust-vmm/vm-memory/pull/245) mmap_xen: Don't drop
+ the FileOffset while in use.
+
+## [v0.12.0]
+
+### Added
+- [[#241]](https://github.com/rust-vmm/vm-memory/pull/241) Add Xen memory
+ mapping support: Foreign and Grant. Add new API for accessing pointers to
+ volatile slices, as `as_ptr()` can't be used with Xen's Grant mapping.
+- [[#237]](https://github.com/rust-vmm/vm-memory/pull/237) Implement `ByteValued` for `i/u128`.
+
+## [v0.11.0]
+
+### Added
+- [[#216]](https://github.com/rust-vmm/vm-memory/pull/216) Add `GuestRegionMmap::from_region`.
+
+### Fixed
+- [[#217]](https://github.com/rust-vmm/vm-memory/pull/217) Fix vm-memory internally
+ taking rust-style slices to guest memory in ways that could potentially cause
+ undefined behavior. Removes/deprecates various `as_slice`/`as_slice_mut` methods
+ whose usage violated rust's aliasing rules, as well as an unsound
+ `impl<'a> VolatileMemory for &'a mut [u8]`.
+
+## [v0.10.0]
+
+### Changed
+- [[#208]](https://github.com/rust-vmm/vm-memory/issues/208) Updated
+ vmm-sys-util dependency to v0.11.0
+- [[#203]](https://github.com/rust-vmm/vm-memory/pull/203) Switched to Rust
+ edition 2021.
+
+## [v0.9.0]
+
+### Fixed
+
+- [[#195]](https://github.com/rust-vmm/vm-memory/issues/195):
+ `mmap::check_file_offset` is doing the correct size validation for block and
+ char devices as well.
+
+### Changed
+
+- [[#198]](https://github.com/rust-vmm/vm-memory/pull/198): atomic: enable 64
+ bit atomics on ppc64le and s390x.
+- [[#200]](https://github.com/rust-vmm/vm-memory/pull/200): docs: enable all
+ features in `docs.rs`.
+- [[#199]](https://github.com/rust-vmm/vm-memory/issues/199): Update the way
+ the dependencies are pulled such that we don't end up with incompatible
+ versions.
+
+## [v0.8.0]
+
+### Fixed
+
+- [[#190]](https://github.com/rust-vmm/vm-memory/pull/190):
+ `VolatileSlice::read/write` when input slice is empty.
+
+## [v0.7.0]
+
+### Changed
+
+- [[#176]](https://github.com/rust-vmm/vm-memory/pull/176): Relax the trait
+ bounds of `Bytes` auto impl for `T: GuestMemory`
+- [[#178]](https://github.com/rust-vmm/vm-memory/pull/178):
+ `MmapRegion::build_raw` no longer requires that the length of the region is a
+ multiple of the page size.
+
+## [v0.6.0]
+
+### Added
+
+ - [[#160]](https://github.com/rust-vmm/vm-memory/pull/160): Add `ArcRef` and `AtomicBitmapArc` bitmap
+ backend implementations.
+ - [[#149]](https://github.com/rust-vmm/vm-memory/issues/149): Implement builder for MmapRegion.
+ - [[#140]](https://github.com/rust-vmm/vm-memory/issues/140): Add dirty bitmap tracking abstractions.
+
+### Deprecated
+
+ - [[#133]](https://github.com/rust-vmm/vm-memory/issues/133): Deprecate `GuestMemory::with_regions()`,
+ `GuestMemory::with_regions_mut()`, `GuestMemory::map_and_fold()`.
+
+## [v0.5.0]
+
+### Added
+
+- [[#8]](https://github.com/rust-vmm/vm-memory/issues/8): Add GuestMemory method to return an Iterator
+- [[#120]](https://github.com/rust-vmm/vm-memory/pull/120): Add is_hugetlbfs() to GuestMemoryRegion
+- [[#126]](https://github.com/rust-vmm/vm-memory/pull/126): Add VolatileSlice::split_at()
+- [[#128]](https://github.com/rust-vmm/vm-memory/pull/128): Add VolatileSlice::subslice()
+
+## [v0.4.0]
+
+### Fixed
+
+- [[#100]](https://github.com/rust-vmm/vm-memory/issues/100): Performance
+ degradation after fixing [#95](https://github.com/rust-vmm/vm-memory/pull/95).
+- [[#122]](https://github.com/rust-vmm/vm-memory/pull/122): atomic,
+ Cargo.toml: Update for arc-swap 1.0.0.
+
+## [v0.3.0]
+
+### Added
+
+- [[#109]](https://github.com/rust-vmm/vm-memory/pull/109): Added `build_raw` to
+ `MmapRegion` which can be used to operate on externally created mappings.
+- [[#101]](https://github.com/rust-vmm/vm-memory/pull/101): Added `check_range` for
+ GuestMemory which could be used to validate a range of guest memory.
+- [[#115]](https://github.com/rust-vmm/vm-memory/pull/115): Add methods for atomic
+ access to `Bytes`.
+
+### Fixed
+
+- [[#93]](https://github.com/rust-vmm/vm-memory/issues/93): DoS issue when using
+ virtio with rust-vmm/vm-memory.
+- [[#106]](https://github.com/rust-vmm/vm-memory/issues/106): Asserts trigger
+ on zero-length access.
+
+### Removed
+
+- `integer-atomics` is no longer a distinct feature of the crate.
+
+## [v0.2.0]
+
+### Added
+
+- [[#76]](https://github.com/rust-vmm/vm-memory/issues/76): Added `get_slice` and
+ `as_volatile_slice` to `GuestMemoryRegion`.
+- [[#82]](https://github.com/rust-vmm/vm-memory/issues/82): Added `Clone` bound
+ for `GuestAddressSpace::T`, the return value of `GuestAddressSpace::memory()`.
+- [[#88]](https://github.com/rust-vmm/vm-memory/issues/88): Added `as_bytes` for
+ `ByteValued` which can be used for reading into POD structures from
+ raw bytes.
+
+## [v0.1.0]
+
+### Added
+
+- Added traits for working with VM memory.
+- Added an mmap-based implementation for the guest memory.
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000..fc1dba9
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1 @@
+* @alexandruag @bonzini @jiangliu @tkreuzer @roypat
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..5081971
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,79 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "vm-memory"
+version = "0.12.2"
+authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
+autobenches = false
+description = "Safe abstractions for accessing the VM physical memory"
+readme = "README.md"
+keywords = ["memory"]
+categories = ["memory-management"]
+license = "Apache-2.0 OR BSD-3-Clause"
+repository = "https://github.com/rust-vmm/vm-memory"
+
+[package.metadata.docs.rs]
+all-features = true
+
+[profile.bench]
+lto = true
+codegen-units = 1
+
+[[bench]]
+name = "main"
+harness = false
+
+[dependencies.arc-swap]
+version = "1.0.0"
+optional = true
+
+[dependencies.bitflags]
+version = "1.0"
+optional = true
+
+[dependencies.libc]
+version = "0.2.39"
+
+[dependencies.thiserror]
+version = "1.0.40"
+
+[dependencies.vmm-sys-util]
+version = "0.11.0"
+optional = true
+
+[dev-dependencies.criterion]
+version = "0.3.0"
+
+[dev-dependencies.matches]
+version = "0.1.0"
+
+[dev-dependencies.vmm-sys-util]
+version = "0.11.0"
+
+[features]
+backend-atomic = ["arc-swap"]
+backend-bitmap = []
+backend-mmap = []
+default = []
+xen = [
+ "backend-mmap",
+ "bitflags",
+ "vmm-sys-util",
+]
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3"
+features = [
+ "errhandlingapi",
+ "sysinfoapi",
+]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..cd63941
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,46 @@
+[package]
+name = "vm-memory"
+version = "0.12.2"
+description = "Safe abstractions for accessing the VM physical memory"
+keywords = ["memory"]
+categories = ["memory-management"]
+authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
+repository = "https://github.com/rust-vmm/vm-memory"
+readme = "README.md"
+license = "Apache-2.0 OR BSD-3-Clause"
+edition = "2021"
+autobenches = false
+
+[features]
+default = []
+backend-bitmap = []
+backend-mmap = []
+backend-atomic = ["arc-swap"]
+xen = ["backend-mmap", "bitflags", "vmm-sys-util"]
+
+[dependencies]
+libc = "0.2.39"
+arc-swap = { version = "1.0.0", optional = true }
+bitflags = { version = "1.0", optional = true }
+thiserror = "1.0.40"
+vmm-sys-util = { version = "0.11.0", optional = true }
+
+[target.'cfg(windows)'.dependencies.winapi]
+version = "0.3"
+features = ["errhandlingapi", "sysinfoapi"]
+
+[dev-dependencies]
+criterion = "0.3.0"
+matches = "0.1.0"
+vmm-sys-util = "0.11.0"
+
+[[bench]]
+name = "main"
+harness = false
+
+[profile.bench]
+lto = true
+codegen-units = 1
+
+[package.metadata.docs.rs]
+all-features = true
diff --git a/DESIGN.md b/DESIGN.md
new file mode 100644
index 0000000..1e420e8
--- /dev/null
+++ b/DESIGN.md
@@ -0,0 +1,159 @@
+# Design
+
+## Objectives
+
+- Provide a set of traits for accessing and configuring the physical memory of
+ a virtual machine.
+- Provide a clean abstraction of the VM memory such that rust-vmm components
+ can use it without depending on the implementation details specific to
+ different VMMs.
+
+## API Principles
+
+- Define consumer-side interfaces to access the VM's physical memory.
+- Do not define provider-side interfaces to supply VM physical memory.
+
+The `vm-memory` crate focuses on defining consumer side interfaces to access
+the physical memory of the VM. It does not define how the underlying VM memory
+provider is implemented. Lightweight VMMs like
+[CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) and
+[Firecracker](https://github.com/firecracker-microvm/firecracker) can make
+assumptions about the structure of VM's physical memory and implement a
+lightweight backend to access it. For VMMs like [Qemu](https://www.qemu.org/),
+a high-performance, full-featured backend may be implemented with fewer
+assumptions.
+
+## Architecture
+
+The `vm-memory` crate is derived from two upstream projects:
+
+- [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/)
+ commit 186eb8b0db644892e8ffba8344efe3492bb2b823
+- [Firecracker](https://github.com/firecracker-microvm/firecracker) commit
+ 80128ea61b305a27df1f751d70415b04b503eae7
+
+The high level abstraction of the VM memory has been heavily refactored to
+provide a VMM agnostic interface.
+
+The `vm-memory` crate can be divided into four logical parts:
+
+- [Address Space Abstraction](#address-space-abstraction)
+- [Specialization for Virtual Machine Physical Address Space](#specialization-for-virtual-machine-physical-address-space)
+- [Backend Implementation Based on `mmap`](#backend-implementation-based-on-mmap)
+- [Utilities and Helpers](#utilities-and-helpers)
+
+### Address Space Abstraction
+
+The address space abstraction contains traits and implementations for working
+with addresses as follows:
+
+- `AddressValue`: stores the raw value of an address. Typically `u32`, `u64` or
+ `usize` are used to store the raw value. Raw pointers such as `*const u8`
+ cannot be used as an implementation of `AddressValue` because the `Add` and
+ `Sub` traits are not implemented for them.
+- `Address`: implementation of `AddressValue`.
+- `Bytes`: trait for volatile access to memory. The `Bytes` trait can be
+ parameterized with types that represent addresses, in order to enforce that
+ addresses are used with the right "kind" of volatile memory.
+- `VolatileMemory`: basic implementation of volatile access to memory.
+ Implements `Bytes<usize>`.
+
+To make the abstraction as generic as possible, all of above traits only define
+methods to access the address space, and they never define methods to manage
+(create, delete, insert, remove etc) address spaces. This way, the address
+space consumers may be decoupled from the address space provider
+(typically a VMM).
+
+### Specialization for Virtual Machine Physical Address Space
+
+The generic address space crates are specialized to access the physical memory
+of the VM using the following traits:
+
+- `GuestAddress`: represents a guest physical address (GPA). On ARM64, a
+ 32-bit VMM/hypervisor can be used to support a 64-bit VM. For simplicity,
+ `u64` is used to store the raw value, whether it is a 32-bit or
+ a 64-bit virtual machine.
+- `GuestMemoryRegion`: represents a contiguous region of the VM memory.
+- `GuestMemory`: represents a collection of `GuestMemoryRegion` objects. The
+ main responsibilities of the `GuestMemory` trait are:
+ - hide the detail of accessing physical addresses (for example complex
+ hierarchical structures).
+ - map an address request to a `GuestMemoryRegion` object and relay the
+ request to it.
+ - handle cases where an access request is spanning two or more
+ `GuestMemoryRegion` objects.
+
+The VM memory consumers should only rely on the traits and structs defined here
+to access the VM's physical memory, not on the implementations of those traits.
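
As a small consumer-side illustration, the following sketch uses `GuestAddress` together with the re-exported `Address` trait from the previous section (values are arbitrary; this is a sketch, not an API reference):

```rust
use vm_memory::{Address, GuestAddress};

fn main() {
    // A guest physical address wraps a raw u64 value.
    let base = GuestAddress(0x1000);
    assert_eq!(base.raw_value(), 0x1000);

    // Arithmetic is explicit and checked: overflow is reported as `None`
    // instead of silently wrapping.
    let next = base.checked_add(0x200).expect("address overflow");
    assert_eq!(next, GuestAddress(0x1200));
    assert!(GuestAddress(u64::MAX).checked_add(1).is_none());
}
```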
+
+### Backend Implementation Based on `mmap`
+
+Provides an implementation of the `GuestMemory` trait by mmapping the VM's physical
+memory into the current process.
+
+- `MmapRegion`: a memory mapping of a contiguous range of physical memory,
+ with methods for accessing the mapped memory.
+- `GuestRegionMmap`: implementation of `GuestMemoryRegion` providing a wrapper
+ used to map a VM physical address to a `(mmap_region, offset)` tuple.
+- `GuestMemoryMmap`: implementation of `GuestMemory` that manages a collection
+ of `GuestRegionMmap` objects for a VM.
+
+One of the main responsibilities of `GuestMemoryMmap` is to handle the use
+cases where an access request crosses a memory region boundary. This scenario
+may be triggered when memory hotplug is supported. There is a trade-off between
+simplicity and code complexity:
+
+- The following pattern currently used in both CrosVM and Firecracker is
+ simple, but fails when the request crosses a region boundary.
+
+```rust
+let guest_memory_mmap: GuestMemoryMmap = ...
+let addr: GuestAddress = ...
+let buf = &mut [0u8; 5];
+let result = guest_memory_mmap.find_region(addr).unwrap().write(buf, addr);
+```
+
+- To support requests crossing a region boundary, the following update is needed:
+
+```rust
+let guest_memory_mmap: GuestMemoryMmap = ...
+let addr: GuestAddress = ...
+let buf = &mut [0u8; 5];
+let result = guest_memory_mmap.write(buf, addr);
+```
+
+### Utilities and Helpers
+
+The following utilities and helper traits/macros are imported from the
+[crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/)
+with minor changes:
+
+- `ByteValued` (originally `DataInit`): types which are safe to be initialized
+ from raw data. A type `T` is `ByteValued` if and only if it can be
+ initialized by reading its contents from a byte array. This is generally true
+ for all plain-old-data structs. It is notably not true for any type that
+ includes a reference.
+- `{Le,Be}_{16,32,64}`: explicit endian types useful for embedding in structs
+ or reinterpreting data.
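
A minimal sketch of how these helpers are typically combined, mirroring the `SmallDummy` pattern from `benches/mmap/mod.rs` in this import (the `Header` type below is illustrative, not part of the crate):

```rust
use std::mem::size_of;
use vm_memory::{ByteValued, Le32};

// A plain-old-data struct: repr(C), no references, every bit pattern valid.
#[repr(C)]
#[derive(Copy, Clone, Default)]
struct Header {
    a: u32,
    b: u32,
}

// SAFETY: Header satisfies the plain-old-data requirements stated above.
unsafe impl ByteValued for Header {}

fn main() {
    // ByteValued lets the struct be viewed as raw bytes.
    let header = Header { a: 1, b: 2 };
    assert_eq!(header.as_slice().len(), size_of::<Header>());

    // Explicit endian types convert between native and stored byte order.
    let le = Le32::from(0x1234_u32);
    assert_eq!(le.to_native(), 0x1234_u32);
}
```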
+
+## Relationships between Traits, Structs and Types
+
+**Traits**:
+
+- `Address` inherits `AddressValue`
+- `GuestMemoryRegion` inherits `Bytes<MemoryRegionAddress, E = Error>`. The
+ `Bytes` trait must be implemented.
+- `GuestMemory` has a generic implementation of `Bytes<GuestAddress>`.
+
+**Types**:
+
+- `GuestAddress`: `Address<u64>`
+- `MemoryRegionAddress`: `Address<u64>`
+
+**Structs**:
+
+- `MmapRegion` implements `VolatileMemory`
+- `GuestRegionMmap` implements `Bytes<MemoryRegionAddress> + GuestMemoryRegion`
+- `GuestMemoryMmap` implements `GuestMemory`
+- `VolatileSlice` implements
+ `Bytes<usize, E = volatile_memory::Error> + VolatileMemory`
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..c57a296
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,232 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+---
+
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/LICENSE-BSD-3-Clause b/LICENSE-BSD-3-Clause
new file mode 100644
index 0000000..8bafca3
--- /dev/null
+++ b/LICENSE-BSD-3-Clause
@@ -0,0 +1,27 @@
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..19eb745
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "vm-memory"
+description: "Safe abstractions for accessing the VM physical memory"
+third_party {
+ identifier {
+ type: "crates.io"
+ value: "https://crates.io/crates/vm-memory"
+ }
+ identifier {
+ type: "Archive"
+ value: "https://static.crates.io/crates/vm-memory/vm-memory-0.12.2.crate"
+ }
+ version: "0.12.2"
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2023
+ month: 9
+ day: 6
+ }
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/MODULE_LICENSE_BSD_LIKE b/MODULE_LICENSE_BSD_LIKE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_BSD_LIKE
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..45dc4dd
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1 @@
+include platform/prebuilts/rust:master:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..07e55ee
--- /dev/null
+++ b/README.md
@@ -0,0 +1,94 @@
+# vm-memory
+
+[![crates.io](https://img.shields.io/crates/v/vm-memory)](https://crates.io/crates/vm-memory)
+[![docs.rs](https://img.shields.io/docsrs/vm-memory)](https://docs.rs/vm-memory/)
+
+## Design
+
+In a typical Virtual Machine Monitor (VMM) there are several components, such
+as boot loader, virtual device drivers, virtio backend drivers and vhost
+drivers, that need to access the VM physical memory. The `vm-memory` crate
+provides a set of traits to decouple VM memory consumers from VM memory
+providers. Based on these traits, VM memory consumers can access the physical
+memory of the VM without knowing the implementation details of the VM memory
+provider. Thus VMM components based on these traits can be shared and reused by
+multiple virtualization solutions.
+
+The detailed design of the `vm-memory` crate can be found [here](DESIGN.md).
+
+### Platform Support
+
+- Arch: x86, AMD64, ARM64
+- OS: Linux/Unix/Windows
+
+### Xen support
+
+Supporting Xen requires special handling while mapping the guest memory and
+hence a separate feature is provided in the crate: `xen`. Mapping the guest
+memory for Xen requires an `ioctl()` to be issued along with `mmap()` for the
+memory area. The arguments for the `ioctl()` are received via the `vhost-user`
+protocol's memory region area.
+
+Xen allows two different mapping models: `Foreign` and `Grant`.
+
+In the `Foreign` mapping model, the entire guest address space is mapped at
+once, in advance. In the `Grant` mapping model, only the memory for a few
+regions, like those representing the virtqueues, is mapped in advance. The
+rest of the memory is mapped (partially) only while its buffers are being
+accessed, and the mapping is torn down immediately after the access. Hence
+the special handling for Grant mappings in `volatile_memory.rs`.
+
+In order to still support standard Unix memory regions, for special regions and
+testing, the Xen-specific implementation here allows a third mapping type:
+`MmapXenFlags::UNIX`. This performs standard Unix memory mapping and is what
+all tests in this crate use.
+
+The `rust-vmm` maintainers decided to keep the interface simple and to build
+the crate for either standard Unix memory mapping or Xen, but not both.
+
+Xen is only supported for Unix platforms.
+
+## Usage
+
+Add `vm-memory` as a dependency in `Cargo.toml`
+
+```toml
+[dependencies]
+vm-memory = "*"
+```
+
+Then add `extern crate vm_memory;` to your crate root (only needed with the 2015 edition).
+
+## Examples
+
+- Creating a VM physical memory object in hypervisor specific ways using the
+ `GuestMemoryMmap` implementation of the `GuestMemory` trait:
+
+```rust
+fn provide_mem_to_virt_dev() {
+ let gm = GuestMemoryMmap::from_ranges(&[
+ (GuestAddress(0), 0x1000),
+ (GuestAddress(0x1000), 0x1000)
+ ]).unwrap();
+ virt_device_io(&gm);
+}
+```
+
+- Consumers accessing the VM's physical memory:
+
+```rust
+fn virt_device_io<T: GuestMemory>(mem: &T) {
+ let sample_buf = &[1, 2, 3, 4, 5];
+ assert_eq!(mem.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
+ let buf = &mut [0u8; 5];
+ assert_eq!(mem.read(buf, GuestAddress(0xffc)).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+}
+```
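
- Reading and writing plain-old-data objects through the same `Bytes` interface; a short additional sketch, assuming the `backend-mmap` feature as above (the `SmallHeader` type is illustrative, not part of the crate):

```rust
use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};

#[repr(C)]
#[derive(Copy, Clone, Default, Debug, PartialEq)]
struct SmallHeader {
    len: u32,
    flags: u32,
}

// SAFETY: SmallHeader is repr(C), has no padding and no reference fields,
// so any byte pattern is a valid value.
unsafe impl ByteValued for SmallHeader {}

fn obj_io<T: GuestMemory>(mem: &T) {
    let header = SmallHeader { len: 64, flags: 1 };
    mem.write_obj(header, GuestAddress(0x100)).unwrap();
    assert_eq!(mem.read_obj::<SmallHeader>(GuestAddress(0x100)).unwrap(), header);
}

fn main() {
    let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    obj_io(&gm);
}
```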
+
+## License
+
+This project is licensed under either of
+
+- [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0
+- [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause)
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 0000000..e52bb07
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,3 @@
+### TODO List
+- Abstraction layer to separate VM memory management from VM memory access.
+- Help needed to refine documentation and usage examples.
diff --git a/benches/guest_memory.rs b/benches/guest_memory.rs
new file mode 100644
index 0000000..f2372e3
--- /dev/null
+++ b/benches/guest_memory.rs
@@ -0,0 +1,35 @@
+// Copyright (C) 2020 Alibaba Cloud Computing. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+#![cfg(feature = "backend-mmap")]
+
+pub use criterion::{black_box, Criterion};
+
+use vm_memory::bitmap::Bitmap;
+use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
+
+const REGION_SIZE: usize = 0x10_0000;
+const REGIONS_COUNT: u64 = 256;
+
+pub fn benchmark_for_guest_memory(c: &mut Criterion) {
+ benchmark_find_region(c);
+}
+
+fn find_region<B>(mem: &GuestMemoryMmap<B>)
+where
+ B: Bitmap + 'static,
+{
+ for i in 0..REGIONS_COUNT {
+ let _ = mem
+ .find_region(black_box(GuestAddress(i * REGION_SIZE as u64)))
+ .unwrap();
+ }
+}
+
+fn benchmark_find_region(c: &mut Criterion) {
+ let memory = super::create_guest_memory_mmap(REGION_SIZE, REGIONS_COUNT);
+
+ c.bench_function("find_region", |b| {
+ b.iter(|| find_region(black_box(&memory)))
+ });
+}
diff --git a/benches/main.rs b/benches/main.rs
new file mode 100644
index 0000000..98dc0a5
--- /dev/null
+++ b/benches/main.rs
@@ -0,0 +1,47 @@
+// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+extern crate criterion;
+
+pub use criterion::{black_box, criterion_group, criterion_main, Criterion};
+#[cfg(feature = "backend-mmap")]
+use vm_memory::{GuestAddress, GuestMemoryMmap};
+
+mod guest_memory;
+mod mmap;
+mod volatile;
+
+use volatile::benchmark_for_volatile;
+
+#[cfg(feature = "backend-mmap")]
+// Use this function with caution. It does not check against overflows
+// and `GuestMemoryMmap::from_ranges` errors.
+fn create_guest_memory_mmap(size: usize, count: u64) -> GuestMemoryMmap<()> {
+ let mut regions: Vec<(GuestAddress, usize)> = Vec::new();
+ for i in 0..count {
+ regions.push((GuestAddress(i * size as u64), size));
+ }
+
+ GuestMemoryMmap::from_ranges(regions.as_slice()).unwrap()
+}
+
+pub fn criterion_benchmark(_c: &mut Criterion) {
+ #[cfg(feature = "backend-mmap")]
+ mmap::benchmark_for_mmap(_c);
+}
+
+pub fn benchmark_guest_memory(_c: &mut Criterion) {
+ #[cfg(feature = "backend-mmap")]
+ guest_memory::benchmark_for_guest_memory(_c)
+}
+
+criterion_group! {
+ name = benches;
+ config = Criterion::default().sample_size(200).measurement_time(std::time::Duration::from_secs(50));
+ targets = criterion_benchmark, benchmark_guest_memory, benchmark_for_volatile
+}
+
+criterion_main! {
+ benches,
+}
diff --git a/benches/mmap/mod.rs b/benches/mmap/mod.rs
new file mode 100644
index 0000000..ed15e18
--- /dev/null
+++ b/benches/mmap/mod.rs
@@ -0,0 +1,212 @@
+// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+#![cfg(feature = "backend-mmap")]
+#![allow(clippy::undocumented_unsafe_blocks)]
+
+extern crate criterion;
+extern crate vm_memory;
+
+use std::fs::{File, OpenOptions};
+use std::io::Cursor;
+use std::mem::size_of;
+use std::path::Path;
+
+use criterion::{black_box, Criterion};
+
+use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory};
+
+const REGION_SIZE: usize = 0x8000_0000;
+const REGIONS_COUNT: u64 = 8;
+const ACCESS_SIZE: usize = 0x200;
+
+#[repr(C)]
+#[derive(Copy, Clone, Default)]
+struct SmallDummy {
+ a: u32,
+ b: u32,
+}
+unsafe impl ByteValued for SmallDummy {}
+
+#[repr(C)]
+#[derive(Copy, Clone, Default)]
+struct BigDummy {
+ elements: [u64; 12],
+}
+
+unsafe impl ByteValued for BigDummy {}
+
+fn make_image(size: usize) -> Vec<u8> {
+ let mut image: Vec<u8> = Vec::with_capacity(size);
+ for i in 0..size {
+ // We just want some different numbers here, so the conversion is OK.
+ image.push(i as u8);
+ }
+ image
+}
+
+enum AccessKind {
+ // The parameter represents the index of the region where the access should happen.
+ // Indices are 0-based.
+ InRegion(u64),
+ // The parameter represents the index of the first region (i.e. where the access starts).
+ CrossRegion(u64),
+}
+
+impl AccessKind {
+ fn make_offset(&self, access_size: usize) -> u64 {
+ match *self {
+ AccessKind::InRegion(idx) => REGION_SIZE as u64 * idx,
+ AccessKind::CrossRegion(idx) => {
+ REGION_SIZE as u64 * (idx + 1) - (access_size as u64 / 2)
+ }
+ }
+ }
+}
+
+pub fn benchmark_for_mmap(c: &mut Criterion) {
+ let memory = super::create_guest_memory_mmap(REGION_SIZE, REGIONS_COUNT);
+
+ // Just a sanity check.
+ assert_eq!(
+ memory.last_addr(),
+ GuestAddress(REGION_SIZE as u64 * REGIONS_COUNT - 0x01)
+ );
+
+ let some_small_dummy = SmallDummy {
+ a: 0x1111_2222,
+ b: 0x3333_4444,
+ };
+
+ let some_big_dummy = BigDummy {
+ elements: [0x1111_2222_3333_4444; 12],
+ };
+
+ let mut image = make_image(ACCESS_SIZE);
+ let buf = &mut [0u8; ACCESS_SIZE];
+ let mut file = File::open(Path::new("/dev/zero")).expect("Could not open /dev/zero");
+ let mut file_to_write = OpenOptions::new()
+ .write(true)
+ .open("/dev/null")
+ .expect("Could not open /dev/null");
+
+ let accesses = &[
+ AccessKind::InRegion(0),
+ AccessKind::CrossRegion(0),
+ AccessKind::CrossRegion(REGIONS_COUNT - 2),
+ AccessKind::InRegion(REGIONS_COUNT - 1),
+ ];
+
+ for access in accesses {
+ let offset = access.make_offset(ACCESS_SIZE);
+ let address = GuestAddress(offset);
+
+ // Check performance for read operations.
+ c.bench_function(format!("read_from_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .read_from(address, &mut Cursor::new(&image), ACCESS_SIZE)
+ .unwrap()
+ })
+ });
+
+ c.bench_function(format!("read_from_file_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .read_from(address, &mut file, ACCESS_SIZE)
+ .unwrap()
+ })
+ });
+
+ c.bench_function(format!("read_exact_from_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .read_exact_from(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .unwrap()
+ })
+ });
+
+ c.bench_function(
+ format!("read_entire_slice_from_{:#0X}", offset).as_str(),
+ |b| b.iter(|| black_box(&memory).read_slice(buf, address).unwrap()),
+ );
+
+ c.bench_function(format!("read_slice_from_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| black_box(&memory).read(buf, address).unwrap())
+ });
+
+ let obj_off = access.make_offset(size_of::<SmallDummy>());
+ let obj_addr = GuestAddress(obj_off);
+
+ c.bench_function(
+ format!("read_small_obj_from_{:#0X}", obj_off).as_str(),
+ |b| b.iter(|| black_box(&memory).read_obj::<SmallDummy>(obj_addr).unwrap()),
+ );
+
+ let obj_off = access.make_offset(size_of::<BigDummy>());
+ let obj_addr = GuestAddress(obj_off);
+
+ c.bench_function(format!("read_big_obj_from_{:#0X}", obj_off).as_str(), |b| {
+ b.iter(|| black_box(&memory).read_obj::<BigDummy>(obj_addr).unwrap())
+ });
+
+ // Check performance for write operations.
+ c.bench_function(format!("write_to_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .write_to(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .unwrap()
+ })
+ });
+
+ c.bench_function(format!("write_to_file_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .write_to(address, &mut file_to_write, ACCESS_SIZE)
+ .unwrap()
+ })
+ });
+
+ c.bench_function(format!("write_exact_to_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .write_all_to(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .unwrap()
+ })
+ });
+
+ c.bench_function(
+ format!("write_entire_slice_to_{:#0X}", offset).as_str(),
+ |b| b.iter(|| black_box(&memory).write_slice(buf, address).unwrap()),
+ );
+
+ c.bench_function(format!("write_slice_to_{:#0X}", offset).as_str(), |b| {
+ b.iter(|| black_box(&memory).write(buf, address).unwrap())
+ });
+
+ let obj_off = access.make_offset(size_of::<SmallDummy>());
+ let obj_addr = GuestAddress(obj_off);
+
+ c.bench_function(
+ format!("write_small_obj_to_{:#0X}", obj_off).as_str(),
+ |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .write_obj::<SmallDummy>(some_small_dummy, obj_addr)
+ .unwrap()
+ })
+ },
+ );
+
+ let obj_off = access.make_offset(size_of::<BigDummy>());
+ let obj_addr = GuestAddress(obj_off);
+
+ c.bench_function(format!("write_big_obj_to_{:#0X}", obj_off).as_str(), |b| {
+ b.iter(|| {
+ black_box(&memory)
+ .write_obj::<BigDummy>(some_big_dummy, obj_addr)
+ .unwrap()
+ })
+ });
+ }
+}
diff --git a/benches/volatile.rs b/benches/volatile.rs
new file mode 100644
index 0000000..341e28f
--- /dev/null
+++ b/benches/volatile.rs
@@ -0,0 +1,48 @@
+// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+pub use criterion::{black_box, Criterion};
+use vm_memory::volatile_memory::VolatileMemory;
+use vm_memory::VolatileSlice;
+
+pub fn benchmark_for_volatile(c: &mut Criterion) {
+ let mut a = [0xa5u8; 1024];
+ let vslice = VolatileSlice::from(&mut a[..]);
+ let v_ref8 = vslice.get_slice(0, vslice.len()).unwrap();
+ let mut d8 = [0u8; 1024];
+
+ // Check performance for read operations.
+ c.bench_function("VolatileSlice::copy_to_u8", |b| {
+ b.iter(|| v_ref8.copy_to(black_box(&mut d8[..])))
+ });
+
+ let v_ref16 = vslice.get_slice(0, vslice.len() / 2).unwrap();
+ let mut d16 = [0u16; 512];
+
+ c.bench_function("VolatileSlice::copy_to_u16", |b| {
+ b.iter(|| v_ref16.copy_to(black_box(&mut d16[..])))
+ });
+ benchmark_volatile_copy_to_volatile_slice(c);
+
+ // Check performance for write operations.
+ c.bench_function("VolatileSlice::copy_from_u8", |b| {
+ b.iter(|| v_ref8.copy_from(black_box(&d8[..])))
+ });
+ c.bench_function("VolatileSlice::copy_from_u16", |b| {
+ b.iter(|| v_ref16.copy_from(black_box(&d16[..])))
+ });
+}
+
+fn benchmark_volatile_copy_to_volatile_slice(c: &mut Criterion) {
+ let mut a = [0xa5u8; 10240];
+ let vslice = VolatileSlice::from(&mut a[..]);
+ let a_slice = vslice.get_slice(0, vslice.len()).unwrap();
+ let mut d = [0u8; 10240];
+ let vslice2 = VolatileSlice::from(&mut d[..]);
+ let d_slice = vslice2.get_slice(0, vslice2.len()).unwrap();
+
+ c.bench_function("VolatileSlice::copy_to_volatile_slice", |b| {
+ b.iter(|| black_box(a_slice).copy_to_volatile_slice(d_slice))
+ });
+}
diff --git a/cargo2android.json b/cargo2android.json
new file mode 100644
index 0000000..8889327
--- /dev/null
+++ b/cargo2android.json
@@ -0,0 +1,5 @@
+{
+ "features": "default,backend-bitmap,backend-mmap,backend-atomic,arc-swap",
+ "name-suffix": "_android",
+ "run": true
+}
\ No newline at end of file
diff --git a/coverage_config_aarch64.json b/coverage_config_aarch64.json
new file mode 100644
index 0000000..3a28db2
--- /dev/null
+++ b/coverage_config_aarch64.json
@@ -0,0 +1,5 @@
+{
+ "coverage_score": 85.2,
+ "exclude_path": "mmap_windows.rs",
+ "crate_features": "backend-mmap,backend-atomic,backend-bitmap"
+}
diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json
new file mode 100644
index 0000000..a6a09c4
--- /dev/null
+++ b/coverage_config_x86_64.json
@@ -0,0 +1,5 @@
+{
+ "coverage_score": 92.2,
+ "exclude_path": "mmap_windows.rs",
+ "crate_features": "backend-mmap,backend-atomic,backend-bitmap"
+}
diff --git a/src/address.rs b/src/address.rs
new file mode 100644
index 0000000..350a186
--- /dev/null
+++ b/src/address.rs
@@ -0,0 +1,406 @@
+// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+//
+// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Traits to represent an address within an address space.
+//!
+//! Two traits are defined to represent an address within an address space:
+//! - [`AddressValue`](trait.AddressValue.html): stores the raw value of an address. Typically
+//! `u32`, `u64` or `usize` is used to store the raw value. But pointers, such as `*const u8`, can't
+//! be used because they don't implement the [`Add`](https://doc.rust-lang.org/std/ops/trait.Add.html)
+//! and [`Sub`](https://doc.rust-lang.org/std/ops/trait.Sub.html) traits.
+//! - [Address](trait.Address.html): encapsulates an [`AddressValue`](trait.AddressValue.html)
+//! object and defines methods to access and manipulate it.
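+//!
+//! A brief sketch of how these traits are typically used, through the concrete `GuestAddress`
+//! type defined elsewhere in this crate:
+//!
+//! ```
+//! # use vm_memory::{Address, GuestAddress};
+//! let base = GuestAddress(0x1000);
+//! assert_eq!(base.checked_add(0x200), Some(GuestAddress(0x1200)));
+//! assert_eq!(base.mask(0xf000), 0x1000);
+//! assert_eq!(base.checked_sub(0x2000), None);
+//! ```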
+
+use std::cmp::{Eq, Ord, PartialEq, PartialOrd};
+use std::fmt::Debug;
+use std::ops::{Add, BitAnd, BitOr, Not, Sub};
+
+/// Simple helper trait used to store a raw address value.
+pub trait AddressValue {
+ /// Type of the raw address value.
+ type V: Copy
+ + PartialEq
+ + Eq
+ + PartialOrd
+ + Ord
+ + Not<Output = Self::V>
+ + Add<Output = Self::V>
+ + Sub<Output = Self::V>
+ + BitAnd<Output = Self::V>
+ + BitOr<Output = Self::V>
+ + Debug
+ + From<u8>;
+
+ /// Return the value zero, coerced into the value type `Self::V`
+ fn zero() -> Self::V {
+ 0u8.into()
+ }
+
+ /// Return the value one, coerced into the value type `Self::V`
+ fn one() -> Self::V {
+ 1u8.into()
+ }
+}
+
+/// Trait to represent an address within an address space.
+///
+/// To simplify the design and implementation, assume the same raw data type (`AddressValue::V`)
+/// could be used to store address, size and offset for the address space. Thus the `Address` trait
+/// could be used to manage address, size and offset. On the other hand, type aliases may be
+/// defined to improve code readability.
+///
+/// One design rule is applied to the `Address` trait, namely that operators (+, -, &, |, etc.)
+/// are not supported, forcing clients to explicitly invoke the corresponding methods. There is
+/// one exception: `Address` (BitAnd|BitOr) `AddressValue` operations are supported.
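+///
+/// For example, using the concrete `GuestAddress` type:
+///
+/// ```
+/// # use vm_memory::{Address, GuestAddress};
+/// let addr = GuestAddress(0x5050);
+/// assert_eq!(addr & 0xff00u64, GuestAddress(0x5000));
+/// assert_eq!(addr | 0x000fu64, GuestAddress(0x505f));
+/// ```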
+pub trait Address:
+ AddressValue
+ + Sized
+ + Default
+ + Copy
+ + Eq
+ + PartialEq
+ + Ord
+ + PartialOrd
+ + BitAnd<<Self as AddressValue>::V, Output = Self>
+ + BitOr<<Self as AddressValue>::V, Output = Self>
+{
+ /// Creates an address from a raw address value.
+ fn new(addr: Self::V) -> Self;
+
+ /// Returns the raw value of the address.
+ fn raw_value(&self) -> Self::V;
+
+ /// Returns the bitwise and of the address with the given mask.
+ fn mask(&self, mask: Self::V) -> Self::V {
+ self.raw_value() & mask
+ }
+
+ /// Computes the offset from this address to the given base address.
+ ///
+ /// Returns `None` if there is underflow.
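+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::{Address, GuestAddress};
+ /// #
+ /// let base = GuestAddress(0x100);
+ /// let addr = GuestAddress(0x150);
+ /// assert_eq!(addr.checked_offset_from(base), Some(0x50));
+ /// assert_eq!(base.checked_offset_from(addr), None);
+ /// ```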
+ fn checked_offset_from(&self, base: Self) -> Option<Self::V>;
+
+ /// Computes the offset from this address to the given base address.
+ ///
+ /// In the event of overflow, follows standard Rust behavior, i.e. panic in debug builds,
+ /// silently wrap in release builds.
+ ///
+ /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined
+ /// behavior.
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::{Address, GuestAddress};
+ /// #
+ /// let base = GuestAddress(0x100);
+ /// let addr = GuestAddress(0x150);
+ /// assert_eq!(addr.unchecked_offset_from(base), 0x50);
+ /// ```
+ fn unchecked_offset_from(&self, base: Self) -> Self::V {
+ self.raw_value() - base.raw_value()
+ }
+
+ /// Returns self, aligned to the given power of two.
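+ ///
+ /// Panics if `power_of_two` is zero or not a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::{Address, GuestAddress};
+ /// #
+ /// assert_eq!(GuestAddress(0x128).checked_align_up(16), Some(GuestAddress(0x130)));
+ /// // Aligning up can overflow, in which case `None` is returned.
+ /// assert_eq!(GuestAddress(u64::MAX - 0x3fff).checked_align_up(0x10000), None);
+ /// ```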
+ fn checked_align_up(&self, power_of_two: Self::V) -> Option<Self> {
+ let mask = power_of_two - Self::one();
+ assert_ne!(power_of_two, Self::zero());
+ assert_eq!(power_of_two & mask, Self::zero());
+ self.checked_add(mask).map(|x| x & !mask)
+ }
+
+ /// Returns self, aligned to the given power of two.
+ /// Only use this when the result is guaranteed not to overflow.
+ fn unchecked_align_up(&self, power_of_two: Self::V) -> Self {
+ let mask = power_of_two - Self::one();
+ self.unchecked_add(mask) & !mask
+ }
+
+ /// Computes `self + other`, returning `None` if overflow occurred.
+ fn checked_add(&self, other: Self::V) -> Option<Self>;
+
+ /// Computes `self + other`.
+ ///
+ /// Returns a tuple of the addition result along with a boolean indicating whether an arithmetic
+ /// overflow would occur. If an overflow would have occurred then the wrapped address
+ /// is returned.
+ fn overflowing_add(&self, other: Self::V) -> (Self, bool);
+
+ /// Computes `self + offset`.
+ ///
+ /// In the event of overflow, follows standard Rust behavior, i.e. panic in debug builds,
+ /// silently wrap in release builds.
+ ///
+ /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined
+ /// behavior.
+ fn unchecked_add(&self, offset: Self::V) -> Self;
+
+ /// Subtracts two addresses, checking for underflow. If underflow happens, `None` is returned.
+ fn checked_sub(&self, other: Self::V) -> Option<Self>;
+
+ /// Computes `self - other`.
+ ///
+ /// Returns a tuple of the subtraction result along with a boolean indicating whether an
+ /// arithmetic overflow would occur. If an overflow would have occurred then the wrapped
+ /// address is returned.
+ fn overflowing_sub(&self, other: Self::V) -> (Self, bool);
+
+ /// Computes `self - other`.
+ ///
+ /// In the event of underflow, follows standard Rust behavior, i.e. panic in debug builds,
+ /// silently wrap in release builds.
+ ///
+ /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined
+ /// behavior.
+ fn unchecked_sub(&self, other: Self::V) -> Self;
+}
+
+macro_rules! impl_address_ops {
+ ($T:ident, $V:ty) => {
+ impl AddressValue for $T {
+ type V = $V;
+ }
+
+ impl Address for $T {
+ fn new(value: $V) -> $T {
+ $T(value)
+ }
+
+ fn raw_value(&self) -> $V {
+ self.0
+ }
+
+ fn checked_offset_from(&self, base: $T) -> Option<$V> {
+ self.0.checked_sub(base.0)
+ }
+
+ fn checked_add(&self, other: $V) -> Option<$T> {
+ self.0.checked_add(other).map($T)
+ }
+
+ fn overflowing_add(&self, other: $V) -> ($T, bool) {
+ let (t, ovf) = self.0.overflowing_add(other);
+ ($T(t), ovf)
+ }
+
+ fn unchecked_add(&self, offset: $V) -> $T {
+ $T(self.0 + offset)
+ }
+
+ fn checked_sub(&self, other: $V) -> Option<$T> {
+ self.0.checked_sub(other).map($T)
+ }
+
+ fn overflowing_sub(&self, other: $V) -> ($T, bool) {
+ let (t, ovf) = self.0.overflowing_sub(other);
+ ($T(t), ovf)
+ }
+
+ fn unchecked_sub(&self, other: $V) -> $T {
+ $T(self.0 - other)
+ }
+ }
+
+ impl Default for $T {
+ fn default() -> $T {
+ Self::new(0 as $V)
+ }
+ }
+
+ impl BitAnd<$V> for $T {
+ type Output = $T;
+
+ fn bitand(self, other: $V) -> $T {
+ $T(self.0 & other)
+ }
+ }
+
+ impl BitOr<$V> for $T {
+ type Output = $T;
+
+ fn bitor(self, other: $V) -> $T {
+ $T(self.0 | other)
+ }
+ }
+ };
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
+ struct MockAddress(pub u64);
+ impl_address_ops!(MockAddress, u64);
+
+ #[test]
+ fn test_new() {
+ assert_eq!(MockAddress::new(0), MockAddress(0));
+ assert_eq!(MockAddress::new(std::u64::MAX), MockAddress(std::u64::MAX));
+ }
+
+ #[test]
+ fn test_offset_from() {
+ let base = MockAddress(0x100);
+ let addr = MockAddress(0x150);
+ assert_eq!(addr.unchecked_offset_from(base), 0x50u64);
+ assert_eq!(addr.checked_offset_from(base), Some(0x50u64));
+ assert_eq!(base.checked_offset_from(addr), None);
+ }
+
+ #[test]
+ fn test_equals() {
+ let a = MockAddress(0x300);
+ let b = MockAddress(0x300);
+ let c = MockAddress(0x301);
+ assert_eq!(a, MockAddress(a.raw_value()));
+ assert_eq!(a, b);
+ assert_eq!(b, a);
+ assert_ne!(a, c);
+ assert_ne!(c, a);
+ }
+
+ #[test]
+ fn test_cmp() {
+ let a = MockAddress(0x300);
+ let b = MockAddress(0x301);
+ assert!(a < b);
+ }
+
+ #[test]
+ fn test_checked_align_up() {
+ assert_eq!(
+ MockAddress::new(0x128).checked_align_up(8),
+ Some(MockAddress(0x128))
+ );
+ assert_eq!(
+ MockAddress::new(0x128).checked_align_up(16),
+ Some(MockAddress(0x130))
+ );
+ assert_eq!(
+ MockAddress::new(std::u64::MAX - 0x3fff).checked_align_up(0x10000),
+ None
+ );
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_checked_align_up_invalid() {
+ let _ = MockAddress::new(0x128).checked_align_up(12);
+ }
+
+ #[test]
+ fn test_unchecked_align_up() {
+ assert_eq!(
+ MockAddress::new(0x128).unchecked_align_up(8),
+ MockAddress(0x128)
+ );
+ assert_eq!(
+ MockAddress::new(0x128).unchecked_align_up(16),
+ MockAddress(0x130)
+ );
+ }
+
+ #[test]
+ fn test_mask() {
+ let a = MockAddress(0x5050);
+ assert_eq!(MockAddress(0x5000), a & 0xff00u64);
+ assert_eq!(0x5000, a.mask(0xff00u64));
+ assert_eq!(MockAddress(0x5055), a | 0x0005u64);
+ }
+
+ fn check_add(a: u64, b: u64, expected_overflow: bool, expected_result: u64) {
+ assert_eq!(
+ (MockAddress(expected_result), expected_overflow),
+ MockAddress(a).overflowing_add(b)
+ );
+ if expected_overflow {
+ assert!(MockAddress(a).checked_add(b).is_none());
+ #[cfg(debug_assertions)]
+ assert!(std::panic::catch_unwind(|| MockAddress(a).unchecked_add(b)).is_err());
+ } else {
+ assert_eq!(
+ Some(MockAddress(expected_result)),
+ MockAddress(a).checked_add(b)
+ );
+ assert_eq!(
+ MockAddress(expected_result),
+ MockAddress(a).unchecked_add(b)
+ );
+ }
+ }
+
+ #[test]
+ fn test_add() {
+ // without overflow
+ // normal case
+ check_add(10, 10, false, 20);
+ // edge case
+ check_add(std::u64::MAX - 1, 1, false, std::u64::MAX);
+
+ // with overflow
+ check_add(std::u64::MAX, 1, true, 0);
+ }
+
+ fn check_sub(a: u64, b: u64, expected_overflow: bool, expected_result: u64) {
+ assert_eq!(
+ (MockAddress(expected_result), expected_overflow),
+ MockAddress(a).overflowing_sub(b)
+ );
+ if expected_overflow {
+ assert!(MockAddress(a).checked_sub(b).is_none());
+ assert!(MockAddress(a).checked_offset_from(MockAddress(b)).is_none());
+ #[cfg(debug_assertions)]
+ assert!(std::panic::catch_unwind(|| MockAddress(a).unchecked_sub(b)).is_err());
+ } else {
+ assert_eq!(
+ Some(MockAddress(expected_result)),
+ MockAddress(a).checked_sub(b)
+ );
+ assert_eq!(
+ Some(expected_result),
+ MockAddress(a).checked_offset_from(MockAddress(b))
+ );
+ assert_eq!(
+ MockAddress(expected_result),
+ MockAddress(a).unchecked_sub(b)
+ );
+ }
+ }
+
+ #[test]
+ fn test_sub() {
+ // without overflow
+ // normal case
+ check_sub(20, 10, false, 10);
+ // edge case
+ check_sub(1, 1, false, 0);
+
+ // with underflow
+ check_sub(0, 1, true, std::u64::MAX);
+ }
+
+ #[test]
+ fn test_default() {
+ assert_eq!(MockAddress::default(), MockAddress(0));
+ }
+
+ #[test]
+ fn test_bit_and() {
+ let a = MockAddress(0x0ff0);
+ assert_eq!(a & 0xf00f, MockAddress(0));
+ }
+
+ #[test]
+ fn test_bit_or() {
+ let a = MockAddress(0x0ff0);
+ assert_eq!(a | 0xf00f, MockAddress(0xffff));
+ }
+}
diff --git a/src/atomic.rs b/src/atomic.rs
new file mode 100644
index 0000000..ae10224
--- /dev/null
+++ b/src/atomic.rs
@@ -0,0 +1,260 @@
+// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+// Copyright (C) 2020 Red Hat, Inc. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+//! A wrapper over an `ArcSwap<GuestMemory>` struct to support RCU-style mutability.
+//!
+//! With the `backend-atomic` feature enabled, simply replacing `GuestMemoryMmap`
+//! with `GuestMemoryAtomic<GuestMemoryMmap>` will enable support for mutable memory maps.
+//! To support mutable memory maps, devices will also need to use
+//! `GuestAddressSpace::memory()` to gain temporary access to guest memory.
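+//!
+//! A minimal sketch of the intended flow, assuming both the `backend-mmap` and
+//! `backend-atomic` features are enabled (illustrative only, not compiled here):
+//!
+//! ```ignore
+//! use vm_memory::{
+//!     GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
+//! };
+//!
+//! let gmm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
+//! let atomic = GuestMemoryAtomic::new(gmm);
+//!
+//! // Devices keep a clone of `atomic` and call `memory()` whenever they need a snapshot.
+//! let snapshot = atomic.memory();
+//! assert_eq!(snapshot.num_regions(), 1);
+//!
+//! // A hotplug path takes the update lock and publishes a new memory map.
+//! let new_gmm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x2000)]).unwrap();
+//! atomic.lock().unwrap().replace(new_gmm);
+//! ```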
+
+extern crate arc_swap;
+
+use arc_swap::{ArcSwap, Guard};
+use std::ops::Deref;
+use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};
+
+use crate::{GuestAddressSpace, GuestMemory};
+
+/// A fast implementation of a mutable collection of memory regions.
+///
+/// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map:
+/// every update of the memory map creates a completely new `GuestMemory` object, and
+/// readers will not be blocked because the copies they retrieved will be collected once
+/// no one can access them anymore. Under the assumption that updates to the memory map
+/// are rare, this allows a very efficient implementation of the `memory()` method.
+#[derive(Clone, Debug)]
+pub struct GuestMemoryAtomic<M: GuestMemory> {
+ // GuestAddressSpace<M>, which we want to implement, is basically a drop-in
+ // replacement for &M. Therefore, we need to pass to devices the `GuestMemoryAtomic`
+ // rather than a reference to it. To obtain this effect we wrap the actual fields
+ // of GuestMemoryAtomic with an Arc, and derive the Clone trait. See the
+ // documentation for GuestAddressSpace for an example.
+ inner: Arc<(ArcSwap<M>, Mutex<()>)>,
+}
+
+impl<M: GuestMemory> From<Arc<M>> for GuestMemoryAtomic<M> {
+ /// Create a new `GuestMemoryAtomic` object whose initial contents come from
+ /// the `map` reference-counted `GuestMemory`.
+ fn from(map: Arc<M>) -> Self {
+ let inner = (ArcSwap::new(map), Mutex::new(()));
+ GuestMemoryAtomic {
+ inner: Arc::new(inner),
+ }
+ }
+}
+
+impl<M: GuestMemory> GuestMemoryAtomic<M> {
+ /// Create a new `GuestMemoryAtomic` object whose initial contents come from
+ /// the `map` `GuestMemory`.
+ pub fn new(map: M) -> Self {
+ Arc::new(map).into()
+ }
+
+ fn load(&self) -> Guard<Arc<M>> {
+ self.inner.0.load()
+ }
+
+ /// Acquires the update mutex for the `GuestMemoryAtomic`, blocking the current
+ /// thread until it is able to do so. The returned RAII guard allows for
+ /// scoped unlock of the mutex (that is, the mutex will be unlocked when
+ /// the guard goes out of scope), and optionally also for replacing the
+ /// contents of the `GuestMemoryAtomic` when the lock is dropped.
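+ ///
+ /// A small sketch of the intended usage (`atomic_mem` and `new_map` are placeholder
+ /// names for an existing `GuestMemoryAtomic<M>` and a freshly built `M`):
+ ///
+ /// ```ignore
+ /// let guard = atomic_mem.lock().unwrap();
+ /// // `replace` consumes the guard, publishing `new_map` and releasing the lock.
+ /// guard.replace(new_map);
+ /// ```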
+ pub fn lock(&self) -> LockResult<GuestMemoryExclusiveGuard<M>> {
+ match self.inner.1.lock() {
+ Ok(guard) => Ok(GuestMemoryExclusiveGuard {
+ parent: self,
+ _guard: guard,
+ }),
+ Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard {
+ parent: self,
+ _guard: err.into_inner(),
+ })),
+ }
+ }
+}
+
+impl<M: GuestMemory> GuestAddressSpace for GuestMemoryAtomic<M> {
+ type T = GuestMemoryLoadGuard<M>;
+ type M = M;
+
+ fn memory(&self) -> Self::T {
+ GuestMemoryLoadGuard { guard: self.load() }
+ }
+}
+
+/// A guard that provides temporary access to a `GuestMemoryAtomic`. This
+/// object is returned from the `memory()` method. It dereferences to
+/// a snapshot of the `GuestMemory`, so it can be used transparently to
+/// access memory.
+#[derive(Debug)]
+pub struct GuestMemoryLoadGuard<M: GuestMemory> {
+ guard: Guard<Arc<M>>,
+}
+
+impl<M: GuestMemory> GuestMemoryLoadGuard<M> {
+ /// Make a clone of the held pointer and return it. This is more
+ /// expensive than just using the snapshot, but it allows the caller to hold on
+ /// to the snapshot outside the scope of the guard. It also allows
+ /// writers to proceed, so it is recommended if the reference must
+ /// be held for a long time (including for caching purposes).
+ pub fn into_inner(self) -> Arc<M> {
+ Guard::into_inner(self.guard)
+ }
+}
+
+impl<M: GuestMemory> Clone for GuestMemoryLoadGuard<M> {
+ fn clone(&self) -> Self {
+ GuestMemoryLoadGuard {
+ guard: Guard::from_inner(Arc::clone(&*self.guard)),
+ }
+ }
+}
+
+impl<M: GuestMemory> Deref for GuestMemoryLoadGuard<M> {
+ type Target = M;
+
+ fn deref(&self) -> &Self::Target {
+ &self.guard
+ }
+}
+
+/// An RAII implementation of a "scoped lock" for `GuestMemoryAtomic`. When
+/// this structure is dropped (falls out of scope) the lock will be unlocked,
+/// possibly after updating the memory map represented by the
+/// `GuestMemoryAtomic` that created the guard.
+pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
+ parent: &'a GuestMemoryAtomic<M>,
+ _guard: MutexGuard<'a, ()>,
+}
+
+impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
+ /// Replace the memory map in the `GuestMemoryAtomic` that created the guard
+ /// with the new memory map, `map`. The lock is then dropped since this
+ /// method consumes the guard.
+ pub fn replace(self, map: M) {
+ self.parent.inner.0.store(Arc::new(map))
+ }
+}
+
+#[cfg(test)]
+#[cfg(feature = "backend-mmap")]
+mod tests {
+ use super::*;
+ use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion};
+
+ type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
+ type GuestRegionMmap = crate::GuestRegionMmap<()>;
+ type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>;
+
+ #[test]
+ fn test_atomic_memory() {
+ let region_size = 0x400;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x1000), region_size),
+ ];
+ let mut iterated_regions = Vec::new();
+ let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
+ let gm = GuestMemoryMmapAtomic::new(gmm);
+ let mem = gm.memory();
+
+ for region in mem.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+
+ for region in mem.iter() {
+ iterated_regions.push((region.start_addr(), region.len() as usize));
+ }
+ assert_eq!(regions, iterated_regions);
+ assert_eq!(mem.num_regions(), 2);
+ assert!(mem.find_region(GuestAddress(0x1000)).is_some());
+ assert!(mem.find_region(GuestAddress(0x10000)).is_none());
+
+ assert!(regions
+ .iter()
+ .map(|x| (x.0, x.1))
+ .eq(iterated_regions.iter().copied()));
+
+ let mem2 = mem.into_inner();
+ for region in mem2.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+ assert_eq!(mem2.num_regions(), 2);
+ assert!(mem2.find_region(GuestAddress(0x1000)).is_some());
+ assert!(mem2.find_region(GuestAddress(0x10000)).is_none());
+
+ assert!(regions
+ .iter()
+ .map(|x| (x.0, x.1))
+ .eq(iterated_regions.iter().copied()));
+
+ let mem3 = mem2.memory();
+ for region in mem3.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+ assert_eq!(mem3.num_regions(), 2);
+ assert!(mem3.find_region(GuestAddress(0x1000)).is_some());
+ assert!(mem3.find_region(GuestAddress(0x10000)).is_none());
+ }
+
+ #[test]
+ fn test_clone_guard() {
+ let region_size = 0x400;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x1000), region_size),
+ ];
+ let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
+ let gm = GuestMemoryMmapAtomic::new(gmm);
+ let mem = {
+ let guard1 = gm.memory();
+ Clone::clone(&guard1)
+ };
+ assert_eq!(mem.num_regions(), 2);
+ }
+
+ #[test]
+ fn test_atomic_hotplug() {
+ let region_size = 0x1000;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x10_0000), region_size),
+ ];
+ let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
+ let gm: GuestMemoryAtomic<_> = gmm.clone().into();
+ let mem_orig = gm.memory();
+ assert_eq!(mem_orig.num_regions(), 2);
+
+ {
+ let guard = gm.lock().unwrap();
+ let new_gmm = Arc::make_mut(&mut gmm);
+ let mmap = Arc::new(
+ GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000))
+ .unwrap(),
+ );
+ let new_gmm = new_gmm.insert_region(mmap).unwrap();
+ let mmap = Arc::new(
+ GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000))
+ .unwrap(),
+ );
+ let new_gmm = new_gmm.insert_region(mmap).unwrap();
+ let mmap = Arc::new(
+ GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
+ .unwrap(),
+ );
+ let new_gmm = new_gmm.insert_region(mmap).unwrap();
+ let mmap = Arc::new(
+ GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
+ .unwrap(),
+ );
+ new_gmm.insert_region(mmap).unwrap_err();
+ guard.replace(new_gmm);
+ }
+
+ assert_eq!(mem_orig.num_regions(), 2);
+ let mem = gm.memory();
+ assert_eq!(mem.num_regions(), 5);
+ }
+}
diff --git a/src/atomic_integer.rs b/src/atomic_integer.rs
new file mode 100644
index 0000000..1b55c81
--- /dev/null
+++ b/src/atomic_integer.rs
@@ -0,0 +1,105 @@
+// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+use std::sync::atomic::Ordering;
+
+/// A trait providing generic `load` and `store` access to atomic integer types.
+///
+/// # Safety
+///
+/// Objects that implement this trait must consist exclusively of atomic types
+/// from [`std::sync::atomic`](https://doc.rust-lang.org/std/sync/atomic/), except for
+/// [`AtomicPtr<T>`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html) and
+/// [`AtomicBool`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html).
+pub unsafe trait AtomicInteger: Sync + Send {
+ /// The raw value type associated with the atomic integer (i.e. `u16` for `AtomicU16`).
+ type V;
+
+ /// Create a new instance of `Self`.
+ fn new(v: Self::V) -> Self;
+
+ /// Loads a value from the atomic integer.
+ fn load(&self, order: Ordering) -> Self::V;
+
+ /// Stores a value into the atomic integer.
+ fn store(&self, val: Self::V, order: Ordering);
+}
+
+macro_rules! impl_atomic_integer_ops {
+ ($T:path, $V:ty) => {
+ // SAFETY: This is safe as long as T is an Atomic type.
+ // This is a helper macro for generating the implementation for common
+ // Atomic types.
+ unsafe impl AtomicInteger for $T {
+ type V = $V;
+
+ fn new(v: Self::V) -> Self {
+ Self::new(v)
+ }
+
+ fn load(&self, order: Ordering) -> Self::V {
+ self.load(order)
+ }
+
+ fn store(&self, val: Self::V, order: Ordering) {
+ self.store(val, order)
+ }
+ }
+ };
+}
+
+// TODO: Detect availability using #[cfg(target_has_atomic) when it is stabilized.
+// Right now we essentially assume we're running on either x86 or Arm (32 or 64 bit). AFAIK,
+// Rust starts using additional synchronization primitives to implement atomics when they're
+// not natively available, and that doesn't interact safely with how we cast pointers to
+// atomic value references. We should be wary of this when looking at a broader range of
+// platforms.
+
+impl_atomic_integer_ops!(std::sync::atomic::AtomicI8, i8);
+impl_atomic_integer_ops!(std::sync::atomic::AtomicI16, i16);
+impl_atomic_integer_ops!(std::sync::atomic::AtomicI32, i32);
+#[cfg(any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "powerpc64",
+ target_arch = "s390x"
+))]
+impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64);
+
+impl_atomic_integer_ops!(std::sync::atomic::AtomicU8, u8);
+impl_atomic_integer_ops!(std::sync::atomic::AtomicU16, u16);
+impl_atomic_integer_ops!(std::sync::atomic::AtomicU32, u32);
+#[cfg(any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "powerpc64",
+ target_arch = "s390x"
+))]
+impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64);
+
+impl_atomic_integer_ops!(std::sync::atomic::AtomicIsize, isize);
+impl_atomic_integer_ops!(std::sync::atomic::AtomicUsize, usize);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use std::fmt::Debug;
+ use std::sync::atomic::AtomicU32;
+
+ fn check_atomic_integer_ops<A: AtomicInteger>()
+ where
+ A::V: Copy + Debug + From<u8> + PartialEq,
+ {
+ let v = A::V::from(0);
+ let a = A::new(v);
+ assert_eq!(a.load(Ordering::Relaxed), v);
+
+ let v2 = A::V::from(100);
+ a.store(v2, Ordering::Relaxed);
+ assert_eq!(a.load(Ordering::Relaxed), v2);
+ }
+
+ #[test]
+ fn test_atomic_integer_ops() {
+ check_atomic_integer_ops::<AtomicU32>()
+ }
+}
diff --git a/src/bitmap/backend/atomic_bitmap.rs b/src/bitmap/backend/atomic_bitmap.rs
new file mode 100644
index 0000000..b3340c3
--- /dev/null
+++ b/src/bitmap/backend/atomic_bitmap.rs
@@ -0,0 +1,231 @@
+// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Bitmap backend implementation based on atomic integers.
+
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use crate::bitmap::{Bitmap, RefSlice, WithBitmapSlice};
+
+#[cfg(feature = "backend-mmap")]
+use crate::mmap::NewBitmap;
+
+/// `AtomicBitmap` implements a simple bit map on the page level with test and set operations.
+/// It is page-size aware, so it converts addresses to page numbers before setting or clearing
+/// the bits.
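+///
+/// A short illustration of the page-level tracking, assuming the `backend-bitmap` feature
+/// (which provides this type) is enabled:
+///
+/// ```ignore
+/// use vm_memory::bitmap::AtomicBitmap;
+///
+/// // Track 1024 bytes of memory with a 128-byte page size.
+/// let bitmap = AtomicBitmap::new(1024, 128);
+/// bitmap.set_addr_range(256, 1);
+/// assert!(bitmap.is_addr_set(256));
+/// assert!(!bitmap.is_addr_set(384));
+/// ```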
+#[derive(Debug)]
+pub struct AtomicBitmap {
+ map: Vec<AtomicU64>,
+ size: usize,
+ page_size: usize,
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl AtomicBitmap {
+ /// Create a new bitmap covering `byte_size` bytes, with one bit per page of `page_size`
+ /// bytes. The page count is rounded up, and the backing vector is allocated in whole
+ /// 64-bit words.
+ pub fn new(byte_size: usize, page_size: usize) -> Self {
+ let mut num_pages = byte_size / page_size;
+ if byte_size % page_size > 0 {
+ num_pages += 1;
+ }
+
+ // Add one extra element just in case `num_pages` is not a multiple of `64`.
+ let map_size = num_pages / 64 + 1;
+ let map: Vec<AtomicU64> = (0..map_size).map(|_| AtomicU64::new(0)).collect();
+
+ AtomicBitmap {
+ map,
+ size: num_pages,
+ page_size,
+ }
+ }
+
+ /// Is bit `index` set? Bits outside the range of the bitmap are always unset.
+ pub fn is_bit_set(&self, index: usize) -> bool {
+ if index < self.size {
+ (self.map[index >> 6].load(Ordering::Acquire) & (1 << (index & 63))) != 0
+ } else {
+ // Out-of-range bits are always unset.
+ false
+ }
+ }
+
+ /// Is the bit corresponding to address `addr` set?
+ pub fn is_addr_set(&self, addr: usize) -> bool {
+ self.is_bit_set(addr / self.page_size)
+ }
+
+ /// Set a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap
+ /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds
+ /// to address `start_addr + len - 1`.
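+ ///
+ /// For example, with a 128-byte page size, a two-byte range that straddles a page
+ /// boundary marks both pages (illustrative only; assumes the `backend-bitmap` feature):
+ ///
+ /// ```ignore
+ /// # use vm_memory::bitmap::AtomicBitmap;
+ /// let b = AtomicBitmap::new(1024, 128);
+ /// b.set_addr_range(255, 2); // bytes 255..=256 touch pages 1 and 2
+ /// assert!(b.is_addr_set(128));
+ /// assert!(b.is_addr_set(256));
+ /// assert!(!b.is_addr_set(384));
+ /// ```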
+ pub fn set_addr_range(&self, start_addr: usize, len: usize) {
+ // Return early in the unlikely event that `len == 0` so the `len - 1` computation
+ // below does not underflow.
+ if len == 0 {
+ return;
+ }
+
+ let first_bit = start_addr / self.page_size;
+ // Handle input ranges where `start_addr + len - 1` would otherwise overflow an `usize`
+ // by ignoring pages at invalid addresses.
+ let last_bit = start_addr.saturating_add(len - 1) / self.page_size;
+ for n in first_bit..=last_bit {
+ if n >= self.size {
+ // Attempts to set bits beyond the end of the bitmap are simply ignored.
+ break;
+ }
+ self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
+ }
+ }
+
+ /// Get the length of the bitmap in bits (i.e. the number of pages it can represent).
+ pub fn len(&self) -> usize {
+ self.size
+ }
+
+ /// Atomically get and reset the dirty page bitmap.
+ pub fn get_and_reset(&self) -> Vec<u64> {
+ self.map
+ .iter()
+ .map(|u| u.fetch_and(0, Ordering::SeqCst))
+ .collect()
+ }
+
+ /// Reset all bitmap bits to 0.
+ pub fn reset(&self) {
+ for it in self.map.iter() {
+ it.store(0, Ordering::Release);
+ }
+ }
+}
+
+impl Clone for AtomicBitmap {
+ fn clone(&self) -> Self {
+ let map = self
+ .map
+ .iter()
+ .map(|i| i.load(Ordering::Acquire))
+ .map(AtomicU64::new)
+ .collect();
+ AtomicBitmap {
+ map,
+ size: self.size,
+ page_size: self.page_size,
+ }
+ }
+}
+
+impl<'a> WithBitmapSlice<'a> for AtomicBitmap {
+ type S = RefSlice<'a, Self>;
+}
+
+impl Bitmap for AtomicBitmap {
+ fn mark_dirty(&self, offset: usize, len: usize) {
+ self.set_addr_range(offset, len)
+ }
+
+ fn dirty_at(&self, offset: usize) -> bool {
+ self.is_addr_set(offset)
+ }
+
+ fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S {
+ RefSlice::new(self, offset)
+ }
+}
+
+impl Default for AtomicBitmap {
+ fn default() -> Self {
+ AtomicBitmap::new(0, 0x1000)
+ }
+}
+
+#[cfg(feature = "backend-mmap")]
+impl NewBitmap for AtomicBitmap {
+ fn with_len(len: usize) -> Self {
+ let page_size;
+
+ #[cfg(unix)]
+ {
+ // SAFETY: There's no unsafe potential in calling this function.
+ page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };
+ }
+
+ #[cfg(windows)]
+ {
+ use winapi::um::sysinfoapi::{GetSystemInfo, LPSYSTEM_INFO, SYSTEM_INFO};
+
+ // It's safe to initialize this object from a zeroed memory region.
+ let mut sysinfo: SYSTEM_INFO = unsafe { std::mem::zeroed() };
+
+ // It's safe to call this method as the pointer is based on the address
+ // of the previously initialized `sysinfo` object.
+ unsafe { GetSystemInfo(&mut sysinfo as LPSYSTEM_INFO) };
+
+ page_size = sysinfo.dwPageSize;
+ }
+
+ // The `unwrap` is safe to use because the above call should always succeed on the
+ // supported platforms, and the size of a page will always fit within a `usize`.
+ AtomicBitmap::new(len, usize::try_from(page_size).unwrap())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use crate::bitmap::tests::test_bitmap;
+
+ #[test]
+ fn test_bitmap_basic() {
+ // Test that bitmap size is properly rounded up.
+ let a = AtomicBitmap::new(1025, 128);
+ assert_eq!(a.len(), 9);
+
+ let b = AtomicBitmap::new(1024, 128);
+ assert_eq!(b.len(), 8);
+ b.set_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ #[allow(clippy::redundant_clone)]
+ let copy_b = b.clone();
+ assert!(copy_b.is_addr_set(256));
+ assert!(!copy_b.is_addr_set(384));
+
+ b.reset();
+ assert!(!b.is_addr_set(128));
+ assert!(!b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.set_addr_range(128, 129);
+ let v = b.get_and_reset();
+
+ assert!(!b.is_addr_set(128));
+ assert!(!b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ assert_eq!(v.len(), 1);
+ assert_eq!(v[0], 0b110);
+ }
+
+ #[test]
+ fn test_bitmap_out_of_range() {
+ let b = AtomicBitmap::new(1024, 1);
+ // Set a partial range that goes beyond the end of the bitmap
+ b.set_addr_range(768, 512);
+ assert!(b.is_addr_set(768));
+ // The bitmap is never set beyond its end.
+ assert!(!b.is_addr_set(1024));
+ assert!(!b.is_addr_set(1152));
+ }
+
+ #[test]
+ fn test_bitmap_impl() {
+ let b = AtomicBitmap::new(0x2000, 128);
+ test_bitmap(&b);
+ }
+}
diff --git a/src/bitmap/backend/atomic_bitmap_arc.rs b/src/bitmap/backend/atomic_bitmap_arc.rs
new file mode 100644
index 0000000..3545623
--- /dev/null
+++ b/src/bitmap/backend/atomic_bitmap_arc.rs
@@ -0,0 +1,86 @@
+// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+use std::ops::Deref;
+use std::sync::Arc;
+
+use crate::bitmap::{ArcSlice, AtomicBitmap, Bitmap, WithBitmapSlice};
+
+#[cfg(feature = "backend-mmap")]
+use crate::mmap::NewBitmap;
+
+/// A `Bitmap` implementation that's based on an atomically reference counted handle to an
+/// `AtomicBitmap` object.
+pub struct AtomicBitmapArc {
+ inner: Arc<AtomicBitmap>,
+}
+
+impl AtomicBitmapArc {
+ pub fn new(inner: AtomicBitmap) -> Self {
+ AtomicBitmapArc {
+ inner: Arc::new(inner),
+ }
+ }
+}
+
+// The current clone implementation creates a deep clone of the inner bitmap, as opposed to
+// simply cloning the `Arc`.
+impl Clone for AtomicBitmapArc {
+ fn clone(&self) -> Self {
+ Self::new(self.inner.deref().clone())
+ }
+}
+
+// Providing a `Deref` to `AtomicBitmap` implementation, so the methods of the inner object
+// can be called in a transparent manner.
+impl Deref for AtomicBitmapArc {
+ type Target = AtomicBitmap;
+
+ fn deref(&self) -> &Self::Target {
+ self.inner.deref()
+ }
+}
+
+impl WithBitmapSlice<'_> for AtomicBitmapArc {
+ type S = ArcSlice<AtomicBitmap>;
+}
+
+impl Bitmap for AtomicBitmapArc {
+ fn mark_dirty(&self, offset: usize, len: usize) {
+ self.inner.set_addr_range(offset, len)
+ }
+
+ fn dirty_at(&self, offset: usize) -> bool {
+ self.inner.is_addr_set(offset)
+ }
+
+ fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S {
+ ArcSlice::new(self.inner.clone(), offset)
+ }
+}
+
+impl Default for AtomicBitmapArc {
+ fn default() -> Self {
+ Self::new(AtomicBitmap::default())
+ }
+}
+
+#[cfg(feature = "backend-mmap")]
+impl NewBitmap for AtomicBitmapArc {
+ fn with_len(len: usize) -> Self {
+ Self::new(AtomicBitmap::with_len(len))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use crate::bitmap::tests::test_bitmap;
+
+ #[test]
+ fn test_bitmap_impl() {
+ let b = AtomicBitmapArc::new(AtomicBitmap::new(0x2000, 128));
+ test_bitmap(&b);
+ }
+}
diff --git a/src/bitmap/backend/mod.rs b/src/bitmap/backend/mod.rs
new file mode 100644
index 0000000..256585e
--- /dev/null
+++ b/src/bitmap/backend/mod.rs
@@ -0,0 +1,10 @@
+// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+mod atomic_bitmap;
+mod atomic_bitmap_arc;
+mod slice;
+
+pub use atomic_bitmap::AtomicBitmap;
+pub use atomic_bitmap_arc::AtomicBitmapArc;
+pub use slice::{ArcSlice, RefSlice};
diff --git a/src/bitmap/backend/slice.rs b/src/bitmap/backend/slice.rs
new file mode 100644
index 0000000..913a2f5
--- /dev/null
+++ b/src/bitmap/backend/slice.rs
@@ -0,0 +1,129 @@
+// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Contains a generic implementation of `BitmapSlice`.
+
+use std::fmt::{self, Debug};
+use std::ops::Deref;
+use std::sync::Arc;
+
+use crate::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice};
+
+/// Represents a slice into a `Bitmap` object, starting at `base_offset`.
+#[derive(Clone, Copy)]
+pub struct BaseSlice<B> {
+ inner: B,
+ base_offset: usize,
+}
+
+impl<B> BaseSlice<B> {
+ /// Create a new `BitmapSlice`, starting at the specified `offset`.
+ pub fn new(inner: B, offset: usize) -> Self {
+ BaseSlice {
+ inner,
+ base_offset: offset,
+ }
+ }
+}
+
+impl<'a, B> WithBitmapSlice<'a> for BaseSlice<B>
+where
+ B: Clone + Deref,
+ B::Target: Bitmap,
+{
+ type S = Self;
+}
+
+impl<B> BitmapSlice for BaseSlice<B>
+where
+ B: Clone + Deref,
+ B::Target: Bitmap,
+{
+}
+
+impl<B> Bitmap for BaseSlice<B>
+where
+ B: Clone + Deref,
+ B::Target: Bitmap,
+{
+ /// Mark the memory range specified by the given `offset` (relative to the base offset of
+ /// the slice) and `len` as dirtied.
+ fn mark_dirty(&self, offset: usize, len: usize) {
+ // The `Bitmap` operations are supposed to accompany guest memory accesses defined by the
+ // same parameters (i.e. offset & length), so we use simple wrapping arithmetic instead of
+ // performing additional checks. If an overflow would occur, we simply end up marking some
+ // other region as dirty (which is just a false positive) instead of a region that could
+ // not have been accessed to begin with.
+ self.inner
+ .mark_dirty(self.base_offset.wrapping_add(offset), len)
+ }
+
+ fn dirty_at(&self, offset: usize) -> bool {
+ self.inner.dirty_at(self.base_offset.wrapping_add(offset))
+ }
+
+ /// Create a new `BitmapSlice` starting from the specified `offset` into the current slice.
+ fn slice_at(&self, offset: usize) -> Self {
+ BaseSlice {
+ inner: self.inner.clone(),
+ base_offset: self.base_offset.wrapping_add(offset),
+ }
+ }
+}
+
+impl<B> Debug for BaseSlice<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Dummy impl for now.
+ write!(f, "(bitmap slice)")
+ }
+}
+
+impl<B: Default> Default for BaseSlice<B> {
+ fn default() -> Self {
+ BaseSlice {
+ inner: B::default(),
+ base_offset: 0,
+ }
+ }
+}
+
+/// A `BitmapSlice` implementation that wraps a reference to a `Bitmap` object.
+pub type RefSlice<'a, B> = BaseSlice<&'a B>;
+
+/// A `BitmapSlice` implementation that uses an `Arc` handle to a `Bitmap` object.
+pub type ArcSlice<B> = BaseSlice<Arc<B>>;
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap};
+ use crate::bitmap::AtomicBitmap;
+
+ #[test]
+ fn test_slice() {
+ let bitmap_size = 0x1_0000;
+ let dirty_offset = 0x1000;
+ let dirty_len = 0x100;
+
+ {
+ let bitmap = AtomicBitmap::new(bitmap_size, 1);
+ let slice1 = bitmap.slice_at(0);
+ let slice2 = bitmap.slice_at(dirty_offset);
+
+ assert!(range_is_clean(&slice1, 0, bitmap_size));
+ assert!(range_is_clean(&slice2, 0, dirty_len));
+
+ bitmap.mark_dirty(dirty_offset, dirty_len);
+
+ assert!(range_is_dirty(&slice1, dirty_offset, dirty_len));
+ assert!(range_is_dirty(&slice2, 0, dirty_len));
+ }
+
+ {
+ let bitmap = AtomicBitmap::new(bitmap_size, 1);
+ let slice = bitmap.slice_at(0);
+ test_bitmap(&slice);
+ }
+ }
+}
diff --git a/src/bitmap/mod.rs b/src/bitmap/mod.rs
new file mode 100644
index 0000000..e8c0987
--- /dev/null
+++ b/src/bitmap/mod.rs
@@ -0,0 +1,416 @@
+// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! This module holds abstractions that enable tracking the areas dirtied by writes of a specified
+//! length to a given offset. In particular, this is used to track write accesses within a
+//! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the
+//! global view for an entire `GuestMemory` object.
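+//!
+//! A minimal sketch of the `Bitmap` interface using the `AtomicBitmap` backend (illustrative
+//! only; assumes the `backend-bitmap` feature is enabled):
+//!
+//! ```ignore
+//! use vm_memory::bitmap::{AtomicBitmap, Bitmap};
+//!
+//! // Track 0x2000 bytes with a 0x1000-byte page size.
+//! let bitmap = AtomicBitmap::new(0x2000, 0x1000);
+//! bitmap.mark_dirty(0x1000, 0x100);
+//! assert!(bitmap.dirty_at(0x1000));
+//! assert!(!bitmap.dirty_at(0x0));
+//!
+//! // Slices share the underlying bitmap, with their offsets rebased.
+//! let slice = bitmap.slice_at(0x1000);
+//! assert!(slice.dirty_at(0));
+//! ```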
+
+#[cfg(any(test, feature = "backend-bitmap"))]
+mod backend;
+
+use std::fmt::Debug;
+
+use crate::{GuestMemory, GuestMemoryRegion};
+
+#[cfg(any(test, feature = "backend-bitmap"))]
+pub use backend::{ArcSlice, AtomicBitmap, RefSlice};
+
+/// Trait implemented by types that support creating `BitmapSlice` objects.
+pub trait WithBitmapSlice<'a> {
+ /// Type of the bitmap slice.
+ type S: BitmapSlice;
+}
+
+/// Trait used to represent that a `BitmapSlice` is a `Bitmap` itself, but also satisfies the
+/// restriction that slices created from it have the same type as `Self`.
+pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {}
+
+/// Common bitmap operations. Using Higher-Rank Trait Bounds (HRTBs) to effectively define
+/// an associated type that has a lifetime parameter, without tagging the `Bitmap` trait with
+/// a lifetime as well.
+///
+/// Using an associated type allows implementing the `Bitmap` and `BitmapSlice` functionality
+/// as a zero-cost abstraction when providing trivial implementations such as the one
+/// defined for `()`.
+// These methods represent the core functionality that's required by `vm-memory` abstractions
+// to implement generic tracking logic, as well as tests that can be reused by different backends.
+pub trait Bitmap: for<'a> WithBitmapSlice<'a> {
+ /// Mark the memory range specified by the given `offset` and `len` as dirtied.
+ fn mark_dirty(&self, offset: usize, len: usize);
+
+ /// Check whether the specified `offset` is marked as dirty.
+ fn dirty_at(&self, offset: usize) -> bool;
+
+ /// Return a `<Self as WithBitmapSlice>::S` slice of the current bitmap, starting at
+ /// the specified `offset`.
+ fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S;
+}
+
+/// A no-op `Bitmap` implementation that can be provided for backends that do not actually
+/// require the tracking functionality.
+
+impl<'a> WithBitmapSlice<'a> for () {
+ type S = Self;
+}
+
+impl BitmapSlice for () {}
+
+impl Bitmap for () {
+ fn mark_dirty(&self, _offset: usize, _len: usize) {}
+
+ fn dirty_at(&self, _offset: usize) -> bool {
+ false
+ }
+
+ fn slice_at(&self, _offset: usize) -> Self {}
+}
+
+/// A `Bitmap` and `BitmapSlice` implementation for `Option<B>`.
+
+impl<'a, B> WithBitmapSlice<'a> for Option<B>
+where
+ B: WithBitmapSlice<'a>,
+{
+ type S = Option<B::S>;
+}
+
+impl<B: BitmapSlice> BitmapSlice for Option<B> {}
+
+impl<B: Bitmap> Bitmap for Option<B> {
+ fn mark_dirty(&self, offset: usize, len: usize) {
+ if let Some(inner) = self {
+ inner.mark_dirty(offset, len)
+ }
+ }
+
+ fn dirty_at(&self, offset: usize) -> bool {
+ if let Some(inner) = self {
+ return inner.dirty_at(offset);
+ }
+ false
+ }
+
+ fn slice_at(&self, offset: usize) -> Option<<B as WithBitmapSlice>::S> {
+ if let Some(inner) = self {
+ return Some(inner.slice_at(offset));
+ }
+ None
+ }
+}
+
+/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
+/// an object `B: WithBitmapSlice<'a>`.
+pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;
+
+/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
+/// the memory regions of an object `M: GuestMemory`.
+pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;
+
+#[cfg(test)]
+pub(crate) mod tests {
+ use super::*;
+
+ use std::io::Cursor;
+ use std::marker::PhantomData;
+ use std::mem::size_of_val;
+ use std::result::Result;
+ use std::sync::atomic::Ordering;
+
+ use crate::{Bytes, VolatileMemory};
+ #[cfg(feature = "backend-mmap")]
+ use crate::{GuestAddress, MemoryRegionAddress};
+
+ // Helper method to check whether a specified range is clean.
+ pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+ (start..start + len).all(|offset| !b.dirty_at(offset))
+ }
+
+ // Helper method to check whether a specified range is dirty.
+ pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+ (start..start + len).all(|offset| b.dirty_at(offset))
+ }
+
+ pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
+ if clean {
+ range_is_clean(b, start, len)
+ } else {
+ range_is_dirty(b, start, len)
+ }
+ }
+
+ // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
+ // an area of length at least 0x2000.
+ pub fn test_bitmap<B: Bitmap>(b: &B) {
+ let len = 0x2000;
+ let dirty_offset = 0x1000;
+ let dirty_len = 0x100;
+
+ // Some basic checks.
+ let s = b.slice_at(dirty_offset);
+
+ assert!(range_is_clean(b, 0, len));
+ assert!(range_is_clean(&s, 0, dirty_len));
+
+ b.mark_dirty(dirty_offset, dirty_len);
+ assert!(range_is_dirty(b, dirty_offset, dirty_len));
+ assert!(range_is_dirty(&s, 0, dirty_len));
+ }
+
+ #[derive(Debug)]
+ pub enum TestAccessError {
+ RangeCleanCheck,
+ RangeDirtyCheck,
+ }
+
+ // A helper object that implements auxiliary operations for testing `Bytes` implementations
+ // in the context of dirty bitmap tracking.
+ struct BytesHelper<F, G, M> {
+ check_range_fn: F,
+ address_fn: G,
+ phantom: PhantomData<*const M>,
+ }
+
+ // `F` represents a closure that checks whether a specified range associated with the `Bytes`
+ // object that's being tested is marked as dirty or not (depending on the value of the last
+ // parameter). It has the following parameters:
+ // - A reference to a `Bytes` implementations that's subject to testing.
+ // - The offset of the range.
+ // - The length of the range.
+ // - Whether we are checking if the range is clean (when `true`) or marked as dirty.
+ //
+ // `G` represents a closure that translates an offset into an address value that's
+ // relevant for the `Bytes` implementation being tested.
+ impl<F, G, M, A> BytesHelper<F, G, M>
+ where
+ F: Fn(&M, usize, usize, bool) -> bool,
+ G: Fn(usize) -> A,
+ M: Bytes<A>,
+ {
+ fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
+ (self.check_range_fn)(m, start, len, clean)
+ }
+
+ fn address(&self, offset: usize) -> A {
+ (self.address_fn)(offset)
+ }
+
+ fn test_access<Op>(
+ &self,
+ bytes: &M,
+ dirty_offset: usize,
+ dirty_len: usize,
+ op: Op,
+ ) -> Result<(), TestAccessError>
+ where
+ Op: Fn(&M, A),
+ {
+ if !self.check_range(bytes, dirty_offset, dirty_len, true) {
+ return Err(TestAccessError::RangeCleanCheck);
+ }
+
+ op(bytes, self.address(dirty_offset));
+
+ if !self.check_range(bytes, dirty_offset, dirty_len, false) {
+ return Err(TestAccessError::RangeDirtyCheck);
+ }
+
+ Ok(())
+ }
+ }
+
+ // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
+ // The `step` parameter represents the offset that's added to the current address after
+ // performing each access. It provides finer grained control when testing tracking
+ // implementations that aggregate entire ranges for accounting purposes (for example, doing
+ // tracking at the page level).
+ pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
+ where
+ F: Fn(&M, usize, usize, bool) -> bool,
+ G: Fn(usize) -> A,
+ A: Copy,
+ M: Bytes<A>,
+ <M as Bytes<A>>::E: Debug,
+ {
+ const BUF_SIZE: usize = 1024;
+ let buf = vec![1u8; BUF_SIZE];
+
+ let val = 1u64;
+
+ let h = BytesHelper {
+ check_range_fn,
+ address_fn,
+ phantom: PhantomData,
+ };
+
+ let mut dirty_offset = 0x1000;
+
+ // Test `write`.
+ h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+ assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
+ })
+ .unwrap();
+ dirty_offset += step;
+
+ // Test `write_slice`.
+ h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+ m.write_slice(buf.as_slice(), addr).unwrap()
+ })
+ .unwrap();
+ dirty_offset += step;
+
+ // Test `write_obj`.
+ h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
+ m.write_obj(val, addr).unwrap()
+ })
+ .unwrap();
+ dirty_offset += step;
+
+ // Test `read_from`.
+ h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+ assert_eq!(
+ m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
+ BUF_SIZE
+ )
+ })
+ .unwrap();
+ dirty_offset += step;
+
+ // Test `read_exact_from`.
+ h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+ m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
+ .unwrap()
+ })
+ .unwrap();
+ dirty_offset += step;
+
+ // Test `store`.
+ h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
+ m.store(val, addr, Ordering::Relaxed).unwrap()
+ })
+ .unwrap();
+ }
+
+ // This function and the next are currently conditionally compiled because we only use
+ // them to test the mmap-based backend implementations for now. Going forward, the generic
+ // test functions defined here can be placed in a separate module (i.e. `test_utilities`)
+ // which is gated by a feature and can be used for testing purposes by other crates as well.
+ #[cfg(feature = "backend-mmap")]
+ fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
+ let dirty_addr = MemoryRegionAddress(0x0);
+ let val = 123u64;
+ let dirty_len = size_of_val(&val);
+
+ let slice = region.get_slice(dirty_addr, dirty_len).unwrap();
+
+ assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
+ assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
+
+ region.write_obj(val, dirty_addr).unwrap();
+
+ assert!(range_is_dirty(
+ region.bitmap(),
+ dirty_addr.0 as usize,
+ dirty_len
+ ));
+
+ assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
+
+ // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same
+ // `region` handle because `test_bytes` starts performing writes after the range that's
+ // been already dirtied in the first part of this test.
+ test_bytes(
+ region,
+ |r: &R, start: usize, len: usize, clean: bool| {
+ check_range(r.bitmap(), start, len, clean)
+ },
+ |offset| MemoryRegionAddress(offset as u64),
+ 0x1000,
+ );
+ }
+
+ #[cfg(feature = "backend-mmap")]
+ // Assumptions about M generated by f ...
+ pub fn test_guest_memory_and_region<M, F>(f: F)
+ where
+ M: GuestMemory,
+ F: Fn() -> M,
+ {
+ let m = f();
+ let dirty_addr = GuestAddress(0x1000);
+ let val = 123u64;
+ let dirty_len = size_of_val(&val);
+
+ let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
+ let slice = m.get_slice(dirty_addr, dirty_len).unwrap();
+
+ assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
+ assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
+
+ m.write_obj(val, dirty_addr).unwrap();
+
+ assert!(range_is_dirty(
+ region.bitmap(),
+ region_addr.0 as usize,
+ dirty_len
+ ));
+
+ assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
+
+ // Now let's invoke the tests for the inner `GuestMemoryRegion` type.
+ test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());
+
+ // Finally, let's invoke the generic tests for `Bytes`.
+ let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
+ let mut check_result = true;
+ m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| {
+ if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) {
+ check_result = false;
+ }
+ Ok(size)
+ })
+ .unwrap();
+
+ check_result
+ };
+
+ test_bytes(
+ &f(),
+ check_range_closure,
+ |offset| GuestAddress(offset as u64),
+ 0x1000,
+ );
+ }
+
+ pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
+ assert!(m.len() >= 0x8000);
+
+ let dirty_offset = 0x1000;
+ let val = 123u64;
+ let dirty_len = size_of_val(&val);
+
+ let get_ref_offset = 0x2000;
+ let array_ref_offset = 0x3000;
+
+ let s1 = m.as_volatile_slice();
+ let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();
+
+ assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
+ assert!(range_is_clean(s2.bitmap(), 0, s2.len()));
+
+ s1.write_obj(val, dirty_offset).unwrap();
+
+ assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
+ assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
+
+ let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
+ assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
+ v_ref.store(val);
+ assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));
+
+ let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
+ assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
+ arr_ref.store(0, val);
+ assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
+ }
+}
diff --git a/src/bytes.rs b/src/bytes.rs
new file mode 100644
index 0000000..2430708
--- /dev/null
+++ b/src/bytes.rs
@@ -0,0 +1,539 @@
+// Portions Copyright 2019 Red Hat, Inc.
+//
+// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Define the `ByteValued` trait to mark types that are safe to instantiate from random data,
+//! along with the `Bytes` trait for reading and writing such types in byte-addressable containers.
+
+use std::io::{Read, Write};
+use std::mem::size_of;
+use std::result::Result;
+use std::slice::{from_raw_parts, from_raw_parts_mut};
+use std::sync::atomic::Ordering;
+
+use crate::atomic_integer::AtomicInteger;
+use crate::volatile_memory::VolatileSlice;
+
+/// Types for which it is safe to initialize from raw data.
+///
+/// # Safety
+///
+/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
+/// byte array. This is generally true for all plain-old-data structs. It is notably not true for
+/// any type that includes a reference. It is generally also not safe for non-packed structs, as
+/// compiler-inserted padding is considered uninitialized memory, and thus reading or writing it
+/// will cause undefined behavior.
+///
+/// Implementing this trait guarantees that it is safe to instantiate the type with random data.
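+///
+/// # Examples
+///
+/// A minimal sketch of opting in for a plain-old-data struct; the `Packet` type below is
+/// purely illustrative:
+///
+/// ```
+/// # use vm_memory::ByteValued;
+/// #[repr(C)]
+/// #[derive(Copy, Clone, Default)]
+/// struct Packet {
+/// id: u32,
+/// len: u32,
+/// }
+///
+/// // SAFETY: `Packet` is `#[repr(C)]`, contains only integer fields and has no padding.
+/// unsafe impl ByteValued for Packet {}
+///
+/// let p = Packet { id: 1, len: 8 };
+/// // Reinterpret the struct as raw bytes, then build a reference back from those bytes.
+/// assert_eq!(p.as_slice().len(), 8);
+/// let q = Packet::from_slice(p.as_slice()).unwrap();
+/// assert_eq!(q.id, 1);
+/// ```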
+pub unsafe trait ByteValued: Copy + Default + Send + Sync {
+ /// Converts a slice of raw data into a reference of `Self`.
+ ///
+ /// The value of `data` is not copied. Instead a reference is made from the given slice. The
+ /// value of `Self` will depend on the representation of the type in memory, and may change in
+ /// an unstable fashion.
+ ///
+ /// This will return `None` if the length of data does not match the size of `Self`, or if the
+ /// data is not aligned for the type of `Self`.
+ fn from_slice(data: &[u8]) -> Option<&Self> {
+ // Early out to avoid an unneeded `align_to` call.
+ if data.len() != size_of::<Self>() {
+ return None;
+ }
+
+ // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
+ // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
+ // ensures that we don't have any unaligned references. This aliases a pointer, but because
+ // the pointer is from a const slice reference, there are no mutable aliases. Finally, the
+ // reference returned can not outlive data because they have equal implicit lifetime
+ // constraints.
+ match unsafe { data.align_to::<Self>() } {
+ ([], [mid], []) => Some(mid),
+ _ => None,
+ }
+ }
+
+ /// Converts a mutable slice of raw data into a mutable reference of `Self`.
+ ///
+ /// Because `Self` is made from a reference to the mutable slice, mutations to the returned
+ /// reference are immediately reflected in `data`. The value of the returned `Self` will depend
+ /// on the representation of the type in memory, and may change in an unstable fashion.
+ ///
+ /// This will return `None` if the length of data does not match the size of `Self`, or if the
+ /// data is not aligned for the type of `Self`.
+ fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
+ // Early out to avoid an unneeded `align_to_mut` call.
+ if data.len() != size_of::<Self>() {
+ return None;
+ }
+
+ // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
+ // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
+ // ensures that we don't have any unaligned references. This aliases a pointer, but because
+ // the pointer is from a mut slice reference, we borrow the passed in mutable reference.
+ // Finally, the reference returned can not outlive data because they have equal implicit
+ // lifetime constraints.
+ match unsafe { data.align_to_mut::<Self>() } {
+ ([], [mid], []) => Some(mid),
+ _ => None,
+ }
+ }
+
+ /// Converts a reference to `self` into a slice of bytes.
+ ///
+ /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
+ /// The value of bytes in the returned slice will depend on the representation of the type in
+ /// memory, and may change in an unstable fashion.
+ fn as_slice(&self) -> &[u8] {
+ // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
+ // guarantees it. The lifetime of the returned slice is the same as the passed reference,
+ // so that no dangling pointers will result from this pointer alias.
+ unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
+ }
+
+ /// Converts a mutable reference to `self` into a mutable slice of bytes.
+ ///
+ /// Because the slice is made from a reference to `self`, mutations to the returned slice are
+ /// immediately reflected in `self`. The value of bytes in the returned slice will depend on
+ /// the representation of the type in memory, and may change in an unstable fashion.
+ fn as_mut_slice(&mut self) -> &mut [u8] {
+ // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
+ // guarantees it. The trait also guarantees that any combination of bytes is valid for this
+ // type, so modifying them in the form of a byte slice is valid. The lifetime of the
+ // returned slice is the same as the passed reference, so that no dangling pointers will
+ // result from this pointer alias. Although this does alias a mutable pointer, we do so by
+ // exclusively borrowing the given mutable reference.
+ unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
+ }
+
+ /// Converts a mutable reference to `self` into a `VolatileSlice`. This is
+ /// useful because `VolatileSlice` provides a `Bytes<usize>` implementation.
+ ///
+ /// # Safety
+ ///
+ /// Unlike most `VolatileMemory` implementations, this method requires an exclusive
+ /// reference to `self`; this trivially fulfills `VolatileSlice::new`'s requirement
+ /// that all accesses to `self` use volatile accesses (because there can
+ /// be no other accesses).
+ fn as_bytes(&mut self) -> VolatileSlice {
+ // SAFETY: This is safe because the lifetime is the same as self
+ unsafe { VolatileSlice::new(self as *mut Self as *mut _, size_of::<Self>()) }
+ }
+}
+
+macro_rules! byte_valued_array {
+ ($T:ty, $($N:expr)+) => {
+ $(
+ // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued.
+ // They are just numbers.
+ unsafe impl ByteValued for [$T; $N] {}
+ )+
+ }
+}
+
+macro_rules! byte_valued_type {
+ ($T:ty) => {
+ // SAFETY: Safe as long as `$T` is POD.
+ // We are using this macro to generate the implementations for the integer types below.
+ unsafe impl ByteValued for $T {}
+ byte_valued_array! {
+ $T,
+ 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32
+ }
+ };
+}
+
+byte_valued_type!(u8);
+byte_valued_type!(u16);
+byte_valued_type!(u32);
+byte_valued_type!(u64);
+byte_valued_type!(u128);
+byte_valued_type!(usize);
+byte_valued_type!(i8);
+byte_valued_type!(i16);
+byte_valued_type!(i32);
+byte_valued_type!(i64);
+byte_valued_type!(i128);
+byte_valued_type!(isize);
+
+/// A trait used to identify types which can be accessed atomically by proxy.
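+///
+/// # Examples
+///
+/// A small sketch showing how the associated atomic type can be inspected generically; the
+/// `atomic_type_name` helper is only illustrative:
+///
+/// ```
+/// # use vm_memory::AtomicAccess;
+/// fn atomic_type_name<T: AtomicAccess>() -> &'static str {
+/// std::any::type_name::<T::A>()
+/// }
+///
+/// assert!(atomic_type_name::<u32>().ends_with("AtomicU32"));
+/// ```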
+pub trait AtomicAccess:
+ ByteValued
+ // Could not find a more succinct way of stating that `Self` can be converted
+ // into `Self::A::V`, and the other way around.
+ + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
+ + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
+{
+ /// The `AtomicInteger` that atomic operations on `Self` are based on.
+ type A: AtomicInteger;
+}
+
+macro_rules! impl_atomic_access {
+ ($T:ty, $A:path) => {
+ impl AtomicAccess for $T {
+ type A = $A;
+ }
+ };
+}
+
+impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
+impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
+impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
+#[cfg(any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "powerpc64",
+ target_arch = "s390x"
+))]
+impl_atomic_access!(i64, std::sync::atomic::AtomicI64);
+
+impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
+impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
+impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
+#[cfg(any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "powerpc64",
+ target_arch = "s390x"
+))]
+impl_atomic_access!(u64, std::sync::atomic::AtomicU64);
+
+impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
+impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);
+
+/// A container to host a range of bytes and access its content.
+///
+/// Candidates which may implement this trait include:
+/// - anonymous memory areas
+/// - mmapped memory areas
+/// - data files
+/// - a proxy to access memory on a remote host
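+///
+/// # Examples (uses the `backend-mmap` feature)
+///
+/// A brief sketch of round-tripping a plain value through one possible implementation;
+/// `GuestMemoryMmap` is used here purely as an example backend:
+///
+/// ```
+/// # #[cfg(feature = "backend-mmap")]
+/// # {
+/// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+/// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
+/// .expect("Could not create guest memory");
+///
+/// mem.write_obj(0xdead_beef_u32, GuestAddress(0x1000)).unwrap();
+/// assert_eq!(mem.read_obj::<u32>(GuestAddress(0x1000)).unwrap(), 0xdead_beef_u32);
+/// # }
+/// ```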
+pub trait Bytes<A> {
+ /// Associated error codes
+ type E;
+
+ /// Writes a slice into the container at `addr`.
+ ///
+ /// Returns the number of bytes written. The number of bytes written can
+ /// be less than the length of the slice if there isn't enough room in the
+ /// container.
+ fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;
+
+ /// Reads data from the container at `addr` into a slice.
+ ///
+ /// Returns the number of bytes read. The number of bytes read can be less than the length
+ /// of the slice if there isn't enough data within the container.
+ fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;
+
+ /// Writes the entire content of a slice into the container at `addr`.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if there isn't enough space within the container to write the entire slice.
+ /// Part of the data may have been copied nevertheless.
+ fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;
+
+ /// Reads data from the container at `addr` to fill an entire slice.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if there isn't enough data within the container to fill the entire slice.
+ /// Part of the data may have been copied nevertheless.
+ fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;
+
+ /// Writes an object into the container at `addr`.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if the object doesn't fit inside the container.
+ fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
+ self.write_slice(val.as_slice(), addr)
+ }
+
+ /// Reads an object from the container at `addr`.
+ ///
+ /// Reading from a volatile area isn't strictly safe as it could change mid-read.
+ /// However, as long as the type T is plain old data and can handle random initialization,
+ /// everything will be OK.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if there's not enough data inside the container.
+ fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
+ let mut result: T = Default::default();
+ self.read_slice(result.as_mut_slice(), addr).map(|_| result)
+ }
+
+ /// Reads up to `count` bytes from an object and writes them into the container at `addr`.
+ ///
+ /// Returns the number of bytes written into the container.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin writing at this address.
+ /// * `src` - Copy from `src` into the container.
+ /// * `count` - Copy `count` bytes from `src` into the container.
+ fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
+ where
+ F: Read;
+
+ /// Reads exactly `count` bytes from an object and writes them into the container at `addr`.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if `count` bytes couldn't be copied from `src` to the container.
+ /// Part of the data may have been copied nevertheless.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin writing at this address.
+ /// * `src` - Copy from `src` into the container.
+ /// * `count` - Copy exactly `count` bytes from `src` into the container.
+ fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
+ where
+ F: Read;
+
+ /// Reads up to `count` bytes from the container at `addr` and writes them into an object.
+ ///
+ /// Returns the number of bytes written into the object.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin reading from this address.
+ /// * `dst` - Copy from the container to `dst`.
+ /// * `count` - Copy `count` bytes from the container to `dst`.
+ fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
+ where
+ F: Write;
+
+ /// Reads exactly `count` bytes from the container at `addr` and writes them into an object.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if `count` bytes couldn't be copied from the container to `dst`.
+ /// Part of the data may have been copied nevertheless.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin reading from this address.
+ /// * `dst` - Copy from the container to `dst`.
+ /// * `count` - Copy exactly `count` bytes from the container to `dst`.
+ fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
+ where
+ F: Write;
+
+ /// Atomically store a value at the specified address.
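+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// A short sketch of an atomic store followed by an atomic load; `GuestMemoryMmap` is
+ /// used here purely as an example backend:
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use std::sync::atomic::Ordering;
+ /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+ /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
+ ///
+ /// mem.store(7u32, GuestAddress(0x1000), Ordering::Relaxed).unwrap();
+ /// assert_eq!(mem.load::<u32>(GuestAddress(0x1000), Ordering::Relaxed).unwrap(), 7);
+ /// # }
+ /// ```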
+ fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;
+
+ /// Atomically load a value from the specified address.
+ fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+ use super::*;
+
+ use std::cell::RefCell;
+ use std::fmt::Debug;
+ use std::mem::align_of;
+
+ // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be
+ // zero-initialized.
+ pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
+ where
+ A: Copy,
+ B: Bytes<A>,
+ B::E: Debug,
+ {
+ let val = 100u32;
+
+ assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
+ b.store(val, addr, Ordering::Relaxed).unwrap();
+ assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);
+
+ assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
+ assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
+ }
+
+ fn check_byte_valued_type<T>()
+ where
+ T: ByteValued + PartialEq + Debug + Default,
+ {
+ let mut data = [0u8; 48];
+ let pre_len = {
+ let (pre, _, _) = unsafe { data.align_to::<T>() };
+ pre.len()
+ };
+ {
+ let aligned_data = &mut data[pre_len..pre_len + size_of::<T>()];
+ {
+ let mut val: T = Default::default();
+ assert_eq!(T::from_slice(aligned_data), Some(&val));
+ assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val));
+ assert_eq!(val.as_slice(), aligned_data);
+ assert_eq!(val.as_mut_slice(), aligned_data);
+ }
+ }
+ for i in 1..size_of::<T>().min(align_of::<T>()) {
+ let begin = pre_len + i;
+ let end = begin + size_of::<T>();
+ let unaligned_data = &mut data[begin..end];
+ {
+ if align_of::<T>() != 1 {
+ assert_eq!(T::from_slice(unaligned_data), None);
+ assert_eq!(T::from_mut_slice(unaligned_data), None);
+ }
+ }
+ }
+ // Check the early out condition
+ {
+ assert!(T::from_slice(&data).is_none());
+ assert!(T::from_mut_slice(&mut data).is_none());
+ }
+ }
+
+ #[test]
+ fn test_byte_valued() {
+ check_byte_valued_type::<u8>();
+ check_byte_valued_type::<u16>();
+ check_byte_valued_type::<u32>();
+ check_byte_valued_type::<u64>();
+ check_byte_valued_type::<u128>();
+ check_byte_valued_type::<usize>();
+ check_byte_valued_type::<i8>();
+ check_byte_valued_type::<i16>();
+ check_byte_valued_type::<i32>();
+ check_byte_valued_type::<i64>();
+ check_byte_valued_type::<i128>();
+ check_byte_valued_type::<isize>();
+ }
+
+ pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10;
+
+ pub struct MockBytesContainer {
+ container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>,
+ }
+
+ impl MockBytesContainer {
+ pub fn new() -> Self {
+ MockBytesContainer {
+ container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]),
+ }
+ }
+
+ pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> {
+ // Reject accesses that would run past the end of the container.
+ if buf.len() > MOCK_BYTES_CONTAINER_SIZE || addr > MOCK_BYTES_CONTAINER_SIZE - buf.len() {
+ return Err(());
+ }
+
+ Ok(())
+ }
+ }
+
+ impl Bytes<usize> for MockBytesContainer {
+ type E = ();
+
+ fn write(&self, _: &[u8], _: usize) -> Result<usize, Self::E> {
+ unimplemented!()
+ }
+
+ fn read(&self, _: &mut [u8], _: usize) -> Result<usize, Self::E> {
+ unimplemented!()
+ }
+
+ fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> {
+ self.validate_slice_op(buf, addr)?;
+
+ let mut container = self.container.borrow_mut();
+ container[addr..addr + buf.len()].copy_from_slice(buf);
+
+ Ok(())
+ }
+
+ fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> {
+ self.validate_slice_op(buf, addr)?;
+
+ let container = self.container.borrow();
+ buf.copy_from_slice(&container[addr..addr + buf.len()]);
+
+ Ok(())
+ }
+
+ fn read_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
+ where
+ F: Read,
+ {
+ unimplemented!()
+ }
+
+ fn read_exact_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
+ where
+ F: Read,
+ {
+ unimplemented!()
+ }
+
+ fn write_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
+ where
+ F: Write,
+ {
+ unimplemented!()
+ }
+
+ fn write_all_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
+ where
+ F: Write,
+ {
+ unimplemented!()
+ }
+
+ fn store<T: AtomicAccess>(
+ &self,
+ _val: T,
+ _addr: usize,
+ _order: Ordering,
+ ) -> Result<(), Self::E> {
+ unimplemented!()
+ }
+
+ fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
+ unimplemented!()
+ }
+ }
+
+ #[test]
+ fn test_bytes() {
+ let bytes = MockBytesContainer::new();
+
+ assert!(bytes.write_obj(std::u64::MAX, 0).is_ok());
+ assert_eq!(bytes.read_obj::<u64>(0).unwrap(), std::u64::MAX);
+
+ assert!(bytes
+ .write_obj(std::u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
+ .is_err());
+ assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
+ }
+
+ #[repr(C)]
+ #[derive(Copy, Clone, Default)]
+ struct S {
+ a: u32,
+ b: u32,
+ }
+
+ unsafe impl ByteValued for S {}
+
+ #[test]
+ fn byte_valued_slice() {
+ let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];
+ let mut s: S = Default::default();
+ s.as_bytes().copy_from(&a);
+ assert_eq!(s.a, 0);
+ assert_eq!(s.b, 0x0101_0101);
+ }
+}
diff --git a/src/endian.rs b/src/endian.rs
new file mode 100644
index 0000000..36e1352
--- /dev/null
+++ b/src/endian.rs
@@ -0,0 +1,158 @@
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Explicit endian types useful for embedding in structs or reinterpreting data.
+//!
+//! Each endian type is guaranteed to have the same size and alignment as a regular unsigned
+//! primitive of the same size.
+//!
+//! # Examples
+//!
+//! ```
+//! # use vm_memory::{Be32, Le32};
+//! #
+//! let b: Be32 = From::from(3);
+//! let l: Le32 = From::from(3);
+//!
+//! assert_eq!(b.to_native(), 3);
+//! assert_eq!(l.to_native(), 3);
+//! assert!(b == 3);
+//! assert!(l == 3);
+//!
+//! let b_trans: u32 = unsafe { std::mem::transmute(b) };
+//! let l_trans: u32 = unsafe { std::mem::transmute(l) };
+//!
+//! #[cfg(target_endian = "little")]
+//! assert_eq!(l_trans, 3);
+//! #[cfg(target_endian = "big")]
+//! assert_eq!(b_trans, 3);
+//!
+//! assert_ne!(b_trans, l_trans);
+//! ```
+
+use std::mem::{align_of, size_of};
+
+use crate::bytes::ByteValued;
+
+macro_rules! const_assert {
+ ($condition:expr) => {
+ let _ = [(); 0 - !$condition as usize];
+ };
+}
+
+macro_rules! endian_type {
+ ($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => {
+ /// An unsigned integer type with an explicit endianness.
+ ///
+ /// See module level documentation for examples.
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ pub struct $new_type($old_type);
+
+ impl $new_type {
+ fn _assert() {
+ const_assert!(align_of::<$new_type>() == align_of::<$old_type>());
+ const_assert!(size_of::<$new_type>() == size_of::<$old_type>());
+ }
+
+ /// Converts `self` to the native endianness.
+ pub fn to_native(self) -> $old_type {
+ $old_type::$from_new(self.0)
+ }
+ }
+
+ // SAFETY: Safe because we are using this for implementing ByteValued for endian types
+ // which are POD.
+ unsafe impl ByteValued for $new_type {}
+
+ impl PartialEq<$old_type> for $new_type {
+ fn eq(&self, other: &$old_type) -> bool {
+ self.0 == $old_type::$to_new(*other)
+ }
+ }
+
+ impl PartialEq<$new_type> for $old_type {
+ fn eq(&self, other: &$new_type) -> bool {
+ $old_type::$to_new(other.0) == *self
+ }
+ }
+
+ impl From<$new_type> for $old_type {
+ fn from(v: $new_type) -> $old_type {
+ v.to_native()
+ }
+ }
+
+ impl From<$old_type> for $new_type {
+ fn from(v: $old_type) -> $new_type {
+ $new_type($old_type::$to_new(v))
+ }
+ }
+ };
+}
+
+endian_type!(u16, Le16, to_le, from_le);
+endian_type!(u32, Le32, to_le, from_le);
+endian_type!(u64, Le64, to_le, from_le);
+endian_type!(usize, LeSize, to_le, from_le);
+endian_type!(u16, Be16, to_be, from_be);
+endian_type!(u32, Be32, to_be, from_be);
+endian_type!(u64, Be64, to_be, from_be);
+endian_type!(usize, BeSize, to_be, from_be);
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+ use super::*;
+
+ use std::convert::From;
+ use std::mem::transmute;
+
+ #[cfg(target_endian = "little")]
+ const NATIVE_LITTLE: bool = true;
+ #[cfg(target_endian = "big")]
+ const NATIVE_LITTLE: bool = false;
+ const NATIVE_BIG: bool = !NATIVE_LITTLE;
+
+ macro_rules! endian_test {
+ ($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => {
+ mod $test_name {
+ use super::*;
+
+ #[allow(overflowing_literals)]
+ #[test]
+ fn test_endian_type() {
+ <$new_type>::_assert();
+
+ let v = 0x0123_4567_89AB_CDEF as $old_type;
+ let endian_v: $new_type = From::from(v);
+ let endian_into: $old_type = endian_v.into();
+ let endian_transmute: $old_type = unsafe { transmute(endian_v) };
+
+ if $native {
+ assert_eq!(endian_v, endian_transmute);
+ } else {
+ assert_eq!(endian_v, endian_transmute.swap_bytes());
+ }
+
+ assert_eq!(endian_into, v);
+ assert_eq!(endian_v.to_native(), v);
+
+ assert!(v == endian_v);
+ assert!(endian_v == v);
+ }
+ }
+ };
+ }
+
+ endian_test!(u16, Le16, test_le16, NATIVE_LITTLE);
+ endian_test!(u32, Le32, test_le32, NATIVE_LITTLE);
+ endian_test!(u64, Le64, test_le64, NATIVE_LITTLE);
+ endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE);
+ endian_test!(u16, Be16, test_be16, NATIVE_BIG);
+ endian_test!(u32, Be32, test_be32, NATIVE_BIG);
+ endian_test!(u64, Be64, test_be64, NATIVE_BIG);
+ endian_test!(usize, BeSize, test_be_size, NATIVE_BIG);
+}
diff --git a/src/guest_memory.rs b/src/guest_memory.rs
new file mode 100644
index 0000000..ba615ef
--- /dev/null
+++ b/src/guest_memory.rs
@@ -0,0 +1,1217 @@
+// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+//
+// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Traits to track and access the physical memory of the guest.
+//!
+//! To make the abstraction as generic as possible, all the core traits declared here only define
+//! methods to access guest's memory, and never define methods to manage (create, delete, insert,
+//! remove etc) guest's memory. This way, the guest memory consumers (virtio device drivers,
+//! vhost drivers and boot loaders etc) may be decoupled from the guest memory provider (typically
+//! a hypervisor).
+//!
+//! Traits and Structs
+//! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
+//! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
+//! region.
+//! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represents a continuous region of the
+//! guest's physical memory.
+//! - [`GuestMemory`](trait.GuestMemory.html): represents a collection of `GuestMemoryRegion`
+//! objects.
+//! The main responsibilities of the `GuestMemory` trait are:
+//! - hide the detail of accessing guest's physical address.
+//! - map a request address to a `GuestMemoryRegion` object and relay the request to it.
+//! - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
+//!
+//! Whenever a collection of `GuestMemoryRegion` objects is mutable,
+//! [`GuestAddressSpace`](trait.GuestAddressSpace.html) should be implemented
+//! for clients to obtain a [`GuestMemory`] reference or smart pointer.
+//!
+//! The `GuestMemoryRegion` trait has an associated `B: Bitmap` type which is used to handle
+//! dirty bitmap tracking. Backends are free to define the granularity (or whether tracking is
+//! actually performed at all). Those that do implement tracking functionality are expected to
+//! ensure the correctness of the underlying `Bytes` implementation. The user has to explicitly
+//! record (using the handle returned by `GuestRegionMmap::bitmap`) write accesses performed
+//! via pointers, references, or slices returned by methods of `GuestMemory`,`GuestMemoryRegion`,
+//! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`.
+
+use std::convert::From;
+use std::fs::File;
+use std::io::{self, Read, Write};
+use std::ops::{BitAnd, BitOr, Deref};
+use std::rc::Rc;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+use crate::address::{Address, AddressValue};
+use crate::bitmap::{Bitmap, BS, MS};
+use crate::bytes::{AtomicAccess, Bytes};
+use crate::volatile_memory::{self, VolatileSlice};
+
+static MAX_ACCESS_CHUNK: usize = 4096;
+
+/// Errors associated with handling guest memory accesses.
+#[allow(missing_docs)]
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ /// Failure in finding a guest address in any memory regions mapped by this guest.
+ #[error("Guest memory error: invalid guest address {}",.0.raw_value())]
+ InvalidGuestAddress(GuestAddress),
+ /// Couldn't read/write from the given source.
+ #[error("Guest memory error: {0}")]
+ IOError(io::Error),
+ /// Incomplete read or write.
+ #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")]
+ PartialBuffer { expected: usize, completed: usize },
+ /// Requested backend address is out of range.
+ #[error("Guest memory error: invalid backend address")]
+ InvalidBackendAddress,
+ /// Host virtual address not available.
+ #[error("Guest memory error: host virtual address not available")]
+ HostAddressNotAvailable,
+}
+
+impl From<volatile_memory::Error> for Error {
+ fn from(e: volatile_memory::Error) -> Self {
+ match e {
+ volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
+ volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
+ volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
+ volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
+ volatile_memory::Error::IOError(e) => Error::IOError(e),
+ volatile_memory::Error::PartialBuffer {
+ expected,
+ completed,
+ } => Error::PartialBuffer {
+ expected,
+ completed,
+ },
+ }
+ }
+}
+
+/// Result of guest memory operations.
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Represents a guest physical address (GPA).
+///
+/// # Notes:
+/// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
+/// `u64` is used to store the raw value, no matter whether the guest is a 32-bit or 64-bit
+/// virtual machine.
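+///
+/// # Examples
+///
+/// A small illustration of the arithmetic helpers available through the `Address` trait:
+///
+/// ```
+/// # use vm_memory::{Address, GuestAddress};
+/// let addr = GuestAddress(0x1000);
+/// assert_eq!(addr.raw_value(), 0x1000);
+/// assert_eq!(addr.checked_add(0x200), Some(GuestAddress(0x1200)));
+/// assert_eq!(addr.checked_offset_from(GuestAddress(0x800)), Some(0x800));
+/// ```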
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub struct GuestAddress(pub u64);
+impl_address_ops!(GuestAddress, u64);
+
+/// Represents an offset inside a region.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub struct MemoryRegionAddress(pub u64);
+impl_address_ops!(MemoryRegionAddress, u64);
+
+/// Type of the raw value stored in a `GuestAddress` object.
+pub type GuestUsize = <GuestAddress as AddressValue>::V;
+
+/// Represents the start point within a `File` that backs a `GuestMemoryRegion`.
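+///
+/// # Examples
+///
+/// A brief sketch using a temporary file (via the `vmm-sys-util` dev dependency) to stand in
+/// for a guest memory backing file:
+///
+/// ```
+/// # use vm_memory::FileOffset;
+/// # use vmm_sys_util::tempfile::TempFile;
+/// let file = TempFile::new().unwrap().into_file();
+/// let file_offset = FileOffset::new(file, 0x1000);
+/// assert_eq!(file_offset.start(), 0x1000);
+/// ```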
+#[derive(Clone, Debug)]
+pub struct FileOffset {
+ file: Arc<File>,
+ start: u64,
+}
+
+impl FileOffset {
+ /// Creates a new `FileOffset` object.
+ pub fn new(file: File, start: u64) -> Self {
+ FileOffset::from_arc(Arc::new(file), start)
+ }
+
+ /// Creates a new `FileOffset` object based on an existing `Arc<File>`.
+ pub fn from_arc(file: Arc<File>, start: u64) -> Self {
+ FileOffset { file, start }
+ }
+
+ /// Returns a reference to the inner `File` object.
+ pub fn file(&self) -> &File {
+ self.file.as_ref()
+ }
+
+ /// Returns a reference to the inner `Arc<File>` object.
+ pub fn arc(&self) -> &Arc<File> {
+ &self.file
+ }
+
+ /// Returns the start offset within the file.
+ pub fn start(&self) -> u64 {
+ self.start
+ }
+}
+
+/// Represents a continuous region of guest physical memory.
+#[allow(clippy::len_without_is_empty)]
+pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
+ /// Type used for dirty memory tracking.
+ type B: Bitmap;
+
+ /// Returns the size of the region.
+ fn len(&self) -> GuestUsize;
+
+ /// Returns the minimum (inclusive) address managed by the region.
+ fn start_addr(&self) -> GuestAddress;
+
+ /// Returns the maximum (inclusive) address managed by the region.
+ fn last_addr(&self) -> GuestAddress {
+ // unchecked_add is safe as the region bounds were checked when it was created.
+ self.start_addr().unchecked_add(self.len() - 1)
+ }
+
+ /// Borrow the associated `Bitmap` object.
+ fn bitmap(&self) -> &Self::B;
+
+ /// Returns the given address if it is within this region.
+ fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
+ if self.address_in_range(addr) {
+ Some(addr)
+ } else {
+ None
+ }
+ }
+
+ /// Returns `true` if the given address is within this region.
+ fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
+ addr.raw_value() < self.len()
+ }
+
+ /// Returns the address plus the offset if it is in this region.
+ fn checked_offset(
+ &self,
+ base: MemoryRegionAddress,
+ offset: usize,
+ ) -> Option<MemoryRegionAddress> {
+ base.checked_add(offset as u64)
+ .and_then(|addr| self.check_address(addr))
+ }
+
+ /// Tries to convert an absolute address to a relative address within this region.
+ ///
+ /// Returns `None` if `addr` is out of the bounds of this region.
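+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// A short sketch of translating absolute guest addresses into region-relative ones:
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion, MemoryRegionAddress};
+ /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
+ /// let region = mem.find_region(GuestAddress(0x1000)).unwrap();
+ ///
+ /// assert_eq!(
+ /// region.to_region_addr(GuestAddress(0x1200)),
+ /// Some(MemoryRegionAddress(0x200))
+ /// );
+ /// assert_eq!(region.to_region_addr(GuestAddress(0x2000)), None);
+ /// # }
+ /// ```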
+ fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
+ addr.checked_offset_from(self.start_addr())
+ .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
+ }
+
+ /// Returns the host virtual address corresponding to the region address.
+ ///
+ /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
+ /// have the capability to mmap guest address range into host virtual address space for
+ /// direct access, so the corresponding host virtual address may be passed to other subsystems.
+ ///
+ /// # Note
+ /// The underlying guest memory is not protected from memory aliasing, which breaks the
+ /// Rust memory safety model. It's the caller's responsibility to ensure that there's no
+ /// concurrent accesses to the underlying guest memory.
+ fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
+ Err(Error::HostAddressNotAvailable)
+ }
+
+ /// Returns information regarding the file and offset backing this memory region.
+ fn file_offset(&self) -> Option<&FileOffset> {
+ None
+ }
+
+ /// Returns a slice corresponding to the data in the region.
+ ///
+ /// Returns `None` if the region does not support slice-based access.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe because of possible aliasing.
+ #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \
+ machine without violating aliasing rules "]
+ unsafe fn as_slice(&self) -> Option<&[u8]> {
+ None
+ }
+
+ /// Returns a mutable slice corresponding to the data in the region.
+ ///
+ /// Returns `None` if the region does not support slice-based access.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe because of possible aliasing. Mutable accesses performed through the
+ /// returned slice are not visible to the dirty bitmap tracking functionality of
+ /// the region, and must be manually recorded using the associated bitmap object.
+ #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \
+ machine without violating aliasing rules "]
+ unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
+ None
+ }
+
+ /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
+ /// `offset`.
+ #[allow(unused_variables)]
+ fn get_slice(
+ &self,
+ offset: MemoryRegionAddress,
+ count: usize,
+ ) -> Result<VolatileSlice<BS<Self::B>>> {
+ Err(Error::HostAddressNotAvailable)
+ }
+
+ /// Gets a slice of memory for the entire region that supports volatile access.
+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
+ /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
+ /// #
+ /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
+ /// .expect("Could not create guest memory");
+ /// let slice = region
+ /// .as_volatile_slice()
+ /// .expect("Could not get volatile slice");
+ ///
+ /// let v = 42u32;
+ /// let r = slice
+ /// .get_ref::<u32>(0x200)
+ /// .expect("Could not get reference");
+ /// r.store(v);
+ /// assert_eq!(r.load(), v);
+ /// # }
+ /// ```
+ fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
+ self.get_slice(MemoryRegionAddress(0), self.len() as usize)
+ }
+
+ /// Shows whether the region is backed by `HugeTLBFS`.
+ /// Returns `Some(true)` if the region is backed by hugetlbfs, and `None` if no
+ /// information is available.
+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
+ /// let addr = GuestAddress(0x1000);
+ /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
+ /// let r = mem.find_region(addr).unwrap();
+ /// assert_eq!(r.is_hugetlbfs(), None);
+ /// # }
+ /// ```
+ #[cfg(target_os = "linux")]
+ fn is_hugetlbfs(&self) -> Option<bool> {
+ None
+ }
+}
+
+/// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object.
+/// The vm-memory crate already provides trivial implementations for
+/// references to `GuestMemory` or reference-counted `GuestMemory` objects,
+/// but the trait can also be implemented by any other struct in order
+/// to provide temporary access to a snapshot of the memory map.
+///
+/// In order to support generic mutable memory maps, devices (or other things
+/// that access memory) should store the memory as a `GuestAddressSpace<M>`.
+/// This example shows that references can also be used as the `GuestAddressSpace`
+/// implementation, providing a zero-cost abstraction whenever immutable memory
+/// maps are sufficient.
+///
+/// # Examples (uses the `backend-mmap` and `backend-atomic` features)
+///
+/// ```
+/// # #[cfg(feature = "backend-mmap")]
+/// # {
+/// # use std::sync::Arc;
+/// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap};
+/// #
+/// pub struct VirtioDevice<AS: GuestAddressSpace> {
+/// mem: Option<AS>,
+/// }
+///
+/// impl<AS: GuestAddressSpace> VirtioDevice<AS> {
+/// fn new() -> Self {
+/// VirtioDevice { mem: None }
+/// }
+/// fn activate(&mut self, mem: AS) {
+/// self.mem = Some(mem)
+/// }
+/// }
+///
+/// fn get_mmap() -> GuestMemoryMmap<()> {
+/// let start_addr = GuestAddress(0x1000);
+/// GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)])
+/// .expect("Could not create guest memory")
+/// }
+///
+/// // Using `VirtioDevice` with an immutable GuestMemoryMmap:
+/// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new();
+/// let mmap = get_mmap();
+/// for_immutable_mmap.activate(&mmap);
+/// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new();
+/// another.activate(&mmap);
+///
+/// # #[cfg(feature = "backend-atomic")]
+/// # {
+/// # use vm_memory::GuestMemoryAtomic;
+/// // Using `VirtioDevice` with a mutable GuestMemoryMmap:
+/// let mut for_mutable_mmap = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
+/// let atomic = GuestMemoryAtomic::new(get_mmap());
+/// for_mutable_mmap.activate(atomic.clone());
+/// let mut another = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
+/// another.activate(atomic.clone());
+///
+/// // atomic can be modified here...
+/// # }
+/// # }
+/// ```
+pub trait GuestAddressSpace {
+ /// The type that will be used to access guest memory.
+ type M: GuestMemory;
+
+ /// A type that provides access to the memory.
+ type T: Clone + Deref<Target = Self::M>;
+
+ /// Return an object (e.g. a reference or guard) that can be used
+ /// to access memory through this address space. The object provides
+ /// a consistent snapshot of the memory map.
+ fn memory(&self) -> Self::T;
+}
+
+impl<M: GuestMemory> GuestAddressSpace for &M {
+ type M = M;
+ type T = Self;
+
+ fn memory(&self) -> Self {
+ self
+ }
+}
+
+impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
+ type M = M;
+ type T = Self;
+
+ fn memory(&self) -> Self {
+ self.clone()
+ }
+}
+
+impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
+ type M = M;
+ type T = Self;
+
+ fn memory(&self) -> Self {
+ self.clone()
+ }
+}
+
+/// Lifetime generic associated iterators. The actual iterator type is defined through associated
+/// item `Iter`, for example:
+///
+/// ```
+/// # use std::marker::PhantomData;
+/// # use vm_memory::guest_memory::GuestMemoryIterator;
+/// #
+/// // Declare the relevant Region and Memory types
+/// struct MyGuestRegion {/* fields omitted */}
+/// struct MyGuestMemory {/* fields omitted */}
+///
+/// // Make an Iterator type to iterate over the Regions
+/// # /*
+/// struct MyGuestMemoryIter<'a> {/* fields omitted */}
+/// # */
+/// # struct MyGuestMemoryIter<'a> {
+/// # _marker: PhantomData<&'a MyGuestRegion>,
+/// # }
+/// impl<'a> Iterator for MyGuestMemoryIter<'a> {
+/// type Item = &'a MyGuestRegion;
+/// fn next(&mut self) -> Option<&'a MyGuestRegion> {
+/// // ...
+/// # None
+/// }
+/// }
+///
+/// // Associate the Iter type with the Memory type
+/// impl<'a> GuestMemoryIterator<'a, MyGuestRegion> for MyGuestMemory {
+/// type Iter = MyGuestMemoryIter<'a>;
+/// }
+/// ```
+pub trait GuestMemoryIterator<'a, R: 'a> {
+ /// Type of the `iter` method's return value.
+ type Iter: Iterator<Item = &'a R>;
+}
+
+/// `GuestMemory` represents a container for an *immutable* collection of
+/// `GuestMemoryRegion` objects. `GuestMemory` provides the `Bytes<GuestAddress>`
+/// trait to hide the details of accessing guest memory by physical address.
+/// Interior mutability is not allowed for implementations of `GuestMemory` so
+/// that they always provide a consistent view of the memory map.
+///
+/// The tasks of the `GuestMemory` trait are:
+/// - map a request address to a `GuestMemoryRegion` object and relay the request to it.
+/// - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
+pub trait GuestMemory {
+ /// Type of objects hosted by the address space.
+ type R: GuestMemoryRegion;
+
+ /// Lifetime generic associated iterators. Usually this is just `Self`.
+ type I: for<'a> GuestMemoryIterator<'a, Self::R>;
+
+ /// Returns the number of regions in the collection.
+ fn num_regions(&self) -> usize;
+
+ /// Returns the region containing the specified address or `None`.
+ fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
+
+ /// Perform the specified action on each region.
+ ///
+ /// It only walks the children of the current region and does not step into sub-regions.
+ #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
+ fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
+ where
+ F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
+ {
+ for (index, region) in self.iter().enumerate() {
+ cb(index, region)?;
+ }
+ Ok(())
+ }
+
+ /// Perform the specified action on each region mutably.
+ ///
+ /// It only walks the children of the current region and does not step into sub-regions.
+ #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
+ fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
+ where
+ F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
+ {
+ for (index, region) in self.iter().enumerate() {
+ cb(index, region)?;
+ }
+ Ok(())
+ }
+
+ /// Gets an iterator over the entries in the collection.
+ ///
+ /// # Examples
+ ///
+ /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
+ /// and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
+ /// `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
+ /// #
+ /// let start_addr1 = GuestAddress(0x0);
+ /// let start_addr2 = GuestAddress(0x400);
+ /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
+ /// .expect("Could not create guest memory");
+ ///
+ /// let total_size = gm
+ /// .iter()
+ /// .map(|region| region.len() / 1024)
+ /// .fold(0, |acc, size| acc + size);
+ /// assert_eq!(3, total_size)
+ /// # }
+ /// ```
+ fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter;
+
+ /// Applies two functions, specified as callbacks, on the inner memory regions.
+ ///
+ /// # Arguments
+ /// * `init` - Starting value of the accumulator for the `foldf` function.
+ /// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of
+ /// the same size as the memory regions array, containing the function's results
+ /// for each region.
+ /// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an
+ /// operator, applying itself to the `init` value and to each subsequent element
+ /// in the array returned by `mapf`.
+ ///
+ /// # Examples
+ ///
+ /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
+ /// and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
+ /// `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
+ /// #
+ /// let start_addr1 = GuestAddress(0x0);
+ /// let start_addr2 = GuestAddress(0x400);
+ /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
+ /// .expect("Could not create guest memory");
+ ///
+ /// let total_size = gm.map_and_fold(0, |(_, region)| region.len() / 1024, |acc, size| acc + size);
+ /// assert_eq!(3, total_size)
+ /// # }
+ /// ```
+ #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
+ fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
+ where
+ F: Fn((usize, &Self::R)) -> T,
+ G: Fn(T, T) -> T,
+ {
+ self.iter().enumerate().map(mapf).fold(init, foldf)
+ }
+
+ /// Returns the maximum (inclusive) address managed by the
+ /// [`GuestMemory`](trait.GuestMemory.html).
+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap};
+ /// #
+ /// let start_addr = GuestAddress(0x1000);
+ /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// .expect("Could not create guest memory");
+ ///
+ /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr()));
+ /// # }
+ /// ```
+ fn last_addr(&self) -> GuestAddress {
+ self.iter()
+ .map(GuestMemoryRegion::last_addr)
+ .fold(GuestAddress(0), std::cmp::max)
+ }
+
+ /// Tries to convert an absolute address to a relative address within the corresponding region.
+ ///
+ /// Returns `None` if `addr` isn't present within the memory of the guest.
+ fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
+ self.find_region(addr)
+ .map(|r| (r, r.to_region_addr(addr).unwrap()))
+ }
+
+ /// Returns `true` if the given address is present within the memory of the guest.
+ fn address_in_range(&self, addr: GuestAddress) -> bool {
+ self.find_region(addr).is_some()
+ }
+
+ /// Returns the given address if it is present within the memory of the guest.
+ fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
+ self.find_region(addr).map(|_| addr)
+ }
+
+ /// Checks whether the address range `[base, base + len)` is completely covered by guest memory regions.
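+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// A quick sketch; the second range extends past the single region and is therefore
+ /// rejected:
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
+ /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x400)]).unwrap();
+ ///
+ /// assert!(mem.check_range(GuestAddress(0x0), 0x400));
+ /// assert!(!mem.check_range(GuestAddress(0x200), 0x400));
+ /// # }
+ /// ```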
+ fn check_range(&self, base: GuestAddress, len: usize) -> bool {
+ match self.try_access(len, base, |_, count, _, _| -> Result<usize> { Ok(count) }) {
+ Ok(count) => count == len,
+ _ => false,
+ }
+ }
+
+ /// Returns the address plus the offset if it is present within the memory of the guest.
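+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// A small sketch with two regions separated by a hole; offsets that land in the hole
+ /// yield `None`:
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
+ /// let mem = GuestMemoryMmap::<()>::from_ranges(&[
+ /// (GuestAddress(0x0), 0x400),
+ /// (GuestAddress(0x1000), 0x400),
+ /// ])
+ /// .unwrap();
+ ///
+ /// assert_eq!(mem.checked_offset(GuestAddress(0x0), 0x200), Some(GuestAddress(0x200)));
+ /// assert_eq!(mem.checked_offset(GuestAddress(0x0), 0x800), None);
+ /// # }
+ /// ```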
+ fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
+ base.checked_add(offset as u64)
+ .and_then(|addr| self.check_address(addr))
+ }
+
+ /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`.
+ ///
+ /// The address range `[addr, addr + count)` may span more than one
+ /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it.
+ /// So [`try_access()`](trait.GuestMemory.html#method.try_access) invokes the callback 'f'
+ /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns:
+ /// - the error code returned by the callback 'f'
+ /// - the size of the already handled data when encountering the first hole
+ /// - the size of the already handled data when the whole range has been handled
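+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// An illustrative sketch that merely counts how many bytes of a requested range are
+ /// actually backed by a region:
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
+ /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x200)]).unwrap();
+ ///
+ /// // Ask for 0x400 bytes starting at 0x100; only 0x100 of them fall inside the region.
+ /// let covered = mem
+ /// .try_access(0x400, GuestAddress(0x100), |_, count, _, _| Ok(count))
+ /// .unwrap();
+ /// assert_eq!(covered, 0x100);
+ /// # }
+ /// ```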
+ fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
+ where
+ F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
+ {
+ let mut cur = addr;
+ let mut total = 0;
+ while let Some(region) = self.find_region(cur) {
+ let start = region.to_region_addr(cur).unwrap();
+ let cap = region.len() - start.raw_value();
+ let len = std::cmp::min(cap, (count - total) as GuestUsize);
+ match f(total, len as usize, start, region) {
+ // no more data
+ Ok(0) => return Ok(total),
+ // made some progress
+ Ok(len) => {
+ total += len;
+ if total == count {
+ break;
+ }
+ cur = match cur.overflowing_add(len as GuestUsize) {
+ (GuestAddress(0), _) => GuestAddress(0),
+ (result, false) => result,
+ (_, true) => panic!("guest address overflow"),
+ }
+ }
+ // error happened
+ e => return e,
+ }
+ }
+ if total == 0 {
+ Err(Error::InvalidGuestAddress(addr))
+ } else {
+ Ok(total)
+ }
+ }
+
+ /// Get the host virtual address corresponding to the guest address.
+ ///
+ /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
+ /// have the capability to mmap the guest address range into virtual address space of the host
+ /// for direct access, so the corresponding host virtual address may be passed to other
+ /// subsystems.
+ ///
+ /// # Note
+ /// The underlying guest memory is not protected from memory aliasing, which breaks the
+ /// Rust memory safety model. It's the caller's responsibility to ensure that there's no
+ /// concurrent accesses to the underlying guest memory.
+ ///
+ /// # Arguments
+ /// * `addr` - Guest address to convert.
+ ///
+ /// # Examples (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)])
+ /// # .expect("Could not create guest memory");
+ /// #
+ /// let addr = gm
+ /// .get_host_address(GuestAddress(0x1200))
+ /// .expect("Could not get host address");
+ /// println!("Host address is {:p}", addr);
+ /// # }
+ /// ```
+ fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
+ self.to_region_addr(addr)
+ .ok_or(Error::InvalidGuestAddress(addr))
+ .and_then(|(r, addr)| r.get_host_address(addr))
+ }
+
+ /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
+ /// `addr`.
+ fn get_slice(&self, addr: GuestAddress, count: usize) -> Result<VolatileSlice<MS<Self>>> {
+ self.to_region_addr(addr)
+ .ok_or(Error::InvalidGuestAddress(addr))
+ .and_then(|(r, addr)| r.get_slice(addr, count))
+ }
+}
+
+impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
+ type E = Error;
+
+ fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
+ self.try_access(
+ buf.len(),
+ addr,
+ |offset, _count, caddr, region| -> Result<usize> {
+ region.write(&buf[offset..], caddr)
+ },
+ )
+ }
+
+ fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
+ self.try_access(
+ buf.len(),
+ addr,
+ |offset, _count, caddr, region| -> Result<usize> {
+ region.read(&mut buf[offset..], caddr)
+ },
+ )
+ }
+
+ /// # Examples
+ ///
+ /// * Write a slice at guest address 0x1000. (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// #
+ /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr)
+ /// .expect("Could not write slice to guest memory");
+ /// # }
+ /// ```
+ fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
+ let res = self.write(buf, addr)?;
+ if res != buf.len() {
+ return Err(Error::PartialBuffer {
+ expected: buf.len(),
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
+ /// # Examples
+ ///
+ /// * Read a slice of length 16 at guest address 0x1000. (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
+ /// #
+ /// let start_addr = GuestAddress(0x1000);
+ /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// .expect("Could not create guest memory");
+ /// let buf = &mut [0u8; 16];
+ ///
+ /// gm.read_slice(buf, start_addr)
+ /// .expect("Could not read slice from guest memory");
+ /// # }
+ /// ```
+ fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
+ let res = self.read(buf, addr)?;
+ if res != buf.len() {
+ return Err(Error::PartialBuffer {
+ expected: buf.len(),
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let addr = GuestAddress(0x1010);
+ /// # let mut file = if cfg!(unix) {
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ /// # file
+ /// # } else {
+ /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
+ /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
+ /// # };
+ ///
+ /// gm.read_from(addr, &mut file, 128)
+ /// .expect("Could not read from /dev/urandom into guest memory");
+ ///
+ /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
+ /// let rand_val: u32 = gm
+ /// .read_obj(read_addr)
+ /// .expect("Could not read u32 val from /dev/urandom");
+ /// # }
+ /// ```
+ fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
+ where
+ F: Read,
+ {
+ self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
+ // Check if something bad happened before doing unsafe things.
+ assert!(offset <= count);
+
+ let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
+ let mut buf = vec![0u8; len].into_boxed_slice();
+
+ loop {
+ match src.read(&mut buf[..]) {
+ Ok(bytes_read) => {
+ // We don't need to update the dirty bitmap manually here because it's
+ // expected to be handled by the logic within the `Bytes`
+ // implementation for the region object.
+ let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
+ assert_eq!(bytes_written, bytes_read);
+ break Ok(bytes_read);
+ }
+ Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
+ Err(e) => break Err(Error::IOError(e)),
+ }
+ }
+ })
+ }
+
+ fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
+ where
+ F: Read,
+ {
+ let res = self.read_from(addr, src, count)?;
+ if res != count {
+ return Err(Error::PartialBuffer {
+ expected: count,
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
+ /// # Examples
+ ///
+ /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(not(unix))]
+ /// # extern crate vmm_sys_util;
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
+ /// # .expect("Could not create guest memory");
+ /// # let mut file = if cfg!(unix) {
+ /// # use std::fs::OpenOptions;
+ /// let mut file = OpenOptions::new()
+ /// .write(true)
+ /// .open("/dev/null")
+ /// .expect("Could not open /dev/null");
+ /// # file
+ /// # } else {
+ /// # use vmm_sys_util::tempfile::TempFile;
+ /// # TempFile::new().unwrap().into_file()
+ /// # };
+ ///
+ /// gm.write_to(start_addr, &mut file, 128)
+ /// .expect("Could not write 128 bytes to the provided address");
+ /// # }
+ /// ```
+ fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
+ where
+ F: Write,
+ {
+ self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
+ // Check if something bad happened before doing unsafe things.
+ assert!(offset <= count);
+
+ let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
+ let mut buf = vec![0u8; len].into_boxed_slice();
+ let bytes_read = region.read(&mut buf, caddr)?;
+ assert_eq!(bytes_read, len);
+ // For a non-RAM region, reading could have side effects, so we
+ // must use write_all().
+ dst.write_all(&buf).map_err(Error::IOError)?;
+ Ok(len)
+ })
+ }
+
+ /// # Examples
+ ///
+ /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(not(unix))]
+ /// # extern crate vmm_sys_util;
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
+ /// # .expect("Could not create guest memory");
+ /// # let mut file = if cfg!(unix) {
+ /// # use std::fs::OpenOptions;
+ /// let mut file = OpenOptions::new()
+ /// .write(true)
+ /// .open("/dev/null")
+ /// .expect("Could not open /dev/null");
+ /// # file
+ /// # } else {
+ /// # use vmm_sys_util::tempfile::TempFile;
+ /// # TempFile::new().unwrap().into_file()
+ /// # };
+ ///
+ /// gm.write_all_to(start_addr, &mut file, 128)
+ /// .expect("Could not write 128 bytes to the provided address");
+ /// # }
+ /// ```
+ fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
+ where
+ F: Write,
+ {
+ let res = self.write_to(addr, dst, count)?;
+ if res != count {
+ return Err(Error::PartialBuffer {
+ expected: count,
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
+ fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
+ // `find_region` should really do what `to_region_addr` is doing right now, except
+ // it should keep returning a `Result`.
+ self.to_region_addr(addr)
+ .ok_or(Error::InvalidGuestAddress(addr))
+ .and_then(|(region, region_addr)| region.store(val, region_addr, order))
+ }
+
+ fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
+ self.to_region_addr(addr)
+ .ok_or(Error::InvalidGuestAddress(addr))
+ .and_then(|(region, region_addr)| region.load(region_addr, order))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+ use super::*;
+ #[cfg(feature = "backend-mmap")]
+ use crate::bytes::ByteValued;
+ #[cfg(feature = "backend-mmap")]
+ use crate::GuestAddress;
+ #[cfg(feature = "backend-mmap")]
+ use std::io::Cursor;
+ #[cfg(feature = "backend-mmap")]
+ use std::time::{Duration, Instant};
+
+ use vmm_sys_util::tempfile::TempFile;
+
+ #[cfg(feature = "backend-mmap")]
+ type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
+
+ #[cfg(feature = "backend-mmap")]
+ fn make_image(size: u8) -> Vec<u8> {
+ let mut image: Vec<u8> = Vec::with_capacity(size as usize);
+ for i in 0..size {
+ image.push(i);
+ }
+ image
+ }
+
+ #[test]
+ fn test_file_offset() {
+ let file = TempFile::new().unwrap().into_file();
+ let start = 1234;
+ let file_offset = FileOffset::new(file, start);
+ assert_eq!(file_offset.start(), start);
+ assert_eq!(
+ file_offset.file() as *const File,
+ file_offset.arc().as_ref() as *const File
+ );
+ }
+
+ #[cfg(feature = "backend-mmap")]
+ #[test]
+ fn checked_read_from() {
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x40);
+ let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
+ let image = make_image(0x80);
+ let offset = GuestAddress(0x30);
+ let count: usize = 0x20;
+ assert_eq!(
+ 0x20_usize,
+ mem.read_from(offset, &mut Cursor::new(&image), count)
+ .unwrap()
+ );
+ }
+
+    // Runs the provided closure in a loop until at least `duration` has elapsed.
+ #[cfg(feature = "backend-mmap")]
+ fn loop_timed<F>(duration: Duration, mut f: F)
+ where
+ F: FnMut(),
+ {
+ // We check the time every `CHECK_PERIOD` iterations.
+ const CHECK_PERIOD: u64 = 1_000_000;
+ let start_time = Instant::now();
+
+ loop {
+ for _ in 0..CHECK_PERIOD {
+ f();
+ }
+ if start_time.elapsed() >= duration {
+ break;
+ }
+ }
+ }
+
+ // Helper method for the following test. It spawns a writer and a reader thread, which
+ // simultaneously try to access an object that is placed at the junction of two memory regions.
+ // The part of the object that's continuously accessed is a member of type T. The writer
+    // flips all the bits of the member with every write, while the reader checks that every byte
+    // has the same value (i.e. that it never observes a torn, non-atomic access). The test
+    // succeeds if no mismatch is detected after performing accesses for a pre-determined amount
+    // of time.
+ #[cfg(feature = "backend-mmap")]
+ #[cfg(not(miri))] // This test simulates a race condition between guest and vmm
+ fn non_atomic_access_helper<T>()
+ where
+ T: ByteValued
+ + std::fmt::Debug
+ + From<u8>
+ + Into<u128>
+ + std::ops::Not<Output = T>
+ + PartialEq,
+ {
+ use std::mem;
+ use std::thread;
+
+        // A dummy type that always has the same alignment as its first member and adds some
+        // extra bytes at the end.
+ #[derive(Clone, Copy, Debug, Default, PartialEq)]
+ struct Data<T> {
+ val: T,
+ some_bytes: [u8; 8],
+ }
+
+ // Some sanity checks.
+ assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
+ assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
+
+ // There must be no padding bytes, as otherwise implementing ByteValued is UB
+ assert_eq!(mem::size_of::<Data<T>>(), mem::size_of::<T>() + 8);
+
+ unsafe impl<T: ByteValued> ByteValued for Data<T> {}
+
+ // Start of first guest memory region.
+ let start = GuestAddress(0);
+ let region_len = 1 << 12;
+
+ // The address where we start writing/reading a Data<T> value.
+ let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
+
+ let mem = GuestMemoryMmap::from_ranges(&[
+ (start, region_len),
+ (start.unchecked_add(region_len as u64), region_len),
+ ])
+ .unwrap();
+
+ // Need to clone this and move it into the new thread we create.
+ let mem2 = mem.clone();
+ // Just some bytes.
+ let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255];
+
+ let mut data = Data {
+ val: T::from(0u8),
+ some_bytes,
+ };
+
+ // Simple check that cross-region write/read is ok.
+ mem.write_obj(data, data_start).unwrap();
+ let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
+ assert_eq!(read_data, data);
+
+ let t = thread::spawn(move || {
+ let mut count: u64 = 0;
+
+ loop_timed(Duration::from_secs(3), || {
+ let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
+
+ // Every time data is written to memory by the other thread, the value of
+ // data.val alternates between 0 and T::MAX, so the inner bytes should always
+ // have the same value. If they don't match, it means we read a partial value,
+ // so the access was not atomic.
+ let bytes = data.val.into().to_le_bytes();
+ for i in 1..mem::size_of::<T>() {
+ if bytes[0] != bytes[i] {
+ panic!(
+ "val bytes don't match {:?} after {} iterations",
+ &bytes[..mem::size_of::<T>()],
+ count
+ );
+ }
+ }
+ count += 1;
+ });
+ });
+
+ // Write the object while flipping the bits of data.val over and over again.
+ loop_timed(Duration::from_secs(3), || {
+ mem.write_obj(data, data_start).unwrap();
+ data.val = !data.val;
+ });
+
+ t.join().unwrap()
+ }
+
+ #[cfg(feature = "backend-mmap")]
+ #[test]
+ #[cfg(not(miri))]
+ fn test_non_atomic_access() {
+ non_atomic_access_helper::<u16>()
+ }
+
+ #[cfg(feature = "backend-mmap")]
+ #[test]
+ fn test_zero_length_accesses() {
+ #[derive(Default, Clone, Copy)]
+ #[repr(C)]
+ struct ZeroSizedStruct {
+ dummy: [u32; 0],
+ }
+
+ unsafe impl ByteValued for ZeroSizedStruct {}
+
+ let addr = GuestAddress(0x1000);
+ let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
+ let obj = ZeroSizedStruct::default();
+ let mut image = make_image(0x80);
+
+ assert_eq!(mem.write(&[], addr).unwrap(), 0);
+ assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
+
+ assert!(mem.write_slice(&[], addr).is_ok());
+ assert!(mem.read_slice(&mut [], addr).is_ok());
+
+ assert!(mem.write_obj(obj, addr).is_ok());
+ assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
+
+ assert_eq!(mem.read_from(addr, &mut Cursor::new(&image), 0).unwrap(), 0);
+
+ assert!(mem
+ .read_exact_from(addr, &mut Cursor::new(&image), 0)
+ .is_ok());
+
+ assert_eq!(
+ mem.write_to(addr, &mut Cursor::new(&mut image), 0).unwrap(),
+ 0
+ );
+
+ assert!(mem
+ .write_all_to(addr, &mut Cursor::new(&mut image), 0)
+ .is_ok());
+ }
+
+ #[cfg(feature = "backend-mmap")]
+ #[test]
+ fn test_atomic_accesses() {
+ let addr = GuestAddress(0x1000);
+ let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
+ let bad_addr = addr.unchecked_add(0x1000);
+
+ crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
+ }
+
+ #[cfg(feature = "backend-mmap")]
+ #[cfg(target_os = "linux")]
+ #[test]
+ fn test_guest_memory_mmap_is_hugetlbfs() {
+ let addr = GuestAddress(0x1000);
+ let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
+ let r = mem.find_region(addr).unwrap();
+ assert_eq!(r.is_hugetlbfs(), None);
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..b574dfa
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,68 @@
+// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Traits for allocating, handling and interacting with the VM's physical memory.
+//!
+//! In a typical hypervisor, several components (the boot loader, virtual device drivers, virtio
+//! backend drivers, vhost drivers, and so on) need to access the VM's physical memory. This crate
+//! provides a set of stable traits that decouple VM memory consumers from VM memory providers.
+//! Based on these traits, consumers can access the VM's physical memory without knowing the
+//! implementation details of the provider, so such hypervisor components can be shared and
+//! reused across multiple hypervisors.
+
+#![deny(clippy::doc_markdown)]
+#![deny(missing_docs)]
+
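+// A minimal sketch of the decoupling described above (illustrative only; the function name
+// `read_counter` and its parameters are not part of this crate):
+//
+//     use vm_memory::{Bytes, GuestAddress, GuestMemory, GuestMemoryResult};
+//
+//     // A consumer that depends only on the `GuestMemory` trait, not on a
+//     // concrete backend such as `GuestMemoryMmap`.
+//     fn read_counter<M: GuestMemory>(mem: &M, addr: GuestAddress) -> GuestMemoryResult<u64> {
+//         mem.read_obj(addr)
+//     }
+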
+#[macro_use]
+pub mod address;
+pub use address::{Address, AddressValue};
+
+#[cfg(feature = "backend-atomic")]
+pub mod atomic;
+#[cfg(feature = "backend-atomic")]
+pub use atomic::{GuestMemoryAtomic, GuestMemoryLoadGuard};
+
+mod atomic_integer;
+pub use atomic_integer::AtomicInteger;
+
+pub mod bitmap;
+
+pub mod bytes;
+pub use bytes::{AtomicAccess, ByteValued, Bytes};
+
+pub mod endian;
+pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};
+
+pub mod guest_memory;
+pub use guest_memory::{
+ Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory,
+ GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
+};
+
+#[cfg(all(feature = "backend-mmap", not(feature = "xen"), unix))]
+mod mmap_unix;
+
+#[cfg(all(feature = "backend-mmap", feature = "xen", unix))]
+mod mmap_xen;
+
+#[cfg(all(feature = "backend-mmap", windows))]
+mod mmap_windows;
+
+#[cfg(feature = "backend-mmap")]
+pub mod mmap;
+#[cfg(feature = "backend-mmap")]
+pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
+#[cfg(all(feature = "backend-mmap", feature = "xen", unix))]
+pub use mmap::{MmapRange, MmapXenFlags};
+
+pub mod volatile_memory;
+pub use volatile_memory::{
+ Error as VolatileMemoryError, Result as VolatileMemoryResult, VolatileArrayRef, VolatileMemory,
+ VolatileRef, VolatileSlice,
+};
diff --git a/src/mmap.rs b/src/mmap.rs
new file mode 100644
index 0000000..0a442e6
--- /dev/null
+++ b/src/mmap.rs
@@ -0,0 +1,1537 @@
+// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+//
+// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! The default implementation for the [`GuestMemory`](trait.GuestMemory.html) trait.
+//!
+//! This implementation mmaps the guest's memory into the current process.
+
+use std::borrow::Borrow;
+use std::io::{Read, Write};
+#[cfg(unix)]
+use std::io::{Seek, SeekFrom};
+use std::ops::Deref;
+use std::result;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+use crate::address::Address;
+use crate::bitmap::{Bitmap, BS};
+use crate::guest_memory::{
+ self, FileOffset, GuestAddress, GuestMemory, GuestMemoryIterator, GuestMemoryRegion,
+ GuestUsize, MemoryRegionAddress,
+};
+use crate::volatile_memory::{VolatileMemory, VolatileSlice};
+use crate::{AtomicAccess, Bytes};
+
+#[cfg(all(not(feature = "xen"), unix))]
+pub use crate::mmap_unix::{Error as MmapRegionError, MmapRegion, MmapRegionBuilder};
+
+#[cfg(all(feature = "xen", unix))]
+pub use crate::mmap_xen::{Error as MmapRegionError, MmapRange, MmapRegion, MmapXenFlags};
+
+#[cfg(windows)]
+pub use crate::mmap_windows::MmapRegion;
+#[cfg(windows)]
+pub use std::io::Error as MmapRegionError;
+
+/// A `Bitmap` that can be created starting from an initial size.
+pub trait NewBitmap: Bitmap + Default {
+ /// Create a new object based on the specified length in bytes.
+ fn with_len(len: usize) -> Self;
+}
+
+impl NewBitmap for () {
+ fn with_len(_len: usize) -> Self {}
+}
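+
+// A minimal sketch of plugging in a real bitmap (assumes the `backend-mmap` and
+// `backend-bitmap` features; it mirrors the dirty-tracking test at the end of this file):
+//
+//     use vm_memory::bitmap::AtomicBitmap;
+//     use vm_memory::{GuestAddress, GuestMemoryMmap};
+//
+//     // `AtomicBitmap` implements `NewBitmap`, so the backend can size the dirty
+//     // bitmap automatically from each region's length.
+//     let gm = GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0), 0x1_0000)])
+//         .unwrap();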
+
+/// Errors that can occur when creating a memory map.
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ /// Adding the guest base address to the length of the underlying mapping resulted
+ /// in an overflow.
+ #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")]
+ InvalidGuestRegion,
+ /// Error creating a `MmapRegion` object.
+ #[error("{0}")]
+ MmapRegion(MmapRegionError),
+ /// No memory region found.
+ #[error("No memory region found")]
+ NoMemoryRegion,
+ /// Some of the memory regions intersect with each other.
+ #[error("Some of the memory regions intersect with each other")]
+ MemoryRegionOverlap,
+ /// The provided memory regions haven't been sorted.
+ #[error("The provided memory regions haven't been sorted")]
+ UnsortedMemoryRegions,
+}
+
+// TODO: use this for Windows as well after we redefine the Error type there.
+#[cfg(unix)]
+/// Checks if a mapping of `size` bytes fits at the provided `file_offset`.
+///
+/// For a borrowed `FileOffset` and size, this function checks that the mapping does not
+/// extend past EOF and that adding the size to the file offset does not lead to overflow.
+pub fn check_file_offset(
+ file_offset: &FileOffset,
+ size: usize,
+) -> result::Result<(), MmapRegionError> {
+ let mut file = file_offset.file();
+ let start = file_offset.start();
+
+ if let Some(end) = start.checked_add(size as u64) {
+ let filesize = file
+ .seek(SeekFrom::End(0))
+ .map_err(MmapRegionError::SeekEnd)?;
+ file.rewind().map_err(MmapRegionError::SeekStart)?;
+ if filesize < end {
+ return Err(MmapRegionError::MappingPastEof);
+ }
+ } else {
+ return Err(MmapRegionError::InvalidOffsetLength);
+ }
+
+ Ok(())
+}
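+
+// A minimal usage sketch (illustrative; `TempFile` comes from the `vmm-sys-util`
+// dev-dependency already used by the examples and tests in this crate):
+//
+//     use vm_memory::mmap::check_file_offset;
+//     use vm_memory::FileOffset;
+//     use vmm_sys_util::tempfile::TempFile;
+//
+//     let file = TempFile::new().unwrap().into_file();
+//     file.set_len(0x2000).unwrap();
+//     // A 0x1000-byte mapping at offset 0x1000 ends exactly at EOF, so it is accepted.
+//     assert!(check_file_offset(&FileOffset::new(file, 0x1000), 0x1000).is_ok());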
+
+/// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's
+/// memory region in the current process.
+///
+/// Represents a continuous region of the guest's physical memory that is backed by a mapping
+/// in the virtual address space of the calling process.
+#[derive(Debug)]
+pub struct GuestRegionMmap<B = ()> {
+ mapping: MmapRegion<B>,
+ guest_base: GuestAddress,
+}
+
+impl<B> Deref for GuestRegionMmap<B> {
+ type Target = MmapRegion<B>;
+
+ fn deref(&self) -> &MmapRegion<B> {
+ &self.mapping
+ }
+}
+
+impl<B: Bitmap> GuestRegionMmap<B> {
+ /// Create a new memory-mapped memory region for the guest's physical memory.
+ pub fn new(mapping: MmapRegion<B>, guest_base: GuestAddress) -> result::Result<Self, Error> {
+ if guest_base.0.checked_add(mapping.size() as u64).is_none() {
+ return Err(Error::InvalidGuestRegion);
+ }
+
+ Ok(GuestRegionMmap {
+ mapping,
+ guest_base,
+ })
+ }
+}
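+
+// A minimal sketch of building a region by hand (illustrative; assumes the `backend-mmap`
+// feature and the non-Xen Unix backend, where `MmapRegion::new` is available):
+//
+//     use vm_memory::{GuestAddress, GuestMemoryRegion, GuestRegionMmap, MmapRegion};
+//
+//     let mapping = MmapRegion::<()>::new(0x1000).unwrap();
+//     let region = GuestRegionMmap::new(mapping, GuestAddress(0x10_0000)).unwrap();
+//     assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
+//     assert_eq!(region.len(), 0x1000);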
+
+#[cfg(not(feature = "xen"))]
+impl<B: NewBitmap> GuestRegionMmap<B> {
+    /// Create a new memory-mapped memory region from a guest physical address, a size, and an
+    /// optional file.
+ pub fn from_range(
+ addr: GuestAddress,
+ size: usize,
+ file: Option<FileOffset>,
+ ) -> result::Result<Self, Error> {
+ let region = if let Some(ref f_off) = file {
+ MmapRegion::from_file(f_off.clone(), size)
+ } else {
+ MmapRegion::new(size)
+ }
+ .map_err(Error::MmapRegion)?;
+
+ Self::new(region, addr)
+ }
+}
+
+#[cfg(feature = "xen")]
+impl<B: NewBitmap> GuestRegionMmap<B> {
+    /// Create a new Unix memory-mapped memory region from a guest physical address, a size, and
+    /// an optional file. This must only be used for tests, doctests, and benches; it is not
+    /// designed for end consumers.
+ pub fn from_range(
+ addr: GuestAddress,
+ size: usize,
+ file: Option<FileOffset>,
+ ) -> result::Result<Self, Error> {
+ let range = MmapRange::new_unix(size, file, addr);
+
+ let region = MmapRegion::from_range(range).map_err(Error::MmapRegion)?;
+ Self::new(region, addr)
+ }
+}
+
+impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionMmap<B> {
+ type E = guest_memory::Error;
+
+ /// # Examples
+ /// * Write a slice at guest address 0x1200.
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// #
+ /// let res = gm
+ /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
+ /// .expect("Could not write to guest memory");
+ /// assert_eq!(5, res);
+ /// ```
+ fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ /// # Examples
+    /// * Read a slice of length 16 at guest address 0x1200.
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// #
+ /// let buf = &mut [0u8; 16];
+ /// let res = gm
+ /// .read(buf, GuestAddress(0x1200))
+ /// .expect("Could not read from guest memory");
+ /// assert_eq!(16, res);
+ /// ```
+ fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write_slice(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read_slice(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom
+ ///
+ /// ```
+ /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let addr = GuestAddress(0x1010);
+ /// # let mut file = if cfg!(unix) {
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ /// # file
+ /// # } else {
+ /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
+ /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
+ /// # };
+ ///
+ /// gm.read_from(addr, &mut file, 128)
+ /// .expect("Could not read from /dev/urandom into guest memory");
+ ///
+ /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
+ /// let rand_val: u32 = gm
+ /// .read_obj(read_addr)
+ /// .expect("Could not read u32 val from /dev/urandom");
+ /// ```
+ fn read_from<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ src: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<usize>
+ where
+ F: Read,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read_from::<F>(maddr, src, count)
+ .map_err(Into::into)
+ }
+
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom
+ ///
+ /// ```
+ /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let addr = GuestAddress(0x1010);
+ /// # let mut file = if cfg!(unix) {
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ /// # file
+ /// # } else {
+ /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
+ /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
+ /// # };
+ ///
+ /// gm.read_exact_from(addr, &mut file, 128)
+ /// .expect("Could not read from /dev/urandom into guest memory");
+ ///
+ /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
+ /// let rand_val: u32 = gm
+ /// .read_obj(read_addr)
+ /// .expect("Could not read u32 val from /dev/urandom");
+ /// ```
+ fn read_exact_from<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ src: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<()>
+ where
+ F: Read,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read_exact_from::<F>(maddr, src, count)
+ .map_err(Into::into)
+ }
+
+ /// Writes data from the region to a writable object.
+ ///
+ /// # Examples
+ ///
+    /// * Write 128 bytes to /dev/null
+ ///
+ /// ```
+ /// # #[cfg(not(unix))]
+ /// # extern crate vmm_sys_util;
+ /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let mut file = if cfg!(unix) {
+ /// # use std::fs::OpenOptions;
+ /// let mut file = OpenOptions::new()
+ /// .write(true)
+ /// .open("/dev/null")
+ /// .expect("Could not open /dev/null");
+ /// # file
+ /// # } else {
+ /// # use vmm_sys_util::tempfile::TempFile;
+ /// # TempFile::new().unwrap().into_file()
+ /// # };
+ ///
+ /// gm.write_to(start_addr, &mut file, 128)
+ /// .expect("Could not write to file from guest memory");
+ /// ```
+ fn write_to<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ dst: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<usize>
+ where
+ F: Write,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write_to::<F>(maddr, dst, count)
+ .map_err(Into::into)
+ }
+
+ /// Writes data from the region to a writable object.
+ ///
+ /// # Examples
+ ///
+    /// * Write 128 bytes to /dev/null
+ ///
+ /// ```
+ /// # #[cfg(not(unix))]
+ /// # extern crate vmm_sys_util;
+ /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let mut file = if cfg!(unix) {
+ /// # use std::fs::OpenOptions;
+ /// let mut file = OpenOptions::new()
+ /// .write(true)
+ /// .open("/dev/null")
+ /// .expect("Could not open /dev/null");
+ /// # file
+ /// # } else {
+ /// # use vmm_sys_util::tempfile::TempFile;
+ /// # TempFile::new().unwrap().into_file()
+ /// # };
+ ///
+ /// gm.write_all_to(start_addr, &mut file, 128)
+ /// .expect("Could not write to file from guest memory");
+ /// ```
+ fn write_all_to<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ dst: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<()>
+ where
+ F: Write,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write_all_to::<F>(maddr, dst, count)
+ .map_err(Into::into)
+ }
+
+ fn store<T: AtomicAccess>(
+ &self,
+ val: T,
+ addr: MemoryRegionAddress,
+ order: Ordering,
+ ) -> guest_memory::Result<()> {
+ self.as_volatile_slice().and_then(|s| {
+ s.store(val, addr.raw_value() as usize, order)
+ .map_err(Into::into)
+ })
+ }
+
+ fn load<T: AtomicAccess>(
+ &self,
+ addr: MemoryRegionAddress,
+ order: Ordering,
+ ) -> guest_memory::Result<T> {
+ self.as_volatile_slice()
+ .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
+ }
+}
+
+impl<B: Bitmap> GuestMemoryRegion for GuestRegionMmap<B> {
+ type B = B;
+
+ fn len(&self) -> GuestUsize {
+ self.mapping.size() as GuestUsize
+ }
+
+ fn start_addr(&self) -> GuestAddress {
+ self.guest_base
+ }
+
+ fn bitmap(&self) -> &Self::B {
+ self.mapping.bitmap()
+ }
+
+ fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
+        // `wrapping_offset` is a safe method because it only computes a new pointer and
+        // never dereferences it. Using the result here is sound because `addr` has just
+        // been range-checked via `check_address`.
+ self.check_address(addr)
+ .ok_or(guest_memory::Error::InvalidBackendAddress)
+ .map(|addr| {
+ self.mapping
+ .as_ptr()
+ .wrapping_offset(addr.raw_value() as isize)
+ })
+ }
+
+ fn file_offset(&self) -> Option<&FileOffset> {
+ self.mapping.file_offset()
+ }
+
+ fn get_slice(
+ &self,
+ offset: MemoryRegionAddress,
+ count: usize,
+ ) -> guest_memory::Result<VolatileSlice<BS<B>>> {
+ let slice = self.mapping.get_slice(offset.raw_value() as usize, count)?;
+ Ok(slice)
+ }
+
+ #[cfg(target_os = "linux")]
+ fn is_hugetlbfs(&self) -> Option<bool> {
+ self.mapping.is_hugetlbfs()
+ }
+}
+
+/// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory
+/// in the current process.
+///
+/// Represents the entire physical memory of the guest by tracking all its memory regions.
+/// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the
+/// virtual address space of the calling process.
+#[derive(Clone, Debug, Default)]
+pub struct GuestMemoryMmap<B = ()> {
+ regions: Vec<Arc<GuestRegionMmap<B>>>,
+}
+
+impl<B: NewBitmap> GuestMemoryMmap<B> {
+ /// Creates an empty `GuestMemoryMmap` instance.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Creates a container and allocates anonymous memory for guest memory regions.
+ ///
+ /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address.
+ pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result<Self, Error> {
+ Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None)))
+ }
+
+ /// Creates a container and allocates anonymous memory for guest memory regions.
+ ///
+ /// Valid memory regions are specified as a sequence of (Address, Size, Option<FileOffset>)
+ /// tuples sorted by Address.
+ pub fn from_ranges_with_files<A, T>(ranges: T) -> result::Result<Self, Error>
+ where
+ A: Borrow<(GuestAddress, usize, Option<FileOffset>)>,
+ T: IntoIterator<Item = A>,
+ {
+ Self::from_regions(
+ ranges
+ .into_iter()
+ .map(|x| {
+ GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone())
+ })
+ .collect::<result::Result<Vec<_>, Error>>()?,
+ )
+ }
+}
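+
+// A minimal construction sketch (assumes the `backend-mmap` feature):
+//
+//     use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
+//
+//     let gm = GuestMemoryMmap::<()>::from_ranges(&[
+//         (GuestAddress(0), 0x1000),
+//         (GuestAddress(0x10_0000), 0x1000),
+//     ])
+//     .unwrap();
+//     assert_eq!(gm.num_regions(), 2);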
+
+impl<B: Bitmap> GuestMemoryMmap<B> {
+ /// Creates a new `GuestMemoryMmap` from a vector of regions.
+ ///
+ /// # Arguments
+ ///
+ /// * `regions` - The vector of regions.
+ /// The regions shouldn't overlap and they should be sorted
+ /// by the starting address.
+ pub fn from_regions(mut regions: Vec<GuestRegionMmap<B>>) -> result::Result<Self, Error> {
+ Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
+ }
+
+ /// Creates a new `GuestMemoryMmap` from a vector of Arc regions.
+ ///
+    /// Similar to the constructor `from_regions()`, as it returns a
+    /// `GuestMemoryMmap`. This constructor exists so that consumers of this API
+    /// can create a new `GuestMemoryMmap` from regions shared with an existing
+    /// `GuestMemoryMmap` instance.
+ ///
+ /// # Arguments
+ ///
+ /// * `regions` - The vector of `Arc` regions.
+ /// The regions shouldn't overlap and they should be sorted
+ /// by the starting address.
+ pub fn from_arc_regions(regions: Vec<Arc<GuestRegionMmap<B>>>) -> result::Result<Self, Error> {
+ if regions.is_empty() {
+ return Err(Error::NoMemoryRegion);
+ }
+
+ for window in regions.windows(2) {
+ let prev = &window[0];
+ let next = &window[1];
+
+ if prev.start_addr() > next.start_addr() {
+ return Err(Error::UnsortedMemoryRegions);
+ }
+
+ if prev.last_addr() >= next.start_addr() {
+ return Err(Error::MemoryRegionOverlap);
+ }
+ }
+
+ Ok(Self { regions })
+ }
+
+ /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`.
+ ///
+ /// # Arguments
+ /// * `region`: the memory region to insert into the guest memory object.
+ pub fn insert_region(
+ &self,
+ region: Arc<GuestRegionMmap<B>>,
+ ) -> result::Result<GuestMemoryMmap<B>, Error> {
+ let mut regions = self.regions.clone();
+ regions.push(region);
+ regions.sort_by_key(|x| x.start_addr());
+
+ Self::from_arc_regions(regions)
+ }
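+
+    // A minimal sketch of `insert_region` (illustrative; note that the method returns a new
+    // `GuestMemoryMmap` and leaves the original untouched):
+    //
+    //     use std::sync::Arc;
+    //     use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
+    //
+    //     let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
+    //     let region =
+    //         Arc::new(GuestRegionMmap::from_range(GuestAddress(0x10_0000), 0x1000, None).unwrap());
+    //     let new_gm = gm.insert_region(region).unwrap();
+    //     assert_eq!(gm.num_regions(), 1);
+    //     assert_eq!(new_gm.num_regions(), 2);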
+
+    /// Remove a region from the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`
+ /// on success, together with the removed region.
+ ///
+ /// # Arguments
+ /// * `base`: base address of the region to be removed
+ /// * `size`: size of the region to be removed
+ pub fn remove_region(
+ &self,
+ base: GuestAddress,
+ size: GuestUsize,
+ ) -> result::Result<(GuestMemoryMmap<B>, Arc<GuestRegionMmap<B>>), Error> {
+ if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
+ if self.regions.get(region_index).unwrap().mapping.size() as GuestUsize == size {
+ let mut regions = self.regions.clone();
+ let region = regions.remove(region_index);
+ return Ok((Self { regions }, region));
+ }
+ }
+
+ Err(Error::InvalidGuestRegion)
+ }
+}
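+
+// Continuing the `insert_region` sketch above with `remove_region` (illustrative; both the
+// base address and the size must match the region exactly):
+//
+//     use vm_memory::GuestMemoryRegion;
+//
+//     let (smaller_gm, removed) = new_gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();
+//     assert_eq!(smaller_gm.num_regions(), 1);
+//     assert_eq!(removed.start_addr(), GuestAddress(0x10_0000));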
+
+/// An iterator over the elements of `GuestMemoryMmap`.
+///
+/// This struct is created by `GuestMemory::iter()`. See its documentation for more.
+pub struct Iter<'a, B>(std::slice::Iter<'a, Arc<GuestRegionMmap<B>>>);
+
+impl<'a, B> Iterator for Iter<'a, B> {
+ type Item = &'a GuestRegionMmap<B>;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next().map(AsRef::as_ref)
+ }
+}
+
+impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionMmap<B>> for GuestMemoryMmap<B> {
+ type Iter = Iter<'a, B>;
+}
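+
+// A minimal iteration sketch (illustrative; `iter()` yields the regions in ascending order of
+// their start addresses, since the constructors only accept sorted, non-overlapping regions):
+//
+//     use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};
+//
+//     let gm = GuestMemoryMmap::<()>::from_ranges(&[
+//         (GuestAddress(0), 0x1000),
+//         (GuestAddress(0x10_0000), 0x1000),
+//     ])
+//     .unwrap();
+//     let total: u64 = gm.iter().map(|r| r.len()).sum();
+//     assert_eq!(total, 0x2000);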
+
+impl<B: Bitmap + 'static> GuestMemory for GuestMemoryMmap<B> {
+ type R = GuestRegionMmap<B>;
+
+ type I = Self;
+
+ fn num_regions(&self) -> usize {
+ self.regions.len()
+ }
+
+ fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap<B>> {
+ let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
+ Ok(x) => Some(x),
+            // `addr` is not a region start; check whether it falls inside the
+            // closest region whose starting address is below it.
+ Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
+ _ => None,
+ };
+ index.map(|x| self.regions[x].as_ref())
+ }
+
+ fn iter(&self) -> Iter<B> {
+ Iter(self.regions.iter())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+ extern crate vmm_sys_util;
+
+ use super::*;
+
+ use crate::bitmap::tests::test_guest_memory_and_region;
+ use crate::bitmap::AtomicBitmap;
+ use crate::GuestAddressSpace;
+
+ use std::fs::File;
+ use std::mem;
+ use std::path::Path;
+ use vmm_sys_util::tempfile::TempFile;
+
+ type GuestMemoryMmap = super::GuestMemoryMmap<()>;
+ type GuestRegionMmap = super::GuestRegionMmap<()>;
+ type MmapRegion = super::MmapRegion<()>;
+
+ #[test]
+ fn basic_map() {
+ let m = MmapRegion::new(1024).unwrap();
+ assert_eq!(1024, m.size());
+ }
+
+ fn check_guest_memory_mmap(
+ maybe_guest_mem: Result<GuestMemoryMmap, Error>,
+ expected_regions_summary: &[(GuestAddress, usize)],
+ ) {
+ assert!(maybe_guest_mem.is_ok());
+
+ let guest_mem = maybe_guest_mem.unwrap();
+ assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
+ let maybe_last_mem_reg = expected_regions_summary.last();
+ if let Some((region_addr, region_size)) = maybe_last_mem_reg {
+ let mut last_addr = region_addr.unchecked_add(*region_size as u64);
+ if last_addr.raw_value() != 0 {
+ last_addr = last_addr.unchecked_sub(1);
+ }
+ assert_eq!(guest_mem.last_addr(), last_addr);
+ }
+ for ((region_addr, region_size), mmap) in expected_regions_summary
+ .iter()
+ .zip(guest_mem.regions.iter())
+ {
+ assert_eq!(region_addr, &mmap.guest_base);
+ assert_eq!(region_size, &mmap.mapping.size());
+
+ assert!(guest_mem.find_region(*region_addr).is_some());
+ }
+ }
+
+ fn new_guest_memory_mmap(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ GuestMemoryMmap::from_ranges(regions_summary)
+ }
+
+ fn new_guest_memory_mmap_from_regions(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ GuestMemoryMmap::from_regions(
+ regions_summary
+ .iter()
+ .map(|(region_addr, region_size)| {
+ GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap()
+ })
+ .collect(),
+ )
+ }
+
+ fn new_guest_memory_mmap_from_arc_regions(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ GuestMemoryMmap::from_arc_regions(
+ regions_summary
+ .iter()
+ .map(|(region_addr, region_size)| {
+ Arc::new(GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap())
+ })
+ .collect(),
+ )
+ }
+
+ fn new_guest_memory_mmap_with_files(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ let regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = regions_summary
+ .iter()
+ .map(|(region_addr, region_size)| {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(*region_size as u64).unwrap();
+
+ (*region_addr, *region_size, Some(FileOffset::new(f, 0)))
+ })
+ .collect();
+
+ GuestMemoryMmap::from_ranges_with_files(&regions)
+ }
+
+ #[test]
+ fn test_no_memory_region() {
+ let regions_summary = [];
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap(&regions_summary).err().unwrap()
+ ),
+ format!("{:?}", Error::NoMemoryRegion)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_with_files(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::NoMemoryRegion)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_from_regions(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::NoMemoryRegion)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_from_arc_regions(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::NoMemoryRegion)
+ );
+ }
+
+ #[test]
+ fn test_overlapping_memory_regions() {
+ let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)];
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap(&regions_summary).err().unwrap()
+ ),
+ format!("{:?}", Error::MemoryRegionOverlap)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_with_files(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::MemoryRegionOverlap)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_from_regions(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::MemoryRegionOverlap)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_from_arc_regions(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::MemoryRegionOverlap)
+ );
+ }
+
+ #[test]
+ fn test_unsorted_memory_regions() {
+ let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)];
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap(&regions_summary).err().unwrap()
+ ),
+ format!("{:?}", Error::UnsortedMemoryRegions)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_with_files(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::UnsortedMemoryRegions)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_from_regions(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::UnsortedMemoryRegions)
+ );
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ new_guest_memory_mmap_from_arc_regions(&regions_summary)
+ .err()
+ .unwrap()
+ ),
+ format!("{:?}", Error::UnsortedMemoryRegions)
+ );
+ }
+
+ #[test]
+ fn test_valid_memory_regions() {
+ let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)];
+
+ let guest_mem = GuestMemoryMmap::new();
+ assert_eq!(guest_mem.regions.len(), 0);
+
+ check_guest_memory_mmap(new_guest_memory_mmap(&regions_summary), &regions_summary);
+
+ check_guest_memory_mmap(
+ new_guest_memory_mmap_with_files(&regions_summary),
+ &regions_summary,
+ );
+
+ check_guest_memory_mmap(
+ new_guest_memory_mmap_from_regions(&regions_summary),
+ &regions_summary,
+ );
+
+ check_guest_memory_mmap(
+ new_guest_memory_mmap_from_arc_regions(&regions_summary),
+ &regions_summary,
+ );
+ }
+
+ #[test]
+ fn slice_addr() {
+ let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap();
+ let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
+ let guard = s.ptr_guard();
+ assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) });
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn mapped_file_read() {
+ let mut f = TempFile::new().unwrap().into_file();
+ let sample_buf = &[1, 2, 3, 4, 5];
+ assert!(f.write_all(sample_buf).is_ok());
+
+ let file = Some(FileOffset::new(f, 0));
+ let mem_map = GuestRegionMmap::from_range(GuestAddress(0), sample_buf.len(), file).unwrap();
+ let buf = &mut [0u8; 16];
+ assert_eq!(
+ mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(),
+ sample_buf.len()
+ );
+ assert_eq!(buf[0..sample_buf.len()], sample_buf[..]);
+ }
+
+ #[test]
+ fn test_address_in_range() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert!(guest_mem.address_in_range(GuestAddress(0x200)));
+ assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
+ assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
+ assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
+ }
+ }
+
+ #[test]
+ fn test_check_address() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert_eq!(
+ guest_mem.check_address(GuestAddress(0x200)),
+ Some(GuestAddress(0x200))
+ );
+ assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
+ assert_eq!(
+ guest_mem.check_address(GuestAddress(0xa00)),
+ Some(GuestAddress(0xa00))
+ );
+ assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
+ }
+ }
+
+ #[test]
+ fn test_to_region_addr() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
+ let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
+ let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
+ assert!(r0.as_ptr() == r1.as_ptr());
+ assert_eq!(addr0, MemoryRegionAddress(0));
+ assert_eq!(addr1, MemoryRegionAddress(0x200));
+ }
+ }
+
+ #[test]
+ fn test_get_host_address() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
+ let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
+ let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
+ assert_eq!(
+ ptr0,
+ guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
+ );
+ assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
+ }
+ }
+
+ #[test]
+ fn test_deref() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let start_addr = GuestAddress(0x0);
+ let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ let sample_buf = &[1, 2, 3, 4, 5];
+
+ assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
+ let slice = guest_mem
+ .find_region(GuestAddress(0))
+ .unwrap()
+ .as_volatile_slice()
+ .unwrap();
+
+ let buf = &mut [0, 0, 0, 0, 0];
+ assert_eq!(slice.read(buf, 0).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+ }
+ }
+
+ #[test]
+ fn test_read_u64() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x1000).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x1000).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x1000);
+ let bad_addr = GuestAddress(0x2001);
+ let bad_addr2 = GuestAddress(0x1ffc);
+ let max_addr = GuestAddress(0x2000);
+
+ let gm =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let val1: u64 = 0xaa55_aa55_aa55_aa55;
+ let val2: u64 = 0x55aa_55aa_55aa_55aa;
+ assert_eq!(
+ format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
+ format!("InvalidGuestAddress({:?})", bad_addr,)
+ );
+ assert_eq!(
+ format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
+ format!(
+ "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
+ mem::size_of::<u64>(),
+ max_addr.checked_offset_from(bad_addr2).unwrap()
+ )
+ );
+
+ gm.write_obj(val1, GuestAddress(0x500)).unwrap();
+ gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
+ let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
+ let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
+ assert_eq!(val1, num1);
+ assert_eq!(val2, num2);
+ }
+ }
+
+ #[test]
+ fn write_and_read() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let mut start_addr = GuestAddress(0x1000);
+ let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let sample_buf = &[1, 2, 3, 4, 5];
+
+ assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);
+
+ let buf = &mut [0u8; 5];
+ assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+
+ start_addr = GuestAddress(0x13ff);
+ assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
+ assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
+ assert_eq!(buf[0], sample_buf[0]);
+ start_addr = GuestAddress(0x1000);
+ }
+ }
+
+ #[test]
+ fn read_to_and_write_from_mem() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
+ GuestAddress(0x1000),
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let addr = GuestAddress(0x1010);
+ let mut file = if cfg!(unix) {
+ File::open(Path::new("/dev/zero")).unwrap()
+ } else {
+ File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
+ };
+ gm.write_obj(!0u32, addr).unwrap();
+ gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
+ .unwrap();
+ let value: u32 = gm.read_obj(addr).unwrap();
+ if cfg!(unix) {
+ assert_eq!(value, 0);
+ } else {
+ assert_eq!(value, 0x0090_5a4d);
+ }
+
+ let mut sink = Vec::new();
+ gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
+ .unwrap();
+ if cfg!(unix) {
+ assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
+ } else {
+ assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
+ };
+ }
+ }
+
+ #[test]
+ fn create_vec_with_regions() {
+ let region_size = 0x400;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x1000), region_size),
+ ];
+ let mut iterated_regions = Vec::new();
+ let gm = GuestMemoryMmap::from_ranges(&regions).unwrap();
+
+ for region in gm.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+
+ for region in gm.iter() {
+ iterated_regions.push((region.start_addr(), region.len() as usize));
+ }
+ assert_eq!(regions, iterated_regions);
+
+ assert!(regions
+ .iter()
+ .map(|x| (x.0, x.1))
+ .eq(iterated_regions.iter().copied()));
+
+ assert_eq!(gm.regions[0].guest_base, regions[0].0);
+ assert_eq!(gm.regions[1].guest_base, regions[1].0);
+ }
+
+ #[test]
+ fn test_memory() {
+ let region_size = 0x400;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x1000), region_size),
+ ];
+ let mut iterated_regions = Vec::new();
+ let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
+ let mem = gm.memory();
+
+ for region in mem.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+
+ for region in mem.iter() {
+ iterated_regions.push((region.start_addr(), region.len() as usize));
+ }
+ assert_eq!(regions, iterated_regions);
+
+ assert!(regions
+ .iter()
+ .map(|x| (x.0, x.1))
+ .eq(iterated_regions.iter().copied()));
+
+ assert_eq!(gm.regions[0].guest_base, regions[0].0);
+ assert_eq!(gm.regions[1].guest_base, regions[1].0);
+ }
+
+ #[test]
+ fn test_access_cross_boundary() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x1000).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x1000).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x1000);
+ let gm =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let sample_buf = &[1, 2, 3, 4, 5];
+ assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
+ let buf = &mut [0u8; 5];
+ assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+ }
+ }
+
+ #[test]
+ fn test_retrieve_fd_backing_memory_region() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let start_addr = GuestAddress(0x0);
+ let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_none());
+
+ let gm = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_some());
+ }
+
+ // Windows needs a dedicated test where it will retrieve the allocation
+ // granularity to determine a proper offset (other than 0) that can be
+ // used for the backing file. Refer to Microsoft docs here:
+ // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile
+ #[test]
+ #[cfg(unix)]
+ fn test_retrieve_offset_from_fd_backing_memory_region() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x1400).unwrap();
+ // Needs to be aligned on 4k, otherwise mmap will fail.
+ let offset = 0x1000;
+
+ let start_addr = GuestAddress(0x0);
+ let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_none());
+
+ let gm = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, offset)),
+ )])
+ .unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_some());
+ assert_eq!(region.file_offset().unwrap().start(), offset);
+ }
+
+ #[test]
+ fn test_mmap_insert_region() {
+ let region_size = 0x1000;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x10_0000), region_size),
+ ];
+ let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
+ let mem_orig = gm.memory();
+ assert_eq!(mem_orig.num_regions(), 2);
+
+ let mmap =
+ Arc::new(GuestRegionMmap::from_range(GuestAddress(0x8000), 0x1000, None).unwrap());
+ let gm = gm.insert_region(mmap).unwrap();
+ let mmap =
+ Arc::new(GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap());
+ let gm = gm.insert_region(mmap).unwrap();
+ let mmap =
+ Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap());
+ let gm = gm.insert_region(mmap).unwrap();
+ let mmap =
+ Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap());
+ gm.insert_region(mmap).unwrap_err();
+
+ assert_eq!(mem_orig.num_regions(), 2);
+ assert_eq!(gm.num_regions(), 5);
+
+ assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
+ assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000));
+ assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000));
+ assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000));
+ assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000));
+ }
+
+ #[test]
+ fn test_mmap_remove_region() {
+ let region_size = 0x1000;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x10_0000), region_size),
+ ];
+ let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
+ let mem_orig = gm.memory();
+ assert_eq!(mem_orig.num_regions(), 2);
+
+ gm.remove_region(GuestAddress(0), 128).unwrap_err();
+ gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
+ let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();
+
+ assert_eq!(mem_orig.num_regions(), 2);
+ assert_eq!(gm.num_regions(), 1);
+
+ assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
+ assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
+ }
+
+ #[test]
+ fn test_guest_memory_mmap_get_slice() {
+ let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap();
+
+ // Normal case.
+ let slice_addr = MemoryRegionAddress(0x100);
+ let slice_size = 0x200;
+ let slice = region.get_slice(slice_addr, slice_size).unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ // Empty slice.
+ let slice_addr = MemoryRegionAddress(0x200);
+ let slice_size = 0x0;
+ let slice = region.get_slice(slice_addr, slice_size).unwrap();
+ assert!(slice.is_empty());
+
+ // Error case when slice_size is beyond the boundary.
+ let slice_addr = MemoryRegionAddress(0x300);
+ let slice_size = 0x200;
+ assert!(region.get_slice(slice_addr, slice_size).is_err());
+ }
+
+ #[test]
+ fn test_guest_memory_mmap_as_volatile_slice() {
+ let region_size = 0x400;
+ let region = GuestRegionMmap::from_range(GuestAddress(0), region_size, None).unwrap();
+
+ // Test slice length.
+ let slice = region.as_volatile_slice().unwrap();
+ assert_eq!(slice.len(), region_size);
+
+ // Test slice data.
+ let v = 0x1234_5678u32;
+ let r = slice.get_ref::<u32>(0x200).unwrap();
+ r.store(v);
+ assert_eq!(r.load(), v);
+ }
+
+ #[test]
+ fn test_guest_memory_get_slice() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+
+ // Normal cases.
+ let slice_size = 0x200;
+ let slice = guest_mem
+ .get_slice(GuestAddress(0x100), slice_size)
+ .unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ let slice_size = 0x400;
+ let slice = guest_mem
+ .get_slice(GuestAddress(0x800), slice_size)
+ .unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ // Empty slice.
+ assert!(guest_mem
+ .get_slice(GuestAddress(0x900), 0)
+ .unwrap()
+ .is_empty());
+
+ // Error cases, wrong size or base address.
+ assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err());
+ assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err());
+ assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
+ }
+
+ #[test]
+ fn test_checked_offset() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x800);
+ let start_addr3 = GuestAddress(0xc00);
+ let guest_mem = GuestMemoryMmap::from_ranges(&[
+ (start_addr1, 0x400),
+ (start_addr2, 0x400),
+ (start_addr3, 0x400),
+ ])
+ .unwrap();
+
+ assert_eq!(
+ guest_mem.checked_offset(start_addr1, 0x200),
+ Some(GuestAddress(0x200))
+ );
+ assert_eq!(
+ guest_mem.checked_offset(start_addr1, 0xa00),
+ Some(GuestAddress(0xa00))
+ );
+ assert_eq!(
+ guest_mem.checked_offset(start_addr2, 0x7ff),
+ Some(GuestAddress(0xfff))
+ );
+ assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
+ assert_eq!(guest_mem.checked_offset(start_addr1, std::usize::MAX), None);
+
+ assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
+ assert_eq!(
+ guest_mem.checked_offset(start_addr1, 0x400 - 1),
+ Some(GuestAddress(0x400 - 1))
+ );
+ }
+
+ #[test]
+ fn test_check_range() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x800);
+ let start_addr3 = GuestAddress(0xc00);
+ let guest_mem = GuestMemoryMmap::from_ranges(&[
+ (start_addr1, 0x400),
+ (start_addr2, 0x400),
+ (start_addr3, 0x400),
+ ])
+ .unwrap();
+
+ assert!(guest_mem.check_range(start_addr1, 0x0));
+ assert!(guest_mem.check_range(start_addr1, 0x200));
+ assert!(guest_mem.check_range(start_addr1, 0x400));
+ assert!(!guest_mem.check_range(start_addr1, 0xa00));
+ assert!(guest_mem.check_range(start_addr2, 0x7ff));
+ assert!(guest_mem.check_range(start_addr2, 0x800));
+ assert!(!guest_mem.check_range(start_addr2, 0x801));
+ assert!(!guest_mem.check_range(start_addr2, 0xc00));
+ assert!(!guest_mem.check_range(start_addr1, std::usize::MAX));
+ }
+
+ #[test]
+ fn test_atomic_accesses() {
+ let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap();
+
+ crate::bytes::tests::check_atomic_accesses(
+ region,
+ MemoryRegionAddress(0),
+ MemoryRegionAddress(0x1000),
+ );
+ }
+
+ #[test]
+ fn test_dirty_tracking() {
+ test_guest_memory_and_region(|| {
+ crate::GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0), 0x1_0000)])
+ .unwrap()
+ });
+ }
+}
diff --git a/src/mmap_unix.rs b/src/mmap_unix.rs
new file mode 100644
index 0000000..c1d1adb
--- /dev/null
+++ b/src/mmap_unix.rs
@@ -0,0 +1,669 @@
+// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+//
+// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Helper structure for working with memory-mapped (mmap) regions on Unix.
+
+use std::io;
+use std::os::unix::io::AsRawFd;
+use std::ptr::null_mut;
+use std::result;
+
+use crate::bitmap::{Bitmap, BS};
+use crate::guest_memory::FileOffset;
+use crate::mmap::{check_file_offset, NewBitmap};
+use crate::volatile_memory::{self, VolatileMemory, VolatileSlice};
+
+/// Error conditions that may arise when creating a new `MmapRegion` object.
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ /// The specified file offset and length cause overflow when added.
+ #[error("The specified file offset and length cause overflow when added")]
+ InvalidOffsetLength,
+ /// The specified pointer to the mapping is not page-aligned.
+ #[error("The specified pointer to the mapping is not page-aligned")]
+ InvalidPointer,
+ /// The forbidden `MAP_FIXED` flag was specified.
+ #[error("The forbidden `MAP_FIXED` flag was specified")]
+ MapFixed,
+ /// Mappings using the same fd overlap in terms of file offset and length.
+ #[error("Mappings using the same fd overlap in terms of file offset and length")]
+ MappingOverlap,
+ /// A mapping with offset + length > EOF was attempted.
+ #[error("The specified file offset and length is greater then file length")]
+ MappingPastEof,
+ /// The `mmap` call returned an error.
+ #[error("{0}")]
+ Mmap(io::Error),
+ /// Seeking the end of the file returned an error.
+ #[error("Error seeking the end of the file: {0}")]
+ SeekEnd(io::Error),
+ /// Seeking the start of the file returned an error.
+ #[error("Error seeking the start of the file: {0}")]
+ SeekStart(io::Error),
+}
+
+pub type Result<T> = result::Result<T, Error>;
+
+/// A factory struct to build `MmapRegion` objects.
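+///
+/// # Example
+///
+/// A minimal usage sketch (illustrative only; it assumes `MmapRegionBuilder` is re-exported at
+/// the crate root alongside `MmapRegion`):
+///
+/// ```
+/// # use vm_memory::MmapRegionBuilder;
+/// let region = MmapRegionBuilder::<()>::new(0x1000)
+///     .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
+///     .build()
+///     .unwrap();
+/// assert_eq!(region.size(), 0x1000);
+/// ```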
+pub struct MmapRegionBuilder<B = ()> {
+ size: usize,
+ prot: i32,
+ flags: i32,
+ file_offset: Option<FileOffset>,
+ raw_ptr: Option<*mut u8>,
+ hugetlbfs: Option<bool>,
+ bitmap: B,
+}
+
+impl<B: Bitmap + Default> MmapRegionBuilder<B> {
+ /// Create a new `MmapRegionBuilder` using the default value for
+ /// the inner `Bitmap` object.
+ pub fn new(size: usize) -> Self {
+ Self::new_with_bitmap(size, B::default())
+ }
+}
+
+impl<B: Bitmap> MmapRegionBuilder<B> {
+ /// Create a new `MmapRegionBuilder` using the provided `Bitmap` object.
+ ///
+    /// When instantiating the builder for a region that does not require dirty bitmap
+    /// tracking functionality, we can specify a trivial `Bitmap` implementation
+ /// such as `()`.
+ pub fn new_with_bitmap(size: usize, bitmap: B) -> Self {
+ MmapRegionBuilder {
+ size,
+ prot: 0,
+ flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+ file_offset: None,
+ raw_ptr: None,
+ hugetlbfs: None,
+ bitmap,
+ }
+ }
+
+ /// Create the `MmapRegion` object with the specified mmap memory protection flag `prot`.
+ pub fn with_mmap_prot(mut self, prot: i32) -> Self {
+ self.prot = prot;
+ self
+ }
+
+ /// Create the `MmapRegion` object with the specified mmap `flags`.
+ pub fn with_mmap_flags(mut self, flags: i32) -> Self {
+ self.flags = flags;
+ self
+ }
+
+ /// Create the `MmapRegion` object with the specified `file_offset`.
+ pub fn with_file_offset(mut self, file_offset: FileOffset) -> Self {
+ self.file_offset = Some(file_offset);
+ self
+ }
+
+ /// Create the `MmapRegion` object with the specified `hugetlbfs` flag.
+ pub fn with_hugetlbfs(mut self, hugetlbfs: bool) -> Self {
+ self.hugetlbfs = Some(hugetlbfs);
+ self
+ }
+
+    /// Create the `MmapRegion` object with a pre-mmapped raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that `raw_addr` and `self.size` define a
+ /// region within a valid mapping that is already present in the process.
+ pub unsafe fn with_raw_mmap_pointer(mut self, raw_ptr: *mut u8) -> Self {
+ self.raw_ptr = Some(raw_ptr);
+ self
+ }
+
+ /// Build the `MmapRegion` object.
+ pub fn build(self) -> Result<MmapRegion<B>> {
+ if self.raw_ptr.is_some() {
+ return self.build_raw();
+ }
+
+ // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous
+ // in general.
+ if self.flags & libc::MAP_FIXED != 0 {
+ return Err(Error::MapFixed);
+ }
+
+ let (fd, offset) = if let Some(ref f_off) = self.file_offset {
+ check_file_offset(f_off, self.size)?;
+ (f_off.file().as_raw_fd(), f_off.start())
+ } else {
+ (-1, 0)
+ };
+
+ #[cfg(not(miri))]
+ // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters
+ // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or
+ // some wacky file).
+ let addr = unsafe {
+ libc::mmap(
+ null_mut(),
+ self.size,
+ self.prot,
+ self.flags,
+ fd,
+ offset as libc::off_t,
+ )
+ };
+
+ #[cfg(not(miri))]
+ if addr == libc::MAP_FAILED {
+ return Err(Error::Mmap(io::Error::last_os_error()));
+ }
+
+ #[cfg(miri)]
+ if self.size == 0 {
+ return Err(Error::Mmap(io::Error::from_raw_os_error(libc::EINVAL)));
+ }
+
+ // Miri does not support the mmap syscall, so we use rust's allocator for miri tests
+ #[cfg(miri)]
+ let addr = unsafe {
+ std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(self.size, 8).unwrap())
+ };
+
+ Ok(MmapRegion {
+ addr: addr as *mut u8,
+ size: self.size,
+ bitmap: self.bitmap,
+ file_offset: self.file_offset,
+ prot: self.prot,
+ flags: self.flags,
+ owned: true,
+ hugetlbfs: self.hugetlbfs,
+ })
+ }
+
+ fn build_raw(self) -> Result<MmapRegion<B>> {
+ // SAFETY: Safe because this call just returns the page size and doesn't have any side
+ // effects.
+ let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize;
+ let addr = self.raw_ptr.unwrap();
+
+ // Check that the pointer to the mapping is page-aligned.
+ if (addr as usize) & (page_size - 1) != 0 {
+ return Err(Error::InvalidPointer);
+ }
+
+ Ok(MmapRegion {
+ addr,
+ size: self.size,
+ bitmap: self.bitmap,
+ file_offset: self.file_offset,
+ prot: self.prot,
+ flags: self.flags,
+ owned: false,
+ hugetlbfs: self.hugetlbfs,
+ })
+ }
+}
+
+/// Helper structure for working with mmaped memory regions in Unix.
+///
+/// The structure is used for accessing the guest's physical memory by mmapping it into
+/// the current process.
+///
+/// # Limitations
+/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's
+/// physical memory may be mapped into the current process due to the limited virtual address
+/// space size of the process.
+#[derive(Debug)]
+pub struct MmapRegion<B = ()> {
+ addr: *mut u8,
+ size: usize,
+ bitmap: B,
+ file_offset: Option<FileOffset>,
+ prot: i32,
+ flags: i32,
+ owned: bool,
+ hugetlbfs: Option<bool>,
+}
+
+// SAFETY: Send and Sync aren't automatically inherited for the raw address pointer.
+// Accessing that pointer is only done through the stateless interface which
+// allows the object to be shared by multiple threads without a decrease in
+// safety.
+unsafe impl<B: Send> Send for MmapRegion<B> {}
+// SAFETY: See comment above.
+unsafe impl<B: Sync> Sync for MmapRegion<B> {}
+
+impl<B: NewBitmap> MmapRegion<B> {
+ /// Creates a shared anonymous mapping of `size` bytes.
+ ///
+ /// # Arguments
+ /// * `size` - The size of the memory region in bytes.
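+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only, not part of the upstream docs):
+    ///
+    /// ```
+    /// # use vm_memory::MmapRegion;
+    /// let region = MmapRegion::<()>::new(0x1000).unwrap();
+    /// assert_eq!(region.size(), 0x1000);
+    /// assert!(region.owned());
+    /// ```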
+ pub fn new(size: usize) -> Result<Self> {
+ MmapRegionBuilder::new_with_bitmap(size, B::with_len(size))
+ .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
+ .with_mmap_flags(libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE)
+ .build()
+ }
+
+ /// Creates a shared file mapping of `size` bytes.
+ ///
+ /// # Arguments
+ /// * `file_offset` - The mapping will be created at offset `file_offset.start` in the file
+ /// referred to by `file_offset.file`.
+ /// * `size` - The size of the memory region in bytes.
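+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only), backing the mapping with a temporary file that has
+    /// been sized up front:
+    ///
+    /// ```
+    /// # use vm_memory::{FileOffset, MmapRegion};
+    /// # use vmm_sys_util::tempfile::TempFile;
+    /// let file = TempFile::new().unwrap().into_file();
+    /// file.set_len(0x1000).unwrap();
+    ///
+    /// let region = MmapRegion::<()>::from_file(FileOffset::new(file, 0), 0x1000).unwrap();
+    /// assert_eq!(region.size(), 0x1000);
+    /// assert!(region.file_offset().is_some());
+    /// ```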
+ pub fn from_file(file_offset: FileOffset, size: usize) -> Result<Self> {
+ MmapRegionBuilder::new_with_bitmap(size, B::with_len(size))
+ .with_file_offset(file_offset)
+ .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
+ .with_mmap_flags(libc::MAP_NORESERVE | libc::MAP_SHARED)
+ .build()
+ }
+
+ /// Creates a mapping based on the provided arguments.
+ ///
+ /// # Arguments
+ /// * `file_offset` - if provided, the method will create a file mapping at offset
+ /// `file_offset.start` in the file referred to by `file_offset.file`.
+ /// * `size` - The size of the memory region in bytes.
+ /// * `prot` - The desired memory protection of the mapping.
+ /// * `flags` - This argument determines whether updates to the mapping are visible to other
+ /// processes mapping the same region, and whether updates are carried through to
+ /// the underlying file.
+ pub fn build(
+ file_offset: Option<FileOffset>,
+ size: usize,
+ prot: i32,
+ flags: i32,
+ ) -> Result<Self> {
+ let mut builder = MmapRegionBuilder::new_with_bitmap(size, B::with_len(size))
+ .with_mmap_prot(prot)
+ .with_mmap_flags(flags);
+ if let Some(v) = file_offset {
+ builder = builder.with_file_offset(v);
+ }
+ builder.build()
+ }
+
+ /// Creates a `MmapRegion` instance for an externally managed mapping.
+ ///
+ /// This method is intended to be used exclusively in situations in which the mapping backing
+ /// the region is provided by an entity outside the control of the caller (e.g. the dynamic
+ /// linker).
+ ///
+ /// # Arguments
+ /// * `addr` - Pointer to the start of the mapping. Must be page-aligned.
+ /// * `size` - The size of the memory region in bytes.
+ /// * `prot` - Must correspond to the memory protection attributes of the existing mapping.
+ /// * `flags` - Must correspond to the flags that were passed to `mmap` for the creation of
+ /// the existing mapping.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that `addr` and `size` define a region within
+ /// a valid mapping that is already present in the process.
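+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only): wrap memory that is already mapped by another,
+    /// owned region.
+    ///
+    /// ```
+    /// # use vm_memory::MmapRegion;
+    /// let owned = MmapRegion::<()>::new(0x1000).unwrap();
+    /// // SAFETY: `owned` keeps the mapping alive for as long as `raw` is used.
+    /// let raw = unsafe {
+    ///     MmapRegion::<()>::build_raw(owned.as_ptr(), 0x1000, owned.prot(), owned.flags()).unwrap()
+    /// };
+    /// assert!(!raw.owned());
+    /// ```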
+ pub unsafe fn build_raw(addr: *mut u8, size: usize, prot: i32, flags: i32) -> Result<Self> {
+ MmapRegionBuilder::new_with_bitmap(size, B::with_len(size))
+ .with_raw_mmap_pointer(addr)
+ .with_mmap_prot(prot)
+ .with_mmap_flags(flags)
+ .build()
+ }
+}
+
+impl<B: Bitmap> MmapRegion<B> {
+ /// Returns a pointer to the beginning of the memory region. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ ///
+ /// Should only be used for passing this region to ioctls for setting guest memory.
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.addr
+ }
+
+ /// Returns the size of this region.
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Returns information regarding the offset into the file backing this region (if any).
+ pub fn file_offset(&self) -> Option<&FileOffset> {
+ self.file_offset.as_ref()
+ }
+
+ /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region.
+ pub fn prot(&self) -> i32 {
+ self.prot
+ }
+
+ /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region.
+ pub fn flags(&self) -> i32 {
+ self.flags
+ }
+
+ /// Returns `true` if the mapping is owned by this `MmapRegion` instance.
+ pub fn owned(&self) -> bool {
+ self.owned
+ }
+
+ /// Checks whether this region and `other` are backed by overlapping
+ /// [`FileOffset`](struct.FileOffset.html) objects.
+ ///
+ /// This is mostly a sanity check available for convenience, as different file descriptors
+ /// can alias the same file.
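+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only): two regions backed by overlapping ranges of the
+    /// same file.
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// # use vm_memory::{FileOffset, MmapRegion};
+    /// # use vmm_sys_util::tempfile::TempFile;
+    /// let file = Arc::new(TempFile::new().unwrap().into_file());
+    /// file.set_len(0x3000).unwrap();
+    ///
+    /// let r1 = MmapRegion::<()>::from_file(FileOffset::from_arc(file.clone(), 0), 0x2000).unwrap();
+    /// let r2 = MmapRegion::<()>::from_file(FileOffset::from_arc(file, 0x1000), 0x1000).unwrap();
+    /// assert!(r1.fds_overlap(&r2));
+    /// ```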
+ pub fn fds_overlap<T: Bitmap>(&self, other: &MmapRegion<T>) -> bool {
+ if let Some(f_off1) = self.file_offset() {
+ if let Some(f_off2) = other.file_offset() {
+ if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() {
+ let s1 = f_off1.start();
+ let s2 = f_off2.start();
+ let l1 = self.len() as u64;
+ let l2 = other.len() as u64;
+
+ if s1 < s2 {
+ return s1 + l1 > s2;
+ } else {
+ return s2 + l2 > s1;
+ }
+ }
+ }
+ }
+ false
+ }
+
+    /// Set the hugetlbfs flag of the region.
+ pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
+ self.hugetlbfs = Some(hugetlbfs)
+ }
+
+    /// Returns whether the region is backed by hugetlbfs, if known.
+ pub fn is_hugetlbfs(&self) -> Option<bool> {
+ self.hugetlbfs
+ }
+
+ /// Returns a reference to the inner bitmap object.
+ pub fn bitmap(&self) -> &B {
+ &self.bitmap
+ }
+}
+
+impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
+ type B = B;
+
+ fn len(&self) -> usize {
+ self.size
+ }
+
+ fn get_slice(
+ &self,
+ offset: usize,
+ count: usize,
+ ) -> volatile_memory::Result<VolatileSlice<BS<B>>> {
+ let _ = self.compute_end_offset(offset, count)?;
+
+ Ok(
+ // SAFETY: Safe because we checked that offset + count was within our range and we only
+ // ever hand out volatile accessors.
+ unsafe {
+ VolatileSlice::with_bitmap(
+ self.addr.add(offset),
+ count,
+ self.bitmap.slice_at(offset),
+ None,
+ )
+ },
+ )
+ }
+}
+
+impl<B> Drop for MmapRegion<B> {
+ fn drop(&mut self) {
+ if self.owned {
+ // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody
+ // else is holding a reference to it.
+ unsafe {
+ #[cfg(not(miri))]
+ libc::munmap(self.addr as *mut libc::c_void, self.size);
+
+ #[cfg(miri)]
+ std::alloc::dealloc(
+ self.addr,
+ std::alloc::Layout::from_size_align(self.size, 8).unwrap(),
+ );
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+ use super::*;
+
+ use std::io::Write;
+ use std::slice;
+ use std::sync::Arc;
+ use vmm_sys_util::tempfile::TempFile;
+
+ use crate::bitmap::AtomicBitmap;
+
+ type MmapRegion = super::MmapRegion<()>;
+
+ // Adding a helper method to extract the errno within an Error::Mmap(e), or return a
+ // distinctive value when the error is represented by another variant.
+ impl Error {
+ pub fn raw_os_error(&self) -> i32 {
+ match self {
+ Error::Mmap(e) => e.raw_os_error().unwrap(),
+ _ => std::i32::MIN,
+ }
+ }
+ }
+
+ #[test]
+ fn test_mmap_region_new() {
+ assert!(MmapRegion::new(0).is_err());
+
+ let size = 4096;
+
+ let r = MmapRegion::new(4096).unwrap();
+ assert_eq!(r.size(), size);
+ assert!(r.file_offset().is_none());
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(
+ r.flags(),
+ libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE
+ );
+ }
+
+ #[test]
+ fn test_mmap_region_set_hugetlbfs() {
+ assert!(MmapRegion::new(0).is_err());
+
+ let size = 4096;
+
+ let r = MmapRegion::new(size).unwrap();
+ assert_eq!(r.size(), size);
+ assert!(r.file_offset().is_none());
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(
+ r.flags(),
+ libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE
+ );
+ assert_eq!(r.is_hugetlbfs(), None);
+
+ let mut r = MmapRegion::new(size).unwrap();
+ r.set_hugetlbfs(false);
+ assert_eq!(r.size(), size);
+ assert!(r.file_offset().is_none());
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(
+ r.flags(),
+ libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE
+ );
+ assert_eq!(r.is_hugetlbfs(), Some(false));
+
+ let mut r = MmapRegion::new(size).unwrap();
+ r.set_hugetlbfs(true);
+ assert_eq!(r.size(), size);
+ assert!(r.file_offset().is_none());
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(
+ r.flags(),
+ libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE
+ );
+ assert_eq!(r.is_hugetlbfs(), Some(true));
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_mmap_region_from_file() {
+ let mut f = TempFile::new().unwrap().into_file();
+ let offset: usize = 0;
+ let buf1 = [1u8, 2, 3, 4, 5];
+
+ f.write_all(buf1.as_ref()).unwrap();
+ let r = MmapRegion::from_file(FileOffset::new(f, offset as u64), buf1.len()).unwrap();
+
+ assert_eq!(r.size(), buf1.len() - offset);
+ assert_eq!(r.file_offset().unwrap().start(), offset as u64);
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_SHARED);
+
+ let buf2 = unsafe { slice::from_raw_parts(r.as_ptr(), buf1.len() - offset) };
+ assert_eq!(&buf1[offset..], buf2);
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_mmap_region_build() {
+ let a = Arc::new(TempFile::new().unwrap().into_file());
+
+ let prot = libc::PROT_READ | libc::PROT_WRITE;
+ let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE;
+ let offset = 4096;
+ let size = 1000;
+
+ // Offset + size will overflow.
+ let r = MmapRegion::build(
+ Some(FileOffset::from_arc(a.clone(), std::u64::MAX)),
+ size,
+ prot,
+ flags,
+ );
+ assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");
+
+ // Offset + size is greater than the size of the file (which is 0 at this point).
+ let r = MmapRegion::build(
+ Some(FileOffset::from_arc(a.clone(), offset)),
+ size,
+ prot,
+ flags,
+ );
+ assert_eq!(format!("{:?}", r.unwrap_err()), "MappingPastEof");
+
+ // MAP_FIXED was specified among the flags.
+ let r = MmapRegion::build(
+ Some(FileOffset::from_arc(a.clone(), offset)),
+ size,
+ prot,
+ flags | libc::MAP_FIXED,
+ );
+ assert_eq!(format!("{:?}", r.unwrap_err()), "MapFixed");
+
+ // Let's resize the file.
+ assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0);
+
+ // The offset is not properly aligned.
+ let r = MmapRegion::build(
+ Some(FileOffset::from_arc(a.clone(), offset - 1)),
+ size,
+ prot,
+ flags,
+ );
+ assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
+
+ // The build should be successful now.
+ let r =
+ MmapRegion::build(Some(FileOffset::from_arc(a, offset)), size, prot, flags).unwrap();
+
+ assert_eq!(r.size(), size);
+ assert_eq!(r.file_offset().unwrap().start(), offset);
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE);
+ assert!(r.owned());
+
+ let region_size = 0x10_0000;
+ let bitmap = AtomicBitmap::new(region_size, 0x1000);
+ let builder = MmapRegionBuilder::new_with_bitmap(region_size, bitmap)
+ .with_hugetlbfs(true)
+ .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(builder.size, region_size);
+ assert_eq!(builder.hugetlbfs, Some(true));
+ assert_eq!(builder.prot, libc::PROT_READ | libc::PROT_WRITE);
+
+ crate::bitmap::tests::test_volatile_memory(&(builder.build().unwrap()));
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Causes warnings due to the pointer casts
+ fn test_mmap_region_build_raw() {
+ let addr = 0;
+ let size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
+ let prot = libc::PROT_READ | libc::PROT_WRITE;
+ let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE;
+
+ let r = unsafe { MmapRegion::build_raw((addr + 1) as *mut u8, size, prot, flags) };
+ assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidPointer");
+
+ let r = unsafe { MmapRegion::build_raw(addr as *mut u8, size, prot, flags).unwrap() };
+
+ assert_eq!(r.size(), size);
+ assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE);
+ assert!(!r.owned());
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_mmap_region_fds_overlap() {
+ let a = Arc::new(TempFile::new().unwrap().into_file());
+ assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0);
+
+ let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 4096).unwrap();
+ let r2 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 4096), 4096).unwrap();
+ assert!(!r1.fds_overlap(&r2));
+
+ let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 5000).unwrap();
+ assert!(r1.fds_overlap(&r2));
+
+ let r2 = MmapRegion::from_file(FileOffset::from_arc(a, 0), 1000).unwrap();
+ assert!(r1.fds_overlap(&r2));
+
+        // Different files, so there's no overlap.
+ let new_file = TempFile::new().unwrap().into_file();
+ // Resize before mapping.
+ assert_eq!(
+ unsafe { libc::ftruncate(new_file.as_raw_fd(), 1024 * 10) },
+ 0
+ );
+ let r2 = MmapRegion::from_file(FileOffset::new(new_file, 0), 5000).unwrap();
+ assert!(!r1.fds_overlap(&r2));
+
+ // R2 is not file backed, so no overlap.
+ let r2 = MmapRegion::new(5000).unwrap();
+ assert!(!r1.fds_overlap(&r2));
+ }
+
+ #[test]
+ fn test_dirty_tracking() {
+ // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for
+ // the rest of the unit tests above.
+ let m = crate::MmapRegion::<AtomicBitmap>::new(0x1_0000).unwrap();
+ crate::bitmap::tests::test_volatile_memory(&m);
+ }
+}
diff --git a/src/mmap_windows.rs b/src/mmap_windows.rs
new file mode 100644
index 0000000..0c7dbd9
--- /dev/null
+++ b/src/mmap_windows.rs
@@ -0,0 +1,270 @@
+// Copyright (C) 2019 CrowdStrike, Inc. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Helper structure for working with mmaped memory regions in Windows.
+
+use std;
+use std::io;
+use std::os::windows::io::{AsRawHandle, RawHandle};
+use std::ptr::{null, null_mut};
+
+use libc::{c_void, size_t};
+
+use winapi::um::errhandlingapi::GetLastError;
+
+use crate::bitmap::{Bitmap, BS};
+use crate::guest_memory::FileOffset;
+use crate::mmap::NewBitmap;
+use crate::volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice};
+
+#[allow(non_snake_case)]
+#[link(name = "kernel32")]
+extern "stdcall" {
+ pub fn VirtualAlloc(
+ lpAddress: *mut c_void,
+ dwSize: size_t,
+ flAllocationType: u32,
+ flProtect: u32,
+ ) -> *mut c_void;
+
+ pub fn VirtualFree(lpAddress: *mut c_void, dwSize: size_t, dwFreeType: u32) -> u32;
+
+ pub fn CreateFileMappingA(
+ hFile: RawHandle, // HANDLE
+ lpFileMappingAttributes: *const c_void, // LPSECURITY_ATTRIBUTES
+ flProtect: u32, // DWORD
+ dwMaximumSizeHigh: u32, // DWORD
+ dwMaximumSizeLow: u32, // DWORD
+ lpName: *const u8, // LPCSTR
+ ) -> RawHandle; // HANDLE
+
+ pub fn MapViewOfFile(
+ hFileMappingObject: RawHandle,
+ dwDesiredAccess: u32,
+ dwFileOffsetHigh: u32,
+ dwFileOffsetLow: u32,
+ dwNumberOfBytesToMap: size_t,
+ ) -> *mut c_void;
+
+ pub fn CloseHandle(hObject: RawHandle) -> u32; // BOOL
+}
+
+const MM_HIGHEST_VAD_ADDRESS: u64 = 0x000007FFFFFDFFFF;
+
+const MEM_COMMIT: u32 = 0x00001000;
+const MEM_RELEASE: u32 = 0x00008000;
+const FILE_MAP_ALL_ACCESS: u32 = 0xf001f;
+const PAGE_READWRITE: u32 = 0x04;
+
+pub const MAP_FAILED: *mut c_void = 0 as *mut c_void;
+pub const INVALID_HANDLE_VALUE: RawHandle = (-1isize) as RawHandle;
+#[allow(dead_code)]
+pub const ERROR_INVALID_PARAMETER: i32 = 87;
+
+/// Helper structure for working with mmap'ed memory regions in Windows.
+///
+/// The structure is used for accessing the guest's physical memory by mmapping it into
+/// the current process.
+///
+/// # Limitations
+/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's
+/// physical memory may be mapped into the current process due to the limited virtual address
+/// space size of the process.
+#[derive(Debug)]
+pub struct MmapRegion<B> {
+ addr: *mut u8,
+ size: usize,
+ bitmap: B,
+ file_offset: Option<FileOffset>,
+}
+
+// Send and Sync aren't automatically inherited for the raw address pointer.
+// Accessing that pointer is only done through the stateless interface which
+// allows the object to be shared by multiple threads without a decrease in
+// safety.
+unsafe impl<B: Send> Send for MmapRegion<B> {}
+unsafe impl<B: Sync> Sync for MmapRegion<B> {}
+
+impl<B: NewBitmap> MmapRegion<B> {
+ /// Creates a shared anonymous mapping of `size` bytes.
+ ///
+ /// # Arguments
+ /// * `size` - The size of the memory region in bytes.
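+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only):
+    ///
+    /// ```
+    /// # use vm_memory::MmapRegion;
+    /// let region = MmapRegion::<()>::new(0x1000).unwrap();
+    /// assert_eq!(region.size(), 0x1000);
+    /// ```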
+ pub fn new(size: usize) -> io::Result<Self> {
+ if (size == 0) || (size > MM_HIGHEST_VAD_ADDRESS as usize) {
+ return Err(io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ // This is safe because we are creating an anonymous mapping in a place not already used by
+ // any other area in this process.
+ let addr = unsafe { VirtualAlloc(0 as *mut c_void, size, MEM_COMMIT, PAGE_READWRITE) };
+ if addr == MAP_FAILED {
+ return Err(io::Error::last_os_error());
+ }
+ Ok(Self {
+ addr: addr as *mut u8,
+ size,
+ bitmap: B::with_len(size),
+ file_offset: None,
+ })
+ }
+
+ /// Creates a shared file mapping of `size` bytes.
+ ///
+ /// # Arguments
+ /// * `file_offset` - The mapping will be created at offset `file_offset.start` in the file
+ /// referred to by `file_offset.file`.
+ /// * `size` - The size of the memory region in bytes.
+ pub fn from_file(file_offset: FileOffset, size: usize) -> io::Result<Self> {
+ let handle = file_offset.file().as_raw_handle();
+ if handle == INVALID_HANDLE_VALUE {
+ return Err(io::Error::from_raw_os_error(libc::EBADF));
+ }
+
+ let mapping = unsafe {
+ CreateFileMappingA(
+ handle,
+ null(),
+ PAGE_READWRITE,
+ (size >> 32) as u32,
+ size as u32,
+ null(),
+ )
+ };
+ if mapping == 0 as RawHandle {
+ return Err(io::Error::last_os_error());
+ }
+
+ let offset = file_offset.start();
+
+ // This is safe because we are creating a mapping in a place not already used by any other
+ // area in this process.
+ let addr = unsafe {
+ MapViewOfFile(
+ mapping,
+ FILE_MAP_ALL_ACCESS,
+ (offset >> 32) as u32,
+ offset as u32,
+ size,
+ )
+ };
+
+ unsafe {
+ CloseHandle(mapping);
+ }
+
+ if addr == null_mut() {
+ return Err(io::Error::last_os_error());
+ }
+ Ok(Self {
+ addr: addr as *mut u8,
+ size,
+ bitmap: B::with_len(size),
+ file_offset: Some(file_offset),
+ })
+ }
+}
+
+impl<B: Bitmap> MmapRegion<B> {
+ /// Returns a pointer to the beginning of the memory region. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ ///
+ /// Should only be used for passing this region to ioctls for setting guest memory.
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.addr
+ }
+
+ /// Returns the size of this region.
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Returns information regarding the offset into the file backing this region (if any).
+ pub fn file_offset(&self) -> Option<&FileOffset> {
+ self.file_offset.as_ref()
+ }
+
+ /// Returns a reference to the inner bitmap object.
+ pub fn bitmap(&self) -> &B {
+ &self.bitmap
+ }
+}
+
+impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
+ type B = B;
+
+ fn len(&self) -> usize {
+ self.size
+ }
+
+ fn get_slice(
+ &self,
+ offset: usize,
+ count: usize,
+ ) -> volatile_memory::Result<VolatileSlice<BS<Self::B>>> {
+ let end = compute_offset(offset, count)?;
+ if end > self.size {
+ return Err(volatile_memory::Error::OutOfBounds { addr: end });
+ }
+
+ // Safe because we checked that offset + count was within our range and we only ever hand
+ // out volatile accessors.
+ Ok(unsafe {
+ VolatileSlice::with_bitmap(
+ self.addr.add(offset),
+ count,
+ self.bitmap.slice_at(offset),
+ None,
+ )
+ })
+ }
+}
+
+impl<B> Drop for MmapRegion<B> {
+ fn drop(&mut self) {
+ // This is safe because we mmap the area at addr ourselves, and nobody
+ // else is holding a reference to it.
+ // Note that the size must be set to 0 when using MEM_RELEASE,
+ // otherwise the function fails.
+ unsafe {
+ let ret_val = VirtualFree(self.addr as *mut libc::c_void, 0, MEM_RELEASE);
+ if ret_val == 0 {
+ let err = GetLastError();
+                // We can't use any fancy logger here, yet we want to
+                // pinpoint memory leaks.
+ println!(
+ "WARNING: Could not deallocate mmap region. \
+ Address: {:?}. Size: {}. Error: {}",
+ self.addr, self.size, err
+ )
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::os::windows::io::FromRawHandle;
+
+ use crate::bitmap::AtomicBitmap;
+ use crate::guest_memory::FileOffset;
+ use crate::mmap_windows::INVALID_HANDLE_VALUE;
+
+ type MmapRegion = super::MmapRegion<()>;
+
+ #[test]
+ fn map_invalid_handle() {
+ let file = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) };
+ let file_offset = FileOffset::new(file, 0);
+ let e = MmapRegion::from_file(file_offset, 1024).unwrap_err();
+ assert_eq!(e.raw_os_error(), Some(libc::EBADF));
+ }
+
+ #[test]
+ fn test_dirty_tracking() {
+ // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for
+ // the rest of the unit tests above.
+ let m = crate::MmapRegion::<AtomicBitmap>::new(0x1_0000).unwrap();
+ crate::bitmap::tests::test_volatile_memory(&m);
+ }
+}
diff --git a/src/mmap_xen.rs b/src/mmap_xen.rs
new file mode 100644
index 0000000..b641311
--- /dev/null
+++ b/src/mmap_xen.rs
@@ -0,0 +1,1216 @@
+// Copyright 2023 Linaro Ltd. All Rights Reserved.
+// Viresh Kumar <viresh.kumar@linaro.org>
+//
+// Xen specific memory mapping implementations
+//
+// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
+
+//! Helper structure for working with mmap'ed memory regions on Xen.
+
+use bitflags::bitflags;
+use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE};
+use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result};
+
+use vmm_sys_util::{
+ fam::{Error as FamError, FamStruct, FamStructWrapper},
+ generate_fam_struct_impl,
+ ioctl::{ioctl_expr, _IOC_NONE},
+};
+
+// Use a dummy ioctl implementation for tests instead.
+#[cfg(not(test))]
+use vmm_sys_util::ioctl::ioctl_with_ref;
+
+#[cfg(test)]
+use tests::ioctl_with_ref;
+
+use crate::bitmap::{Bitmap, BS};
+use crate::guest_memory::{FileOffset, GuestAddress};
+use crate::mmap::{check_file_offset, NewBitmap};
+use crate::volatile_memory::{self, VolatileMemory, VolatileSlice};
+
+/// Error conditions that may arise when creating a new `MmapRegion` object.
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ /// The specified file offset and length cause overflow when added.
+ #[error("The specified file offset and length cause overflow when added")]
+ InvalidOffsetLength,
+ /// The forbidden `MAP_FIXED` flag was specified.
+ #[error("The forbidden `MAP_FIXED` flag was specified")]
+ MapFixed,
+ /// A mapping with offset + length > EOF was attempted.
+ #[error("The specified file offset and length is greater then file length")]
+ MappingPastEof,
+ /// The `mmap` call returned an error.
+ #[error("{0}")]
+ Mmap(io::Error),
+ /// Seeking the end of the file returned an error.
+ #[error("Error seeking the end of the file: {0}")]
+ SeekEnd(io::Error),
+ /// Seeking the start of the file returned an error.
+ #[error("Error seeking the start of the file: {0}")]
+ SeekStart(io::Error),
+ /// Invalid file offset.
+ #[error("Invalid file offset")]
+ InvalidFileOffset,
+ /// Memory mapped in advance.
+ #[error("Memory mapped in advance")]
+ MappedInAdvance,
+ /// Invalid Xen mmap flags.
+ #[error("Invalid Xen Mmap flags: {0:x}")]
+ MmapFlags(u32),
+ /// Fam error.
+ #[error("Fam error: {0}")]
+ Fam(FamError),
+ /// Unexpected error.
+ #[error("Unexpected error")]
+ UnexpectedError,
+}
+
+type Result<T> = result::Result<T, Error>;
+
+/// `MmapRange` holds the set of arguments required to create an mmap region.
+#[derive(Clone, Debug)]
+pub struct MmapRange {
+ size: usize,
+ file_offset: Option<FileOffset>,
+ prot: Option<i32>,
+ flags: Option<i32>,
+ hugetlbfs: Option<bool>,
+ addr: GuestAddress,
+ mmap_flags: u32,
+ mmap_data: u32,
+}
+
+impl MmapRange {
+    /// Creates an instance of the range from the given arguments.
+ pub fn new(
+ size: usize,
+ file_offset: Option<FileOffset>,
+ addr: GuestAddress,
+ mmap_flags: u32,
+ mmap_data: u32,
+ ) -> Self {
+ Self {
+ size,
+ file_offset,
+ prot: None,
+ flags: None,
+ hugetlbfs: None,
+ addr,
+ mmap_flags,
+ mmap_data,
+ }
+ }
+
+    /// Creates an instance of the range for a `MmapXenFlags::UNIX` type mapping.
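+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only): an anonymous UNIX range of 4 KiB placed at guest
+    /// address 0x1000, turned into a region.
+    ///
+    /// ```
+    /// # use vm_memory::{GuestAddress, MmapRange, MmapRegion};
+    /// let range = MmapRange::new_unix(0x1000, None, GuestAddress(0x1000));
+    /// let region = MmapRegion::<()>::from_range(range).unwrap();
+    /// assert_eq!(region.size(), 0x1000);
+    /// ```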
+ pub fn new_unix(size: usize, file_offset: Option<FileOffset>, addr: GuestAddress) -> Self {
+ let flags = Some(match file_offset {
+ Some(_) => libc::MAP_NORESERVE | libc::MAP_SHARED,
+ None => libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+ });
+
+ Self {
+ size,
+ file_offset,
+ prot: None,
+ flags,
+ hugetlbfs: None,
+ addr,
+ mmap_flags: MmapXenFlags::UNIX.bits(),
+ mmap_data: 0,
+ }
+ }
+
+ /// Set the prot of the range.
+ pub fn set_prot(&mut self, prot: i32) {
+ self.prot = Some(prot)
+ }
+
+ /// Set the flags of the range.
+ pub fn set_flags(&mut self, flags: i32) {
+ self.flags = Some(flags)
+ }
+
+ /// Set the hugetlbfs of the range.
+ pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
+ self.hugetlbfs = Some(hugetlbfs)
+ }
+}
+
+/// Helper structure for working with mmaped memory regions with Xen.
+///
+/// The structure is used for accessing the guest's physical memory by mmapping it into
+/// the current process.
+///
+/// # Limitations
+/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's
+/// physical memory may be mapped into the current process due to the limited virtual address
+/// space size of the process.
+#[derive(Debug)]
+pub struct MmapRegion<B = ()> {
+ bitmap: B,
+ size: usize,
+ prot: i32,
+ flags: i32,
+ file_offset: Option<FileOffset>,
+ hugetlbfs: Option<bool>,
+ mmap: MmapXen,
+}
+
+// SAFETY: Send and Sync aren't automatically inherited for the raw address pointer.
+// Accessing that pointer is only done through the stateless interface which
+// allows the object to be shared by multiple threads without a decrease in
+// safety.
+unsafe impl<B: Send> Send for MmapRegion<B> {}
+// SAFETY: See comment above.
+unsafe impl<B: Sync> Sync for MmapRegion<B> {}
+
+impl<B: NewBitmap> MmapRegion<B> {
+    /// Creates a memory-mapped region based on the provided `MmapRange`.
+ ///
+ /// # Arguments
+ /// * `range` - An instance of type `MmapRange`.
+ ///
+ /// # Examples
+ /// * Write a slice at guest address 0x1200 with Xen's Grant mapping.
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::path::Path;
+ /// use vm_memory::{
+ /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion,
+ /// MmapXenFlags,
+ /// };
+ /// # use vmm_sys_util::tempfile::TempFile;
+ ///
+ /// let addr = GuestAddress(0x1000);
+ /// # if false {
+ /// let file = Some(FileOffset::new(
+ /// File::open(Path::new("/dev/xen/gntdev")).expect("Could not open file"),
+ /// 0,
+ /// ));
+ ///
+ /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::GRANT.bits(), 0);
+ /// # }
+ /// # // We need a UNIX mapping for tests to succeed.
+ /// # let range = MmapRange::new_unix(0x400, None, addr);
+ ///
+ /// let r = GuestRegionMmap::new(
+ /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"),
+ /// addr,
+ /// )
+ /// .expect("Could not create guest region");
+ ///
+ /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory");
+ /// let res = gm
+ /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
+ /// .expect("Could not write to guest memory");
+ /// assert_eq!(5, res);
+ /// ```
+ ///
+ /// * Write a slice at guest address 0x1200 with Xen's Foreign mapping.
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::path::Path;
+ /// use vm_memory::{
+ /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion,
+ /// MmapXenFlags,
+ /// };
+ /// # use vmm_sys_util::tempfile::TempFile;
+ ///
+ /// let addr = GuestAddress(0x1000);
+ /// # if false {
+ /// let file = Some(FileOffset::new(
+ /// File::open(Path::new("/dev/xen/privcmd")).expect("Could not open file"),
+ /// 0,
+ /// ));
+ ///
+ /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::FOREIGN.bits(), 0);
+ /// # }
+ /// # // We need a UNIX mapping for tests to succeed.
+ /// # let range = MmapRange::new_unix(0x400, None, addr);
+ ///
+ /// let r = GuestRegionMmap::new(
+ /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"),
+ /// addr,
+ /// )
+ /// .expect("Could not create guest region");
+ ///
+ /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory");
+ /// let res = gm
+ /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
+ /// .expect("Could not write to guest memory");
+ /// assert_eq!(5, res);
+ /// ```
+ pub fn from_range(mut range: MmapRange) -> Result<Self> {
+ if range.prot.is_none() {
+ range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
+ }
+
+ match range.flags {
+ Some(flags) => {
+ if flags & libc::MAP_FIXED != 0 {
+ // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous
+ // in general.
+ return Err(Error::MapFixed);
+ }
+ }
+ None => range.flags = Some(libc::MAP_NORESERVE | libc::MAP_SHARED),
+ }
+
+ let mmap = MmapXen::new(&range)?;
+
+ Ok(MmapRegion {
+ bitmap: B::with_len(range.size),
+ size: range.size,
+ prot: range.prot.ok_or(Error::UnexpectedError)?,
+ flags: range.flags.ok_or(Error::UnexpectedError)?,
+ file_offset: range.file_offset,
+ hugetlbfs: range.hugetlbfs,
+ mmap,
+ })
+ }
+}
+
+impl<B: Bitmap> MmapRegion<B> {
+ /// Returns a pointer to the beginning of the memory region. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ ///
+ /// Should only be used for passing this region to ioctls for setting guest memory.
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.mmap.addr()
+ }
+
+ /// Returns the size of this region.
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Returns information regarding the offset into the file backing this region (if any).
+ pub fn file_offset(&self) -> Option<&FileOffset> {
+ self.file_offset.as_ref()
+ }
+
+ /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region.
+ pub fn prot(&self) -> i32 {
+ self.prot
+ }
+
+ /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region.
+ pub fn flags(&self) -> i32 {
+ self.flags
+ }
+
+ /// Checks whether this region and `other` are backed by overlapping
+ /// [`FileOffset`](struct.FileOffset.html) objects.
+ ///
+ /// This is mostly a sanity check available for convenience, as different file descriptors
+ /// can alias the same file.
+ pub fn fds_overlap<T: Bitmap>(&self, other: &MmapRegion<T>) -> bool {
+ if let Some(f_off1) = self.file_offset() {
+ if let Some(f_off2) = other.file_offset() {
+ if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() {
+ let s1 = f_off1.start();
+ let s2 = f_off2.start();
+ let l1 = self.len() as u64;
+ let l2 = other.len() as u64;
+
+ if s1 < s2 {
+ return s1 + l1 > s2;
+ } else {
+ return s2 + l2 > s1;
+ }
+ }
+ }
+ }
+ false
+ }
+
+    /// Set the hugetlbfs flag of the region.
+ pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
+ self.hugetlbfs = Some(hugetlbfs)
+ }
+
+    /// Returns whether the region is backed by hugetlbfs, if known.
+ pub fn is_hugetlbfs(&self) -> Option<bool> {
+ self.hugetlbfs
+ }
+
+ /// Returns a reference to the inner bitmap object.
+ pub fn bitmap(&self) -> &B {
+ &self.bitmap
+ }
+
+ /// Returns xen mmap flags.
+ pub fn xen_mmap_flags(&self) -> u32 {
+ self.mmap.flags()
+ }
+
+ /// Returns xen mmap data.
+ pub fn xen_mmap_data(&self) -> u32 {
+ self.mmap.data()
+ }
+}
+
+impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
+ type B = B;
+
+ fn len(&self) -> usize {
+ self.size
+ }
+
+ fn get_slice(
+ &self,
+ offset: usize,
+ count: usize,
+ ) -> volatile_memory::Result<VolatileSlice<BS<B>>> {
+ let _ = self.compute_end_offset(offset, count)?;
+
+ let mmap_info = if self.mmap.mmap_in_advance() {
+ None
+ } else {
+ Some(&self.mmap)
+ };
+
+ Ok(
+ // SAFETY: Safe because we checked that offset + count was within our range and we only
+ // ever hand out volatile accessors.
+ unsafe {
+ VolatileSlice::with_bitmap(
+ self.as_ptr().add(offset),
+ count,
+ self.bitmap.slice_at(offset),
+ mmap_info,
+ )
+ },
+ )
+ }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+struct MmapUnix {
+ addr: *mut u8,
+ size: usize,
+}
+
+impl MmapUnix {
+ fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result<Self> {
+ let addr =
+ // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters
+ // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or
+ // some wacky file).
+ unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) };
+
+ if addr == libc::MAP_FAILED {
+ return Err(Error::Mmap(io::Error::last_os_error()));
+ }
+
+ Ok(Self {
+ addr: addr as *mut u8,
+ size,
+ })
+ }
+
+ fn addr(&self) -> *mut u8 {
+ self.addr
+ }
+}
+
+impl Drop for MmapUnix {
+ fn drop(&mut self) {
+ // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody
+ // else is holding a reference to it.
+ unsafe {
+ libc::munmap(self.addr as *mut libc::c_void, self.size);
+ }
+ }
+}
+
+// Bit mask for the vhost-user xen mmap message.
+bitflags! {
+ /// Flags for the Xen mmap message.
+ pub struct MmapXenFlags: u32 {
+ /// Standard Unix memory mapping.
+ const UNIX = 0x0;
+ /// Xen foreign memory (accessed via /dev/privcmd).
+ const FOREIGN = 0x1;
+ /// Xen grant memory (accessed via /dev/gntdev).
+ const GRANT = 0x2;
+        /// Xen no-advance mapping: the region is not mapped in advance, but on demand.
+ const NO_ADVANCE_MAP = 0x8;
+ /// All valid mappings.
+ const ALL = Self::FOREIGN.bits() | Self::GRANT.bits();
+ }
+}
+
+impl MmapXenFlags {
+    /// Returns `true` if the mmap flags are valid.
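+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only):
+    ///
+    /// ```
+    /// # use vm_memory::MmapXenFlags;
+    /// assert!(MmapXenFlags::FOREIGN.is_valid());
+    /// assert!((MmapXenFlags::GRANT | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
+    /// // Foreign and grant mappings are mutually exclusive.
+    /// assert!(!(MmapXenFlags::FOREIGN | MmapXenFlags::GRANT).is_valid());
+    /// ```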
+ pub fn is_valid(&self) -> bool {
+ // only one of unix, foreign or grant should be set and mmap_in_advance() should be true
+ // with foreign and unix.
+ if self.is_grant() {
+ !self.is_foreign()
+ } else if self.is_foreign() || self.is_unix() {
+ self.mmap_in_advance()
+ } else {
+ false
+ }
+ }
+
+ /// Is standard Unix memory.
+ pub fn is_unix(&self) -> bool {
+ self.bits() == Self::UNIX.bits()
+ }
+
+ /// Is xen foreign memory.
+ pub fn is_foreign(&self) -> bool {
+ self.contains(Self::FOREIGN)
+ }
+
+ /// Is xen grant memory.
+ pub fn is_grant(&self) -> bool {
+ self.contains(Self::GRANT)
+ }
+
+ /// Can mmap entire region in advance.
+ pub fn mmap_in_advance(&self) -> bool {
+ !self.contains(Self::NO_ADVANCE_MAP)
+ }
+}
+
+fn page_size() -> u64 {
+ // SAFETY: Safe because this call just returns the page size and doesn't have any side effects.
+ unsafe { libc::sysconf(_SC_PAGESIZE) as u64 }
+}
+
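+// Returns the number of pages needed to cover `size` bytes together with the page-aligned size.
+// For example, with a 4 KiB page size, `pages(0x1400)` yields `(2, 0x2000)`.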
+fn pages(size: usize) -> (usize, usize) {
+ let page_size = page_size() as usize;
+ let num = (size + page_size - 1) / page_size;
+
+ (num, page_size * num)
+}
+
+fn validate_file(file_offset: &Option<FileOffset>) -> Result<(i32, u64)> {
+ let file_offset = match file_offset {
+ Some(f) => f,
+ None => return Err(Error::InvalidFileOffset),
+ };
+
+ let fd = file_offset.file().as_raw_fd();
+ let f_offset = file_offset.start();
+
+ // We don't allow file offsets with Xen foreign mappings.
+ if f_offset != 0 {
+ return Err(Error::InvalidOffsetLength);
+ }
+
+ Ok((fd, f_offset))
+}
+
+// Xen Foreign memory mapping interface.
+trait MmapXenTrait: std::fmt::Debug {
+ fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice>;
+ fn addr(&self) -> *mut u8;
+}
+
+// Standard Unix memory mapping for testing other crates.
+#[derive(Clone, Debug, PartialEq)]
+struct MmapXenUnix(MmapUnix);
+
+impl MmapXenUnix {
+ fn new(range: &MmapRange) -> Result<Self> {
+ let (fd, offset) = if let Some(ref f_off) = range.file_offset {
+ check_file_offset(f_off, range.size)?;
+ (f_off.file().as_raw_fd(), f_off.start())
+ } else {
+ (-1, 0)
+ };
+
+ Ok(Self(MmapUnix::new(
+ range.size,
+ range.prot.ok_or(Error::UnexpectedError)?,
+ range.flags.ok_or(Error::UnexpectedError)?,
+ fd,
+ offset,
+ )?))
+ }
+}
+
+impl MmapXenTrait for MmapXenUnix {
+ #[allow(unused_variables)]
+ fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
+ Err(Error::MappedInAdvance)
+ }
+
+ fn addr(&self) -> *mut u8 {
+ self.0.addr()
+ }
+}
+
+// Privcmd mmap batch v2 command
+//
+// include/uapi/xen/privcmd.h: `privcmd_mmapbatch_v2`
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+struct PrivCmdMmapBatchV2 {
+ // number of pages to populate
+ num: u32,
+ // target domain
+ domid: u16,
+ // virtual address
+ addr: *mut c_void,
+ // array of mfns
+ arr: *const u64,
+ // array of error codes
+ err: *mut c_int,
+}
+
+const XEN_PRIVCMD_TYPE: u32 = 'P' as u32;
+
+// #define IOCTL_PRIVCMD_MMAPBATCH_V2 _IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t))
+fn ioctl_privcmd_mmapbatch_v2() -> c_ulong {
+ ioctl_expr(
+ _IOC_NONE,
+ XEN_PRIVCMD_TYPE,
+ 4,
+ size_of::<PrivCmdMmapBatchV2>() as u32,
+ )
+}
+
+// Xen foreign memory specific implementation.
+#[derive(Clone, Debug, PartialEq)]
+struct MmapXenForeign {
+ domid: u32,
+ guest_base: GuestAddress,
+ unix_mmap: MmapUnix,
+ fd: i32,
+}
+
+impl AsRawFd for MmapXenForeign {
+ fn as_raw_fd(&self) -> i32 {
+ self.fd
+ }
+}
+
+impl MmapXenForeign {
+ fn new(range: &MmapRange) -> Result<Self> {
+ let (fd, f_offset) = validate_file(&range.file_offset)?;
+ let (count, size) = pages(range.size);
+
+ let unix_mmap = MmapUnix::new(
+ size,
+ range.prot.ok_or(Error::UnexpectedError)?,
+ range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED,
+ fd,
+ f_offset,
+ )?;
+
+ let foreign = Self {
+ domid: range.mmap_data,
+ guest_base: range.addr,
+ unix_mmap,
+ fd,
+ };
+
+ foreign.mmap_ioctl(count)?;
+ Ok(foreign)
+ }
+
+ // Ioctl to pass additional information to mmap infrastructure of privcmd driver.
+ fn mmap_ioctl(&self, count: usize) -> Result<()> {
+ let base = self.guest_base.0 / page_size();
+
+ let mut pfn = Vec::with_capacity(count);
+ for i in 0..count {
+ pfn.push(base + i as u64);
+ }
+
+ let mut err: Vec<c_int> = vec![0; count];
+
+ let map = PrivCmdMmapBatchV2 {
+ num: count as u32,
+ domid: self.domid as u16,
+ addr: self.addr() as *mut c_void,
+ arr: pfn.as_ptr(),
+ err: err.as_mut_ptr(),
+ };
+
+ // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `map`.
+ let ret = unsafe { ioctl_with_ref(self, ioctl_privcmd_mmapbatch_v2(), &map) };
+
+ if ret == 0 {
+ Ok(())
+ } else {
+ Err(Error::Mmap(io::Error::last_os_error()))
+ }
+ }
+}
+
+impl MmapXenTrait for MmapXenForeign {
+ #[allow(unused_variables)]
+ fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
+ Err(Error::MappedInAdvance)
+ }
+
+ fn addr(&self) -> *mut u8 {
+ self.unix_mmap.addr()
+ }
+}
+
+// Xen Grant memory mapping interface.
+
+const XEN_GRANT_ADDR_OFF: u64 = 1 << 63;
+
+// Grant reference
+//
+// include/uapi/xen/gntdev.h: `ioctl_gntdev_grant_ref`
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, PartialEq)]
+struct GntDevGrantRef {
+ // The domain ID of the grant to be mapped.
+ domid: u32,
+ // The grant reference of the grant to be mapped.
+ reference: u32,
+}
+
+#[repr(C)]
+#[derive(Debug, Default, PartialEq, Eq)]
+struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
+impl<T> __IncompleteArrayField<T> {
+ #[inline]
+ unsafe fn as_ptr(&self) -> *const T {
+ self as *const __IncompleteArrayField<T> as *const T
+ }
+ #[inline]
+ unsafe fn as_mut_ptr(&mut self) -> *mut T {
+ self as *mut __IncompleteArrayField<T> as *mut T
+ }
+ #[inline]
+ unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::std::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+
+// Grant dev mapping reference
+//
+// include/uapi/xen/gntdev.h: `ioctl_gntdev_map_grant_ref`
+#[repr(C)]
+#[derive(Debug, Default)]
+struct GntDevMapGrantRef {
+ // The number of grants to be mapped.
+ count: u32,
+ // Unused padding
+ pad: u32,
+ // The offset to be used on a subsequent call to mmap().
+ index: u64,
+ // Array of grant references, of size @count.
+ refs: __IncompleteArrayField<GntDevGrantRef>,
+}
+
+generate_fam_struct_impl!(
+ GntDevMapGrantRef,
+ GntDevGrantRef,
+ refs,
+ u32,
+ count,
+ usize::MAX
+);
+
+type GntDevMapGrantRefWrapper = FamStructWrapper<GntDevMapGrantRef>;
+
+impl GntDevMapGrantRef {
+ fn new(domid: u32, base: u32, count: usize) -> Result<GntDevMapGrantRefWrapper> {
+ let mut wrapper = GntDevMapGrantRefWrapper::new(count).map_err(Error::Fam)?;
+ let refs = wrapper.as_mut_slice();
+
+ // GntDevMapGrantRef's pad and index are initialized to 0 by Fam layer.
+ for (i, r) in refs.iter_mut().enumerate().take(count) {
+ r.domid = domid;
+ r.reference = base + i as u32;
+ }
+
+ Ok(wrapper)
+ }
+}
+
+// Grant dev un-mapping reference
+//
+// include/uapi/xen/gntdev.h: `ioctl_gntdev_unmap_grant_ref`
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+struct GntDevUnmapGrantRef {
+ // The offset returned by the map operation.
+ index: u64,
+ // The number of grants to be unmapped.
+ count: u32,
+ // Unused padding
+ pad: u32,
+}
+
+impl GntDevUnmapGrantRef {
+ fn new(index: u64, count: u32) -> Self {
+ Self {
+ index,
+ count,
+ pad: 0,
+ }
+ }
+}
+
+const XEN_GNTDEV_TYPE: u32 = 'G' as u32;
+
+// #define IOCTL_GNTDEV_MAP_GRANT_REF _IOC(_IOC_NONE, 'G', 0, sizeof(ioctl_gntdev_map_grant_ref))
+fn ioctl_gntdev_map_grant_ref() -> c_ulong {
+ ioctl_expr(
+ _IOC_NONE,
+ XEN_GNTDEV_TYPE,
+ 0,
+ (size_of::<GntDevMapGrantRef>() + size_of::<GntDevGrantRef>()) as u32,
+ )
+}
+
+// #define IOCTL_GNTDEV_UNMAP_GRANT_REF _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
+fn ioctl_gntdev_unmap_grant_ref() -> c_ulong {
+ ioctl_expr(
+ _IOC_NONE,
+ XEN_GNTDEV_TYPE,
+ 1,
+ size_of::<GntDevUnmapGrantRef>() as u32,
+ )
+}
+
+// Xen grant memory specific implementation.
+#[derive(Clone, Debug)]
+struct MmapXenGrant {
+ guest_base: GuestAddress,
+ unix_mmap: Option<MmapUnix>,
+ file_offset: FileOffset,
+ flags: i32,
+ size: usize,
+ index: u64,
+ domid: u32,
+}
+
+impl AsRawFd for MmapXenGrant {
+ fn as_raw_fd(&self) -> i32 {
+ self.file_offset.file().as_raw_fd()
+ }
+}
+
+impl MmapXenGrant {
+ fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result<Self> {
+ validate_file(&range.file_offset)?;
+
+ let mut grant = Self {
+ guest_base: range.addr,
+ unix_mmap: None,
+ file_offset: range.file_offset.as_ref().unwrap().clone(),
+ flags: range.flags.ok_or(Error::UnexpectedError)?,
+ size: 0,
+ index: 0,
+ domid: range.mmap_data,
+ };
+
+        // If the region can be mapped in advance, do it now. Otherwise, partial mappings
+        // will be created later via `MmapXenSlice`.
+ if mmap_flags.mmap_in_advance() {
+ let (unix_mmap, index) = grant.mmap_range(
+ range.addr,
+ range.size,
+ range.prot.ok_or(Error::UnexpectedError)?,
+ )?;
+
+ grant.unix_mmap = Some(unix_mmap);
+ grant.index = index;
+ grant.size = range.size;
+ }
+
+ Ok(grant)
+ }
+
+ fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> {
+ let (count, size) = pages(size);
+ let index = self.mmap_ioctl(addr, count)?;
+ let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?;
+
+ Ok((unix_mmap, index))
+ }
+
+ fn unmap_range(&self, unix_mmap: MmapUnix, size: usize, index: u64) {
+ let (count, _) = pages(size);
+
+ // Unmap the address first.
+ drop(unix_mmap);
+ self.unmap_ioctl(count as u32, index).unwrap();
+ }
+
+ fn mmap_ioctl(&self, addr: GuestAddress, count: usize) -> Result<u64> {
+ let base = ((addr.0 & !XEN_GRANT_ADDR_OFF) / page_size()) as u32;
+ let wrapper = GntDevMapGrantRef::new(self.domid, base, count)?;
+ let reference = wrapper.as_fam_struct_ref();
+
+ // SAFETY: This is safe because the ioctl guarantees to not access memory beyond reference.
+ let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_map_grant_ref(), reference) };
+
+ if ret == 0 {
+ Ok(reference.index)
+ } else {
+ Err(Error::Mmap(io::Error::last_os_error()))
+ }
+ }
+
+ fn unmap_ioctl(&self, count: u32, index: u64) -> Result<()> {
+ let unmap = GntDevUnmapGrantRef::new(index, count);
+
+ // SAFETY: This is safe because the ioctl guarantees to not access memory beyond unmap.
+ let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_unmap_grant_ref(), &unmap) };
+
+ if ret == 0 {
+ Ok(())
+ } else {
+ Err(Error::Mmap(io::Error::last_os_error()))
+ }
+ }
+}
+
+impl MmapXenTrait for MmapXenGrant {
+ // Maps a slice out of the entire region.
+ fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
+ MmapXenSlice::new_with(self.clone(), addr as usize, prot, len)
+ }
+
+ fn addr(&self) -> *mut u8 {
+ if let Some(ref unix_mmap) = self.unix_mmap {
+ unix_mmap.addr()
+ } else {
+ null_mut()
+ }
+ }
+}
+
+impl Drop for MmapXenGrant {
+ fn drop(&mut self) {
+ if let Some(unix_mmap) = self.unix_mmap.take() {
+ self.unmap_range(unix_mmap, self.size, self.index);
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct MmapXenSlice {
+ grant: Option<MmapXenGrant>,
+ unix_mmap: Option<MmapUnix>,
+ addr: *mut u8,
+ size: usize,
+ index: u64,
+}
+
+impl MmapXenSlice {
+ fn raw(addr: *mut u8) -> Self {
+ Self {
+ grant: None,
+ unix_mmap: None,
+ addr,
+ size: 0,
+ index: 0,
+ }
+ }
+
+ fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result<Self> {
+ let page_size = page_size() as usize;
+ let page_base: usize = (offset / page_size) * page_size;
+ let offset = offset - page_base;
+ let size = offset + size;
+
+ let addr = grant.guest_base.0 + page_base as u64;
+ let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?;
+
+ // SAFETY: We have already mapped the range including offset.
+ let addr = unsafe { unix_mmap.addr().add(offset) };
+
+ Ok(Self {
+ grant: Some(grant),
+ unix_mmap: Some(unix_mmap),
+ addr,
+ size,
+ index,
+ })
+ }
+
+ // Mapped address for the region.
+ pub(crate) fn addr(&self) -> *mut u8 {
+ self.addr
+ }
+}
+
+impl Drop for MmapXenSlice {
+ fn drop(&mut self) {
+ // Unmaps memory automatically once this instance goes out of scope.
+ if let Some(unix_mmap) = self.unix_mmap.take() {
+ self.grant
+ .as_ref()
+ .unwrap()
+ .unmap_range(unix_mmap, self.size, self.index);
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct MmapXen {
+ xen_flags: MmapXenFlags,
+ domid: u32,
+ mmap: Box<dyn MmapXenTrait>,
+}
+
+impl MmapXen {
+ fn new(range: &MmapRange) -> Result<Self> {
+ let xen_flags = match MmapXenFlags::from_bits(range.mmap_flags) {
+ Some(flags) => flags,
+ None => return Err(Error::MmapFlags(range.mmap_flags)),
+ };
+
+ if !xen_flags.is_valid() {
+ return Err(Error::MmapFlags(xen_flags.bits()));
+ }
+
+ Ok(Self {
+ xen_flags,
+ domid: range.mmap_data,
+ mmap: if xen_flags.is_foreign() {
+ Box::new(MmapXenForeign::new(range)?)
+ } else if xen_flags.is_grant() {
+ Box::new(MmapXenGrant::new(range, xen_flags)?)
+ } else {
+ Box::new(MmapXenUnix::new(range)?)
+ },
+ })
+ }
+
+ fn addr(&self) -> *mut u8 {
+ self.mmap.addr()
+ }
+
+ fn flags(&self) -> u32 {
+ self.xen_flags.bits()
+ }
+
+ fn data(&self) -> u32 {
+ self.domid
+ }
+
+ fn mmap_in_advance(&self) -> bool {
+ self.xen_flags.mmap_in_advance()
+ }
+
+ pub(crate) fn mmap(
+ mmap_xen: Option<&Self>,
+ addr: *mut u8,
+ prot: i32,
+ len: usize,
+ ) -> MmapXenSlice {
+ match mmap_xen {
+ Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(),
+ None => MmapXenSlice::raw(addr),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+
+ use super::*;
+ use vmm_sys_util::tempfile::TempFile;
+
+ // Adding a helper method to extract the errno within an Error::Mmap(e), or return a
+ // distinctive value when the error is represented by another variant.
+ impl Error {
+ fn raw_os_error(&self) -> i32 {
+ match self {
+ Error::Mmap(e) => e.raw_os_error().unwrap(),
+ _ => std::i32::MIN,
+ }
+ }
+ }
+
+ #[allow(unused_variables)]
+ pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int {
+ 0
+ }
+
+ impl MmapRange {
+ fn initialized(is_file: bool) -> Self {
+ let file_offset = if is_file {
+ Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0))
+ } else {
+ None
+ };
+
+ let mut range = MmapRange::new_unix(0x1000, file_offset, GuestAddress(0x1000));
+ range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
+ range.mmap_data = 1;
+
+ range
+ }
+ }
+
+ impl MmapRegion {
+ pub fn new(size: usize) -> Result<Self> {
+ let range = MmapRange::new_unix(size, None, GuestAddress(0));
+ Self::from_range(range)
+ }
+ }
+
+ #[test]
+ fn test_mmap_xen_failures() {
+ let mut range = MmapRange::initialized(true);
+ // Invalid flags
+ range.mmap_flags = 16;
+
+ let r = MmapXen::new(&range);
+ assert_eq!(
+ format!("{:?}", r.unwrap_err()),
+ format!("MmapFlags({})", range.mmap_flags),
+ );
+
+ range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits();
+ let r = MmapXen::new(&range);
+ assert_eq!(
+ format!("{:?}", r.unwrap_err()),
+ format!("MmapFlags({:x})", MmapXenFlags::ALL.bits()),
+ );
+
+ range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
+ let r = MmapXen::new(&range);
+ assert_eq!(
+ format!("{:?}", r.unwrap_err()),
+ format!(
+ "MmapFlags({:x})",
+ MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits(),
+ ),
+ );
+ }
+
+ #[test]
+ fn test_mmap_xen_success() {
+ let mut range = MmapRange::initialized(true);
+ range.mmap_flags = MmapXenFlags::FOREIGN.bits();
+
+ let r = MmapXen::new(&range).unwrap();
+ assert_eq!(r.flags(), range.mmap_flags);
+ assert_eq!(r.data(), range.mmap_data);
+ assert_ne!(r.addr(), null_mut());
+ assert!(r.mmap_in_advance());
+
+ range.mmap_flags = MmapXenFlags::GRANT.bits();
+ let r = MmapXen::new(&range).unwrap();
+ assert_eq!(r.flags(), range.mmap_flags);
+ assert_eq!(r.data(), range.mmap_data);
+ assert_ne!(r.addr(), null_mut());
+ assert!(r.mmap_in_advance());
+
+ range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
+ let r = MmapXen::new(&range).unwrap();
+ assert_eq!(r.flags(), range.mmap_flags);
+ assert_eq!(r.data(), range.mmap_data);
+ assert_eq!(r.addr(), null_mut());
+ assert!(!r.mmap_in_advance());
+ }
+
+ #[test]
+ fn test_foreign_map_failure() {
+ let mut range = MmapRange::initialized(true);
+ range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0));
+ range.prot = None;
+ let r = MmapXenForeign::new(&range);
+ assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+
+ let mut range = MmapRange::initialized(true);
+ range.flags = None;
+ let r = MmapXenForeign::new(&range);
+ assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+
+ let mut range = MmapRange::initialized(true);
+ range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
+ let r = MmapXenForeign::new(&range);
+ assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");
+
+ let mut range = MmapRange::initialized(true);
+ range.size = 0;
+ let r = MmapXenForeign::new(&range);
+ assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
+ }
+
+ #[test]
+ fn test_foreign_map_success() {
+ let range = MmapRange::initialized(true);
+ let r = MmapXenForeign::new(&range).unwrap();
+ assert_ne!(r.addr(), null_mut());
+ assert_eq!(r.domid, range.mmap_data);
+ assert_eq!(r.guest_base, range.addr);
+ }
+
+ #[test]
+ fn test_grant_map_failure() {
+ let mut range = MmapRange::initialized(true);
+ range.prot = None;
+ let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
+ assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+
+ let mut range = MmapRange::initialized(true);
+ range.prot = None;
+ // Protection isn't used for no-advance mappings
+ MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();
+
+ let mut range = MmapRange::initialized(true);
+ range.flags = None;
+ let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
+ assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+
+ let mut range = MmapRange::initialized(true);
+ range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
+ let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
+ assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");
+
+ let mut range = MmapRange::initialized(true);
+ range.size = 0;
+ let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
+ assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
+ }
+
+ #[test]
+ fn test_grant_map_success() {
+ let range = MmapRange::initialized(true);
+ let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();
+ assert_eq!(r.addr(), null_mut());
+ assert_eq!(r.domid, range.mmap_data);
+ assert_eq!(r.guest_base, range.addr);
+
+ let mut range = MmapRange::initialized(true);
+ // Size isn't used with no-advance mapping.
+ range.size = 0;
+ MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();
+
+ let range = MmapRange::initialized(true);
+ let r = MmapXenGrant::new(&range, MmapXenFlags::empty()).unwrap();
+ assert_ne!(r.addr(), null_mut());
+ assert_eq!(r.domid, range.mmap_data);
+ assert_eq!(r.guest_base, range.addr);
+ }
+
+ #[test]
+ fn test_grant_ref_alloc() {
+ let wrapper = GntDevMapGrantRef::new(0, 0x1000, 0x100).unwrap();
+ let r = wrapper.as_fam_struct_ref();
+ assert_eq!(r.count, 0x100);
+ assert_eq!(r.pad, 0);
+ assert_eq!(r.index, 0);
+ }
+}
diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs
new file mode 100644
index 0000000..76e41bb
--- /dev/null
+++ b/src/volatile_memory.rs
@@ -0,0 +1,2492 @@
+// Portions Copyright 2019 Red Hat, Inc.
+//
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the THIRD-PARTY file.
+//
+// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
+
+//! Types for volatile access to memory.
+//!
+//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
+//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
+//! `VolatileMemory`, allow us to sidestep these rules by wrapping pointers that absolutely have to
+//! be accessed with volatile operations. Some systems really do need to operate on shared memory
+//! and can't have the compiler reordering or eliding accesses, because it has no visibility into
+//! what other systems are doing with that hunk of memory.
+//!
+//! For the purposes of maintaining safety, volatile memory has some rules of its own:
+//! 1. No references or slices to volatile memory (`&` or `&mut`).
+//! 2. Access should always be done with a volatile read or write.
+//!
+//! The first rule is because having references of any kind to memory considered volatile would
+//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
+//! if done concurrently without synchronization. With volatile access we know that the compiler
+//! has not reordered or elided the access.
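+//!
+//! As a minimal illustrative sketch (assuming an ordinary byte buffer as the backing memory),
+//! accesses go through the volatile wrappers rather than through references into the buffer:
+//!
+//! ```
+//! # use vm_memory::{VolatileMemory, VolatileSlice};
+//! let mut mem = [0u8; 16];
+//! let vslice = VolatileSlice::from(&mut mem[..]);
+//! // Volatile store/load instead of taking a `&mut u32` into the buffer.
+//! let v_ref = vslice.get_ref::<u32>(0).expect("offset 0 is in bounds");
+//! v_ref.store(0xdead_beef);
+//! assert_eq!(v_ref.load(), 0xdead_beef);
+//! ```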
+
+use std::cmp::min;
+use std::io::{self, Read, Write};
+use std::marker::PhantomData;
+use std::mem::{align_of, size_of};
+use std::ptr::copy;
+use std::ptr::{read_volatile, write_volatile};
+use std::result;
+use std::sync::atomic::Ordering;
+use std::usize;
+
+use crate::atomic_integer::AtomicInteger;
+use crate::bitmap::{Bitmap, BitmapSlice, BS};
+use crate::{AtomicAccess, ByteValued, Bytes};
+
+#[cfg(all(feature = "backend-mmap", feature = "xen", unix))]
+use crate::mmap_xen::{MmapXen as MmapInfo, MmapXenSlice};
+
+#[cfg(not(feature = "xen"))]
+type MmapInfo = std::marker::PhantomData<()>;
+
+use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
+
+/// `VolatileMemory` related errors.
+#[allow(missing_docs)]
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ /// `addr` is out of bounds of the volatile memory slice.
+ #[error("address 0x{addr:x} is out of bounds")]
+ OutOfBounds { addr: usize },
+ /// Taking a slice at `base` with `offset` would overflow `usize`.
+ #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
+ Overflow { base: usize, offset: usize },
+ /// Taking a slice whose size overflows `usize`.
+ #[error("{nelements:?} elements of size {size:?} would overflow a usize")]
+ TooBig { nelements: usize, size: usize },
+ /// Trying to obtain a misaligned reference.
+ #[error("address 0x{addr:x} is not aligned to {alignment:?}")]
+ Misaligned { addr: usize, alignment: usize },
+ /// Writing to memory failed
+ #[error("{0}")]
+ IOError(io::Error),
+ /// Incomplete read or write
+ #[error("only used {completed} bytes in {expected} long buffer")]
+ PartialBuffer { expected: usize, completed: usize },
+}
+
+/// Result of volatile memory operations.
+pub type Result<T> = result::Result<T, Error>;
+
+/// Convenience function for computing `base + offset`.
+///
+/// # Errors
+///
+/// Returns [`Err(Error::Overflow)`](enum.Error.html#variant.Overflow) in case `base + offset`
+/// exceeds `usize::MAX`.
+///
+/// # Examples
+///
+/// ```
+/// # use vm_memory::volatile_memory::compute_offset;
+/// #
+/// assert_eq!(108, compute_offset(100, 8).unwrap());
+/// assert!(compute_offset(std::usize::MAX, 6).is_err());
+/// ```
+pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
+ match base.checked_add(offset) {
+ None => Err(Error::Overflow { base, offset }),
+ Some(m) => Ok(m),
+ }
+}
+
+/// Types that support raw volatile access to their data.
+pub trait VolatileMemory {
+ /// Type used for dirty memory tracking.
+ type B: Bitmap;
+
+ /// Gets the size of this slice.
+ fn len(&self) -> usize;
+
+ /// Check whether the region is empty.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
+ /// `offset`.
+ ///
+ /// Note that the property `get_slice(offset, count).len() == count` MUST NOT be
+ /// relied on for the correctness of unsafe code. This is a safe function inside of a
+ /// safe trait, and implementors are under no obligation to follow its documentation.
+ fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<BS<Self::B>>>;
+
+ /// Gets a slice of memory for the entire region that supports volatile access.
+ fn as_volatile_slice(&self) -> VolatileSlice<BS<Self::B>> {
+ self.get_slice(0, self.len()).unwrap()
+ }
+
+ /// Gets a `VolatileRef` at `offset`.
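+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a plain byte buffer as the backing memory:
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// let mut mem = [0u8; 32];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let v_ref = vslice.get_ref::<u16>(4).expect("offset 4 is in bounds");
+ /// v_ref.store(0x55aa);
+ /// assert_eq!(v_ref.load(), 0x55aa);
+ /// ```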
+ fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<T, BS<Self::B>>> {
+ let slice = self.get_slice(offset, size_of::<T>())?;
+
+ assert_eq!(
+ slice.len(),
+ size_of::<T>(),
+ "VolatileMemory::get_slice(offset, count) returned slice of length != count."
+ );
+
+ // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
+ // slice.addr is valid memory of size slice.len(). The assert above ensures that
+ // the length of the slice is exactly enough to hold one `T`. Lastly, the lifetime of the
+ // returned VolatileRef matches that of the VolatileSlice returned by get_slice and thus the
+ // lifetime of `self`.
+ unsafe {
+ Ok(VolatileRef::with_bitmap(
+ slice.addr,
+ slice.bitmap,
+ slice.mmap,
+ ))
+ }
+ }
+
+ /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at
+ /// `offset`.
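+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a plain byte buffer as the backing memory:
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// let mut mem = [0u8; 32];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let arr = vslice.get_array_ref::<u32>(0, 4).expect("4 u32 elements fit at offset 0");
+ /// arr.store(2, 0x1234_5678);
+ /// assert_eq!(arr.load(2), 0x1234_5678);
+ /// ```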
+ fn get_array_ref<T: ByteValued>(
+ &self,
+ offset: usize,
+ n: usize,
+ ) -> Result<VolatileArrayRef<T, BS<Self::B>>> {
+ // Use isize to avoid problems with ptr::offset and ptr::add down the line.
+ let nbytes = isize::try_from(n)
+ .ok()
+ .and_then(|n| n.checked_mul(size_of::<T>() as isize))
+ .ok_or(Error::TooBig {
+ nelements: n,
+ size: size_of::<T>(),
+ })?;
+ let slice = self.get_slice(offset, nbytes as usize)?;
+
+ assert_eq!(
+ slice.len(),
+ nbytes as usize,
+ "VolatileMemory::get_slice(offset, count) returned slice of length != count."
+ );
+
+ // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
+ // slice.addr is valid memory of size slice.len(). The assert above ensures that
+ // the length of the slice is exactly enough to hold `n` instances of `T`. Lastly, the lifetime of the
+ // returned VolatileArrayRef matches that of the VolatileSlice returned by get_slice and thus
+ // the lifetime of `self`.
+ unsafe {
+ Ok(VolatileArrayRef::with_bitmap(
+ slice.addr,
+ n,
+ slice.bitmap,
+ slice.mmap,
+ ))
+ }
+ }
+
+ /// Returns a reference to an instance of `T` at `offset`.
+ ///
+ /// # Safety
+ /// To use this safely, the caller must guarantee that there are no other
+ /// users of the given chunk of memory for the lifetime of the result.
+ ///
+ /// # Errors
+ ///
+ /// If the resulting pointer is not aligned, this method will return an
+ /// [`Error`](enum.Error.html).
+ unsafe fn aligned_as_ref<T: ByteValued>(&self, offset: usize) -> Result<&T> {
+ let slice = self.get_slice(offset, size_of::<T>())?;
+ slice.check_alignment(align_of::<T>())?;
+
+ assert_eq!(
+ slice.len(),
+ size_of::<T>(),
+ "VolatileMemory::get_slice(offset, count) returned slice of length != count."
+ );
+
+ // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
+ // slice.addr is valid memory of size slice.len(). The assert above ensures that
+ // the length of the slice is exactly enough to hold one `T`.
+ // Dereferencing the pointer is safe because we check the alignment above, and the invariants
+ // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the
+ // returned reference matches that of the VolatileSlice returned by get_slice and thus the
+ // lifetime of `self`.
+ unsafe { Ok(&*(slice.addr as *const T)) }
+ }
+
+ /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed
+ /// using the resulting reference are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that there are no other
+ /// users of the given chunk of memory for the lifetime of the result.
+ ///
+ /// # Errors
+ ///
+ /// If the resulting pointer is not aligned, this method will return an
+ /// [`Error`](enum.Error.html).
+ unsafe fn aligned_as_mut<T: ByteValued>(&self, offset: usize) -> Result<&mut T> {
+ let slice = self.get_slice(offset, size_of::<T>())?;
+ slice.check_alignment(align_of::<T>())?;
+
+ assert_eq!(
+ slice.len(),
+ size_of::<T>(),
+ "VolatileMemory::get_slice(offset, count) returned slice of length != count."
+ );
+
+ // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
+ // slice.addr is valid memory of size slice.len(). The assert above ensures that
+ // the length of the slice is exactly enough to hold one `T`.
+ // Dereferencing the pointer is safe because we check the alignment above, and the invariants
+ // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the
+ // returned reference matches that of the VolatileSlice returned by get_slice and thus the
+ // lifetime of `self`.
+
+ unsafe { Ok(&mut *(slice.addr as *mut T)) }
+ }
+
+ /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed
+ /// using the resulting reference are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ ///
+ /// # Errors
+ ///
+ /// If the resulting pointer is not aligned, this method will return an
+ /// [`Error`](enum.Error.html).
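+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; this assumes `AtomicU8` implements the crate's `AtomicInteger` trait,
+ /// like the other standard atomic integer types:
+ ///
+ /// ```
+ /// # use std::sync::atomic::{AtomicU8, Ordering};
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// let mut mem = [0u8; 4];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let atomic = vslice.get_atomic_ref::<AtomicU8>(1).expect("in bounds and trivially aligned");
+ /// atomic.store(7, Ordering::Relaxed);
+ /// assert_eq!(atomic.load(Ordering::Relaxed), 7);
+ /// ```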
+ fn get_atomic_ref<T: AtomicInteger>(&self, offset: usize) -> Result<&T> {
+ let slice = self.get_slice(offset, size_of::<T>())?;
+ slice.check_alignment(align_of::<T>())?;
+
+ assert_eq!(
+ slice.len(),
+ size_of::<T>(),
+ "VolatileMemory::get_slice(offset, count) returned slice of length != count."
+ );
+
+ // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
+ // slice.addr is valid memory of size slice.len(). The assert above ensures that
+ // the length of the slice is exactly enough to hold one `T`.
+ // Dereferencing the pointer is safe because we check the alignment above. Lastly, the lifetime of the
+ // returned reference matches that of the VolatileSlice returned by get_slice and thus the
+ // lifetime of `self`.
+ unsafe { Ok(&*(slice.addr as *const T)) }
+ }
+
+ /// Returns the sum of `base` and `offset` if the resulting address is valid.
+ fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
+ let mem_end = compute_offset(base, offset)?;
+ if mem_end > self.len() {
+ return Err(Error::OutOfBounds { addr: mem_end });
+ }
+ Ok(mem_end)
+ }
+}
+
+impl<'a> From<&'a mut [u8]> for VolatileSlice<'a, ()> {
+ fn from(value: &'a mut [u8]) -> Self {
+ // SAFETY: Since we construct the VolatileSlice from a rust slice, we know that
+ // the memory at addr `value as *mut u8` is valid for reads and writes (because mutable
+ // reference) of len `value.len()`. Since the `VolatileSlice` inherits the lifetime `'a`,
+ // it is not possible to access/mutate `value` while the VolatileSlice is alive.
+ //
+ // Note that it is possible for multiple aliasing subslices of this `VolatileSlice` to
+ // be created through `VolatileSlice::subslice`. This is OK, as pointers are allowed to
+ // alias, and it is impossible to get rust-style references from a `VolatileSlice`.
+ unsafe { VolatileSlice::new(value.as_mut_ptr(), value.len()) }
+ }
+}
+
+#[repr(C, packed)]
+struct Packed<T>(T);
+
+/// A guard to perform mapping and protect unmapping of the memory.
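+///
+/// # Examples
+///
+/// A minimal sketch, assuming a plain byte buffer as the backing memory:
+///
+/// ```
+/// # use vm_memory::{VolatileMemory, VolatileSlice};
+/// let mut mem = [0u8; 32];
+/// let vslice = VolatileSlice::from(&mut mem[..]);
+/// let guard = vslice.ptr_guard();
+/// assert_eq!(guard.len(), vslice.len());
+/// // `guard.as_ptr()` stays valid (and, on Xen, mapped) for as long as the guard is alive.
+/// ```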
+pub struct PtrGuard {
+ addr: *mut u8,
+ len: usize,
+
+ // This isn't used anymore, but it protects the slice from getting unmapped while in use.
+ // Once this goes out of scope, the memory is unmapped automatically.
+ #[cfg(all(feature = "xen", unix))]
+ _slice: MmapXenSlice,
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl PtrGuard {
+ #[allow(unused_variables)]
+ fn new(mmap: Option<&MmapInfo>, addr: *mut u8, prot: i32, len: usize) -> Self {
+ #[cfg(all(feature = "xen", unix))]
+ let (addr, _slice) = {
+ let slice = MmapInfo::mmap(mmap, addr, prot, len);
+ (slice.addr(), slice)
+ };
+
+ Self {
+ addr,
+ len,
+
+ #[cfg(all(feature = "xen", unix))]
+ _slice,
+ }
+ }
+
+ fn read(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self {
+ Self::new(mmap, addr, libc::PROT_READ, len)
+ }
+
+ /// Returns a non-mutable pointer to the beginning of the slice.
+ pub fn as_ptr(&self) -> *const u8 {
+ self.addr
+ }
+
+ /// Gets the length of the mapped region.
+ pub fn len(&self) -> usize {
+ self.len
+ }
+}
+
+/// A mutable guard to perform mapping and protect unmapping of the memory.
+pub struct PtrGuardMut(PtrGuard);
+
+#[allow(clippy::len_without_is_empty)]
+impl PtrGuardMut {
+ fn write(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self {
+ Self(PtrGuard::new(mmap, addr, libc::PROT_WRITE, len))
+ }
+
+ /// Returns a mutable pointer to the beginning of the slice. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.0.addr
+ }
+
+ /// Gets the length of the mapped region.
+ pub fn len(&self) -> usize {
+ self.0.len
+ }
+}
+
+/// A slice of raw memory that supports volatile access.
+#[derive(Clone, Copy, Debug)]
+pub struct VolatileSlice<'a, B = ()> {
+ addr: *mut u8,
+ size: usize,
+ bitmap: B,
+ mmap: Option<&'a MmapInfo>,
+}
+
+impl<'a> VolatileSlice<'a, ()> {
+ /// Creates a slice of raw memory that must support volatile access.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
+ /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
+ /// must also guarantee that all other users of the given chunk of memory are using volatile
+ /// accesses.
+ pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
+ Self::with_bitmap(addr, size, (), None)
+ }
+}
+
+impl<'a, B: BitmapSlice> VolatileSlice<'a, B> {
+ /// Creates a slice of raw memory that must support volatile access, and uses the provided
+ /// `bitmap` object for dirty page tracking.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
+ /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
+ /// must also guarantee that all other users of the given chunk of memory are using volatile
+ /// accesses.
+ pub unsafe fn with_bitmap(
+ addr: *mut u8,
+ size: usize,
+ bitmap: B,
+ mmap: Option<&'a MmapInfo>,
+ ) -> VolatileSlice<'a, B> {
+ VolatileSlice {
+ addr,
+ size,
+ bitmap,
+ mmap,
+ }
+ }
+
+ /// Returns a pointer to the beginning of the slice. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ #[deprecated(
+ since = "0.12.1",
+ note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead"
+ )]
+ #[cfg(not(all(feature = "xen", unix)))]
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.addr
+ }
+
+ /// Returns a guard for the pointer to the underlying memory.
+ pub fn ptr_guard(&self) -> PtrGuard {
+ PtrGuard::read(self.mmap, self.addr, self.len())
+ }
+
+ /// Returns a mutable guard for the pointer to the underlying memory.
+ pub fn ptr_guard_mut(&self) -> PtrGuardMut {
+ PtrGuardMut::write(self.mmap, self.addr, self.len())
+ }
+
+ /// Gets the size of this slice.
+ pub fn len(&self) -> usize {
+ self.size
+ }
+
+ /// Checks if the slice is empty.
+ pub fn is_empty(&self) -> bool {
+ self.size == 0
+ }
+
+ /// Borrows the inner `BitmapSlice`.
+ pub fn bitmap(&self) -> &B {
+ &self.bitmap
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// #
+ /// # // Create a buffer
+ /// # let mut mem = [0u8; 32];
+ /// #
+ /// # // Get a `VolatileSlice` from the buffer
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ ///
+ /// let (start, end) = vslice.split_at(8).expect("Could not split VolatileSlice");
+ /// assert_eq!(8, start.len());
+ /// assert_eq!(24, end.len());
+ /// ```
+ pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> {
+ let end = self.offset(mid)?;
+ let start =
+ // SAFETY: safe because self.offset() already checked the bounds
+ unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone(), self.mmap) };
+
+ Ok((start, end))
+ }
+
+ /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
+ /// `offset` with `count` length.
+ ///
+ /// The returned subslice is a copy of this slice with the address increased by `offset` bytes
+ /// and the size set to `count` bytes.
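+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a plain byte buffer as the backing memory:
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// let mut mem = [0u8; 32];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let sub = vslice.subslice(8, 4).expect("8 + 4 is within bounds");
+ /// assert_eq!(sub.len(), 4);
+ /// assert!(vslice.subslice(30, 4).is_err());
+ /// ```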
+ pub fn subslice(&self, offset: usize, count: usize) -> Result<Self> {
+ let _ = self.compute_end_offset(offset, count)?;
+
+ // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
+ // the lifetime is the same as the original slice.
+ unsafe {
+ Ok(VolatileSlice::with_bitmap(
+ self.addr.add(offset),
+ count,
+ self.bitmap.slice_at(offset),
+ self.mmap,
+ ))
+ }
+ }
+
+ /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
+ /// `count` bytes from its beginning.
+ ///
+ /// The returned subslice is a copy of this slice with the address increased by `count` bytes
+ /// and the size reduced by `count` bytes.
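+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a plain byte buffer as the backing memory:
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// let mut mem = [0u8; 32];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let tail = vslice.offset(24).expect("24 is within bounds");
+ /// assert_eq!(tail.len(), 8);
+ /// assert!(vslice.offset(33).is_err());
+ /// ```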
+ pub fn offset(&self, count: usize) -> Result<VolatileSlice<'a, B>> {
+ let new_addr = (self.addr as usize)
+ .checked_add(count)
+ .ok_or(Error::Overflow {
+ base: self.addr as usize,
+ offset: count,
+ })?;
+ let new_size = self
+ .size
+ .checked_sub(count)
+ .ok_or(Error::OutOfBounds { addr: new_addr })?;
+ // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
+ // memory of the original slice.
+ unsafe {
+ Ok(VolatileSlice::with_bitmap(
+ self.addr.add(count),
+ new_size,
+ self.bitmap.slice_at(count),
+ self.mmap,
+ ))
+ }
+ }
+
+ /// Copies as many elements of type `T` as possible from this slice to `buf`.
+ ///
+ /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
+ /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
+ /// using volatile reads.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// #
+ /// let mut mem = [0u8; 32];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let mut buf = [5u8; 16];
+ /// let res = vslice.copy_to(&mut buf[..]);
+ ///
+ /// assert_eq!(16, res);
+ /// for &v in &buf[..] {
+ /// assert_eq!(v, 0);
+ /// }
+ /// ```
+ pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
+ where
+ T: ByteValued,
+ {
+ // A fast path for u8/i8
+ if size_of::<T>() == 1 {
+ let total = buf.len().min(self.len());
+
+ // SAFETY:
+ // - dst is valid for writes of at least `total`, since total <= buf.len()
+ // - src is valid for reads of at least `total` as total <= self.len()
+ // - The regions are non-overlapping as `src` points to guest memory and `buf` is
+ // a slice and thus has to live outside of guest memory (there can be more slices to
+ // guest memory without violating rust's aliasing rules)
+ // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
+ unsafe { copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, self, total) }
+ } else {
+ let count = self.size / size_of::<T>();
+ let source = self.get_array_ref::<T>(0, count).unwrap();
+ source.copy_to(buf)
+ }
+ }
+
+ /// Copies as many bytes as possible from this slice to the provided `slice`.
+ ///
+ /// The copies happen in an undefined order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// #
+ /// # // Create a buffer
+ /// # let mut mem = [0u8; 32];
+ /// #
+ /// # // Get a `VolatileSlice` from the buffer
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// #
+ /// vslice.copy_to_volatile_slice(
+ /// vslice
+ /// .get_slice(16, 16)
+ /// .expect("Could not get VolatileSlice"),
+ /// );
+ /// ```
+ pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
+ // SAFETY: Safe because the pointers are range-checked when the slices
+ // are created, and they never escape the VolatileSlices.
+ // FIXME: ... however, is it really okay to mix non-volatile
+ // operations such as copy with read_volatile and write_volatile?
+ unsafe {
+ let count = min(self.size, slice.size);
+ copy(self.addr, slice.addr, count);
+ slice.bitmap.mark_dirty(0, count);
+ }
+ }
+
+ /// Copies as many elements of type `T` as possible from `buf` to this slice.
+ ///
+ /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::{VolatileMemory, VolatileSlice};
+ /// #
+ /// let mut mem = [0u8; 32];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ ///
+ /// let buf = [5u8; 64];
+ /// vslice.copy_from(&buf[..]);
+ ///
+ /// for i in 0..4 {
+ /// let val = vslice
+ /// .get_ref::<u32>(i * 4)
+ /// .expect("Could not get value")
+ /// .load();
+ /// assert_eq!(val, 0x05050505);
+ /// }
+ /// ```
+ pub fn copy_from<T>(&self, buf: &[T])
+ where
+ T: ByteValued,
+ {
+ // A fast path for u8/i8
+ if size_of::<T>() == 1 {
+ let total = buf.len().min(self.len());
+ // SAFETY:
+ // - dst is valid for writes of at least `total`, since total <= self.len()
+ // - src is valid for reads of at least `total` as total <= buf.len()
+ // - The regions are non-overlapping as `dst` points to guest memory and `buf` is
+ // a slice and thus has to live outside of guest memory (there can be more slices to
+ // guest memory without violating rust's aliasing rules)
+ // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
+ unsafe { copy_to_volatile_slice(self, buf.as_ptr() as *const u8, total) };
+ } else {
+ let count = self.size / size_of::<T>();
+ // It's ok to use unwrap here because `count` was computed based on the current
+ // length of `self`.
+ let dest = self.get_array_ref::<T>(0, count).unwrap();
+
+ // No need to explicitly call `mark_dirty` after this call because
+ // `VolatileArrayRef::copy_from` already takes care of that.
+ dest.copy_from(buf);
+ };
+ }
+
+ /// Checks if the current slice is aligned at `alignment` bytes.
+ fn check_alignment(&self, alignment: usize) -> Result<()> {
+ // Check that the desired alignment is a power of two.
+ debug_assert!((alignment & (alignment - 1)) == 0);
+ if ((self.addr as usize) & (alignment - 1)) != 0 {
+ return Err(Error::Misaligned {
+ addr: self.addr as usize,
+ alignment,
+ });
+ }
+ Ok(())
+ }
+}
+
+impl<B: BitmapSlice> Bytes<usize> for VolatileSlice<'_, B> {
+ type E = Error;
+
+ /// # Examples
+ /// * Write a slice of size 5 at offset 1020 of a 1024-byte `VolatileSlice`.
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// #
+ /// let mut mem = [0u8; 1024];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let res = vslice.write(&[1, 2, 3, 4, 5], 1020);
+ ///
+ /// assert!(res.is_ok());
+ /// assert_eq!(res.unwrap(), 4);
+ /// ```
+ fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
+ if buf.is_empty() {
+ return Ok(0);
+ }
+
+ if addr >= self.size {
+ return Err(Error::OutOfBounds { addr });
+ }
+
+ let total = buf.len().min(self.len() - addr);
+ let dst = self.subslice(addr, total)?;
+
+ // SAFETY:
+ // We check above that `addr` is a valid offset within this volatile slice, and by
+ // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
+ // memory of length self.len(). Furthermore, both src and dst of the call to
+ // copy_to_volatile_slice are valid for reads and writes respectively of length `total`
+ // since total is the minimum of lengths of the memory areas pointed to. The areas do not
+ // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
+ // memory are possible without violating rust's aliasing rules).
+ Ok(unsafe { copy_to_volatile_slice(&dst, buf.as_ptr(), total) })
+ }
+
+ /// # Examples
+ /// * Read a slice of size 16 at offset 1010 of a 1024-byte `VolatileSlice`.
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// #
+ /// let mut mem = [0u8; 1024];
+ /// let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let buf = &mut [0u8; 16];
+ /// let res = vslice.read(buf, 1010);
+ ///
+ /// assert!(res.is_ok());
+ /// assert_eq!(res.unwrap(), 14);
+ /// ```
+ fn read(&self, buf: &mut [u8], addr: usize) -> Result<usize> {
+ if buf.is_empty() {
+ return Ok(0);
+ }
+
+ if addr >= self.size {
+ return Err(Error::OutOfBounds { addr });
+ }
+
+ let total = buf.len().min(self.len() - addr);
+ let src = self.subslice(addr, total)?;
+
+ // SAFETY:
+ // We check above that `addr` is a valid offset within this volatile slice, and by
+ // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
+ // memory of length self.len(). Furthermore, both src and dst of the call to
+ // copy_from_volatile_slice are valid for reads and writes respectively of length `total`
+ // since total is the minimum of lengths of the memory areas pointed to. The areas do not
+ // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
+ // memory are possible without violating rust's aliasing rules).
+ unsafe { Ok(copy_from_volatile_slice(buf.as_mut_ptr(), &src, total)) }
+ }
+
+ /// # Examples
+ /// * Write a slice at offset 256.
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// #
+ /// # // Create a buffer
+ /// # let mut mem = [0u8; 1024];
+ /// #
+ /// # // Get a `VolatileSlice` from the buffer
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// #
+ /// let res = vslice.write_slice(&[1, 2, 3, 4, 5], 256);
+ ///
+ /// assert!(res.is_ok());
+ /// assert_eq!(res.unwrap(), ());
+ /// ```
+ fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
+ // `mark_dirty` called within `self.write`.
+ let len = self.write(buf, addr)?;
+ if len != buf.len() {
+ return Err(Error::PartialBuffer {
+ expected: buf.len(),
+ completed: len,
+ });
+ }
+ Ok(())
+ }
+
+ /// # Examples
+ /// * Read a slice of size 16 at offset 256.
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// #
+ /// # // Create a buffer
+ /// # let mut mem = [0u8; 1024];
+ /// #
+ /// # // Get a `VolatileSlice` from the buffer
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// #
+ /// let buf = &mut [0u8; 16];
+ /// let res = vslice.read_slice(buf, 256);
+ ///
+ /// assert!(res.is_ok());
+ /// ```
+ fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
+ let len = self.read(buf, addr)?;
+ if len != buf.len() {
+ return Err(Error::PartialBuffer {
+ expected: buf.len(),
+ completed: len,
+ });
+ }
+ Ok(())
+ }
+
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # if cfg!(unix) {
+ /// # let mut mem = [0u8; 1024];
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ ///
+ /// vslice
+ /// .read_from(32, &mut file, 128)
+ /// .expect("Could not read bytes from file into VolatileSlice");
+ ///
+ /// let rand_val: u32 = vslice
+ /// .read_obj(40)
+ /// .expect("Could not read value from VolatileSlice");
+ /// # }
+ /// ```
+ fn read_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
+ where
+ F: Read,
+ {
+ let _ = self.compute_end_offset(addr, count)?;
+
+ let mut dst = vec![0; count];
+
+ let bytes_read = loop {
+ match src.read(&mut dst) {
+ Ok(n) => break n,
+ Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
+ Err(e) => return Err(Error::IOError(e)),
+ }
+ };
+
+ // There is no guarantee that the read implementation is well-behaved, see the docs for
+ // Read::read.
+ assert!(bytes_read <= count);
+
+ let slice = self.subslice(addr, bytes_read)?;
+
+ // SAFETY: We have checked via compute_end_offset that accessing the specified
+ // region of guest memory is valid. We asserted that the value returned by `read` is between
+ // 0 and count (the length of the buffer passed to it), and that the
+ // regions don't overlap because we allocated the Vec outside of guest memory.
+ Ok(unsafe { copy_to_volatile_slice(&slice, dst.as_ptr(), bytes_read) })
+ }
+
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # if cfg!(unix) {
+ /// # let mut mem = [0u8; 1024];
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ ///
+ /// vslice
+ /// .read_exact_from(32, &mut file, 128)
+ /// .expect("Could not read bytes from file into VolatileSlice");
+ ///
+ /// let rand_val: u32 = vslice
+ /// .read_obj(40)
+ /// .expect("Could not read value from VolatileSlice");
+ /// # }
+ /// ```
+ fn read_exact_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
+ where
+ F: Read,
+ {
+ let _ = self.compute_end_offset(addr, count)?;
+
+ let mut dst = vec![0; count];
+
+ // Read into buffer that can be copied into guest memory
+ src.read_exact(&mut dst).map_err(Error::IOError)?;
+
+ let slice = self.subslice(addr, count)?;
+
+ // SAFETY: We have checked via compute_end_offset that accessing the specified
+ // region of guest memory is valid. We know that `dst` has len `count`, and that the
+ // regions don't overlap because we allocated the Vec outside of guest memory
+ unsafe { copy_to_volatile_slice(&slice, dst.as_ptr(), count) };
+ Ok(())
+ }
+
+ /// # Examples
+ ///
+ /// * Write 128 bytes to /dev/null
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// # use std::fs::OpenOptions;
+ /// # use std::path::Path;
+ /// #
+ /// # if cfg!(unix) {
+ /// # let mut mem = [0u8; 1024];
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let mut file = OpenOptions::new()
+ /// .write(true)
+ /// .open("/dev/null")
+ /// .expect("Could not open /dev/null");
+ ///
+ /// vslice
+ /// .write_to(32, &mut file, 128)
+ /// .expect("Could not write value from VolatileSlice to /dev/null");
+ /// # }
+ /// ```
+ fn write_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
+ where
+ F: Write,
+ {
+ let _ = self.compute_end_offset(addr, count)?;
+ let mut src = Vec::with_capacity(count);
+
+ let slice = self.subslice(addr, count)?;
+
+ // SAFETY: We checked the addr and count so accessing the slice is safe.
+ // It is safe to read from volatile memory. The Vec has capacity for exactly `count`
+ // many bytes, and the memory regions pointed to definitely do not overlap, as we
+ // allocated src outside of guest memory.
+ // The call to set_len is safe because the bytes between 0 and count have been initialized
+ // via copying from guest memory, and the Vec's capacity is `count`
+ unsafe {
+ copy_from_volatile_slice(src.as_mut_ptr(), &slice, count);
+ src.set_len(count);
+ }
+
+ loop {
+ match dst.write(&src) {
+ Ok(n) => break Ok(n),
+ Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
+ Err(e) => break Err(Error::IOError(e)),
+ }
+ }
+ }
+
+ /// # Examples
+ ///
+ /// * Write 128 bytes to /dev/null
+ ///
+ /// ```
+ /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
+ /// # use std::fs::OpenOptions;
+ /// # use std::path::Path;
+ /// #
+ /// # if cfg!(unix) {
+ /// # let mut mem = [0u8; 1024];
+ /// # let vslice = VolatileSlice::from(&mut mem[..]);
+ /// let mut file = OpenOptions::new()
+ /// .write(true)
+ /// .open("/dev/null")
+ /// .expect("Could not open /dev/null");
+ ///
+ /// vslice
+ /// .write_all_to(32, &mut file, 128)
+ /// .expect("Could not write value from VolatileSlice to /dev/null");
+ /// # }
+ /// ```
+ fn write_all_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
+ where
+ F: Write,
+ {
+ let _ = self.compute_end_offset(addr, count)?;
+ let mut src = Vec::with_capacity(count);
+
+ let slice = self.subslice(addr, count)?;
+
+ // SAFETY: We checked the addr and count so accessing the slice is safe.
+ // It is safe to read from volatile memory. The Vec has capacity for exactly `count`
+ // many bytes, and the memory regions pointed to definitely do not overlap, as we
+ // allocated src outside of guest memory.
+ // The call to set_len is safe because the bytes between 0 and count have been initialized
+ // via copying from guest memory, and the Vec's capacity is `count`
+ unsafe {
+ copy_from_volatile_slice(src.as_mut_ptr(), &slice, count);
+ src.set_len(count);
+ }
+
+ dst.write_all(&src).map_err(Error::IOError)?;
+
+ Ok(())
+ }
+
+ fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
+ self.get_atomic_ref::<T::A>(addr).map(|r| {
+ r.store(val.into(), order);
+ self.bitmap.mark_dirty(addr, size_of::<T>())
+ })
+ }
+
+ fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
+ self.get_atomic_ref::<T::A>(addr)
+ .map(|r| r.load(order).into())
+ }
+}
+
+impl<B: BitmapSlice> VolatileMemory for VolatileSlice<'_, B> {
+ type B = B;
+
+ fn len(&self) -> usize {
+ self.size
+ }
+
+ fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<B>> {
+ let _ = self.compute_end_offset(offset, count)?;
+ Ok(
+ // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
+ // the lifetime is the same as self.
+ unsafe {
+ VolatileSlice::with_bitmap(
+ self.addr.add(offset),
+ count,
+ self.bitmap.slice_at(offset),
+ self.mmap,
+ )
+ },
+ )
+ }
+}
+
+/// A memory location that supports volatile access to an instance of `T`.
+///
+/// # Examples
+///
+/// ```
+/// # use vm_memory::VolatileRef;
+/// #
+/// let mut v = 5u32;
+/// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32 as *mut u8) };
+///
+/// assert_eq!(v, 5);
+/// assert_eq!(v_ref.load(), 5);
+/// v_ref.store(500);
+/// assert_eq!(v, 500);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct VolatileRef<'a, T, B = ()> {
+ addr: *mut Packed<T>,
+ bitmap: B,
+ mmap: Option<&'a MmapInfo>,
+}
+
+impl<'a, T> VolatileRef<'a, T, ()>
+where
+ T: ByteValued,
+{
+ /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
+ /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
+ /// must also guarantee that all other users of the given chunk of memory are using volatile
+ /// accesses.
+ pub unsafe fn new(addr: *mut u8) -> Self {
+ Self::with_bitmap(addr, (), None)
+ }
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl<'a, T, B> VolatileRef<'a, T, B>
+where
+ T: ByteValued,
+ B: BitmapSlice,
+{
+ /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the
+ /// provided `bitmap` object for dirty page tracking.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
+ /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
+ /// must also guarantee that all other users of the given chunk of memory are using volatile
+ /// accesses.
+ pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B, mmap: Option<&'a MmapInfo>) -> Self {
+ VolatileRef {
+ addr: addr as *mut Packed<T>,
+ bitmap,
+ mmap,
+ }
+ }
+
+ /// Returns a pointer to the underlying memory. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ #[deprecated(
+ since = "0.12.1",
+ note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead"
+ )]
+ #[cfg(not(all(feature = "xen", unix)))]
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.addr as *mut u8
+ }
+
+ /// Returns a guard for the pointer to the underlying memory.
+ pub fn ptr_guard(&self) -> PtrGuard {
+ PtrGuard::read(self.mmap, self.addr as *mut u8, self.len())
+ }
+
+ /// Returns a mutable guard for the pointer to the underlying memory.
+ pub fn ptr_guard_mut(&self) -> PtrGuardMut {
+ PtrGuardMut::write(self.mmap, self.addr as *mut u8, self.len())
+ }
+
+ /// Gets the size of the referenced type `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::mem::size_of;
+ /// # use vm_memory::VolatileRef;
+ /// #
+ /// let v_ref = unsafe { VolatileRef::<u32>::new(0 as *mut _) };
+ /// assert_eq!(v_ref.len(), size_of::<u32>() as usize);
+ /// ```
+ pub fn len(&self) -> usize {
+ size_of::<T>()
+ }
+
+ /// Borrows the inner `BitmapSlice`.
+ pub fn bitmap(&self) -> &B {
+ &self.bitmap
+ }
+
+ /// Does a volatile write of the value `v` to the address of this ref.
+ #[inline(always)]
+ pub fn store(&self, v: T) {
+ let guard = self.ptr_guard_mut();
+
+ // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
+ unsafe { write_volatile(guard.as_ptr() as *mut Packed<T>, Packed::<T>(v)) };
+ self.bitmap.mark_dirty(0, self.len())
+ }
+
+ /// Does a volatile read of the value at the address of this ref.
+ #[inline(always)]
+ pub fn load(&self) -> T {
+ let guard = self.ptr_guard();
+
+ // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
+ // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
+ // in this function with the commented code below and running `cargo test --release`.
+ // unsafe { *(self.addr as *const T) }
+ unsafe { read_volatile(guard.as_ptr() as *const Packed<T>).0 }
+ }
+
+ /// Converts this to a [`VolatileSlice`](struct.VolatileSlice.html) with the same size and
+ /// address.
+ pub fn to_slice(&self) -> VolatileSlice<'a, B> {
+ // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
+ unsafe {
+ VolatileSlice::with_bitmap(
+ self.addr as *mut u8,
+ size_of::<T>(),
+ self.bitmap.clone(),
+ self.mmap,
+ )
+ }
+ }
+}
+
+/// A memory location that supports volatile access to an array of elements of type `T`.
+///
+/// # Examples
+///
+/// ```
+/// # use vm_memory::VolatileArrayRef;
+/// #
+/// let mut v = [5u32; 1];
+/// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u32 as *mut u8, v.len()) };
+///
+/// assert_eq!(v[0], 5);
+/// assert_eq!(v_ref.load(0), 5);
+/// v_ref.store(0, 500);
+/// assert_eq!(v[0], 500);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct VolatileArrayRef<'a, T, B = ()> {
+ addr: *mut u8,
+ nelem: usize,
+ bitmap: B,
+ phantom: PhantomData<&'a T>,
+ mmap: Option<&'a MmapInfo>,
+}
+
+impl<'a, T> VolatileArrayRef<'a, T>
+where
+ T: ByteValued,
+{
+ /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
+ /// type `T`.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
+ /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
+ /// `VolatileRef`. The caller must also guarantee that all other users of the given chunk of
+ /// memory are using volatile accesses.
+ pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self {
+ Self::with_bitmap(addr, nelem, (), None)
+ }
+}
+
+impl<'a, T, B> VolatileArrayRef<'a, T, B>
+where
+ T: ByteValued,
+ B: BitmapSlice,
+{
+ /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
+ /// type `T`, using the provided `bitmap` object for dirty page tracking.
+ ///
+ /// # Safety
+ ///
+ /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
+ /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
+ /// `VolatileRef`. The caller must also guarantee that all other users of the given chunk of
+ /// memory are using volatile accesses.
+ pub unsafe fn with_bitmap(
+ addr: *mut u8,
+ nelem: usize,
+ bitmap: B,
+ mmap: Option<&'a MmapInfo>,
+ ) -> Self {
+ VolatileArrayRef {
+ addr,
+ nelem,
+ bitmap,
+ phantom: PhantomData,
+ mmap,
+ }
+ }
+
+ /// Returns `true` if this array is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::VolatileArrayRef;
+ /// #
+ /// let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
+ /// assert!(v_array.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.nelem == 0
+ }
+
+ /// Returns the number of elements in the array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::VolatileArrayRef;
+ /// #
+ /// # let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 1) };
+ /// assert_eq!(v_array.len(), 1);
+ /// ```
+ pub fn len(&self) -> usize {
+ self.nelem
+ }
+
+ /// Returns the size of `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::mem::size_of;
+ /// # use vm_memory::VolatileArrayRef;
+ /// #
+ /// let v_ref = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
+ /// assert_eq!(v_ref.element_size(), size_of::<u32>() as usize);
+ /// ```
+ pub fn element_size(&self) -> usize {
+ size_of::<T>()
+ }
+
+ /// Returns a pointer to the underlying memory. Mutable accesses performed
+ /// using the resulting pointer are not automatically accounted for by the dirty bitmap
+ /// tracking functionality.
+ #[deprecated(
+ since = "0.12.1",
+ note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead"
+ )]
+ #[cfg(not(all(feature = "xen", unix)))]
+ pub fn as_ptr(&self) -> *mut u8 {
+ self.addr
+ }
+
+ /// Returns a guard for the pointer to the underlying memory.
+ pub fn ptr_guard(&self) -> PtrGuard {
+ PtrGuard::read(self.mmap, self.addr, self.len())
+ }
+
+ /// Returns a mutable guard for the pointer to the underlying memory.
+ pub fn ptr_guard_mut(&self) -> PtrGuardMut {
+ PtrGuardMut::write(self.mmap, self.addr, self.len())
+ }
+
+ /// Borrows the inner `BitmapSlice`.
+ pub fn bitmap(&self) -> &B {
+ &self.bitmap
+ }
+
+ /// Converts this to a `VolatileSlice` with the same size and address.
+ pub fn to_slice(&self) -> VolatileSlice<'a, B> {
+ // SAFETY: Safe as long as the caller validated addr when creating this object.
+ unsafe {
+ VolatileSlice::with_bitmap(
+ self.addr,
+ self.nelem * self.element_size(),
+ self.bitmap.clone(),
+ self.mmap,
+ )
+ }
+ }
+
+ /// Returns a [`VolatileRef`](struct.VolatileRef.html) to the element at `index`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than or equal to the number of elements of the array to which
+ /// `&self` points.
+ pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> {
+ assert!(index < self.nelem);
+ // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
+ // memory of the VolatileArrayRef.
+ unsafe {
+ // byteofs must fit in an isize as it was checked in get_array_ref.
+ let byteofs = (self.element_size() * index) as isize;
+ let ptr = self.addr.offset(byteofs);
+ VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize), self.mmap)
+ }
+ }
+
+ /// Does a volatile read of the element at `index`.
+ pub fn load(&self, index: usize) -> T {
+ self.ref_at(index).load()
+ }
+
+ /// Does a volatile write of the element at `index`.
+ pub fn store(&self, index: usize, value: T) {
+ // The `VolatileRef::store` call below implements the required dirty bitmap tracking logic,
+ // so no need to do that in this method as well.
+ self.ref_at(index).store(value)
+ }
+
+ /// Copies as many elements of type `T` as possible from this array to `buf`.
+ ///
+ /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
+ /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
+ /// using volatile reads.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::VolatileArrayRef;
+ /// #
+ /// let mut v = [0u8; 32];
+ /// let v_ref = unsafe { VolatileArrayRef::new(v.as_mut_ptr(), v.len()) };
+ ///
+ /// let mut buf = [5u8; 16];
+ /// v_ref.copy_to(&mut buf[..]);
+ /// for &v in &buf[..] {
+ /// assert_eq!(v, 0);
+ /// }
+ /// ```
+ pub fn copy_to(&self, buf: &mut [T]) -> usize {
+ // A fast path for u8/i8
+ if size_of::<T>() == 1 {
+ let source = self.to_slice();
+ let total = buf.len().min(source.len());
+
+ // SAFETY:
+ // - dst is valid for writes of at least `total`, since total <= buf.len()
+ // - src is valid for reads of at least `total` as total <= source.len()
+ // - The regions are non-overlapping as `src` points to guest memory and `buf` is
+ // a slice and thus has to live outside of guest memory (there can be more slices to
+ // guest memory without violating rust's aliasing rules)
+ // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
+ return unsafe {
+ copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, &source, total)
+ };
+ }
+
+ let guard = self.ptr_guard();
+ let mut ptr = guard.as_ptr() as *const Packed<T>;
+ let start = ptr;
+
+ for v in buf.iter_mut().take(self.len()) {
+ // SAFETY: read_volatile is safe because the pointers are range-checked when
+ // the slices are created, and they never escape the VolatileSlices.
+ // ptr::add is safe because get_array_ref() validated that
+ // size_of::<T>() * self.len() fits in an isize.
+ unsafe {
+ *v = read_volatile(ptr).0;
+ ptr = ptr.add(1);
+ }
+ }
+
+ // SAFETY: It is guaranteed that start and ptr point to the regions of the same slice.
+ unsafe { ptr.offset_from(start) as usize }
+ }
+
+ /// Copies as many bytes as possible from this slice to the provided `slice`.
+ ///
+ /// The copies happen in an undefined order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::VolatileArrayRef;
+ /// #
+ /// let mut v = [0u8; 32];
+ /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
+ /// let mut buf = [5u8; 16];
+ /// let v_ref2 = unsafe { VolatileArrayRef::<u8>::new(buf.as_mut_ptr(), buf.len()) };
+ ///
+ /// v_ref.copy_to_volatile_slice(v_ref2.to_slice());
+ /// for &v in &buf[..] {
+ /// assert_eq!(v, 0);
+ /// }
+ /// ```
+ pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
+ // SAFETY: Safe because the pointers are range-checked when the slices
+ // are created, and they never escape the VolatileSlices.
+ // FIXME: ... however, is it really okay to mix non-volatile
+ // operations such as copy with read_volatile and write_volatile?
+ unsafe {
+ let count = min(self.len() * self.element_size(), slice.size);
+ copy(self.addr, slice.addr, count);
+ slice.bitmap.mark_dirty(0, count);
+ }
+ }
+
+ /// Copies as many elements of type `T` as possible from `buf` to this slice.
+ ///
+ /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
+ /// to this slice's memory. The copy happens from smallest to largest address in
+ /// `T` sized chunks using volatile writes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use vm_memory::VolatileArrayRef;
+ /// #
+ /// let mut v = [0u8; 32];
+ /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
+ ///
+ /// let buf = [5u8; 64];
+ /// v_ref.copy_from(&buf[..]);
+ /// for &val in &v[..] {
+ /// assert_eq!(5u8, val);
+ /// }
+ /// ```
+ pub fn copy_from(&self, buf: &[T]) {
+ // A fast path for u8/i8
+ if size_of::<T>() == 1 {
+ let destination = self.to_slice();
+ let total = buf.len().min(destination.len());
+
+ // absurd formatting brought to you by clippy
+ // SAFETY:
+ // - dst is valid for writes of at least `total`, since total <= destination.len()
+ // - src is valid for reads of at least `total` as total <= buf.len()
+ // - The regions are non-overlapping as `dst` points to guest memory and `buf` is
+ // a slice and thus has to live outside of guest memory (there can be more slices to
+ // guest memory without violating rust's aliasing rules)
+ // - size is always a multiple of alignment, so treating *const T as *const u8 is fine
+ unsafe { copy_to_volatile_slice(&destination, buf.as_ptr() as *const u8, total) };
+ } else {
+ let guard = self.ptr_guard_mut();
+ let start = guard.as_ptr();
+ let mut ptr = start as *mut Packed<T>;
+
+ for &v in buf.iter().take(self.len()) {
+ // SAFETY: write_volatile is safe because the pointers are range-checked when
+ // the slices are created, and they never escape the VolatileSlices.
+ // ptr::add is safe because get_array_ref() validated that
+ // size_of::<T>() * self.len() fits in an isize.
+ unsafe {
+ write_volatile(ptr, Packed::<T>(v));
+ ptr = ptr.add(1);
+ }
+ }
+
+ self.bitmap.mark_dirty(0, ptr as usize - start as usize);
+ }
+ }
+}
+
+impl<'a, B: BitmapSlice> From<VolatileSlice<'a, B>> for VolatileArrayRef<'a, u8, B> {
+ fn from(slice: VolatileSlice<'a, B>) -> Self {
+ // SAFETY: Safe because the result has the same lifetime and points to the same
+ // memory as the incoming VolatileSlice.
+ unsafe { VolatileArrayRef::with_bitmap(slice.addr, slice.len(), slice.bitmap, slice.mmap) }
+ }
+}
+
+// Return the largest value that `addr` is aligned to. Forcing this function to return 1 will
+// cause test_non_atomic_access to fail.
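+// For example (illustrative values): `alignment(0b0110_1000) == 8` and `alignment(5) == 1`.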
+fn alignment(addr: usize) -> usize {
+ // Rust is silly and does not let me write addr & -addr.
+ addr & (!addr + 1)
+}
+
+mod copy_slice_impl {
+ use super::*;
+
+ // SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
+ // - `src_addr` and `dst_addr` must be valid for reads/writes.
+ // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
+ // - `src_addr` must point to a properly initialized value, which is true here because
+ // we're only using integer primitives.
+ unsafe fn copy_single(align: usize, src_addr: *const u8, dst_addr: *mut u8) {
+ match align {
+ 8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
+ 4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
+ 2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
+ 1 => write_volatile(dst_addr, read_volatile(src_addr)),
+ _ => unreachable!(),
+ }
+ }
+
+ /// Copies `total` bytes from `src` to `dst` using a loop of volatile reads and writes
+ ///
+ /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
+ /// length `total`. The regions must not overlap
+ unsafe fn copy_slice_volatile(mut dst: *mut u8, mut src: *const u8, total: usize) -> usize {
+ let mut left = total;
+
+ let align = min(alignment(src as usize), alignment(dst as usize));
+
+ let mut copy_aligned_slice = |min_align| {
+ if align < min_align {
+ return;
+ }
+
+ while left >= min_align {
+ // SAFETY: Safe because we check alignment beforehand, the memory areas are valid
+ // for reads/writes, and the source always contains a valid value.
+ unsafe { copy_single(min_align, src, dst) };
+
+ left -= min_align;
+
+ if left == 0 {
+ break;
+ }
+
+ // SAFETY: We only explain the invariants for `src`, the argument for `dst` is
+ // analogous.
+ // - `src` and `src + min_align` are within (or one byte past) the same allocated object
+ // This is given by the invariant on this function ensuring that [src, src + total)
+ // are part of the same allocated object, and the condition on the while loop
+ // ensures that we do not go outside this object
+ // - The computed offset in bytes cannot overflow isize, because `min_align` is at
+ // most 8 when the closure is called (see below)
+ // - The sum `src as usize + min_align` can only wrap around if src as usize + min_align - 1 == usize::MAX,
+ // however in this case, left == 0, and we'll have exited the loop above.
+ unsafe {
+ src = src.add(min_align);
+ dst = dst.add(min_align);
+ }
+ }
+ };
+
+ if size_of::<usize>() > 4 {
+ copy_aligned_slice(8);
+ }
+ copy_aligned_slice(4);
+ copy_aligned_slice(2);
+ copy_aligned_slice(1);
+
+ total
+ }
+
+ /// Copies `total` bytes from `src` to `dst`
+ ///
+ /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
+ /// length `total`. The regions must not overlap
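+ /// (Copies of at most `size_of::<usize>()` bytes are done with per-chunk volatile accesses;
+ /// larger copies fall back to `ptr::copy_nonoverlapping`.)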
+ unsafe fn copy_slice(dst: *mut u8, src: *const u8, total: usize) -> usize {
+ if total <= size_of::<usize>() {
+ // SAFETY: Invariants of copy_slice_volatile are the same as invariants of copy_slice
+ unsafe {
+ copy_slice_volatile(dst, src, total);
+ };
+ } else {
+ // SAFETY:
+ // - Both src and dst are allocated for reads/writes of length `total` by function
+ // invariant
+ // - src and dst are properly aligned, as any alignment is valid for u8
+ // - The regions are not overlapping by function invariant
+ unsafe {
+ std::ptr::copy_nonoverlapping(src, dst, total);
+ }
+ }
+
+ total
+ }
+
+ /// Copies `total` bytes from `slice` to `dst`
+ ///
+ /// SAFETY: `slice` and `dst` must point to a contiguously allocated memory region of at
+ /// least length `total`. The regions must not overlap.
+ pub(super) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
+ dst: *mut u8,
+ slice: &VolatileSlice<'_, B>,
+ total: usize,
+ ) -> usize {
+ let guard = slice.ptr_guard();
+
+ // SAFETY: guaranteed by function invariants.
+ copy_slice(dst, guard.as_ptr(), total)
+ }
+
+ /// Copies `total` bytes from `src` to `slice`
+ ///
+ /// SAFETY: `slice` and `src` must point to a contiguously allocated memory region of at
+ /// least length `total`. The regions must not overlap.
+ pub(super) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
+ slice: &VolatileSlice<'_, B>,
+ src: *const u8,
+ total: usize,
+ ) -> usize {
+ let guard = slice.ptr_guard_mut();
+
+ // SAFETY: guaranteed by function invariants.
+ let count = copy_slice(guard.as_ptr(), src, total);
+ slice.bitmap.mark_dirty(0, count);
+ count
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #![allow(clippy::undocumented_unsafe_blocks)]
+
+ use super::*;
+ use std::alloc::Layout;
+
+ use std::fs::File;
+ use std::io::Cursor;
+ use std::mem::size_of_val;
+ use std::path::Path;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use std::sync::{Arc, Barrier};
+ use std::thread::spawn;
+
+ use matches::assert_matches;
+ use vmm_sys_util::tempfile::TempFile;
+
+ use crate::bitmap::tests::{
+ check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory,
+ };
+ use crate::bitmap::{AtomicBitmap, RefSlice};
+
+ #[test]
+ fn test_display_error() {
+ assert_eq!(
+ format!("{}", Error::OutOfBounds { addr: 0x10 }),
+ "address 0x10 is out of bounds"
+ );
+
+ assert_eq!(
+ format!(
+ "{}",
+ Error::Overflow {
+ base: 0x0,
+ offset: 0x10
+ }
+ ),
+ "address 0x0 offset by 0x10 would overflow"
+ );
+
+ assert_eq!(
+ format!(
+ "{}",
+ Error::TooBig {
+ nelements: 100_000,
+ size: 1_000_000_000
+ }
+ ),
+ "100000 elements of size 1000000000 would overflow a usize"
+ );
+
+ assert_eq!(
+ format!(
+ "{}",
+ Error::Misaligned {
+ addr: 0x4,
+ alignment: 8
+ }
+ ),
+ "address 0x4 is not aligned to 8"
+ );
+
+ assert_eq!(
+ format!(
+ "{}",
+ Error::PartialBuffer {
+ expected: 100,
+ completed: 90
+ }
+ ),
+ "only used 90 bytes in 100 long buffer"
+ );
+ }
+
+ #[test]
+ fn misaligned_ref() {
+ let mut a = [0u8; 3];
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ unsafe {
+ assert!(
+ a_ref.aligned_as_ref::<u16>(0).is_err() ^ a_ref.aligned_as_ref::<u16>(1).is_err()
+ );
+ assert!(
+ a_ref.aligned_as_mut::<u16>(0).is_err() ^ a_ref.aligned_as_mut::<u16>(1).is_err()
+ );
+ }
+ }
+
+ #[test]
+ fn atomic_store() {
+ let mut a = [0usize; 1];
+ {
+ let a_ref = unsafe {
+ VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
+ };
+ let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
+ atomic.store(2usize, Ordering::Relaxed)
+ }
+ assert_eq!(a[0], 2);
+ }
+
+ #[test]
+ fn atomic_load() {
+ let mut a = [5usize; 1];
+ {
+ let a_ref = unsafe {
+ VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
+ };
+ let atomic = {
+ let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
+ assert_eq!(atomic.load(Ordering::Relaxed), 5usize);
+ atomic
+ };
+ // To make sure we can take the atomic out of the scope we made it in:
+ atomic.load(Ordering::Relaxed);
+ // but not too far:
+ // atomicu8
+ } //.load(std::sync::atomic::Ordering::Relaxed)
+ ;
+ }
+
+ #[test]
+ fn misaligned_atomic() {
+ let mut a = [5usize, 5usize];
+ let a_ref =
+ unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>()) };
+ assert!(a_ref.get_atomic_ref::<AtomicUsize>(0).is_ok());
+ assert!(a_ref.get_atomic_ref::<AtomicUsize>(1).is_err());
+ }
+
+ #[test]
+ fn ref_store() {
+ let mut a = [0u8; 1];
+ {
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let v_ref = a_ref.get_ref(0).unwrap();
+ v_ref.store(2u8);
+ }
+ assert_eq!(a[0], 2);
+ }
+
+ #[test]
+ fn ref_load() {
+ let mut a = [5u8; 1];
+ {
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let c = {
+ let v_ref = a_ref.get_ref::<u8>(0).unwrap();
+ assert_eq!(v_ref.load(), 5u8);
+ v_ref
+ };
+ // To make sure we can take a v_ref out of the scope we made it in:
+ c.load();
+ // but not too far:
+ // c
+ } //.load()
+ ;
+ }
+
+ #[test]
+ fn ref_to_slice() {
+ let mut a = [1u8; 5];
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let v_ref = a_ref.get_ref(1).unwrap();
+ v_ref.store(0x1234_5678u32);
+ let ref_slice = v_ref.to_slice();
+ assert_eq!(v_ref.addr as usize, ref_slice.addr as usize);
+ assert_eq!(v_ref.len(), ref_slice.len());
+ assert!(!ref_slice.is_empty());
+ }
+
+ #[test]
+ fn observe_mutate() {
+ struct RawMemory(*mut u8);
+
+ // SAFETY: we use proper synchronization below
+ unsafe impl Send for RawMemory {}
+ unsafe impl Sync for RawMemory {}
+
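+ // A single heap byte is shared with a spawned thread; the barriers order the
+ // store of 99 here, the store of 0 on the other thread, and the loads that
+ // observe each value.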
+ let mem = Arc::new(RawMemory(unsafe {
+ std::alloc::alloc(Layout::from_size_align(1, 1).unwrap())
+ }));
+
+ let outside_slice = unsafe { VolatileSlice::new(Arc::clone(&mem).0, 1) };
+ let inside_arc = Arc::clone(&mem);
+
+ let v_ref = outside_slice.get_ref::<u8>(0).unwrap();
+ let barrier = Arc::new(Barrier::new(2));
+ let barrier1 = barrier.clone();
+
+ v_ref.store(99);
+ spawn(move || {
+ barrier1.wait();
+ let inside_slice = unsafe { VolatileSlice::new(inside_arc.0, 1) };
+ let clone_v_ref = inside_slice.get_ref::<u8>(0).unwrap();
+ clone_v_ref.store(0);
+ barrier1.wait();
+ });
+
+ assert_eq!(v_ref.load(), 99);
+ barrier.wait();
+ barrier.wait();
+ assert_eq!(v_ref.load(), 0);
+
+ unsafe { std::alloc::dealloc(mem.0, Layout::from_size_align(1, 1).unwrap()) }
+ }
+
+ #[test]
+ fn mem_is_empty() {
+ let mut backing = vec![0u8; 100];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ assert!(!a.is_empty());
+
+ let mut backing = vec![];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ assert!(a.is_empty());
+ }
+
+ #[test]
+ fn slice_len() {
+ let mut backing = vec![0u8; 100];
+ let mem = VolatileSlice::from(backing.as_mut_slice());
+ let slice = mem.get_slice(0, 27).unwrap();
+ assert_eq!(slice.len(), 27);
+ assert!(!slice.is_empty());
+
+ let slice = mem.get_slice(34, 27).unwrap();
+ assert_eq!(slice.len(), 27);
+ assert!(!slice.is_empty());
+
+ let slice = slice.get_slice(20, 5).unwrap();
+ assert_eq!(slice.len(), 5);
+ assert!(!slice.is_empty());
+
+ let slice = mem.get_slice(34, 0).unwrap();
+ assert!(slice.is_empty());
+ }
+
+ #[test]
+ fn slice_subslice() {
+ let mut backing = vec![0u8; 100];
+ let mem = VolatileSlice::from(backing.as_mut_slice());
+ let slice = mem.get_slice(0, 100).unwrap();
+ assert!(slice.write(&[1; 80], 10).is_ok());
+
+ assert!(slice.subslice(0, 0).is_ok());
+ assert!(slice.subslice(0, 101).is_err());
+
+ assert!(slice.subslice(99, 0).is_ok());
+ assert!(slice.subslice(99, 1).is_ok());
+ assert!(slice.subslice(99, 2).is_err());
+
+ assert!(slice.subslice(100, 0).is_ok());
+ assert!(slice.subslice(100, 1).is_err());
+
+ assert!(slice.subslice(101, 0).is_err());
+ assert!(slice.subslice(101, 1).is_err());
+
+ assert!(slice.subslice(std::usize::MAX, 2).is_err());
+ assert!(slice.subslice(2, std::usize::MAX).is_err());
+
+ let maybe_offset_slice = slice.subslice(10, 80);
+ assert!(maybe_offset_slice.is_ok());
+ let offset_slice = maybe_offset_slice.unwrap();
+ assert_eq!(offset_slice.len(), 80);
+
+ let mut buf = [0; 80];
+ assert!(offset_slice.read(&mut buf, 0).is_ok());
+ assert_eq!(&buf[0..80], &[1; 80][0..80]);
+ }
+
+ #[test]
+ fn slice_offset() {
+ let mut backing = vec![0u8; 100];
+ let mem = VolatileSlice::from(backing.as_mut_slice());
+ let slice = mem.get_slice(0, 100).unwrap();
+ assert!(slice.write(&[1; 80], 10).is_ok());
+
+ assert!(slice.offset(101).is_err());
+
+ let maybe_offset_slice = slice.offset(10);
+ assert!(maybe_offset_slice.is_ok());
+ let offset_slice = maybe_offset_slice.unwrap();
+ assert_eq!(offset_slice.len(), 90);
+ let mut buf = [0; 90];
+ assert!(offset_slice.read(&mut buf, 0).is_ok());
+ assert_eq!(&buf[0..80], &[1; 80][0..80]);
+ assert_eq!(&buf[80..90], &[0; 10][0..10]);
+ }
+
+ #[test]
+ fn slice_copy_to_u8() {
+ let mut a = [2u8, 4, 6, 8, 10];
+ let mut b = [0u8; 4];
+ let mut c = [0u8; 6];
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap();
+ v_ref.copy_to(&mut b[..]);
+ v_ref.copy_to(&mut c[..]);
+ assert_eq!(b[0..4], a[0..4]);
+ assert_eq!(c[0..5], a[0..5]);
+ }
+
+ #[test]
+ fn slice_copy_to_u16() {
+ let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5];
+ let mut b = [0u16; 4];
+ let mut c = [0u16; 6];
+ let a_ref = &mut a[..];
+ let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut u8, 9) };
+
+ v_ref.copy_to(&mut b[..]);
+ v_ref.copy_to(&mut c[..]);
+ assert_eq!(b[0..4], a_ref[0..4]);
+ assert_eq!(c[0..4], a_ref[0..4]);
+ assert_eq!(c[4], 0);
+ }
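+
+ // Illustrative sketch, not part of the upstream suite: `copy_to` on byte slices
+ // presumably funnels into `copy_slice` above, which takes the volatile chunked
+ // path for transfers no larger than a word and `ptr::copy_nonoverlapping`
+ // otherwise; in both cases the destination must end up byte-identical to the
+ // source.
+ #[test]
+ fn slice_copy_to_small_and_large() {
+ // Small copy: three bytes fit within `size_of::<usize>()` on all supported targets.
+ let mut small_src = [1u8, 2, 3];
+ let mut small_dst = [0u8; 3];
+ VolatileSlice::from(&mut small_src[..]).copy_to(&mut small_dst[..]);
+ assert_eq!(small_dst, [1, 2, 3]);
+
+ // Large copy: 64 bytes exceed the word size on all supported targets.
+ let mut large_src = [0u8; 64];
+ for (i, b) in large_src.iter_mut().enumerate() {
+ *b = i as u8;
+ }
+ let mut large_dst = [0u8; 64];
+ VolatileSlice::from(&mut large_src[..]).copy_to(&mut large_dst[..]);
+ assert_eq!(large_dst, large_src);
+ }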
+
+ #[test]
+ fn slice_copy_from_u8() {
+ let a = [2u8, 4, 6, 8, 10];
+ let mut b = [0u8; 4];
+ let mut c = [0u8; 6];
+ let b_ref = VolatileSlice::from(&mut b[..]);
+ let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap();
+ v_ref.copy_from(&a[..]);
+ assert_eq!(b[0..4], a[0..4]);
+
+ let c_ref = VolatileSlice::from(&mut c[..]);
+ let v_ref = c_ref.get_slice(0, c_ref.len()).unwrap();
+ v_ref.copy_from(&a[..]);
+ assert_eq!(c[0..5], a[0..5]);
+ }
+
+ #[test]
+ fn slice_copy_from_u16() {
+ let a = [2u16, 4, 6, 8, 10];
+ let mut b = [0u16; 4];
+ let mut c = [0u16; 6];
+ let b_ref = &mut b[..];
+ let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) };
+ v_ref.copy_from(&a[..]);
+ assert_eq!(b_ref[0..4], a[0..4]);
+
+ let c_ref = &mut c[..];
+ let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) };
+ v_ref.copy_from(&a[..]);
+ assert_eq!(c_ref[0..4], a[0..4]);
+ assert_eq!(c_ref[4], 0);
+ }
+
+ #[test]
+ fn slice_copy_to_volatile_slice() {
+ let mut a = [2u8, 4, 6, 8, 10];
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
+
+ let mut b = [0u8; 4];
+ let b_ref = VolatileSlice::from(&mut b[..]);
+ let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap();
+
+ a_slice.copy_to_volatile_slice(b_slice);
+ assert_eq!(b, [2, 4, 6, 8]);
+ }
+
+ #[test]
+ fn slice_overflow_error() {
+ use std::usize::MAX;
+ let mut backing = vec![0u8];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let res = a.get_slice(MAX, 1).unwrap_err();
+ assert_matches!(
+ res,
+ Error::Overflow {
+ base: MAX,
+ offset: 1,
+ }
+ );
+ }
+
+ #[test]
+ fn slice_oob_error() {
+ let mut backing = vec![0u8; 100];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ a.get_slice(50, 50).unwrap();
+ let res = a.get_slice(55, 50).unwrap_err();
+ assert_matches!(res, Error::OutOfBounds { addr: 105 });
+ }
+
+ #[test]
+ fn ref_overflow_error() {
+ use std::usize::MAX;
+ let mut backing = vec![0u8];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let res = a.get_ref::<u8>(MAX).unwrap_err();
+ assert_matches!(
+ res,
+ Error::Overflow {
+ base: MAX,
+ offset: 1,
+ }
+ );
+ }
+
+ #[test]
+ fn ref_oob_error() {
+ let mut backing = vec![0u8; 100];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ a.get_ref::<u8>(99).unwrap();
+ let res = a.get_ref::<u16>(99).unwrap_err();
+ assert_matches!(res, Error::OutOfBounds { addr: 101 });
+ }
+
+ #[test]
+ fn ref_oob_too_large() {
+ let mut backing = vec![0u8; 3];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let res = a.get_ref::<u32>(0).unwrap_err();
+ assert_matches!(res, Error::OutOfBounds { addr: 4 });
+ }
+
+ #[test]
+ fn slice_store() {
+ let mut backing = vec![0u8; 5];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let s = a.as_volatile_slice();
+ let r = a.get_ref(2).unwrap();
+ r.store(9u16);
+ assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
+ }
+
+ #[test]
+ fn test_write_past_end() {
+ let mut backing = vec![0u8; 5];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let s = a.as_volatile_slice();
+ let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
+ assert!(res.is_ok());
+ assert_eq!(res.unwrap(), 5);
+ }
+
+ #[test]
+ fn slice_read_and_write() {
+ let mut backing = vec![0u8; 5];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let s = a.as_volatile_slice();
+ let sample_buf = [1, 2, 3];
+ assert!(s.write(&sample_buf, 5).is_err());
+ assert!(s.write(&sample_buf, 2).is_ok());
+ let mut buf = [0u8; 3];
+ assert!(s.read(&mut buf, 5).is_err());
+ assert!(s.read_slice(&mut buf, 2).is_ok());
+ assert_eq!(buf, sample_buf);
+
+ // Writing an empty buffer, even past the end of the volatile slice, succeeds and returns 0.
+ assert_eq!(s.write(&[], 100).unwrap(), 0);
+ let buf: &mut [u8] = &mut [];
+ assert_eq!(s.read(buf, 4).unwrap(), 0);
+
+ // Check that reading and writing an empty buffer does not yield an error.
+ let mut backing = Vec::new();
+ let empty_mem = VolatileSlice::from(backing.as_mut_slice());
+ let empty = empty_mem.as_volatile_slice();
+ assert_eq!(empty.write(&[], 1).unwrap(), 0);
+ assert_eq!(empty.read(buf, 1).unwrap(), 0);
+ }
+
+ #[test]
+ fn obj_read_and_write() {
+ let mut backing = vec![0u8; 5];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let s = a.as_volatile_slice();
+ assert!(s.write_obj(55u16, 4).is_err());
+ assert!(s.write_obj(55u16, core::usize::MAX).is_err());
+ assert!(s.write_obj(55u16, 2).is_ok());
+ assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
+ assert!(s.read_obj::<u16>(4).is_err());
+ assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
+ }
+
+ #[test]
+ fn mem_read_and_write() {
+ let mut backing = vec![0u8; 5];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let s = a.as_volatile_slice();
+ assert!(s.write_obj(!0u32, 1).is_ok());
+ let mut file = if cfg!(unix) {
+ File::open(Path::new("/dev/zero")).unwrap()
+ } else {
+ File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
+ };
+ assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
+ assert!(s
+ .read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
+ .is_err());
+
+ assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
+
+ let mut f = TempFile::new().unwrap().into_file();
+ assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
+ format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
+
+ let value = s.read_obj::<u32>(1).unwrap();
+ if cfg!(unix) {
+ assert_eq!(value, 0);
+ } else {
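+ // ntoskrnl.exe begins with the DOS header bytes 0x4d 0x5a 0x90 0x00 ("MZ\x90\0"),
+ // which read back as the little-endian u32 0x0090_5a4d.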
+ assert_eq!(value, 0x0090_5a4d);
+ }
+
+ let mut sink = Vec::new();
+ assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
+ assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
+ assert!(s
+ .write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
+ .is_err());
+ format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
+ if cfg!(unix) {
+ assert_eq!(sink, vec![0; size_of::<u32>()]);
+ } else {
+ assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
+ };
+ }
+
+ #[test]
+ fn unaligned_read_and_write() {
+ let mut backing = vec![0u8; 7];
+ let a = VolatileSlice::from(backing.as_mut_slice());
+ let s = a.as_volatile_slice();
+ let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
+ assert!(s.write_slice(&sample_buf, 0).is_ok());
+ let r = a.get_ref::<u32>(2).unwrap();
+ assert_eq!(r.load(), 0xAAAA_AAAA);
+
+ r.store(0x5555_5555);
+ let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
+ let mut buf: [u8; 7] = Default::default();
+ assert!(s.read_slice(&mut buf, 0).is_ok());
+ assert_eq!(buf, sample_buf);
+ }
+
+ #[test]
+ fn test_read_from_exceeds_size() {
+ #[derive(Debug, Default, Copy, Clone)]
+ struct BytesToRead {
+ _val1: u128, // 16 bytes
+ _val2: u128, // 16 bytes
+ }
+ unsafe impl ByteValued for BytesToRead {}
+ let cursor_size = 20;
+ let mut image = Cursor::new(vec![1u8; cursor_size]);
+
+ // Trying to read more bytes than the cursor holds should make `read_from`
+ // return the full cursor size (i.e. 20).
+ let mut bytes_to_read = BytesToRead::default();
+ let size_of_bytes = size_of_val(&bytes_to_read);
+ assert_eq!(
+ bytes_to_read
+ .as_bytes()
+ .read_from(0, &mut image, size_of_bytes)
+ .unwrap(),
+ cursor_size
+ );
+ }
+
+ #[test]
+ fn ref_array_from_slice() {
+ let mut a = [2, 4, 6, 8, 10];
+ let a_vec = a.to_vec();
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
+ let a_array_ref: VolatileArrayRef<u8, ()> = a_slice.into();
+ for (i, entry) in a_vec.iter().enumerate() {
+ assert_eq!(&a_array_ref.load(i), entry);
+ }
+ }
+
+ #[test]
+ fn ref_array_store() {
+ let mut a = [0u8; 5];
+ {
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let v_ref = a_ref.get_array_ref(1, 4).unwrap();
+ v_ref.store(1, 2u8);
+ v_ref.store(2, 4u8);
+ v_ref.store(3, 6u8);
+ }
+ let expected = [2u8, 4u8, 6u8];
+ assert_eq!(a[2..=4], expected);
+ }
+
+ #[test]
+ fn ref_array_load() {
+ let mut a = [0, 0, 2, 3, 10];
+ {
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let c = {
+ let v_ref = a_ref.get_array_ref::<u8>(1, 4).unwrap();
+ assert_eq!(v_ref.load(1), 2u8);
+ assert_eq!(v_ref.load(2), 3u8);
+ assert_eq!(v_ref.load(3), 10u8);
+ v_ref
+ };
+ // To make sure we can take a v_ref out of the scope we made it in:
+ c.load(0);
+ // but not too far:
+ // c
+ } //.load()
+ ;
+ }
+
+ #[test]
+ fn ref_array_overflow() {
+ let mut a = [0, 0, 2, 3, 10];
+ let a_ref = VolatileSlice::from(&mut a[..]);
+ let res = a_ref.get_array_ref::<u32>(4, usize::MAX).unwrap_err();
+ assert_matches!(
+ res,
+ Error::TooBig {
+ nelements: usize::MAX,
+ size: 4,
+ }
+ );
+ }
+
+ #[test]
+ fn alignment() {
+ let a = [0u8; 64];
+ let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize;
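+ // `a` is at least 32-byte aligned; the offsets below demote the reported
+ // alignment to the largest power of two still dividing the address.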
+ assert!(super::alignment(a) >= 32);
+ assert_eq!(super::alignment(a + 9), 1);
+ assert_eq!(super::alignment(a + 30), 2);
+ assert_eq!(super::alignment(a + 12), 4);
+ assert_eq!(super::alignment(a + 8), 8);
+ }
+
+ #[test]
+ fn test_atomic_accesses() {
+ let len = 0x1000;
+ let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
+ let a = unsafe { VolatileSlice::new(buf, len) };
+
+ crate::bytes::tests::check_atomic_accesses(a, 0, 0x1000);
+ unsafe {
+ std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
+ }
+ }
+
+ #[test]
+ fn split_at() {
+ let mut mem = [0u8; 32];
+ let mem_ref = VolatileSlice::from(&mut mem[..]);
+ let vslice = mem_ref.get_slice(0, 32).unwrap();
+ let (start, end) = vslice.split_at(8).unwrap();
+ assert_eq!(start.len(), 8);
+ assert_eq!(end.len(), 24);
+ let (start, end) = vslice.split_at(0).unwrap();
+ assert_eq!(start.len(), 0);
+ assert_eq!(end.len(), 32);
+ let (start, end) = vslice.split_at(31).unwrap();
+ assert_eq!(start.len(), 31);
+ assert_eq!(end.len(), 1);
+ let (start, end) = vslice.split_at(32).unwrap();
+ assert_eq!(start.len(), 32);
+ assert_eq!(end.len(), 0);
+ let err = vslice.split_at(33).unwrap_err();
+ assert_matches!(err, Error::OutOfBounds { addr: _ })
+ }
+
+ #[test]
+ fn test_volatile_slice_dirty_tracking() {
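+ // Run the generic `Bytes`/`VolatileMemory` helpers against a bitmap-backed
+ // slice, then check dirty tracking across write_obj, copy_to_volatile_slice,
+ // split_at, subslice, offset and copy_from.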
+ let val = 123u64;
+ let dirty_offset = 0x1000;
+ let dirty_len = size_of_val(&val);
+ let page_size = 0x1000;
+
+ let len = 0x10000;
+ let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
+
+ // Invoke the `Bytes` test helper function.
+ {
+ let bitmap = AtomicBitmap::new(len, page_size);
+ let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
+
+ test_bytes(
+ &slice,
+ |s: &VolatileSlice<RefSlice<AtomicBitmap>>,
+ start: usize,
+ len: usize,
+ clean: bool| { check_range(s.bitmap(), start, len, clean) },
+ |offset| offset,
+ 0x1000,
+ );
+ }
+
+ // Invoke the `VolatileMemory` test helper function.
+ {
+ let bitmap = AtomicBitmap::new(len, page_size);
+ let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
+ test_volatile_memory(&slice);
+ }
+
+ let bitmap = AtomicBitmap::new(len, page_size);
+ let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
+
+ let bitmap2 = AtomicBitmap::new(len, page_size);
+ let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) };
+
+ let bitmap3 = AtomicBitmap::new(len, page_size);
+ let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) };
+
+ assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
+ assert!(range_is_clean(slice2.bitmap(), 0, slice2.len()));
+
+ slice.write_obj(val, dirty_offset).unwrap();
+ assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
+
+ slice.copy_to_volatile_slice(slice2);
+ assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len()));
+
+ {
+ let (s1, s2) = slice.split_at(dirty_offset).unwrap();
+ assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
+ assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
+ }
+
+ {
+ let s = slice.subslice(dirty_offset, dirty_len).unwrap();
+ assert!(range_is_dirty(s.bitmap(), 0, s.len()));
+ }
+
+ {
+ let s = slice.offset(dirty_offset).unwrap();
+ assert!(range_is_dirty(s.bitmap(), 0, dirty_len));
+ }
+
+ // Test `copy_from` for size_of::<T> == 1.
+ {
+ let buf = vec![1u8; dirty_offset];
+
+ assert!(range_is_clean(slice.bitmap(), 0, dirty_offset));
+ slice.copy_from(&buf);
+ assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset));
+ }
+
+ // Test `copy_from` for size_of::<T> > 1.
+ {
+ let val = 1u32;
+ let buf = vec![val; dirty_offset / size_of_val(&val)];
+
+ assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset));
+ slice3.copy_from(&buf);
+ assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset));
+ }
+
+ unsafe {
+ std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
+ }
+ }
+
+ #[test]
+ fn test_volatile_ref_dirty_tracking() {
+ let val = 123u64;
+ let mut buf = vec![val];
+ let page_size = 0x1000;
+
+ let bitmap = AtomicBitmap::new(size_of_val(&val), page_size);
+ let vref = unsafe {
+ VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None)
+ };
+
+ assert!(range_is_clean(vref.bitmap(), 0, vref.len()));
+ vref.store(val);
+ assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
+ }
+
+ fn test_volatile_array_ref_copy_from_tracking<T>(buf: &mut [T], index: usize, page_size: usize)
+ where
+ T: ByteValued + From<u8>,
+ {
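+ // Build a `VolatileArrayRef` of `index + 1` elements over `buf` and verify
+ // that `copy_from` marks the whole written range dirty.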
+ let bitmap = AtomicBitmap::new(size_of_val(buf), page_size);
+ let arr = unsafe {
+ VolatileArrayRef::with_bitmap(
+ buf.as_mut_ptr() as *mut u8,
+ index + 1,
+ bitmap.slice_at(0),
+ None,
+ )
+ };
+
+ let val = T::from(123);
+ let copy_buf = vec![val; index + 1];
+
+ assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::<T>()));
+ arr.copy_from(copy_buf.as_slice());
+ assert!(range_is_dirty(arr.bitmap(), 0, size_of_val(buf)));
+ }
+
+ #[test]
+ fn test_volatile_array_ref_dirty_tracking() {
+ let val = 123u64;
+ let dirty_len = size_of_val(&val);
+ let index = 0x1000;
+ let dirty_offset = dirty_len * index;
+ let page_size = 0x1000;
+
+ let mut buf = vec![0u64; index + 1];
+ let mut byte_buf = vec![0u8; index + 1];
+
+ // Test `ref_at`.
+ {
+ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
+ let arr = unsafe {
+ VolatileArrayRef::with_bitmap(
+ buf.as_mut_ptr() as *mut u8,
+ index + 1,
+ bitmap.slice_at(0),
+ None,
+ )
+ };
+
+ assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len));
+ arr.ref_at(index).store(val);
+ assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len));
+ }
+
+ // Test `store`.
+ {
+ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
+ let arr = unsafe {
+ VolatileArrayRef::with_bitmap(
+ buf.as_mut_ptr() as *mut u8,
+ index + 1,
+ bitmap.slice_at(0),
+ None,
+ )
+ };
+
+ let slice = arr.to_slice();
+ assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
+ arr.store(index, val);
+ assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
+ }
+
+ // Test `copy_from` when size_of::<T>() == 1.
+ test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, page_size);
+ // Test `copy_from` when size_of::<T>() > 1.
+ test_volatile_array_ref_copy_from_tracking(&mut buf, index, page_size);
+ }
+}