author    Jakob Vukalovic <jakobvukalovic@google.com>  2023-05-22 12:01:33 +0000
committer Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2023-05-22 12:01:33 +0000
commit    aeacec5ffe1e81c2457569466d4e5092c182520e (patch)
tree      ec6def0b06255086a4dcd31ad499b73e9f0c1669
parent    9ef74a4672cdc6250b0d93e051e91338679e7f09 (diff)
parent    176beac5a4f31a64aa31e5b8c8afe3f8f12ed79a (diff)
Upgrade aarch64-paging to 0.4.0 am: 8a897d9866 am: 96a4c7bedc am: db0c0e1354 am: a262522162 am: d86fb9c5c7 am: 176beac5a4
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/aarch64-paging/+/2566112

Change-Id: I2145b471adf935e7855344ba50d51d6332916b6d
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json     |   2
-rw-r--r--  Android.bp               |   8
-rw-r--r--  CHANGELOG.md             |  15
-rw-r--r--  Cargo.toml               |   6
-rw-r--r--  Cargo.toml.orig          |   4
-rw-r--r--  METADATA                 |  14
-rw-r--r--  patches/Android.bp.diff  |  22
-rw-r--r--  src/idmap.rs             | 140
-rw-r--r--  src/lib.rs               |  48
-rw-r--r--  src/linearmap.rs         | 139
-rw-r--r--  src/paging.rs            | 297
11 files changed, 545 insertions(+), 150 deletions(-)
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 6abae94..9eaa221 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "7cf4bc2d66a3edb354c1bb15c6c5ef7de518082d"
+ "sha1": "346ea66fecce7eab483a159a8275b22e7ea20ad1"
},
"path_in_vcs": ""
}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 7ffbbb9..520b050 100644
--- a/Android.bp
+++ b/Android.bp
@@ -45,7 +45,7 @@ rust_test {
host_supported: true,
crate_name: "aarch64_paging",
cargo_env_compat: true,
- cargo_pkg_version: "0.3.0",
+ cargo_pkg_version: "0.4.0",
srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -58,7 +58,7 @@ rust_test {
"default",
],
rustlibs: [
- "libbitflags-1.3.2",
+ "libbitflags",
],
enabled: false,
arch: {
@@ -73,7 +73,7 @@ rust_library {
host_supported: true,
crate_name: "aarch64_paging",
cargo_env_compat: true,
- cargo_pkg_version: "0.3.0",
+ cargo_pkg_version: "0.4.0",
srcs: ["src/lib.rs"],
edition: "2021",
features: [
@@ -81,7 +81,7 @@ rust_library {
"default",
],
rustlibs: [
- "libbitflags-1.3.2",
+ "libbitflags",
],
apex_available: [
"//apex_available:platform",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1734e71..b0734e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,20 @@
# Changelog
+## 0.4.0
+
+### Breaking changes
+
+- Updated `bitflags` to 2.0.2, which changes the API of `Attributes` a bit.
+- Updated `map_range` method to support mapping leaf page table entries without the `VALID` flag.
+ `Attributes::VALID` is no longer implicitly set when mapping leaf page table entries.
+
+### New features
+
+- Added `modify_range` method to `IdMap`, `LinearMap` and `Mapping` to update details of a mapped
+ range. This can be used e.g. to change flags for some range which is already mapped. As part of
+ this, the `Descriptor` struct was added to the public API.
+- Added `DBM` and software flags to `Attributes`.
+
## 0.3.0
### Breaking changes
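
For callers migrating across this upgrade: a minimal sketch of the key 0.4.0 change, based on the changelog entry and doc examples in this diff. The address range is illustrative; `idmap` is assumed to come from `IdMap::new` as in the crate's own examples.

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion},
    MapError,
};

fn map_rw(idmap: &mut IdMap) -> Result<(), MapError> {
    // 0.3.0 set Attributes::VALID implicitly on leaf entries. Under 0.4.0 it
    // must be passed explicitly; without it the block and page entries are
    // still written, but remain invalid (useful for staging mappings).
    idmap.map_range(
        &MemoryRegion::new(0x80200000, 0x80400000),
        Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
    )
}
```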
diff --git a/Cargo.toml b/Cargo.toml
index e419364..bf08bf9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,12 +12,13 @@
[package]
edition = "2021"
name = "aarch64-paging"
-version = "0.3.0"
+version = "0.4.0"
authors = [
"Ard Biesheuvel <ardb@google.com>",
"Andrew Walbran <qwandor@google.com>",
]
description = "A library to manipulate AArch64 VMSA EL1 page tables."
+readme = "README.md"
keywords = [
"arm",
"aarch64",
@@ -32,14 +33,13 @@ categories = [
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/google/aarch64-paging"
-resolver = "2"
[package.metadata.docs.rs]
all-features = true
default-target = "aarch64-unknown-none"
[dependencies.bitflags]
-version = "1.3.2"
+version = "2.0.2"
[features]
alloc = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index ca1e9ec..0981ad0 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "aarch64-paging"
-version = "0.3.0"
+version = "0.4.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "A library to manipulate AArch64 VMSA EL1 page tables."
@@ -10,7 +10,7 @@ keywords = ["arm", "aarch64", "cortex-a", "vmsa", "pagetable"]
categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
-bitflags = "1.3.2"
+bitflags = "2.0.2"
[features]
default = ["alloc"]
diff --git a/METADATA b/METADATA
index 701e31a..2af77d1 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/aarch64-paging
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
name: "aarch64-paging"
description: "A library to manipulate AArch64 VMSA EL1 page tables."
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.3.0.crate"
+ value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.4.0.crate"
}
- version: "0.3.0"
+ version: "0.4.0"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 8
- day: 15
+ year: 2023
+ month: 4
+ day: 27
}
}
diff --git a/patches/Android.bp.diff b/patches/Android.bp.diff
deleted file mode 100644
index 28fe389..0000000
--- a/patches/Android.bp.diff
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/Android.bp b/Android.bp
-index 9a97e6d..7ffbbb9 100644
---- a/Android.bp
-+++ b/Android.bp
-@@ -58,7 +58,7 @@ rust_test {
- "default",
- ],
- rustlibs: [
-- "libbitflags",
-+ "libbitflags-1.3.2",
- ],
- enabled: false,
- arch: {
-@@ -81,7 +81,7 @@ rust_library {
- "default",
- ],
- rustlibs: [
-- "libbitflags",
-+ "libbitflags-1.3.2",
- ],
- apex_available: [
- "//apex_available:platform",
diff --git a/src/idmap.rs b/src/idmap.rs
index 06455ed..870ccf8 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation, VaRange,
- VirtualAddress,
+ deallocate, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater, Translation,
+ VaRange, VirtualAddress,
},
MapError, Mapping,
};
@@ -71,7 +71,7 @@ impl Translation for IdTranslation {
/// // Map a 2 MiB region of memory as read-write.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
-/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::EXECUTE_NEVER,
+/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::EXECUTE_NEVER | Attributes::VALID,
/// ).unwrap();
/// // Set `TTBR0_EL1` to activate the page table.
/// # #[cfg(target_arch = "aarch64")]
@@ -85,7 +85,7 @@ impl Translation for IdTranslation {
/// // Now change the mapping to read-only and executable.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
-/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY,
+/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
/// ).unwrap();
/// # #[cfg(target_arch = "aarch64")]
/// idmap.activate();
@@ -130,16 +130,41 @@ impl IdMap {
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
- /// table is active.
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`, otherwise the entries remain invalid.
///
/// # Errors
///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
let pa = IdTranslation::virtual_to_physical(range.start());
self.mapping.map_range(range, pa, flags)
}
+
+ /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
+ ///
+ /// The virtual address range passed to the updater function may be expanded compared to the
+ /// `range` parameter, due to alignment to block boundaries.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ self.mapping.modify_range(range, f)
+ }
}
#[cfg(test)]
@@ -147,7 +172,7 @@ mod tests {
use super::*;
use crate::{
paging::{Attributes, MemoryRegion, PAGE_SIZE},
- MapError,
+ MapError, VirtualAddress,
};
const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
@@ -157,14 +182,20 @@ mod tests {
// A single byte at the start of the address space.
let mut idmap = IdMap::new(1, 1);
assert_eq!(
- idmap.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+ idmap.map_range(
+ &MemoryRegion::new(0, 1),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
// Two pages at the start of the address space.
let mut idmap = IdMap::new(1, 1);
assert_eq!(
- idmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL),
+ idmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE * 2),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
@@ -176,7 +207,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -186,7 +217,7 @@ mod tests {
assert_eq!(
idmap.map_range(
&MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -196,7 +227,7 @@ mod tests {
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -213,7 +244,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
@@ -224,11 +255,94 @@ mod tests {
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
)))
);
}
+
+ fn make_map() -> IdMap {
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range(
+ &MemoryRegion::new(0, PAGE_SIZE * 2),
+ Attributes::NORMAL
+ | Attributes::NON_GLOBAL
+ | Attributes::READ_ONLY
+ | Attributes::VALID,
+ )
+ .unwrap();
+ idmap
+ }
+
+ #[test]
+ fn update_backwards_range() {
+ let mut idmap = make_map();
+ assert!(idmap
+ .modify_range(
+ &MemoryRegion::new(PAGE_SIZE * 2, 1),
+ &|_range, entry, _level| {
+ entry
+ .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ Ok(())
+ },
+ )
+ .is_err());
+ }
+
+ #[test]
+ fn update_range() {
+ let mut idmap = make_map();
+ idmap
+ .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ entry
+ .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ }
+ Ok(())
+ })
+ .unwrap();
+ idmap
+ .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
+ assert_eq!(range.end() - range.start(), PAGE_SIZE);
+ }
+ Ok(())
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn breakup_invalid_block() {
+ const BLOCK_RANGE: usize = 0x200000;
+ let mut idmap = IdMap::new(1, 1);
+ idmap
+ .map_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
+ )
+ .unwrap();
+ idmap
+ .map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ )
+ .unwrap();
+ idmap
+ .modify_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ &|range, entry, level| {
+ if level == 3 {
+ let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
+ let is_first_page = range.start().0 == 0usize;
+ assert!(has_swflag != is_first_page);
+ }
+ Ok(())
+ },
+ )
+ .unwrap();
+ }
}
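
A sketch of the new `modify_range` API, modeled on the tests added above; the range and flag choice are illustrative, not part of the crate's examples.

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion, PAGE_SIZE},
    MapError,
};

// Set a software flag on every leaf entry covering the first two pages,
// leaving all other descriptor bits unchanged.
fn tag_first_pages(idmap: &mut IdMap) -> Result<(), MapError> {
    idmap.modify_range(
        &MemoryRegion::new(0, PAGE_SIZE * 2),
        &|_range, entry, level| {
            // Level 3 entries are always pages; at higher levels, entries
            // without TABLE_OR_PAGE are blocks. Both are leaves; table
            // entries are skipped here and recursed into by modify_range.
            if level == 3 || !entry.is_table_or_page() {
                entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
            }
            Ok(())
        },
    )
}
```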
diff --git a/src/lib.rs b/src/lib.rs
index 80cee63..3d4aeac 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -31,7 +31,7 @@
//! // Map a 2 MiB region of memory as read-only.
//! idmap.map_range(
//! &MemoryRegion::new(0x80200000, 0x80400000),
-//! Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY,
+//! Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
//! ).unwrap();
//! // Set `TTBR0_EL1` to activate the page table.
//! # #[cfg(target_arch = "aarch64")]
@@ -54,7 +54,8 @@ extern crate alloc;
use core::arch::asm;
use core::fmt::{self, Display, Formatter};
use paging::{
- Attributes, MemoryRegion, PhysicalAddress, RootTable, Translation, VaRange, VirtualAddress,
+ Attributes, Descriptor, MemoryRegion, PhysicalAddress, PteUpdater, RootTable, Translation,
+ VaRange, VirtualAddress,
};
/// An error attempting to map some range in the page table.
@@ -67,6 +68,8 @@ pub enum MapError {
InvalidVirtualAddress(VirtualAddress),
/// The end of the memory region is before the start.
RegionBackwards(MemoryRegion),
+ /// There was an error while updating a page table entry.
+ PteUpdateFault(Descriptor),
}
impl Display for MapError {
@@ -79,6 +82,9 @@ impl Display for MapError {
Self::RegionBackwards(region) => {
write!(f, "End of memory region {} is before start.", region)
}
+ Self::PteUpdateFault(desc) => {
+ write!(f, "Error updating page table entry {:?}", desc)
+ }
}
}
}
@@ -188,7 +194,15 @@ impl<T: Translation + Clone> Mapping<T> {
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
- /// table is active.
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`, otherwise the entries remain invalid.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
pub fn map_range(
&mut self,
range: &MemoryRegion,
@@ -203,6 +217,34 @@ impl<T: Translation + Clone> Mapping<T> {
}
Ok(())
}
+
+ /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
+ ///
+ /// The virtual address range passed to the updater function may be expanded compared to the
+ /// `range` parameter, due to alignment to block boundaries.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ self.root.modify_range(range, f)?;
+ #[cfg(target_arch = "aarch64")]
+ unsafe {
+ // Safe because this is just a memory barrier.
+ asm!("dsb ishst");
+ }
+ Ok(())
+ }
}
impl<T: Translation + Clone> Drop for Mapping<T> {
diff --git a/src/linearmap.rs b/src/linearmap.rs
index 7dd7c09..921a683 100644
--- a/src/linearmap.rs
+++ b/src/linearmap.rs
@@ -8,8 +8,8 @@
use crate::{
paging::{
- deallocate, is_aligned, Attributes, MemoryRegion, PageTable, PhysicalAddress, Translation,
- VaRange, VirtualAddress, PAGE_SIZE,
+ deallocate, is_aligned, Attributes, MemoryRegion, PageTable, PhysicalAddress, PteUpdater,
+ Translation, VaRange, VirtualAddress, PAGE_SIZE,
},
MapError, Mapping,
};
@@ -139,13 +139,16 @@ impl LinearMap {
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
- /// table is active.
+ /// table is active. This function writes block and page entries, but only maps them if `flags`
+ /// contains `Attributes::VALID`, otherwise the entries remain invalid.
///
/// # Errors
///
/// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
/// address within the `range` would result in overflow.
///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
@@ -156,6 +159,28 @@ impl LinearMap {
.virtual_to_physical(range.start())?;
self.mapping.map_range(range, pa, flags)
}
+
+ /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
+ ///
+ /// The virtual address range passed to the updater function may be expanded compared to the
+ /// `range` parameter, due to alignment to block boundaries.
+ ///
+ /// This should generally only be called while the page table is not active. In particular, any
+ /// change that may require break-before-make per the architecture must be made while the page
+ /// table is inactive. Mapping a previously unmapped memory range may be done while the page
+ /// table is active.
+ ///
+ /// # Errors
+ ///
+ /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
+ ///
+ /// Returns [`MapError::RegionBackwards`] if the range is backwards.
+ ///
+ /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
+ /// largest virtual address covered by the page table given its root level.
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ self.mapping.modify_range(range, f)
+ }
}
#[cfg(test)]
@@ -175,14 +200,20 @@ mod tests {
// A single byte at the start of the address space.
let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
assert_eq!(
- pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+ pagetable.map_range(
+ &MemoryRegion::new(0, 1),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
// Two pages at the start of the address space.
let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
assert_eq!(
- pagetable.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL),
+ pagetable.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE * 2),
+ Attributes::NORMAL | Attributes::VALID
+ ),
Ok(())
);
@@ -194,7 +225,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -206,7 +237,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -219,7 +250,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -229,7 +260,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -242,7 +273,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -254,7 +285,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Ok(())
);
@@ -271,7 +302,7 @@ mod tests {
MAX_ADDRESS_FOR_ROOT_LEVEL_1,
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
@@ -282,7 +313,7 @@ mod tests {
assert_eq!(
pagetable.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
- Attributes::NORMAL
+ Attributes::NORMAL | Attributes::VALID
),
Err(MapError::AddressRange(VirtualAddress(
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
@@ -394,7 +425,10 @@ mod tests {
// Test that block mapping is used when the PA is appropriately aligned...
let mut pagetable = LinearMap::new(1, 1, 1 << 30, VaRange::Lower);
pagetable
- .map_range(&MemoryRegion::new(0, 1 << 30), Attributes::NORMAL)
+ .map_range(
+ &MemoryRegion::new(0, 1 << 30),
+ Attributes::NORMAL | Attributes::VALID,
+ )
.unwrap();
assert_eq!(
pagetable.mapping.root.mapping_level(VirtualAddress(0)),
@@ -404,11 +438,86 @@ mod tests {
// ...but not when it is not.
let mut pagetable = LinearMap::new(1, 1, 1 << 29, VaRange::Lower);
pagetable
- .map_range(&MemoryRegion::new(0, 1 << 30), Attributes::NORMAL)
+ .map_range(
+ &MemoryRegion::new(0, 1 << 30),
+ Attributes::NORMAL | Attributes::VALID,
+ )
.unwrap();
assert_eq!(
pagetable.mapping.root.mapping_level(VirtualAddress(0)),
Some(2)
);
}
+
+ fn make_map() -> LinearMap {
+ let mut lmap = LinearMap::new(1, 1, 4096, VaRange::Lower);
+ // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000
+ lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL)
+ .unwrap();
+ lmap
+ }
+
+ #[test]
+ fn update_backwards_range() {
+ let mut lmap = make_map();
+ assert!(lmap
+ .modify_range(
+ &MemoryRegion::new(PAGE_SIZE * 2, 1),
+ &|_range, entry, _level| {
+ entry
+ .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ Ok(())
+ },
+ )
+ .is_err());
+ }
+
+ #[test]
+ fn update_range() {
+ let mut lmap = make_map();
+ lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
+ }
+ Ok(())
+ })
+ .unwrap();
+ lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
+ if level == 3 || !entry.is_table_or_page() {
+ assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
+ assert_eq!(range.end() - range.start(), PAGE_SIZE);
+ }
+ Ok(())
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn breakup_invalid_block() {
+ const BLOCK_RANGE: usize = 0x200000;
+
+ let mut lmap = LinearMap::new(1, 1, 0x1000, VaRange::Lower);
+ lmap.map_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
+ )
+ .unwrap();
+ lmap.map_range(
+ &MemoryRegion::new(0, PAGE_SIZE),
+ Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
+ )
+ .unwrap();
+ lmap.modify_range(
+ &MemoryRegion::new(0, BLOCK_RANGE),
+ &|range, entry, level| {
+ if level == 3 {
+ let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
+ let is_first_page = range.start().0 == 0usize;
+ assert!(has_swflag != is_first_page);
+ }
+ Ok(())
+ },
+ )
+ .unwrap();
+ }
}
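
For context on the `make_map` helper in the tests above, a sketch of the intended `LinearMap` construction; the constants mirror the test values (offset 0x1000, so VA 0x0..0x2000 translates to PA 0x1000..0x3000).

```rust
use aarch64_paging::{
    linearmap::LinearMap,
    paging::{Attributes, MemoryRegion, VaRange, PAGE_SIZE},
    MapError,
};

// A LinearMap applies a fixed offset to every translation, unlike IdMap
// where VA == PA.
fn make_linear_map() -> Result<LinearMap, MapError> {
    let mut lmap = LinearMap::new(1, 1, 0x1000, VaRange::Lower);
    lmap.map_range(
        &MemoryRegion::new(0, PAGE_SIZE * 2),
        Attributes::NORMAL | Attributes::VALID,
    )?;
    Ok(lmap)
}
```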
diff --git a/src/paging.rs b/src/paging.rs
index 7606d80..f0370f7 100644
--- a/src/paging.rs
+++ b/src/paging.rs
@@ -178,6 +178,29 @@ impl MemoryRegion {
pub const fn is_empty(&self) -> bool {
self.0.start.0 == self.0.end.0
}
+
+ fn split(&self, level: usize) -> ChunkedIterator {
+ ChunkedIterator {
+ range: self,
+ granularity: granularity_at_level(level),
+ start: self.0.start.0,
+ }
+ }
+
+ /// Returns whether this region can be mapped at 'level' using block mappings only.
+ fn is_block(&self, level: usize) -> bool {
+ let gran = granularity_at_level(level);
+ (self.0.start.0 | self.0.end.0) & (gran - 1) == 0
+ }
+
+ /// Returns a new `MemoryRegion` based on this one but with the start aligned down and the end
+ /// aligned up to the given alignment.
+ fn align_out(&self, alignment: usize) -> Self {
+ Self(
+ VirtualAddress(align_down(self.0.start.0, alignment))
+ ..VirtualAddress(align_up(self.0.end.0, alignment)),
+ )
+ }
}
impl From<Range<VirtualAddress>> for MemoryRegion {
@@ -198,6 +221,24 @@ impl Debug for MemoryRegion {
}
}
+/// A page table entry updater function; called repeatedly to update the state of a
+/// range of page table entries.
+///
+/// # Arguments
+///
+/// The updater function receives the following arguments:
+///
+/// - The full virtual address range mapped by the page table entry, which may be different than
+/// the original range passed to `modify_range`, due to alignment to block boundaries.
+/// - A page table entry whose state it may update.
+/// - The level of a translation table the entry belongs to.
+///
+/// # Return
+///
+/// - `Ok` to continue updating the remaining entries.
+/// - `Err` to signal an error during an update and stop updating the remaining entries.
+pub type PteUpdater = dyn Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()>;
+
/// A complete hierarchy of page tables including all levels.
pub struct RootTable<T: Translation> {
table: PageTableWithLevel<T>,
@@ -234,7 +275,8 @@ impl<T: Translation> RootTable<T> {
}
/// Recursively maps a range into the pagetable hierarchy starting at the root level, mapping
- /// the pages to the corresponding physical address range starting at `pa`.
+ /// the pages to the corresponding physical address range starting at `pa`. Block and page
+ /// entries will be written to, but will only be mapped if `flags` contains `Attributes::VALID`.
///
/// Returns an error if the virtual address range is out of the range covered by the pagetable
pub fn map_range(
@@ -243,28 +285,8 @@ impl<T: Translation> RootTable<T> {
pa: PhysicalAddress,
flags: Attributes,
) -> Result<(), MapError> {
- if range.end() < range.start() {
- return Err(MapError::RegionBackwards(range.clone()));
- }
- match self.va_range {
- VaRange::Lower => {
- if (range.start().0 as isize) < 0 {
- return Err(MapError::AddressRange(range.start()));
- } else if range.end().0 > self.size() {
- return Err(MapError::AddressRange(range.end()));
- }
- }
- VaRange::Upper => {
- if range.start().0 as isize >= 0
- || (range.start().0 as isize).unsigned_abs() > self.size()
- {
- return Err(MapError::AddressRange(range.start()));
- }
- }
- }
-
+ self.verify_region(range)?;
self.table.map_range(&self.translation, range, pa, flags);
-
Ok(())
}
@@ -283,6 +305,11 @@ impl<T: Translation> RootTable<T> {
&self.translation
}
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<(), MapError> {
+ self.verify_region(range)?;
+ self.table.modify_range(&self.translation, range, f)
+ }
+
/// Returns the level of mapping used for the given virtual address:
/// - `None` if it is unmapped
/// - `Some(LEAF_LEVEL)` if it is mapped as a single page
@@ -291,6 +318,30 @@ impl<T: Translation> RootTable<T> {
pub(crate) fn mapping_level(&self, va: VirtualAddress) -> Option<usize> {
self.table.mapping_level(&self.translation, va)
}
+
+ /// Checks whether the region is within range of the page table.
+ fn verify_region(&self, region: &MemoryRegion) -> Result<(), MapError> {
+ if region.end() < region.start() {
+ return Err(MapError::RegionBackwards(region.clone()));
+ }
+ match self.va_range {
+ VaRange::Lower => {
+ if (region.start().0 as isize) < 0 {
+ return Err(MapError::AddressRange(region.start()));
+ } else if region.end().0 > self.size() {
+ return Err(MapError::AddressRange(region.end()));
+ }
+ }
+ VaRange::Upper => {
+ if region.start().0 as isize >= 0
+ || (region.start().0 as isize).unsigned_abs() > self.size()
+ {
+ return Err(MapError::AddressRange(region.start()));
+ }
+ }
+ }
+ Ok(())
+ }
}
impl<T: Translation> Debug for RootTable<T> {
@@ -336,24 +387,9 @@ impl Iterator for ChunkedIterator<'_> {
}
}
-impl MemoryRegion {
- fn split(&self, level: usize) -> ChunkedIterator {
- ChunkedIterator {
- range: self,
- granularity: granularity_at_level(level),
- start: self.0.start.0,
- }
- }
-
- /// Returns whether this region can be mapped at 'level' using block mappings only.
- fn is_block(&self, level: usize) -> bool {
- let gran = granularity_at_level(level);
- (self.0.start.0 | self.0.end.0) & (gran - 1) == 0
- }
-}
-
bitflags! {
/// Attribute bits for a mapping in a page table.
+ #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Attributes: usize {
const VALID = 1 << 0;
const TABLE_OR_PAGE = 1 << 1;
@@ -367,7 +403,14 @@ bitflags! {
const READ_ONLY = 1 << 7;
const ACCESSED = 1 << 10;
const NON_GLOBAL = 1 << 11;
+ const DBM = 1 << 51;
const EXECUTE_NEVER = 3 << 53;
+
+ /// Software flags in block and page descriptor entries.
+ const SWFLAG_0 = 1 << 55;
+ const SWFLAG_1 = 1 << 56;
+ const SWFLAG_2 = 1 << 57;
+ const SWFLAG_3 = 1 << 58;
}
}
@@ -427,7 +470,8 @@ impl<T: Translation> PageTableWithLevel<T> {
}
/// Maps the the given virtual address range in this pagetable to the corresponding physical
- /// address range starting at the given `pa`, recursing into any subtables as necessary.
+ /// address range starting at the given `pa`, recursing into any subtables as necessary. To map
+ /// block and page entries, `Attributes::VALID` must be set in `flags`.
///
/// Assumes that the entire range is within the range covered by this pagetable.
///
@@ -464,19 +508,22 @@ impl<T: Translation> PageTableWithLevel<T> {
} else {
let old = *entry;
let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
- if let (Some(old_flags), Some(old_pa)) = (old.flags(), old.output_address()) {
- // Old was a valid block entry, so we need to split it.
- // Recreate the entire block in the newly added table.
- let a = align_down(chunk.0.start.0, granularity);
- let b = align_up(chunk.0.end.0, granularity);
- subtable.map_range(
- translation,
- &MemoryRegion::new(a, b),
- old_pa,
- old_flags,
- );
+ if let Some(old_flags) = old.flags() {
+ if !old_flags.contains(Attributes::TABLE_OR_PAGE) {
+ let old_pa = old.output_address();
+ // `old` was a block entry, so we need to split it.
+ // Recreate the entire block in the newly added table.
+ let a = align_down(chunk.0.start.0, granularity);
+ let b = align_up(chunk.0.end.0, granularity);
+ subtable.map_range(
+ translation,
+ &MemoryRegion::new(a, b),
+ old_pa,
+ old_flags,
+ );
+ }
}
- entry.set(subtable_pa, Attributes::TABLE_OR_PAGE);
+ entry.set(subtable_pa, Attributes::TABLE_OR_PAGE | Attributes::VALID);
subtable
};
subtable.map_range(translation, &chunk, pa, flags);
@@ -491,6 +538,7 @@ impl<T: Translation> PageTableWithLevel<T> {
translation: &T,
indentation: usize,
) -> Result<(), fmt::Error> {
+ const WIDTH: usize = 3;
// Safe because we know that the pointer is aligned, initialised and dereferencable, and the
// PageTable won't be mutated while we are using it.
let table = unsafe { self.table.as_ref() };
@@ -503,12 +551,16 @@ impl<T: Translation> PageTableWithLevel<T> {
i += 1;
}
if i - 1 == first_zero {
- writeln!(f, "{:indentation$}{}: 0", "", first_zero)?;
+ writeln!(f, "{:indentation$}{: <WIDTH$}: 0", "", first_zero)?;
} else {
- writeln!(f, "{:indentation$}{}-{}: 0", "", first_zero, i - 1)?;
+ writeln!(f, "{:indentation$}{: <WIDTH$}-{}: 0", "", first_zero, i - 1)?;
}
} else {
- writeln!(f, "{:indentation$}{}: {:?}", "", i, table.entries[i])?;
+ writeln!(
+ f,
+ "{:indentation$}{: <WIDTH$}: {:?}",
+ "", i, table.entries[i],
+ )?;
if let Some(subtable) = table.entries[i].subtable(translation, self.level) {
subtable.fmt_indented(f, translation, indentation + 2)?;
}
@@ -539,6 +591,28 @@ impl<T: Translation> PageTableWithLevel<T> {
}
}
+ /// Modifies a range of page table entries by applying a function to each page table entry.
+ /// If the range is not aligned to block boundaries, it will be expanded.
+ fn modify_range(
+ &mut self,
+ translation: &T,
+ range: &MemoryRegion,
+ f: &PteUpdater,
+ ) -> Result<(), MapError> {
+ let level = self.level;
+ for chunk in range.split(level) {
+ // VA range passed to the updater is aligned to block boundaries, as that region will
+ // be affected by changes to the entry.
+ let affected_range = chunk.align_out(granularity_at_level(level));
+ let entry = self.get_entry_mut(chunk.0.start);
+ f(&affected_range, entry, level).map_err(|_| MapError::PteUpdateFault(*entry))?;
+ if let Some(mut subtable) = entry.subtable(translation, level) {
+ subtable.modify_range(translation, &chunk, f)?;
+ }
+ }
+ Ok(())
+ }
+
/// Returns the level of mapping used for the given virtual address:
/// - `None` if it is unmapped
/// - `Some(LEAF_LEVEL)` if it is mapped as a single page
@@ -582,55 +656,55 @@ impl PageTable {
/// - A page mapping, if it is in the lowest level page table.
/// - A block mapping, if it is not in the lowest level page table.
/// - A pointer to a lower level pagetable, if it is not in the lowest level page table.
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
-struct Descriptor(usize);
+pub struct Descriptor(usize);
impl Descriptor {
- fn output_address(&self) -> Option<PhysicalAddress> {
- if self.is_valid() {
- Some(PhysicalAddress(
- self.0 & (!(PAGE_SIZE - 1) & !(0xffff << 48)),
- ))
- } else {
- None
- }
+ const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);
+
+ fn output_address(self) -> PhysicalAddress {
+ PhysicalAddress(self.0 & Self::PHYSICAL_ADDRESS_BITMASK)
}
- fn flags(self) -> Option<Attributes> {
- if self.is_valid() {
- Attributes::from_bits(self.0 & ((PAGE_SIZE - 1) | (0xffff << 48)))
- } else {
- None
- }
+ /// Returns the flags of this page table entry, or `None` if its state does not
+ /// contain a valid set of flags.
+ pub fn flags(self) -> Option<Attributes> {
+ Attributes::from_bits(self.0 & !Self::PHYSICAL_ADDRESS_BITMASK)
}
- fn is_valid(self) -> bool {
+ /// Modifies the page table entry by setting or clearing its flags.
+ pub fn modify_flags(&mut self, set: Attributes, clear: Attributes) {
+ self.0 = (self.0 | set.bits()) & !clear.bits();
+ }
+
+ /// Returns `true` if [`Attributes::VALID`] is set on this entry, e.g. if the entry is mapped.
+ pub fn is_valid(self) -> bool {
(self.0 & Attributes::VALID.bits()) != 0
}
- fn is_table_or_page(self) -> bool {
+ /// Returns `true` if this is a valid entry pointing to a next level translation table or a page.
+ pub fn is_table_or_page(self) -> bool {
if let Some(flags) = self.flags() {
- flags.contains(Attributes::TABLE_OR_PAGE)
+ flags.contains(Attributes::TABLE_OR_PAGE | Attributes::VALID)
} else {
false
}
}
fn set(&mut self, pa: PhysicalAddress, flags: Attributes) {
- self.0 = pa.0 | (flags | Attributes::VALID).bits();
+ self.0 = (pa.0 & Self::PHYSICAL_ADDRESS_BITMASK) | flags.bits();
}
fn subtable<T: Translation>(
- &self,
+ self,
translation: &T,
level: usize,
) -> Option<PageTableWithLevel<T>> {
if level < LEAF_LEVEL && self.is_table_or_page() {
- if let Some(output_address) = self.output_address() {
- let table = translation.physical_to_virtual(output_address);
- return Some(PageTableWithLevel::from_pointer(table, level + 1));
- }
+ let output_address = self.output_address();
+ let table = translation.physical_to_virtual(output_address);
+ return Some(PageTableWithLevel::from_pointer(table, level + 1));
}
None
}
@@ -639,8 +713,10 @@ impl Descriptor {
impl Debug for Descriptor {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{:#016x}", self.0)?;
- if let (Some(flags), Some(address)) = (self.flags(), self.output_address()) {
- write!(f, " ({}, {:?})", address, flags)?;
+ if self.is_valid() {
+ if let Some(flags) = self.flags() {
+ write!(f, " ({}, {:?})", self.output_address(), flags)?;
+ }
}
Ok(())
}
@@ -691,7 +767,7 @@ pub(crate) const fn is_aligned(value: usize, alignment: usize) -> bool {
mod tests {
use super::*;
#[cfg(feature = "alloc")]
- use alloc::{format, string::ToString};
+ use alloc::{format, string::ToString, vec, vec::Vec};
#[cfg(feature = "alloc")]
#[test]
@@ -752,4 +828,61 @@ mod tests {
fn add_physical_address() {
assert_eq!(PhysicalAddress(0x1234) + 0x42, PhysicalAddress(0x1276));
}
+
+ #[test]
+ fn invalid_descriptor() {
+ let desc = Descriptor(0usize);
+ assert!(!desc.is_valid());
+ assert!(!desc.flags().unwrap().contains(Attributes::VALID));
+ }
+
+ #[test]
+ fn set_descriptor() {
+ const PHYSICAL_ADDRESS: usize = 0x12340000;
+ let mut desc = Descriptor(0usize);
+ assert!(!desc.is_valid());
+ desc.set(
+ PhysicalAddress(PHYSICAL_ADDRESS),
+ Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID,
+ );
+ assert!(desc.is_valid());
+ assert_eq!(
+ desc.flags().unwrap(),
+ Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID
+ );
+ assert_eq!(desc.output_address(), PhysicalAddress(PHYSICAL_ADDRESS));
+ }
+
+ #[test]
+ fn modify_descriptor_flags() {
+ let mut desc = Descriptor(0usize);
+ assert!(!desc.is_valid());
+ desc.set(
+ PhysicalAddress(0x12340000),
+ Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1,
+ );
+ desc.modify_flags(
+ Attributes::DBM | Attributes::SWFLAG_3,
+ Attributes::VALID | Attributes::SWFLAG_1,
+ );
+ assert!(!desc.is_valid());
+ assert_eq!(
+ desc.flags().unwrap(),
+ Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_3 | Attributes::DBM
+ );
+ }
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn unaligned_chunks() {
+ let region = MemoryRegion::new(0x0000_2000, 0x0020_5000);
+ let chunks = region.split(LEAF_LEVEL - 1).collect::<Vec<_>>();
+ assert_eq!(
+ chunks,
+ vec![
+ MemoryRegion::new(0x0000_2000, 0x0020_0000),
+ MemoryRegion::new(0x0020_0000, 0x0020_5000),
+ ]
+ );
+ }
}
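
A standalone sketch (not the crate's code) of the descriptor bit layout that the reworked `Descriptor` methods rely on, assuming a 4 KiB granule; it mirrors `PHYSICAL_ADDRESS_BITMASK`, `output_address` and `modify_flags` from the hunks above.

```rust
const PAGE_SIZE: usize = 0x1000;
// Bits [47:12] hold the output address; the low 12 bits and bits [63:48]
// carry attributes, including the new software flags at bits 55-58.
const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);

fn output_address(raw: usize) -> usize {
    raw & PHYSICAL_ADDRESS_BITMASK
}

fn modify_flags(raw: usize, set: usize, clear: usize) -> usize {
    // Set-then-clear never disturbs the address bits, provided the masks
    // only name attribute bits.
    (raw | set) & !clear
}

fn main() {
    const VALID: usize = 1 << 0;
    const SWFLAG_0: usize = 1 << 55;
    let entry = 0x1234_0000 | VALID;
    assert_eq!(output_address(entry), 0x1234_0000);
    // Clear VALID and set a software flag, as a modify_range updater might.
    let entry = modify_flags(entry, SWFLAG_0, VALID);
    assert_eq!(entry & VALID, 0);
    assert_eq!(output_address(entry), 0x1234_0000);
}
```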