author     Andrew Walbran <qwandor@google.com>  2022-06-24 17:01:30 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2022-06-24 17:01:30 +0000
commit     0273ac4d6ba205956d64a4eba0c9c9091da5448e (patch)
tree       62125558eba41feede6d95bd3b87128742adfa06
parent     f1b262bfa49875915efd8976ff186021c643c86e (diff)
parent     82df24325e834d1dbda7fd69d526b5214baaf45b (diff)
download   aarch64-paging-0273ac4d6ba205956d64a4eba0c9c9091da5448e.tar.gz
Update aarch64-paging to 0.2.0. am: 946dae8f6e am: deff0f8474 am: 82df24325e
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/aarch64-paging/+/2134475

Change-Id: Ib9cb57356143ffbccf6cd1c617b5432ef285917c
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json    2
-rw-r--r--  Android.bp             28
-rw-r--r--  Cargo.toml              4
-rw-r--r--  Cargo.toml.orig         4
-rw-r--r--  METADATA                9
-rw-r--r--  src/idmap.rs           88
-rw-r--r--  src/lib.rs              3
-rw-r--r--  src/paging.rs         335
8 files changed, 327 insertions, 146 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index ff83bf0..98e0084 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "2df8dc019d44f12a832535b47fcf7269960dfe26"
+ "sha1": "ead9be980db36180d96eaf0c08cee61441d72f17"
},
"path_in_vcs": ""
}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 8f6f644..89e2f97 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,8 +1,6 @@
// This file is generated by cargo2android.py --config cargo2android.json.
// Do not modify this file as changes will be overridden on upgrade.
-
-
package {
default_applicable_licenses: [
"external_rust_crates_aarch64-paging_license",
@@ -42,12 +40,36 @@ license {
],
}
+rust_test {
+ name: "aarch64-paging_test_src_lib",
+ host_supported: true,
+ crate_name: "aarch64_paging",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.2.0",
+ srcs: ["src/lib.rs"],
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ test_options: {
+ unit_test: true,
+ },
+ edition: "2021",
+ rustlibs: [
+ "libbitflags",
+ ],
+ enabled: false,
+ arch: {
+ arm64: {
+ enabled: true,
+ },
+ },
+}
+
rust_library {
name: "libaarch64_paging",
host_supported: true,
crate_name: "aarch64_paging",
cargo_env_compat: true,
- cargo_pkg_version: "0.1.0",
+ cargo_pkg_version: "0.2.0",
srcs: ["src/lib.rs"],
edition: "2021",
rustlibs: [
diff --git a/Cargo.toml b/Cargo.toml
index 1192dda..b0eada2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2021"
name = "aarch64-paging"
-version = "0.1.0"
+version = "0.2.0"
authors = [
"Ard Biesheuvel <ardb@google.com>",
"Andrew Walbran <qwandor@google.com>",
@@ -38,4 +38,4 @@ resolver = "2"
default-target = "aarch64-unknown-none"
[dependencies.bitflags]
-version = "1.3"
+version = "1.3.2"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index a15bafc..c377d9d 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "aarch64-paging"
-version = "0.1.0"
+version = "0.2.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "A library to manipulate AArch64 VMSA EL1 page tables."
@@ -10,7 +10,7 @@ keywords = ["arm", "aarch64", "cortex-a", "vmsa", "pagetable"]
categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
-bitflags = "1.3"
+bitflags = "1.3.2"
[package.metadata.docs.rs]
default-target = "aarch64-unknown-none"
diff --git a/METADATA b/METADATA
index ad403ca..4cdf7fb 100644
--- a/METADATA
+++ b/METADATA
@@ -7,14 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.1.0.crate"
+ value: "https://static.crates.io/crates/aarch64-paging/aarch64-paging-0.2.0.crate"
}
- version: "0.1.0"
- # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+ version: "0.2.0"
license_type: NOTICE
last_upgrade_date {
year: 2022
- month: 5
- day: 6
+ month: 6
+ day: 24
}
}
diff --git a/src/idmap.rs b/src/idmap.rs
index 95a8818..dcd6892 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -4,8 +4,9 @@
//! Functionality for managing page tables with identity mapping.
-use crate::paging::{
- Attributes, MemoryRegion, PhysicalAddress, RootTable, Translation, VirtualAddress,
+use crate::{
+ paging::{Attributes, MemoryRegion, PhysicalAddress, RootTable, Translation, VirtualAddress},
+ AddressRangeError,
};
#[cfg(target_arch = "aarch64")]
use core::arch::asm;
@@ -141,13 +142,18 @@ impl IdMap {
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
/// table is active.
- pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) {
- self.root.map_range(range, flags);
+ pub fn map_range(
+ &mut self,
+ range: &MemoryRegion,
+ flags: Attributes,
+ ) -> Result<(), AddressRangeError> {
+ self.root.map_range(range, flags)?;
#[cfg(target_arch = "aarch64")]
unsafe {
// Safe because this is just a memory barrier.
asm!("dsb ishst");
}
+ Ok(())
}
}
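
With this change `IdMap::map_range` becomes fallible: it now returns `Result<(), AddressRangeError>` instead of `()`, so callers have to handle the out-of-range case. A minimal caller-side sketch using only items visible in this patch (the `IdMap::new(1, 1)` arguments simply mirror the new tests below; whether they fit a real configuration is an assumption):

    use aarch64_paging::{
        idmap::IdMap,
        paging::{Attributes, MemoryRegion, PAGE_SIZE},
        AddressRangeError,
    };

    fn identity_map_first_pages() -> Result<(), AddressRangeError> {
        // Arguments mirror the tests added in this patch; their meaning is not shown here.
        let mut idmap = IdMap::new(1, 1);
        // map_range now returns a Result, so out-of-range regions can be propagated with `?`.
        idmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL)?;
        Ok(())
    }
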
@@ -159,3 +165,77 @@ impl Drop for IdMap {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::paging::PAGE_SIZE;
+
+ const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
+
+ #[test]
+ fn map_valid() {
+ // A single byte at the start of the address space.
+ let mut idmap = IdMap::new(1, 1);
+ assert_eq!(
+ idmap.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
+ Ok(())
+ );
+
+ // Two pages at the start of the address space.
+ let mut idmap = IdMap::new(1, 1);
+ assert_eq!(
+ idmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL),
+ Ok(())
+ );
+
+ // A single byte at the end of the address space.
+ let mut idmap = IdMap::new(1, 1);
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(
+ MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
+ MAX_ADDRESS_FOR_ROOT_LEVEL_1
+ ),
+ Attributes::NORMAL
+ ),
+ Ok(())
+ );
+
+ // The entire valid address space.
+ let mut idmap = IdMap::new(1, 1);
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
+ Attributes::NORMAL
+ ),
+ Ok(())
+ );
+ }
+
+ #[test]
+ fn map_out_of_range() {
+ let mut idmap = IdMap::new(1, 1);
+
+ // One byte, just past the edge of the valid range.
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(
+ MAX_ADDRESS_FOR_ROOT_LEVEL_1,
+ MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
+ ),
+ Attributes::NORMAL
+ ),
+ Err(AddressRangeError)
+ );
+
+ // From 0 to just past the valid range.
+ assert_eq!(
+ idmap.map_range(
+ &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,),
+ Attributes::NORMAL
+ ),
+ Err(AddressRangeError)
+ );
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 158fb41..3c04017 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -42,3 +42,6 @@ pub mod idmap;
pub mod paging;
extern crate alloc;
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct AddressRangeError;
diff --git a/src/paging.rs b/src/paging.rs
index 09094d9..e93d66c 100644
--- a/src/paging.rs
+++ b/src/paging.rs
@@ -5,21 +5,25 @@
//! Generic aarch64 page table manipulation functionality which doesn't assume anything about how
//! addresses are mapped.
-use alloc::{
- alloc::{alloc_zeroed, handle_alloc_error},
- boxed::Box,
-};
+use crate::AddressRangeError;
+use alloc::alloc::{alloc_zeroed, handle_alloc_error};
use bitflags::bitflags;
use core::alloc::Layout;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
use core::ops::Range;
+use core::ptr::NonNull;
const PAGE_SHIFT: usize = 12;
+/// The pagetable level at which all entries are page mappings.
+const LEAF_LEVEL: usize = 3;
+
/// The page size in bytes assumed by this library, 4 KiB.
pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;
+/// The number of address bits resolved in one level of page table lookup. This is a function of the
+/// page size.
pub const BITS_PER_LEVEL: usize = PAGE_SHIFT - 3;
/// An aarch64 virtual address, the input type of a stage 1 page table.
@@ -40,7 +44,13 @@ impl<T> From<*mut T> for VirtualAddress {
impl Display for VirtualAddress {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
- write!(f, "{:#016x}", self.0)
+ write!(f, "{:#018x}", self.0)
+ }
+}
+
+impl Debug for VirtualAddress {
+ fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
+ write!(f, "VirtualAddress({})", self)
}
}
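
The width change from `{:#016x}` to `{:#018x}` is needed because the `#` alternate flag's `0x` prefix counts towards the field width, so 18 characters leave room for all 16 hex digits of a 64-bit address. A quick standalone check of that standard Rust formatting behaviour (not part of the patch):

    fn main() {
        // "{:#018x}" pads to 18 characters including the "0x" prefix: 2 + 16 hex digits.
        assert_eq!(format!("{:#018x}", 0usize), "0x0000000000000000");
        // The old "{:#016x}" only left room for 14 digits within the padding.
        assert_eq!(format!("{:#016x}", 0usize), "0x00000000000000");
    }
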
@@ -55,10 +65,22 @@ pub struct PhysicalAddress(pub usize);
impl Display for PhysicalAddress {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
- write!(f, "{:#016x}", self.0)
+ write!(f, "{:#018x}", self.0)
}
}
+impl Debug for PhysicalAddress {
+ fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
+ write!(f, "PhysicalAddress({})", self)
+ }
+}
+
+/// Returns the size in bytes of the address space covered by a single entry in the page table at
+/// the given level.
+fn granularity_at_level(level: usize) -> usize {
+ PAGE_SIZE << ((LEAF_LEVEL - level) * BITS_PER_LEVEL)
+}
+
/// An implementation of this trait needs to be provided to the mapping routines, so that the
/// physical addresses used in the page tables can be converted into virtual addresses that can be
/// used to access their contents from the code.
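
As a concrete check of the new `granularity_at_level` helper: with a 4 KiB page size (`PAGE_SHIFT = 12`) and `BITS_PER_LEVEL = 9`, each table level covers 512 times more address space per entry than the level below it. An illustrative sketch reproducing the formula from this file (the assertions are mine, not part of the patch):

    // Constants as defined in src/paging.rs.
    const PAGE_SIZE: usize = 1 << 12;
    const BITS_PER_LEVEL: usize = 9;
    const LEAF_LEVEL: usize = 3;

    fn granularity_at_level(level: usize) -> usize {
        PAGE_SIZE << ((LEAF_LEVEL - level) * BITS_PER_LEVEL)
    }

    fn main() {
        assert_eq!(granularity_at_level(3), 4 << 10);        // 4 KiB pages
        assert_eq!(granularity_at_level(2), 2 << 20);        // 2 MiB blocks
        assert_eq!(granularity_at_level(1), 1 << 30);        // 1 GiB blocks
        assert_eq!(granularity_at_level(0), 512usize << 30); // 512 GiB per level-0 entry
    }
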
@@ -102,22 +124,44 @@ impl MemoryRegion {
/// A complete hierarchy of page tables including all levels.
#[derive(Debug)]
pub struct RootTable<T: Translation> {
- table: Box<PageTable<T>>,
- level: usize,
+ table: PageTableWithLevel<T>,
}
impl<T: Translation> RootTable<T> {
/// Creates a new page table starting at the given root level.
+ ///
+ /// The level must be between 0 and 3; level -1 (for 52-bit addresses with LPA2) is not
+ /// currently supported by this library. The value of `TCR_EL1.T0SZ` must be set appropriately
+ /// to match.
pub fn new(level: usize) -> Self {
+ if level > LEAF_LEVEL {
+ panic!("Invalid root table level {}.", level);
+ }
RootTable {
- table: PageTable::new(),
- level,
+ table: PageTableWithLevel::new(level),
}
}
+ /// Returns the size in bytes of the virtual address space which can be mapped in this page
+ /// table.
+ ///
+ /// This is a function of the chosen root level.
+ pub fn size(&self) -> usize {
+ granularity_at_level(self.table.level) << BITS_PER_LEVEL
+ }
+
/// Recursively maps a range into the pagetable hierarchy starting at the root level.
- pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) {
- self.table.map_range(range, flags, self.level);
+ pub fn map_range(
+ &mut self,
+ range: &MemoryRegion,
+ flags: Attributes,
+ ) -> Result<(), AddressRangeError> {
+ if range.end().0 > self.size() {
+ return Err(AddressRangeError);
+ }
+
+ self.table.map_range(range, flags);
+ Ok(())
}
/// Returns the physical address of the root table in memory.
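
The new `RootTable::size` adds one more fan-out of `1 << BITS_PER_LEVEL` entries on top of `granularity_at_level(root level)`. For the level-1 root table used by the idmap tests added earlier in this patch, that works out to 1 GiB * 512 = 2^39 bytes, matching their `MAX_ADDRESS_FOR_ROOT_LEVEL_1 = 1 << 39` constant, and `map_range` now rejects any region ending beyond that size with `AddressRangeError`. A self-contained arithmetic check (my own sketch, not from the patch):

    fn main() {
        const BITS_PER_LEVEL: usize = 9;
        // granularity_at_level(1) with 4 KiB pages is 1 GiB (see the sketch above).
        let level1_granularity: usize = 1 << 30;
        // A table holds 1 << BITS_PER_LEVEL = 512 entries, so a level-1 root covers
        // 1 GiB * 512 = 1 << 39 bytes of virtual address space.
        let root_size = level1_granularity << BITS_PER_LEVEL;
        assert_eq!(root_size, 1usize << 39);
        // RootTable::map_range returns Err(AddressRangeError) for any region whose
        // end exceeds this size.
    }
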
@@ -126,6 +170,12 @@ impl<T: Translation> RootTable<T> {
}
}
+impl<T: Translation> Drop for RootTable<T> {
+ fn drop(&mut self) {
+ self.table.free()
+ }
+}
+
struct ChunkedIterator<'a> {
range: &'a MemoryRegion,
granularity: usize,
@@ -155,14 +205,14 @@ impl MemoryRegion {
fn split(&self, level: usize) -> ChunkedIterator {
ChunkedIterator {
range: self,
- granularity: PAGE_SIZE << ((3 - level) * BITS_PER_LEVEL),
+ granularity: granularity_at_level(level),
start: self.0.start.0,
}
}
/// Returns whether this region can be mapped at 'level' using block mappings only.
fn is_block(&self, level: usize) -> bool {
- let gran = PAGE_SIZE << ((3 - level) * BITS_PER_LEVEL);
+ let gran = granularity_at_level(level);
(self.0.start.0 | self.0.end.0) & (gran - 1) == 0
}
}
@@ -186,6 +236,132 @@ bitflags! {
}
}
+/// Smart pointer which owns a [`PageTable`] and knows what level it is at. This allows it to
+/// implement `Debug` and `Drop`, as walking the page table hierarchy requires knowing the starting
+/// level.
+struct PageTableWithLevel<T: Translation> {
+ table: NonNull<PageTable<T>>,
+ level: usize,
+}
+
+impl<T: Translation> PageTableWithLevel<T> {
+ /// Allocates a new, zeroed, appropriately-aligned page table on the heap.
+ fn new(level: usize) -> Self {
+ assert!(level <= LEAF_LEVEL);
+ Self {
+ // Safe because the pointer has been allocated with the appropriate layout by the global
+ // allocator, and the memory is zeroed which is valid initialisation for a PageTable.
+ table: unsafe { allocate_zeroed() },
+ level,
+ }
+ }
+
+ /// Returns the physical address of this page table in memory.
+ fn to_physical(&self) -> PhysicalAddress {
+ T::virtual_to_physical(VirtualAddress::from(self.table.as_ptr()))
+ }
+
+ /// Returns a mutable reference to the descriptor corresponding to a given virtual address.
+ fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor {
+ let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
+ let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
+ // Safe because we know that the pointer is properly aligned, dereferenced and initialised,
+ // and nothing else can access the page table while we hold a mutable reference to the
+ // PageTableWithLevel (assuming it is not currently active).
+ let table = unsafe { self.table.as_mut() };
+ &mut table.entries[index]
+ }
+
+ fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) {
+ let mut pa = T::virtual_to_physical(range.start());
+ let level = self.level;
+
+ for chunk in range.split(level) {
+ let entry = self.get_entry_mut(chunk.0.start);
+
+ if level == LEAF_LEVEL {
+ // Put down a page mapping.
+ entry.set(pa, flags | Attributes::ACCESSED | Attributes::TABLE_OR_PAGE);
+ } else if chunk.is_block(level) && !entry.is_table_or_page() {
+ // Rather than leak the entire subhierarchy, only put down
+ // a block mapping if the region is not already covered by
+ // a table mapping.
+ entry.set(pa, flags | Attributes::ACCESSED);
+ } else {
+ let mut subtable = if let Some(subtable) = entry.subtable::<T>(level) {
+ subtable
+ } else {
+ let old = *entry;
+ let mut subtable = Self::new(level + 1);
+ if let Some(old_flags) = old.flags() {
+ let granularity = granularity_at_level(level);
+ // Old was a valid block entry, so we need to split it.
+ // Recreate the entire block in the newly added table.
+ let a = align_down(chunk.0.start.0, granularity);
+ let b = align_up(chunk.0.end.0, granularity);
+ subtable.map_range(&MemoryRegion::new(a, b), old_flags);
+ }
+ entry.set(subtable.to_physical(), Attributes::TABLE_OR_PAGE);
+ subtable
+ };
+ subtable.map_range(&chunk, flags);
+ }
+ pa.0 += chunk.len();
+ }
+ }
+
+ fn fmt_indented(&self, f: &mut Formatter, indentation: usize) -> Result<(), fmt::Error> {
+ // Safe because we know that the pointer is aligned, initialised and dereferencable, and the
+ // PageTable won't be mutated while we are using it.
+ let table = unsafe { self.table.as_ref() };
+
+ let mut i = 0;
+ while i < table.entries.len() {
+ if table.entries[i].0 == 0 {
+ let first_zero = i;
+ while i < table.entries.len() && table.entries[i].0 == 0 {
+ i += 1;
+ }
+ if i - 1 == first_zero {
+ writeln!(f, "{:indentation$}{}: 0", "", first_zero)?;
+ } else {
+ writeln!(f, "{:indentation$}{}-{}: 0", "", first_zero, i - 1)?;
+ }
+ } else {
+ writeln!(f, "{:indentation$}{}: {:?}", "", i, table.entries[i])?;
+ if let Some(subtable) = table.entries[i].subtable::<T>(self.level) {
+ subtable.fmt_indented(f, indentation + 2)?;
+ }
+ i += 1;
+ }
+ }
+ Ok(())
+ }
+
+ /// Frees the memory used by this pagetable and all subtables. It is not valid to access the
+ /// page table after this.
+ fn free(&mut self) {
+ // Safe because we know that the pointer is aligned, initialised and dereferencable, and the
+ // PageTable won't be mutated while we are freeing it.
+ let table = unsafe { self.table.as_ref() };
+ for entry in table.entries {
+ if let Some(mut subtable) = entry.subtable::<T>(self.level) {
+ // Safe because the subtable was allocated by `PageTable::new` with the global
+ // allocator and appropriate layout.
+ subtable.free();
+ }
+ }
+ }
+}
+
+impl<T: Translation> Debug for PageTableWithLevel<T> {
+ fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
+ writeln!(f, "PageTableWithLevel {{ level: {}, table:", self.level)?;
+ self.fmt_indented(f, 0)?;
+ write!(f, "}}")
+ }
+}
+
/// A single level of a page table.
#[repr(C, align(4096))]
struct PageTable<T: Translation> {
@@ -227,7 +403,7 @@ impl Descriptor {
(self.0 & Attributes::VALID.bits()) != 0
}
- fn is_table(self) -> bool {
+ fn is_table_or_page(self) -> bool {
if let Some(flags) = self.flags() {
flags.contains(Attributes::TABLE_OR_PAGE)
} else {
@@ -239,11 +415,15 @@ impl Descriptor {
self.0 = pa.0 | (flags | Attributes::VALID).bits();
}
- fn subtable<T: Translation>(&self) -> Option<&mut PageTable<T>> {
- if self.is_table() {
+ fn subtable<T: Translation>(&self, level: usize) -> Option<PageTableWithLevel<T>> {
+ if level < LEAF_LEVEL && self.is_table_or_page() {
if let Some(output_address) = self.output_address() {
let va = T::physical_to_virtual(output_address);
- return Some(unsafe { &mut *(va.0 as *mut PageTable<T>) });
+ let ptr = va.0 as *mut PageTable<T>;
+ return Some(PageTableWithLevel {
+ level: level + 1,
+ table: NonNull::new(ptr).expect("Subtable pointer must be non-null."),
+ });
}
}
None
@@ -260,123 +440,20 @@ impl Debug for Descriptor {
}
}
-impl<T: Translation> Debug for PageTable<T> {
- fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
- writeln!(f)?;
- self.fmt_indented(f, 0)
- }
-}
-
/// Allocates appropriately aligned heap space for a `T` and zeroes it.
-fn allocate_zeroed<T>() -> *mut T {
+///
+/// # Safety
+///
+/// It must be valid to initialise the type `T` by simply zeroing its memory.
+unsafe fn allocate_zeroed<T>() -> NonNull<T> {
let layout = Layout::new::<T>();
// Safe because we know the layout has non-zero size.
- let pointer = unsafe { alloc_zeroed(layout) };
+ let pointer = alloc_zeroed(layout);
if pointer.is_null() {
handle_alloc_error(layout);
}
- pointer as *mut T
-}
-
-impl<T: Translation> PageTable<T> {
- /// Allocates a new, zeroed, appropriately-aligned page table on the heap.
- pub fn new() -> Box<Self> {
- // Safe because the pointer has been allocated with the appropriate layout by the global
- // allocator, and the memory is zeroed which is valid initialisation for a PageTable.
- unsafe {
- // We need to use Box::from_raw here rather than Box::new to avoid allocating on the
- // stack and copying to the heap.
- // TODO: Use Box::new_zeroed().assume_init() once it is stable.
- Box::from_raw(allocate_zeroed())
- }
- }
-
- /// Returns the physical address of this page table in memory.
- pub fn to_physical(&self) -> PhysicalAddress {
- T::virtual_to_physical(VirtualAddress::from(self as *const Self))
- }
-
- fn get_entry_mut(&mut self, va: usize, level: usize) -> &mut Descriptor {
- let shift = PAGE_SHIFT + (3 - level) * BITS_PER_LEVEL;
- let index = (va >> shift) % (1 << BITS_PER_LEVEL);
- &mut self.entries[index]
- }
-
- fn map_range(&mut self, range: &MemoryRegion, flags: Attributes, level: usize) {
- assert!(level <= 3);
- let mut pa = T::virtual_to_physical(range.start());
-
- for chunk in range.split(level) {
- let entry = self.get_entry_mut(chunk.0.start.0, level);
-
- if level == 3 {
- // Put down a page mapping.
- entry.set(pa, flags | Attributes::ACCESSED | Attributes::TABLE_OR_PAGE);
- } else if chunk.is_block(level) && !entry.is_table() {
- // Rather than leak the entire subhierarchy, only put down
- // a block mapping if the region is not already covered by
- // a table mapping.
- entry.set(pa, flags | Attributes::ACCESSED);
- } else {
- let subtable = if let Some(subtable) = entry.subtable::<T>() {
- subtable
- } else {
- let old = *entry;
- let subtable = Box::leak(PageTable::<T>::new());
- if let Some(old_flags) = old.flags() {
- let granularity = PAGE_SIZE << ((3 - level) * BITS_PER_LEVEL);
- // Old was a valid block entry, so we need to split it.
- // Recreate the entire block in the newly added table.
- let a = align_down(chunk.0.start.0, granularity);
- let b = align_up(chunk.0.end.0, granularity);
- subtable.map_range(&MemoryRegion::new(a, b), old_flags, level + 1);
- }
- entry.set(subtable.to_physical(), Attributes::TABLE_OR_PAGE);
- subtable
- };
- subtable.map_range(&chunk, flags, level + 1);
- }
- pa.0 += chunk.len();
- }
- }
-
- fn fmt_indented(&self, f: &mut Formatter, indentation: usize) -> Result<(), fmt::Error> {
- let mut i = 0;
- while i < self.entries.len() {
- if self.entries[i].0 == 0 {
- let first_zero = i;
- while i < self.entries.len() && self.entries[i].0 == 0 {
- i += 1;
- }
- if i - 1 == first_zero {
- writeln!(f, "{:indentation$}{}: 0", "", first_zero)?;
- } else {
- writeln!(f, "{:indentation$}{}-{}: 0", "", first_zero, i - 1)?;
- }
- } else {
- writeln!(f, "{:indentation$}{}: {:?}", "", i, self.entries[i])?;
- if let Some(subtable) = self.entries[i].subtable::<T>() {
- subtable.fmt_indented(f, indentation + 2)?;
- }
- i += 1;
- }
- }
- Ok(())
- }
-}
-
-impl<T: Translation> Drop for PageTable<T> {
- fn drop(&mut self) {
- for entry in self.entries {
- if let Some(subtable) = entry.subtable::<T>() {
- // Safe because the subtable was allocated by `PageTable::new` with the global
- // allocator and appropriate layout.
- unsafe {
- drop(Box::from_raw(subtable));
- }
- }
- }
- }
+ // Safe because we just checked that the pointer is non-null.
+ NonNull::new_unchecked(pointer as *mut T)
}
const fn align_down(value: usize, alignment: usize) -> usize {