author    Andrew Walbran <qwandor@google.com>  2022-04-27 20:09:29 +0000
committer Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2022-04-27 20:09:29 +0000
commit    1ffd4ebeeba2cbcb1d1cd41ffbbd9aa0f55f73a7 (patch)
tree      3851369b22a286c1e678bc66fb996d2ffbcd9fbc
parent    105511c7f26d817bce5e9dadce7b4624a93955a1 (diff)
parent    7715deb348c705405891f86f876ce36677cdf268 (diff)
download  buddy_system_allocator-1ffd4ebeeba2cbcb1d1cd41ffbbd9aa0f55f73a7.tar.gz
Initial import of buddy_system_allocator crate. am: 05bbb90269 am: 1509275376 am: 377395a661 am: 7715deb348
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/buddy_system_allocator/+/2076461

Change-Id: Ief67deb82a97e1d71052d14197891bc6bb857b0a
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json          5
-rw-r--r--  .github/workflows/rust.yml   26
-rw-r--r--  .gitignore                    3
-rw-r--r--  Android.bp                   49
-rw-r--r--  Cargo.toml                   31
-rw-r--r--  Cargo.toml.orig              20
-rw-r--r--  LICENSE                       7
-rw-r--r--  METADATA                     19
-rw-r--r--  MODULE_LICENSE_MIT            0
-rw-r--r--  OWNERS                        1
-rw-r--r--  README.md                    47
-rw-r--r--  cargo2android.json           12
-rw-r--r--  patches/Android.bp.patch     22
-rw-r--r--  src/frame.rs                167
-rw-r--r--  src/lib.rs                  342
-rw-r--r--  src/linked_list.rs          141
-rw-r--r--  src/test.rs                 147
17 files changed, 1039 insertions, 0 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..22ced39
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,5 @@
+{
+ "git": {
+ "sha1": "6586514c79f0263dc7dc3f9a61750f50d5198d40"
+ }
+}
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644
index 0000000..afebadc
--- /dev/null
+++ b/.github/workflows/rust.yml
@@ -0,0 +1,26 @@
+name: Rust
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ rust:
+ - stable
+ - nightly-2021-03-09
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Use ${{ matrix.rust }}
+ run: rustup default ${{ matrix.rust }}
+ - name: Build
+ run: cargo build --verbose
+ - name: Run tests
+ run: cargo test --verbose
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2f88dba
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/target
+**/*.rs.bk
+Cargo.lock
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..bcae11e
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,49 @@
+// This file is generated by cargo2android.py --config cargo2android.json.
+// Do not modify this file as changes will be overridden on upgrade.
+
+
+
+rust_test {
+ name: "buddy_system_allocator_test_src_lib",
+ host_supported: true,
+ crate_name: "buddy_system_allocator",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.8.0",
+ srcs: ["src/lib.rs"],
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ test_options: {
+ unit_test: true,
+ },
+ edition: "2018",
+ features: [
+ "default",
+ "spin",
+ "use_spin",
+ ],
+ rustlibs: [
+ "libspin_nostd",
+ ],
+}
+
+rust_library_rlib {
+ name: "libbuddy_system_allocator",
+ host_supported: true,
+ crate_name: "buddy_system_allocator",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.8.0",
+ srcs: ["src/lib.rs"],
+ edition: "2018",
+ features: [
+ "default",
+ "spin",
+ "use_spin",
+ ],
+ rustlibs: [
+ "libspin_nostd",
+ ],
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.virt",
+ ],
+}
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..4c33a88
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,31 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "buddy_system_allocator"
+version = "0.8.0"
+authors = ["Jiajie Chen <noc@jiegec.ac.cn>", "Vinay Chandra Dommeti <github@vinay.vc>"]
+description = "A bare metal allocator that uses buddy system."
+homepage = "https://github.com/rcore-os/buddy_system_allocator"
+documentation = "https://docs.rs/buddy_system_allocator"
+keywords = ["allocator", "no_std", "heap"]
+license = "MIT"
+repository = "https://github.com/rcore-os/buddy_system_allocator"
+[dependencies.spin]
+version = "0.7"
+optional = true
+
+[features]
+const_fn = []
+default = ["use_spin"]
+use_spin = ["spin"]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..52cd0ac
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,20 @@
+[package]
+name = "buddy_system_allocator"
+description = "A bare metal allocator that uses buddy system."
+documentation = "https://docs.rs/buddy_system_allocator"
+homepage = "https://github.com/rcore-os/buddy_system_allocator"
+repository = "https://github.com/rcore-os/buddy_system_allocator"
+keywords = ["allocator", "no_std", "heap"]
+version = "0.8.0"
+authors = ["Jiajie Chen <noc@jiegec.ac.cn>", "Vinay Chandra Dommeti <github@vinay.vc>"]
+edition = "2018"
+license = "MIT"
+
+[features]
+default = ["use_spin"]
+use_spin = ["spin"]
+const_fn = []
+
+[dependencies.spin]
+version = "0.7"
+optional = true
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..978d8f2
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,7 @@
+Copyright 2019-2020 Jiajie Chen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..34964ac
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "buddy_system_allocator"
+description: "A bare metal allocator that uses buddy system."
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://crates.io/crates/buddy_system_allocator"
+ }
+ url {
+ type: ARCHIVE
+ value: "https://static.crates.io/crates/buddy_system_allocator/buddy_system_allocator-0.8.0.crate"
+ }
+ version: "0.8.0"
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2022
+ month: 4
+ day: 20
+ }
+}
diff --git a/MODULE_LICENSE_MIT b/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_MIT
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..45dc4dd
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1 @@
+include platform/prebuilts/rust:master:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..07349b1
--- /dev/null
+++ b/README.md
@@ -0,0 +1,47 @@
+# buddy_system_allocator
+
+[![Crates.io version][crate-img]][crate]
+[![docs.rs][docs-img]][docs]
+
+An (almost) drop-in replacement for [phil-opp/linked-list-allocator](https://github.com/phil-opp/linked-list-allocator), but using a buddy system instead.
+
+## Usage
+
+To use buddy_system_allocator as the global allocator:
+
+```rust
+use buddy_system_allocator::LockedHeap;
+
+#[global_allocator]
+static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
+```
+
+To initialize the allocator:
+
+```rust
+unsafe {
+ HEAP_ALLOCATOR.lock().init(heap_start, heap_size);
+ // or
+ HEAP_ALLOCATOR.lock().add_to_heap(heap_start, heap_end);
+}
+```
+
+You can also use `FrameAllocator` and `LockedHeapWithRescue`; see their documentation for usage, and the short sketch below.
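+
+A minimal sketch of allocating frames with `FrameAllocator`, mirroring the
+doctest in `src/frame.rs`:
+
+```rust
+use buddy_system_allocator::FrameAllocator;
+
+let mut frame = FrameAllocator::new();
+// Manage frame numbers [0, 3).
+frame.add_frame(0, 3);
+// Allocations are power-of-two sized runs of frames.
+assert_eq!(frame.alloc(1), Some(2));
+assert_eq!(frame.alloc(2), Some(0));
+frame.dealloc(2, 1);
+```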
+
+## Features
+
+- **`use_spin`** (default): Provide a `LockedHeap` type that implements the [`GlobalAlloc`] trait by using a spinlock.
+- **`const_fn`** (nightly only): Provide a const fn version of `LockedHeapWithRescue::new`.
+
+[`GlobalAlloc`]: https://doc.rust-lang.org/nightly/core/alloc/trait.GlobalAlloc.html
+
+## License
+
+Some code comes from phil-opp's linked-list-allocator.
+
+Licensed under the MIT License. Thanks to phil-opp's linked-list-allocator for the inspiration and interface.
+
+[crate-img]: https://img.shields.io/crates/v/buddy_system_allocator.svg
+[crate]: https://crates.io/crates/buddy_system_allocator
+[docs-img]: https://docs.rs/buddy_system_allocator/badge.svg
+[docs]: https://docs.rs/buddy_system_allocator
diff --git a/cargo2android.json b/cargo2android.json
new file mode 100644
index 0000000..b734ccf
--- /dev/null
+++ b/cargo2android.json
@@ -0,0 +1,12 @@
+{
+ "apex-available": [
+ "//apex_available:platform",
+ "com.android.virt"
+ ],
+ "dependencies": true,
+ "device": true,
+ "force-rlib": true,
+ "patch": "patches/Android.bp.patch",
+ "run": true,
+ "tests": true
+}
\ No newline at end of file
diff --git a/patches/Android.bp.patch b/patches/Android.bp.patch
new file mode 100644
index 0000000..e1df864
--- /dev/null
+++ b/patches/Android.bp.patch
@@ -0,0 +1,22 @@
+diff --git a/Android.bp b/Android.bp
+index 64d311b..bcae11e 100644
+--- a/Android.bp
++++ b/Android.bp
+@@ -22,7 +22,7 @@ rust_test {
+ "use_spin",
+ ],
+ rustlibs: [
+- "libspin",
++ "libspin_nostd",
+ ],
+ }
+
+@@ -40,7 +40,7 @@ rust_library_rlib {
+ "use_spin",
+ ],
+ rustlibs: [
+- "libspin",
++ "libspin_nostd",
+ ],
+ apex_available: [
+ "//apex_available:platform",
diff --git a/src/frame.rs b/src/frame.rs
new file mode 100644
index 0000000..27ab6bc
--- /dev/null
+++ b/src/frame.rs
@@ -0,0 +1,167 @@
+use super::prev_power_of_two;
+use alloc::collections::BTreeSet;
+use core::cmp::min;
+use core::ops::Range;
+
+#[cfg(feature = "use_spin")]
+use core::ops::Deref;
+#[cfg(feature = "use_spin")]
+use spin::Mutex;
+
+/// A frame allocator that uses a buddy system,
+/// requiring a global allocator
+///
+/// # Usage
+///
+/// Create a frame allocator and add some frames to it:
+/// ```
+/// use buddy_system_allocator::*;
+/// let mut frame = FrameAllocator::new();
+/// assert!(frame.alloc(1).is_none());
+///
+/// frame.add_frame(0, 3);
+/// let num = frame.alloc(1);
+/// assert_eq!(num, Some(2));
+/// let num = frame.alloc(2);
+/// assert_eq!(num, Some(0));
+/// ```
+pub struct FrameAllocator {
+ // buddy system with max order of 32
+ free_list: [BTreeSet<usize>; 32],
+
+ // statistics
+ allocated: usize,
+ total: usize,
+}
+
+impl FrameAllocator {
+ /// Create an empty frame allocator
+ pub fn new() -> Self {
+ FrameAllocator {
+ free_list: Default::default(),
+ allocated: 0,
+ total: 0,
+ }
+ }
+
+ /// Add a range of frame numbers [start, end) to the allocator
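+ ///
+ /// The range is split greedily into blocks that are aligned to their own
+ /// size: for example, [0, 3) becomes a 2-frame block at 0 and a 1-frame
+ /// block at 2.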
+ pub fn add_frame(&mut self, start: usize, end: usize) {
+ assert!(start <= end);
+
+ let mut total = 0;
+ let mut current_start = start;
+
+ while current_start < end {
+ let lowbit = if current_start > 0 {
+ current_start & (!current_start + 1)
+ } else {
+ 32
+ };
+ let size = min(lowbit, prev_power_of_two(end - current_start));
+ total += size;
+
+ self.free_list[size.trailing_zeros() as usize].insert(current_start);
+ current_start += size;
+ }
+
+ self.total += total;
+ }
+
+ /// Add a range of frames to the allocator
+ pub fn insert(&mut self, range: Range<usize>) {
+ self.add_frame(range.start, range.end);
+ }
+
+ /// Allocate a range of frames from the allocator, returning the first frame of the allocated range
+ pub fn alloc(&mut self, count: usize) -> Option<usize> {
+ let size = count.next_power_of_two();
+ let class = size.trailing_zeros() as usize;
+ for i in class..self.free_list.len() {
+ // Find the first non-empty size class
+ if !self.free_list[i].is_empty() {
+ // Split buffers
+ for j in (class + 1..i + 1).rev() {
+ if let Some(block_ref) = self.free_list[j].iter().next() {
+ let block = *block_ref;
+ self.free_list[j - 1].insert(block + (1 << (j - 1)));
+ self.free_list[j - 1].insert(block);
+ self.free_list[j].remove(&block);
+ } else {
+ return None;
+ }
+ }
+
+ let result = self.free_list[class].iter().next().clone();
+ if let Some(result_ref) = result {
+ let result = *result_ref;
+ self.free_list[class].remove(&result);
+ self.allocated += size;
+ return Some(result);
+ } else {
+ return None;
+ }
+ }
+ }
+ None
+ }
+
+ /// Deallocate a range of frames [frame, frame+count) from the frame allocator.
+ /// The range should be exactly the same as when it was allocated, as in the heap allocator
+ pub fn dealloc(&mut self, frame: usize, count: usize) {
+ let size = count.next_power_of_two();
+ let class = size.trailing_zeros() as usize;
+
+ // Merge free buddy lists
+ let mut current_ptr = frame;
+ let mut current_class = class;
+ while current_class < self.free_list.len() {
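+ // A block's buddy differs from it only in bit `current_class`, so
+ // XOR with the block size (1 << current_class) yields its address.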
+ let buddy = current_ptr ^ (1 << current_class);
+ if self.free_list[current_class].remove(&buddy) {
+ // Free buddy found
+ current_ptr = min(current_ptr, buddy);
+ current_class += 1;
+ } else {
+ self.free_list[current_class].insert(current_ptr);
+ break;
+ }
+ }
+
+ self.allocated -= size;
+ }
+}
+
+/// A locked version of `FrameAllocator`
+///
+/// # Usage
+///
+/// Create a locked frame allocator and add frames to it:
+/// ```
+/// use buddy_system_allocator::*;
+/// let mut frame = LockedFrameAllocator::new();
+/// assert!(frame.lock().alloc(1).is_none());
+///
+/// frame.lock().add_frame(0, 3);
+/// let num = frame.lock().alloc(1);
+/// assert_eq!(num, Some(2));
+/// let num = frame.lock().alloc(2);
+/// assert_eq!(num, Some(0));
+/// ```
+#[cfg(feature = "use_spin")]
+pub struct LockedFrameAllocator(Mutex<FrameAllocator>);
+
+#[cfg(feature = "use_spin")]
+impl LockedFrameAllocator {
+ /// Creates an empty frame allocator
+ pub fn new() -> LockedFrameAllocator {
+ LockedFrameAllocator(Mutex::new(FrameAllocator::new()))
+ }
+}
+
+#[cfg(feature = "use_spin")]
+impl Deref for LockedFrameAllocator {
+ type Target = Mutex<FrameAllocator>;
+
+ fn deref(&self) -> &Mutex<FrameAllocator> {
+ &self.0
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..1eefea4
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,342 @@
+#![cfg_attr(feature = "const_fn", feature(const_mut_refs, const_fn_fn_ptr_basics))]
+#![no_std]
+
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+
+#[cfg(feature = "use_spin")]
+extern crate spin;
+
+extern crate alloc;
+
+use core::alloc::{GlobalAlloc, Layout};
+use core::cmp::{max, min};
+use core::fmt;
+use core::mem::size_of;
+#[cfg(feature = "use_spin")]
+use core::ops::Deref;
+use core::ptr::NonNull;
+#[cfg(feature = "use_spin")]
+use spin::Mutex;
+
+mod frame;
+pub mod linked_list;
+#[cfg(test)]
+mod test;
+
+pub use frame::*;
+
+/// A heap that uses a buddy system with configurable order.
+///
+/// # Usage
+///
+/// Create a heap and add a memory region to it:
+/// ```
+/// use buddy_system_allocator::*;
+/// # use core::mem::size_of;
+/// let mut heap = Heap::<32>::empty();
+/// # let space: [usize; 100] = [0; 100];
+/// # let begin: usize = space.as_ptr() as usize;
+/// # let end: usize = begin + 100 * size_of::<usize>();
+/// # let size: usize = 100 * size_of::<usize>();
+/// unsafe {
+/// heap.init(begin, size);
+/// // or
+/// heap.add_to_heap(begin, end);
+/// }
+/// ```
+pub struct Heap<const ORDER: usize> {
+ // buddy system with max order of `ORDER`
+ free_list: [linked_list::LinkedList; ORDER],
+
+ // statistics
+ user: usize,
+ allocated: usize,
+ total: usize,
+}
+
+impl<const ORDER: usize> Heap<ORDER> {
+ /// Create an empty heap
+ pub const fn new() -> Self {
+ Heap {
+ free_list: [linked_list::LinkedList::new(); ORDER],
+ user: 0,
+ allocated: 0,
+ total: 0,
+ }
+ }
+
+ /// Create an empty heap
+ pub const fn empty() -> Self {
+ Self::new()
+ }
+
+ /// Add a range of memory [start, end) to the heap
+ pub unsafe fn add_to_heap(&mut self, mut start: usize, mut end: usize) {
+ // avoid unaligned access on some platforms
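+ // (`!size_of::<usize>() + 1` is the two's-complement negation of the
+ // word size, so ANDing with it rounds an address down to a word boundary)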
+ start = (start + size_of::<usize>() - 1) & (!size_of::<usize>() + 1);
+ end = end & (!size_of::<usize>() + 1);
+ assert!(start <= end);
+
+ let mut total = 0;
+ let mut current_start = start;
+
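+ // Greedily carve the range into blocks that are aligned to their own
+ // size; `lowbit` is the largest power of two dividing `current_start`.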
+ while current_start + size_of::<usize>() <= end {
+ let lowbit = current_start & (!current_start + 1);
+ let size = min(lowbit, prev_power_of_two(end - current_start));
+ total += size;
+
+ self.free_list[size.trailing_zeros() as usize].push(current_start as *mut usize);
+ current_start += size;
+ }
+
+ self.total += total;
+ }
+
+ /// Add a range of memory [start, start + size) to the heap
+ pub unsafe fn init(&mut self, start: usize, size: usize) {
+ self.add_to_heap(start, start + size);
+ }
+
+ /// Allocate a range of memory from the heap satisfying `layout` requirements
+ pub fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, ()> {
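+ // Round the request up to a power of two that also satisfies the
+ // alignment and is at least one machine word (the free-list node size).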
+ let size = max(
+ layout.size().next_power_of_two(),
+ max(layout.align(), size_of::<usize>()),
+ );
+ let class = size.trailing_zeros() as usize;
+ for i in class..self.free_list.len() {
+ // Find the first non-empty size class
+ if !self.free_list[i].is_empty() {
+ // Split buffers
+ for j in (class + 1..i + 1).rev() {
+ if let Some(block) = self.free_list[j].pop() {
+ unsafe {
+ self.free_list[j - 1]
+ .push((block as usize + (1 << (j - 1))) as *mut usize);
+ self.free_list[j - 1].push(block);
+ }
+ } else {
+ return Err(());
+ }
+ }
+
+ let result = NonNull::new(
+ self.free_list[class]
+ .pop()
+ .expect("current block should have free space now")
+ as *mut u8,
+ );
+ if let Some(result) = result {
+ self.user += layout.size();
+ self.allocated += size;
+ return Ok(result);
+ } else {
+ return Err(());
+ }
+ }
+ }
+ Err(())
+ }
+
+ /// Deallocate a range of memory from the heap
+ pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
+ let size = max(
+ layout.size().next_power_of_two(),
+ max(layout.align(), size_of::<usize>()),
+ );
+ let class = size.trailing_zeros() as usize;
+
+ unsafe {
+ // Put back into free list
+ self.free_list[class].push(ptr.as_ptr() as *mut usize);
+
+ // Merge free buddy lists
+ let mut current_ptr = ptr.as_ptr() as usize;
+ let mut current_class = class;
+ while current_class < self.free_list.len() {
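+ // The buddy of a block differs from it only in bit `current_class`.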
+ let buddy = current_ptr ^ (1 << current_class);
+ let mut flag = false;
+ for block in self.free_list[current_class].iter_mut() {
+ if block.value() as usize == buddy {
+ block.pop();
+ flag = true;
+ break;
+ }
+ }
+
+ // Free buddy found
+ if flag {
+ self.free_list[current_class].pop();
+ current_ptr = min(current_ptr, buddy);
+ current_class += 1;
+ self.free_list[current_class].push(current_ptr as *mut usize);
+ } else {
+ break;
+ }
+ }
+ }
+
+ self.user -= layout.size();
+ self.allocated -= size;
+ }
+
+ /// Return the number of bytes that the user has requested
+ pub fn stats_alloc_user(&self) -> usize {
+ self.user
+ }
+
+ /// Return the number of bytes that are actually allocated
+ pub fn stats_alloc_actual(&self) -> usize {
+ self.allocated
+ }
+
+ /// Return the total number of bytes in the heap
+ pub fn stats_total_bytes(&self) -> usize {
+ self.total
+ }
+}
+
+impl<const ORDER: usize> fmt::Debug for Heap<ORDER> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Heap")
+ .field("user", &self.user)
+ .field("allocated", &self.allocated)
+ .field("total", &self.total)
+ .finish()
+ }
+}
+
+/// A locked version of `Heap`
+///
+/// # Usage
+///
+/// Create a locked heap and add a memory region to it:
+/// ```
+/// use buddy_system_allocator::*;
+/// # use core::mem::size_of;
+/// let mut heap = LockedHeap::<32>::new();
+/// # let space: [usize; 100] = [0; 100];
+/// # let begin: usize = space.as_ptr() as usize;
+/// # let end: usize = begin + 100 * size_of::<usize>();
+/// # let size: usize = 100 * size_of::<usize>();
+/// unsafe {
+/// heap.lock().init(begin, size);
+/// // or
+/// heap.lock().add_to_heap(begin, end);
+/// }
+/// ```
+#[cfg(feature = "use_spin")]
+pub struct LockedHeap<const ORDER: usize>(Mutex<Heap<ORDER>>);
+
+#[cfg(feature = "use_spin")]
+impl<const ORDER: usize> LockedHeap<ORDER> {
+ /// Creates an empty heap
+ pub const fn new() -> Self {
+ LockedHeap(Mutex::new(Heap::<ORDER>::new()))
+ }
+
+ /// Creates an empty heap
+ pub const fn empty() -> Self {
+ LockedHeap(Mutex::new(Heap::<ORDER>::new()))
+ }
+}
+
+#[cfg(feature = "use_spin")]
+impl<const ORDER: usize> Deref for LockedHeap<ORDER> {
+ type Target = Mutex<Heap<ORDER>>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+#[cfg(feature = "use_spin")]
+unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeap<ORDER> {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
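+ // Report allocation failure to the caller as a null pointer, as the
+ // `GlobalAlloc` contract requires.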
+ self.0
+ .lock()
+ .alloc(layout)
+ .ok()
+ .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ self.0.lock().dealloc(NonNull::new_unchecked(ptr), layout)
+ }
+}
+
+/// A locked version of `Heap` with a rescue function invoked before OOM
+///
+/// # Usage
+///
+/// Create a locked heap:
+/// ```
+/// use buddy_system_allocator::*;
+/// let heap = LockedHeapWithRescue::new(|heap: &mut Heap<32>, layout: &core::alloc::Layout| {});
+/// ```
+///
+/// Before returning an OOM error, the allocator calls the rescue function and retries the allocation once.
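+///
+/// A typical rescue function adds another memory region to the heap (as
+/// `test_heap_oom_rescue` in `src/test.rs` does), so that the retried
+/// allocation can succeed.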
+#[cfg(feature = "use_spin")]
+pub struct LockedHeapWithRescue<const ORDER: usize> {
+ inner: Mutex<Heap<ORDER>>,
+ rescue: fn(&mut Heap<ORDER>, &Layout),
+}
+
+#[cfg(feature = "use_spin")]
+impl<const ORDER: usize> LockedHeapWithRescue<ORDER> {
+ /// Creates an empty heap
+ #[cfg(feature = "const_fn")]
+ pub const fn new(rescue: fn(&mut Heap<ORDER>, &Layout)) -> Self {
+ LockedHeapWithRescue {
+ inner: Mutex::new(Heap::<ORDER>::new()),
+ rescue,
+ }
+ }
+
+ /// Creates an empty heap
+ #[cfg(not(feature = "const_fn"))]
+ pub fn new(rescue: fn(&mut Heap<ORDER>, &Layout)) -> Self {
+ LockedHeapWithRescue {
+ inner: Mutex::new(Heap::<ORDER>::new()),
+ rescue,
+ }
+ }
+}
+
+#[cfg(feature = "use_spin")]
+impl<const ORDER: usize> Deref for LockedHeapWithRescue<ORDER> {
+ type Target = Mutex<Heap<ORDER>>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+#[cfg(feature = "use_spin")]
+unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeapWithRescue<ORDER> {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ let mut inner = self.inner.lock();
+ match inner.alloc(layout) {
+ Ok(allocation) => allocation.as_ptr(),
+ Err(_) => {
+ (self.rescue)(&mut inner, &layout);
+ inner
+ .alloc(layout)
+ .ok()
+ .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
+ }
+ }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ self.inner
+ .lock()
+ .dealloc(NonNull::new_unchecked(ptr), layout)
+ }
+}
+
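+/// Returns the largest power of two less than or equal to `num`
+/// (assumes `num >= 1`); for example, `prev_power_of_two(5) == 4`.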
+pub(crate) fn prev_power_of_two(num: usize) -> usize {
+ 1 << (8 * (size_of::<usize>()) - num.leading_zeros() as usize - 1)
+}
diff --git a/src/linked_list.rs b/src/linked_list.rs
new file mode 100644
index 0000000..8d649ce
--- /dev/null
+++ b/src/linked_list.rs
@@ -0,0 +1,141 @@
+//! Provide the intrusive LinkedList
+#![allow(dead_code)]
+
+use core::{fmt, ptr};
+
+/// An intrusive linked list
+///
+/// A clean room implementation of the one used in CS140e 2018 Winter
+///
+/// Thanks to Sergio Benitez for his excellent work;
+/// see [CS140e](https://cs140e.sergio.bz/) for more information
+#[derive(Copy, Clone)]
+pub struct LinkedList {
+ head: *mut usize,
+}
+
+unsafe impl Send for LinkedList {}
+
+impl LinkedList {
+ /// Create a new LinkedList
+ pub const fn new() -> LinkedList {
+ LinkedList {
+ head: ptr::null_mut(),
+ }
+ }
+
+ /// Return `true` if the list is empty
+ pub fn is_empty(&self) -> bool {
+ self.head.is_null()
+ }
+
+ /// Push `item` to the front of the list
+ pub unsafe fn push(&mut self, item: *mut usize) {
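+ // Intrusive list: the pushed word itself stores the old head pointer.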
+ *item = self.head as usize;
+ self.head = item;
+ }
+
+ /// Try to remove the first item in the list
+ pub fn pop(&mut self) -> Option<*mut usize> {
+ match self.is_empty() {
+ true => None,
+ false => {
+ // Advance head pointer
+ let item = self.head;
+ self.head = unsafe { *item as *mut usize };
+ Some(item)
+ }
+ }
+ }
+
+ /// Return an iterator over the items in the list
+ pub fn iter(&self) -> Iter {
+ Iter {
+ curr: self.head,
+ list: self,
+ }
+ }
+
+ /// Return a mutable iterator over the items in the list
+ pub fn iter_mut(&mut self) -> IterMut {
+ IterMut {
+ prev: &mut self.head as *mut *mut usize as *mut usize,
+ curr: self.head,
+ list: self,
+ }
+ }
+}
+
+impl fmt::Debug for LinkedList {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+/// An iterator over the linked list
+pub struct Iter<'a> {
+ curr: *mut usize,
+ list: &'a LinkedList,
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = *mut usize;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.curr.is_null() {
+ None
+ } else {
+ let item = self.curr;
+ let next = unsafe { *item as *mut usize };
+ self.curr = next;
+ Some(item)
+ }
+ }
+}
+
+/// Represent a mutable node in `LinkedList`
+pub struct ListNode {
+ prev: *mut usize,
+ curr: *mut usize,
+}
+
+impl ListNode {
+ /// Remove the node from the list
+ pub fn pop(self) -> *mut usize {
+ // Skip the current one
+ unsafe {
+ *(self.prev) = *(self.curr);
+ }
+ self.curr
+ }
+
+ /// Returns the pointed address
+ pub fn value(&self) -> *mut usize {
+ self.curr
+ }
+}
+
+/// A mutable iterator over the linked list
+pub struct IterMut<'a> {
+ list: &'a mut LinkedList,
+ prev: *mut usize,
+ curr: *mut usize,
+}
+
+impl<'a> Iterator for IterMut<'a> {
+ type Item = ListNode;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.curr.is_null() {
+ None
+ } else {
+ let res = ListNode {
+ prev: self.prev,
+ curr: self.curr,
+ };
+ self.prev = self.curr;
+ self.curr = unsafe { *self.curr as *mut usize };
+ Some(res)
+ }
+ }
+}
diff --git a/src/test.rs b/src/test.rs
new file mode 100644
index 0000000..acf25d8
--- /dev/null
+++ b/src/test.rs
@@ -0,0 +1,147 @@
+use crate::linked_list;
+use crate::FrameAllocator;
+use crate::Heap;
+use crate::LockedHeapWithRescue;
+use core::alloc::GlobalAlloc;
+use core::alloc::Layout;
+use core::mem::size_of;
+
+#[test]
+fn test_linked_list() {
+ let mut value1: usize = 0;
+ let mut value2: usize = 0;
+ let mut value3: usize = 0;
+ let mut list = linked_list::LinkedList::new();
+ unsafe {
+ list.push(&mut value1 as *mut usize);
+ list.push(&mut value2 as *mut usize);
+ list.push(&mut value3 as *mut usize);
+ }
+
+ // Test links
+ assert_eq!(value3, &value2 as *const usize as usize);
+ assert_eq!(value2, &value1 as *const usize as usize);
+ assert_eq!(value1, 0);
+
+ // Test iter
+ let mut iter = list.iter();
+ assert_eq!(iter.next(), Some(&mut value3 as *mut usize));
+ assert_eq!(iter.next(), Some(&mut value2 as *mut usize));
+ assert_eq!(iter.next(), Some(&mut value1 as *mut usize));
+ assert_eq!(iter.next(), None);
+
+ // Test iter_mut
+
+ let mut iter_mut = list.iter_mut();
+ assert_eq!(iter_mut.next().unwrap().pop(), &mut value3 as *mut usize);
+
+ // Test pop
+ assert_eq!(list.pop(), Some(&mut value2 as *mut usize));
+ assert_eq!(list.pop(), Some(&mut value1 as *mut usize));
+ assert_eq!(list.pop(), None);
+}
+
+#[test]
+fn test_empty_heap() {
+ let mut heap = Heap::<32>::new();
+ assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());
+}
+
+#[test]
+fn test_heap_add() {
+ let mut heap = Heap::<32>::new();
+ assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());
+
+ let space: [usize; 100] = [0; 100];
+ unsafe {
+ heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
+ }
+ let addr = heap.alloc(Layout::from_size_align(1, 1).unwrap());
+ assert!(addr.is_ok());
+}
+
+#[test]
+fn test_heap_oom() {
+ let mut heap = Heap::<32>::new();
+ let space: [usize; 100] = [0; 100];
+ unsafe {
+ heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
+ }
+
+ assert!(heap
+ .alloc(Layout::from_size_align(100 * size_of::<usize>(), 1).unwrap())
+ .is_err());
+ assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_ok());
+}
+
+#[test]
+fn test_heap_oom_rescue() {
+ static mut SPACE: [usize; 100] = [0; 100];
+ let heap = LockedHeapWithRescue::new(|heap: &mut Heap<32>, _layout: &Layout| unsafe {
+ heap.add_to_heap(SPACE.as_ptr() as usize, SPACE.as_ptr().add(100) as usize);
+ });
+
+ unsafe {
+ assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()) as usize != 0);
+ }
+}
+
+#[test]
+fn test_heap_alloc_and_free() {
+ let mut heap = Heap::<32>::new();
+ assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());
+
+ let space: [usize; 100] = [0; 100];
+ unsafe {
+ heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
+ }
+ for _ in 0..100 {
+ let addr = heap.alloc(Layout::from_size_align(1, 1).unwrap()).unwrap();
+ heap.dealloc(addr, Layout::from_size_align(1, 1).unwrap());
+ }
+}
+
+#[test]
+fn test_empty_frame_allocator() {
+ let mut frame = FrameAllocator::new();
+ assert!(frame.alloc(1).is_none());
+}
+
+#[test]
+fn test_frame_allocator_add() {
+ let mut frame = FrameAllocator::new();
+ assert!(frame.alloc(1).is_none());
+
+ frame.insert(0..3);
+ let num = frame.alloc(1);
+ assert_eq!(num, Some(2));
+ let num = frame.alloc(2);
+ assert_eq!(num, Some(0));
+ assert!(frame.alloc(1).is_none());
+ assert!(frame.alloc(2).is_none());
+}
+
+#[test]
+fn test_frame_allocator_alloc_and_free() {
+ let mut frame = FrameAllocator::new();
+ assert!(frame.alloc(1).is_none());
+
+ frame.add_frame(0, 1024);
+ for _ in 0..100 {
+ let addr = frame.alloc(512).unwrap();
+ frame.dealloc(addr, 512);
+ }
+}
+
+#[test]
+fn test_frame_allocator_alloc_and_free_complex() {
+ let mut frame = FrameAllocator::new();
+ frame.add_frame(100, 1024);
+ for _ in 0..10 {
+ let addr = frame.alloc(1).unwrap();
+ frame.dealloc(addr, 1);
+ }
+ let addr1 = frame.alloc(1).unwrap();
+ let addr2 = frame.alloc(1).unwrap();
+ assert_ne!(addr1, addr2);
+}