Diffstat (limited to 'gbl/libboot/src/aarch64.rs')
-rw-r--r--  gbl/libboot/src/aarch64.rs  91
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/gbl/libboot/src/aarch64.rs b/gbl/libboot/src/aarch64.rs
index 6b84fda..3c4f52c 100644
--- a/gbl/libboot/src/aarch64.rs
+++ b/gbl/libboot/src/aarch64.rs
@@ -47,8 +47,45 @@ pub fn current_el() -> ExceptionLevel {
}
extern "C" {
- /// Clean and invalidate data cache and disable data/instruction cache and MMU.
- fn disable_cache_mmu();
+ /// Clean and invalidate the data cache by address range. This function comes from the ATF library.
+ fn flush_dcache_range(addr: usize, len: usize);
+}
+
+/// Flush all data cache lines covering the given buffer.
+fn flush_dcache_buffer(buf: &[u8]) {
+ unsafe { flush_dcache_range(buf.as_ptr() as usize, buf.len()) }
+ // SAFETY: Assembly code for instruction synchronization.
+ unsafe { asm!("isb") };
+}
+
+/// Disable the caches and MMU, then jump to the given kernel address with arguments.
+///
+/// # Args
+///
+/// * `addr`: Address to jump to.
+/// * `arg[0-3]`: Arguments passed to the jump target in registers x0-x3.
+///
+/// # Safety
+///
+/// * Caller must ensure that `addr` contains valid execution code.
+/// * Caller must ensure that the data cache is flushed for any memory regions containing data to
+///   be accessed by the destination code, including the execution code itself at address `addr`.
+unsafe fn jump_kernel(addr: usize, arg0: usize, arg1: usize, arg2: usize, arg3: usize) -> ! {
+ // TODO(b/334962949): Disable other state such as interrupts, async aborts, branch prediction, etc.
+
+ // After the MMU and caches are disabled, memory regions with unflushed cache lines are stale and
+ // cannot be trusted, including stack memory. Therefore all needed data, including local
+ // variables, must be loaded into registers first. `disable_cache_mmu_and_jump` operates only on
+ // registers and does not access the stack or any other memory.
+ asm!(
+ "b disable_cache_mmu_and_jump",
+ in("x0") arg0,
+ in("x1") arg1,
+ in("x2") arg2,
+ in("x3") arg3,
+ in("x4") addr,
+ );
+ unreachable!();
}
/// Boots a Linux kernel in mode EL2 or lower with the given FDT blob.
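The `disable_cache_mmu_and_jump` routine that `jump_kernel` branches to is defined in assembly outside this file and is not part of this diff. A minimal sketch of what the EL1 path of such a routine could look like is shown below, expressed with `core::arch::global_asm!` and under a different symbol name; the real routine also has to cover EL2 and additional cache maintenance, so treat this as illustrative only.

use core::arch::global_asm;

// Hypothetical sketch (not the actual routine): clear SCTLR_EL1.{M,C,I} to
// turn off the MMU and the data/instruction caches, then branch to the target
// address in x4 with the kernel arguments already staged in x0-x3.
global_asm!(
    r#"
    .global disable_cache_mmu_and_jump_sketch
disable_cache_mmu_and_jump_sketch:
    mrs  x5, sctlr_el1          // read system control register (EL1)
    bic  x5, x5, #(1 << 0)      // clear M: MMU enable
    bic  x5, x5, #(1 << 2)      // clear C: data cache enable
    bic  x5, x5, #(1 << 12)     // clear I: instruction cache enable
    msr  sctlr_el1, x5
    isb                         // make the new SCTLR_EL1 value take effect
    br   x4                     // jump to the kernel entry point
"#
);

The routine touches only registers, which is what makes the hand-off in `jump_kernel` safe after the point where stack contents can no longer be trusted.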
@@ -56,49 +93,35 @@ extern "C" {
/// # Safety
///
/// Caller must ensure that `kernel` contains a valid Linux kernel.
-pub unsafe fn jump_linux_el2_or_lower(kernel: &[u8], fdt: &[u8]) -> ! {
+pub unsafe fn jump_linux_el2_or_lower(kernel: &[u8], ramdisk: &[u8], fdt: &[u8]) -> ! {
assert_ne!(current_el(), ExceptionLevel::EL3);
// The following is sufficient to work for existing use cases such as Cuttlefish. But there are
// additional initializations listed at
// https://www.kernel.org/doc/html/v5.11/arm64/booting.html that may need to be performed
// explicitly for other platforms.
- // SAFETY: The function only flushes/disable cache and MMU.
- unsafe { disable_cache_mmu() };
- // SAFETY: By safety requirement of this function, `kernel` contains a valid Linux kernel.
- unsafe {
- asm!(
- "mov x1, 0",
- "mov x2, 0",
- "mov x3, 0",
- "br x4",
- in("x4") kernel.as_ptr() as usize,
- in("x0") fdt.as_ptr() as usize,
- );
- }
- unreachable!();
+ flush_dcache_buffer(kernel);
+ flush_dcache_buffer(ramdisk);
+ flush_dcache_buffer(fdt);
+ // SAFETY:
+ // * `kernel`, `ramdisk` and `fdt` have been flushed.
+ // * By requirement of this function, `kernel` is a valid kernel entry point.
+ unsafe { jump_kernel(kernel.as_ptr() as _, fdt.as_ptr() as _, 0, 0, 0) };
}
/// Boots a ZBI kernel in mode EL2 or lower with the given ZBI blob.
///
/// # Safety
///
-/// Caller must ensure that address at `kernel_entry` contains a valid zircon kernel.
-pub unsafe fn jump_zircon_el2_or_lower(kernel_entry: usize, zbi: &[u8]) -> ! {
+/// Caller must ensure that `zbi_kernel` contains a valid zircon kernel ZBI item and `entry_off` is
+/// the correct kernel entry offset.
+pub unsafe fn jump_zircon_el2_or_lower(zbi_kernel: &[u8], entry_off: usize, zbi_item: &[u8]) -> ! {
assert_ne!(current_el(), ExceptionLevel::EL3);
- // SAFETY: The function only flushes/disable cache and MMU.
- unsafe { disable_cache_mmu() };
- // SAFETY: By safety requirement of this function, `kernel` contains a valid zircon kernel.
- unsafe {
- asm!(
- "mov x1, 0",
- "mov x2, 0",
- "mov x3, 0",
- "br x4",
- in("x4") kernel_entry,
- in("x0") zbi.as_ptr() as usize,
- );
- }
-
- unreachable!();
+ flush_dcache_buffer(zbi_kernel);
+ flush_dcache_buffer(zbi_item);
+ let addr = (zbi_kernel.as_ptr() as usize).checked_add(entry_off).unwrap();
+ // SAFETY:
+ // * `zbi_kernel` and `zbi_item` have been flushed.
+ // * By requirement of this function, the computed `addr` is a valid kernel entry point.
+ unsafe { jump_kernel(addr, zbi_item.as_ptr() as _, 0, 0, 0) };
}
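For reference, a hypothetical call site showing the updated signatures; the surrounding function names and buffers are assumptions, and the two jump functions are assumed to be in scope from this module. The caller remains responsible for the documented safety requirements.

// Illustrative only: `kernel`, `ramdisk`, `fdt`, `zbi_kernel`, `entry_off`
// and `zbi_item` are assumed to be produced by the caller's image loader.
fn boot_linux(kernel: &[u8], ramdisk: &[u8], fdt: &[u8]) -> ! {
    // SAFETY: the caller guarantees `kernel` is a valid Linux kernel image and
    // that all three buffers stay valid and unmodified until the jump.
    unsafe { jump_linux_el2_or_lower(kernel, ramdisk, fdt) }
}

fn boot_zircon(zbi_kernel: &[u8], entry_off: usize, zbi_item: &[u8]) -> ! {
    // SAFETY: the caller guarantees `zbi_kernel` holds a valid zircon kernel
    // ZBI item and that `entry_off` is the entry offset from its kernel header.
    unsafe { jump_zircon_el2_or_lower(zbi_kernel, entry_off, zbi_item) }
}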