author     Yecheng Zhao <zyecheng@google.com>   2024-04-16 23:27:16 +0000
committer  Yecheng Zhao <zyecheng@google.com>   2024-04-22 19:05:49 +0000
commit     276f9de6b1b1d83013360ee3ff5c9f442be164cb (patch)
tree       bdcfa9616caa5216f6c4a04b61812fb30d5cf73c
parent     ed983f7e39d46f01872ebca8a75a27db4732a8e8 (diff)
Flush cache by VA on aarch64
On aarch64, GBL flushes the entire data cache by set/way. However, this is only valid during the core power-up/power-down sequence and is not guaranteed to work outside of it, i.e. in SMP mode. The more reliable approach is to flush by VA ("dc civac"), which is guaranteed to reach the PoC. This CL switches to `flush_dcache_range` from the ATF library and explicitly flushes only the image buffers needed for booting the kernel. The rest of memory, in particular the stack, is left stale, and the kernel jump logic is reworked to use only register values loaded before the MMU/cache are disabled.

Bug: 316349883
Change-Id: I6bc67c562bd3a384879d23bd11344d03d9ee2379
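For context, "flush by VA" means walking a buffer one cache line at a time and issuing "dc civac" for each line. Below is a minimal sketch of that pattern in Rust inline asm; it illustrates the idea behind ATF's `flush_dcache_range` and is not the ATF source (the name `flush_by_va_sketch` is ours):

    use core::arch::asm;

    /// Clean and invalidate [addr, addr + len) to the PoC, one line at a time.
    unsafe fn flush_by_va_sketch(addr: usize, len: usize) {
        // CTR_EL0[19:16] (DminLine) is log2 of the smallest D-cache line in words.
        let ctr: u64;
        asm!("mrs {}, ctr_el0", out(reg) ctr);
        let line = 4usize << ((ctr >> 16) & 0xf); // line size in bytes
        let mut cur = addr & !(line - 1); // align down to a line boundary
        while cur < addr + len {
            // Clean and invalidate by VA to the Point of Coherency.
            asm!("dc civac, {}", in(reg) cur);
            cur += line;
        }
        asm!("dsb sy"); // complete all maintenance before the cache/MMU go off
    }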
-rw-r--r--  gbl/efi/src/android_boot.rs                            |  2
-rw-r--r--  gbl/efi/src/fuchsia_boot.rs                            | 10
-rw-r--r--  gbl/efi/src/utils.rs                                   |  2
-rw-r--r--  gbl/libboot/aarch64_cache_helper/disable_cache_mmu.S   | 73
-rw-r--r--  gbl/libboot/src/aarch64.rs                             | 91
5 files changed, 103 insertions(+), 75 deletions(-)
diff --git a/gbl/efi/src/android_boot.rs b/gbl/efi/src/android_boot.rs
index ad766f8..5576a34 100644
--- a/gbl/efi/src/android_boot.rs
+++ b/gbl/efi/src/android_boot.rs
@@ -371,7 +371,7 @@ pub fn android_boot_demo(entry: EfiEntry) -> Result<()> {
{
let _ = exit_boot_services(entry, remains)?;
// SAFETY: We currently target the Cuttlefish emulator, where the provided images are valid.
- unsafe { boot::aarch64::jump_linux_el2_or_lower(kernel, fdt) };
+ unsafe { boot::aarch64::jump_linux_el2_or_lower(kernel, ramdisk, fdt) };
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
diff --git a/gbl/efi/src/fuchsia_boot.rs b/gbl/efi/src/fuchsia_boot.rs
index 0109241..79d91c0 100644
--- a/gbl/efi/src/fuchsia_boot.rs
+++ b/gbl/efi/src/fuchsia_boot.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use crate::error::{EfiAppError, Result};
-use crate::utils::{aligned_subslice, find_gpt_devices, get_efi_fdt, usize_add};
+use crate::utils::{aligned_subslice, find_gpt_devices, get_efi_fdt, to_usize, usize_add};
use core::fmt::Write;
use core::mem::size_of;
use efi::{efi_print, efi_println, EfiEntry};
@@ -36,7 +36,7 @@ struct ZbiKernelHeader {
// https://fuchsia.googlesource.com/fuchsia/+/4f204d8a0243e84a86af4c527a8edcc1ace1615f/zircon/kernel/target/arm64/boot-shim/BUILD.gn#38
const ZIRCON_KERNEL_ALIGN: usize = 64 * 1024;
-/// Relocates a ZBI kernel to a different buffer and returns the kernel entry address.
+/// Relocates a ZBI kernel to a different buffer and returns the kernel entry offset.
pub fn relocate_kernel(kernel: &[u8], dest: &mut [u8]) -> Result<usize> {
if (dest.as_ptr() as usize % ZIRCON_KERNEL_ALIGN) != 0 {
return Err(EfiAppError::BufferAlignment.into());
@@ -60,7 +60,7 @@ pub fn relocate_kernel(kernel: &[u8], dest: &mut [u8]) -> Result<usize> {
dest_kernel_header.container_header.length = (kernel_size - size_of::<ZbiHeader>())
.try_into()
.map_err(|_| EfiAppError::ArithmeticOverflow)?;
- Ok(usize_add(dest_kernel_header.entry, dest.as_ptr() as usize)?)
+ Ok(to_usize(dest_kernel_header.entry)?)
}
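Because `relocate_kernel` now returns an offset rather than an absolute address, the caller adds it to the base of the relocated buffer. A short sketch of that arithmetic, mirroring what `jump_zircon_el2_or_lower` does further down (variable names are illustrative):

    // `relocated` is the ZIRCON_KERNEL_ALIGN-aligned destination buffer.
    let entry_off = relocate_kernel(kernel, relocated)?;
    let entry_addr = (relocated.as_ptr() as usize)
        .checked_add(entry_off)
        .ok_or(EfiAppError::ArithmeticOverflow)?;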
/// A helper for getting the total size of a ZBI container, including payload and header.
@@ -79,7 +79,7 @@ fn zbi_get_unused_buffer(zbi: &mut [u8]) -> Result<(&mut [u8], &mut [u8])> {
/// Relocate a ZBI kernel to the trailing unused buffer.
///
-/// Returns the original kernel subslice, relocated kernel subslice, and kernel entry address.
+/// Returns the original kernel subslice, relocated kernel subslice, and kernel entry offset.
fn relocate_to_tail(kernel: &mut [u8]) -> Result<(&mut [u8], &mut [u8], usize)> {
let (original, relocated) = zbi_get_unused_buffer(kernel)?;
let relocated = aligned_subslice(relocated, ZIRCON_KERNEL_ALIGN)?;
@@ -202,7 +202,7 @@ pub fn fuchsia_boot_demo(efi_entry: EfiEntry) -> Result<()> {
let (_, remains) = zbi_get_unused_buffer(relocated)?;
let _ = efi::exit_boot_services(efi_entry, remains).unwrap();
// SAFETY: For the demo, we assume the provided images are valid.
- unsafe { boot::aarch64::jump_zircon_el2_or_lower(kernel_entry, original) };
+ unsafe { boot::aarch64::jump_zircon_el2_or_lower(relocated, kernel_entry, original) };
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
diff --git a/gbl/efi/src/utils.rs b/gbl/efi/src/utils.rs
index fca175b..386dfcc 100644
--- a/gbl/efi/src/utils.rs
+++ b/gbl/efi/src/utils.rs
@@ -34,7 +34,7 @@ pub const EFI_DTB_TABLE_GUID: EfiGuid =
EfiGuid::new(0xb1b621d5, 0xf19c, 0x41a5, [0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0]);
/// Checks and converts an integer into usize
-fn to_usize<T: TryInto<usize>>(val: T) -> Result<usize> {
+pub fn to_usize<T: TryInto<usize>>(val: T) -> Result<usize> {
Ok(val.try_into().map_err(|_| EfiAppError::ArithmeticOverflow)?)
}
diff --git a/gbl/libboot/aarch64_cache_helper/disable_cache_mmu.S b/gbl/libboot/aarch64_cache_helper/disable_cache_mmu.S
index 8c602aa..da4ce8d 100644
--- a/gbl/libboot/aarch64_cache_helper/disable_cache_mmu.S
+++ b/gbl/libboot/aarch64_cache_helper/disable_cache_mmu.S
@@ -16,48 +16,53 @@
#include <arch.h>
-.global disable_cache_mmu
+.global disable_cache_mmu_and_jump
-/*
- * Cache flush and Cache/MMU disable should be done in order atomically to make sure RAM is up to
- * date.
+/* ---------------------------------------------------------------
+ * Disable cache, MMU and jump to the given address with arguments
+ *
+ * x0 - x3: Arguments for the target jump address.
+ * x4: The jump address.
+ * ---------------------------------------------------------------
*/
-disable_cache_mmu:
- stp x29, x30, [sp, #-0x10]!
+disable_cache_mmu_and_jump:
+ // Query current EL
+ mrs x5, CurrentEL
+ cmp x5, #(1 << 3)
+ beq asm_disable_cache_mmu_el2
+
/*
- * Call helper function "dcsw_op_all(DCCISW)" from ATF library to flush all cache.
+ * Invalidate instruction cache before disabling it.
*/
- mov x0, #DCCISW
- bl dcsw_op_all
ic iallu
isb
- // Query current EL
- mrs x0, CurrentEL
- cmp x0, #(1 << 3)
- beq disable_cache_mmu_el2
-
-disable_cache_mmu_el1:
- mrs x1, sctlr_el1
- bic x1, x1, #SCTLR_M_BIT
- bic x1, x1, #SCTLR_C_BIT
- bic x1, x1, #SCTLR_I_BIT
- msr sctlr_el1, x1
- b finish
-
-disable_cache_mmu_el2:
- mrs x1, sctlr_el2
- bic x1, x1, #SCTLR_M_BIT
- bic x1, x1, #SCTLR_C_BIT
- bic x1, x1, #SCTLR_I_BIT
- msr sctlr_el2, x1
-
-finish:
- mov x0, #DCCISW
- bl dcsw_op_all
+asm_disable_cache_mmu_el1:
+ mrs x5, sctlr_el1
+ bic x5, x5, #SCTLR_M_BIT
+ bic x5, x5, #SCTLR_C_BIT
+ bic x5, x5, #SCTLR_I_BIT
+ msr sctlr_el1, x5
+ b asm_finish
+
+asm_disable_cache_mmu_el2:
+ mrs x5, sctlr_el2
+ bic x5, x5, #SCTLR_M_BIT
+ bic x5, x5, #SCTLR_C_BIT
+ bic x5, x5, #SCTLR_I_BIT
+ msr sctlr_el2, x5
+
+asm_finish:
ic iallu
isb
+ /*
+ * Invalidate TLB.
+ */
tlbi vmalle1
- ldp x29, x30, [sp], #0x10
- ret
+ br x4
+ /*
+ * Prevent speculative execution.
+ */
+ dsb nsh
+ isb
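Seen from Rust, the register contract above would correspond to an extern declaration like the sketch below. Note that the CL deliberately does not call the symbol this way: a normal `extern "C"` call would go through the compiler-generated prologue and possibly the stale stack, so `jump_kernel` instead branches to the symbol with inline asm after loading x0-x4 by hand. The declaration is purely illustrative:

    extern "C" {
        /// x0-x3: kernel arguments; x4: target jump address (the fifth
        /// AAPCS64 integer argument register). Never returns.
        fn disable_cache_mmu_and_jump(
            arg0: usize,
            arg1: usize,
            arg2: usize,
            arg3: usize,
            addr: usize,
        ) -> !;
    }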
diff --git a/gbl/libboot/src/aarch64.rs b/gbl/libboot/src/aarch64.rs
index 6b84fda..3c4f52c 100644
--- a/gbl/libboot/src/aarch64.rs
+++ b/gbl/libboot/src/aarch64.rs
@@ -47,8 +47,45 @@ pub fn current_el() -> ExceptionLevel {
}
extern "C" {
- /// Clean and invalidate data cache and disable data/instruction cache and MMU.
- fn disable_cache_mmu();
+ /// Clean and invalidate the data cache for the given address range. This function comes from the ATF library.
+ fn flush_dcache_range(addr: usize, len: usize);
+}
+
+/// Flush all data cache for the given buffer.
+fn flush_dcache_buffer(buf: &[u8]) {
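+ // SAFETY: `buf` is a valid slice, so the derived address range is safe to flush.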
+ unsafe { flush_dcache_range(buf.as_ptr() as usize, buf.len()) }
+ // SAFETY: Assembly code for instruction synchronization.
+ unsafe { asm!("isb") };
+}
+
+/// Disable cache, MMU and jump to the given kernel address with arguments.
+///
+/// # Args
+///
+/// * `addr`: Address to jump.
+/// * `arg[0-3]`: Arguments for the target jump address.
+///
+/// # Safety
+///
+/// * Caller must ensure that `addr` contains valid execution code.
+/// * Caller must ensure that the data cache has been flushed for every memory region containing
+///   data to be accessed by the destination code, including the execution code itself at `addr`.
+unsafe fn jump_kernel(addr: usize, arg0: usize, arg1: usize, arg2: usize, arg3: usize) -> ! {
+ // TODO(b/334962949): Disable other things such as interrupts, async aborts, branch prediction, etc.
+
+ // After disabling the MMU and cache, memory regions with unflushed cache lines are stale and
+ // cannot be trusted, including stack memory. Therefore all needed data, including local
+ // variables, must first be loaded into registers. `disable_cache_mmu_and_jump` operates only
+ // on registers and does not access the stack or any other memory.
+ asm!(
+ "b disable_cache_mmu_and_jump",
+ in("x0") arg0,
+ in("x1") arg1,
+ in("x2") arg2,
+ in("x3") arg3,
+ in("x4") addr,
+ );
+ unreachable!();
}
/// Boots a Linux kernel in mode EL2 or lower with the given FDT blob.
@@ -56,49 +93,35 @@ extern "C" {
/// # Safety
///
/// Caller must ensure that `kernel` contains a valid Linux kernel.
-pub unsafe fn jump_linux_el2_or_lower(kernel: &[u8], fdt: &[u8]) -> ! {
+pub unsafe fn jump_linux_el2_or_lower(kernel: &[u8], ramdisk: &[u8], fdt: &[u8]) -> ! {
assert_ne!(current_el(), ExceptionLevel::EL3);
// The following is sufficient to work for existing use cases such as Cuttlefish. But there are
// additional initializations listed
// https://www.kernel.org/doc/html/v5.11/arm64/booting.html that may need to be performed
// explicitly for other platforms.
- // SAFETY: The function only flushes/disable cache and MMU.
- unsafe { disable_cache_mmu() };
- // SAFETY: By safety requirement of this function, `kernel` contains a valid Linux kernel.
- unsafe {
- asm!(
- "mov x1, 0",
- "mov x2, 0",
- "mov x3, 0",
- "br x4",
- in("x4") kernel.as_ptr() as usize,
- in("x0") fdt.as_ptr() as usize,
- );
- }
- unreachable!();
+ flush_dcache_buffer(kernel);
+ flush_dcache_buffer(ramdisk);
+ flush_dcache_buffer(fdt);
+ // SAFETY:
+ // * `kernel`, `ramdisk` and `fdt` have been flushed.
+ // * By requirement of this function, `kernel` is a valid kernel entry point.
+ unsafe { jump_kernel(kernel.as_ptr() as _, fdt.as_ptr() as _, 0, 0, 0) };
}
/// Boots a ZBI kernel in mode EL2 or lower with the given ZBI blob.
///
/// # Safety
///
-/// Caller must ensure that address at `kernel_entry` contains a valid zircon kernel.
-pub unsafe fn jump_zircon_el2_or_lower(kernel_entry: usize, zbi: &[u8]) -> ! {
+/// Caller must ensure that `zbi_kernel` contains a valid zircon kernel ZBI item and `entry_off` is
+/// the correct kernel entry offset.
+pub unsafe fn jump_zircon_el2_or_lower(zbi_kernel: &[u8], entry_off: usize, zbi_item: &[u8]) -> ! {
assert_ne!(current_el(), ExceptionLevel::EL3);
- // SAFETY: The function only flushes/disable cache and MMU.
- unsafe { disable_cache_mmu() };
- // SAFETY: By safety requirement of this function, `kernel` contains a valid zircon kernel.
- unsafe {
- asm!(
- "mov x1, 0",
- "mov x2, 0",
- "mov x3, 0",
- "br x4",
- in("x4") kernel_entry,
- in("x0") zbi.as_ptr() as usize,
- );
- }
-
- unreachable!();
+ flush_dcache_buffer(zbi_kernel);
+ flush_dcache_buffer(zbi_item);
+ let addr = (zbi_kernel.as_ptr() as usize).checked_add(entry_off).unwrap();
+ // SAFETY:
+ // * `zbi_kernel` and `zbi_item` have been flushed.
+ // * By requirement of this function, the computed `addr` is a valid kernel entry point.
+ unsafe { jump_kernel(addr, zbi_item.as_ptr() as _, 0, 0, 0) };
}