Diffstat (limited to 'mali_kbase/mmu/mali_kbase_mmu_hw_direct.c')
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw_direct.c | 609
1 file changed, 498 insertions(+), 111 deletions(-)
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
index cdf9a84..d5411bd 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,15 +24,40 @@
#include <mali_kbase.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem.h>
+#include <mali_kbase_reset_gpu.h>
#include <mmu/mali_kbase_mmu_hw.h>
#include <tl/mali_kbase_tracepoints.h>
+#include <linux/delay.h>
+
+#if MALI_USE_CSF
+/**
+ * mmu_has_flush_skip_pgd_levels() - Check if the GPU has the feature
+ * AS_LOCKADDR_FLUSH_SKIP_LEVELS
+ *
+ * @gpu_props: GPU properties for the GPU instance.
+ *
+ * This function returns whether a cache flush can apply the skip flags of
+ * AS_LOCKADDR_FLUSH_SKIP_LEVELS.
+ *
+ * Return: True if the cache flush supports this feature.
+ */
+static bool mmu_has_flush_skip_pgd_levels(struct kbase_gpu_props const *gpu_props)
+{
+ u32 const signature =
+ gpu_props->props.raw_props.gpu_id & (GPU_ID2_ARCH_MAJOR | GPU_ID2_ARCH_REV);
+
+ return signature >= (u32)GPU_ID2_PRODUCT_MAKE(12, 0, 4, 0);
+}
+#endif
/**
* lock_region() - Generate lockaddr to lock memory region in MMU
- * @gpu_props: GPU properties for finding the MMU lock region size
- * @pfn: Starting page frame number of the region to lock
- * @num_pages: Number of pages to lock. It must be greater than 0.
- * @lockaddr: Address and size of memory region to lock
+ *
+ * @gpu_props: GPU properties for finding the MMU lock region size.
+ * @lockaddr: Address and size of memory region to lock.
+ * @op_param: Pointer to a struct containing the starting page frame number of
+ * the region to lock, the number of pages to lock and page table
+ * levels to skip when flushing (if supported).
*
* The lockaddr value is a combination of the starting address and
* the size of the region that encompasses all the memory pages to lock.
@@ -63,14 +88,14 @@
*
* Return: 0 if success, or an error code on failure.
*/
-static int lock_region(struct kbase_gpu_props const *gpu_props, u64 pfn, u32 num_pages,
- u64 *lockaddr)
+static int lock_region(struct kbase_gpu_props const *gpu_props, u64 *lockaddr,
+ const struct kbase_mmu_hw_op_param *op_param)
{
- const u64 lockaddr_base = pfn << PAGE_SHIFT;
- const u64 lockaddr_end = ((pfn + num_pages) << PAGE_SHIFT) - 1;
+ const u64 lockaddr_base = op_param->vpfn << PAGE_SHIFT;
+ const u64 lockaddr_end = ((op_param->vpfn + op_param->nr) << PAGE_SHIFT) - 1;
u64 lockaddr_size_log2;
- if (num_pages == 0)
+ if (op_param->nr == 0)
return -EINVAL;
/* The MMU lock region is a self-aligned region whose size
@@ -101,7 +126,7 @@ static int lock_region(struct kbase_gpu_props const *gpu_props, u64 pfn, u32 num
* therefore the highest bit that differs is bit #16
* and the region size (as a logarithm) is 16 + 1 = 17, i.e. 128 kB.
*/
- lockaddr_size_log2 = fls(lockaddr_base ^ lockaddr_end);
+ lockaddr_size_log2 = fls64(lockaddr_base ^ lockaddr_end);
/* Cap the size against minimum and maximum values allowed. */
if (lockaddr_size_log2 > KBASE_LOCK_REGION_MAX_SIZE_LOG2)
@@ -123,40 +148,69 @@ static int lock_region(struct kbase_gpu_props const *gpu_props, u64 pfn, u32 num
*lockaddr = lockaddr_base & ~((1ull << lockaddr_size_log2) - 1);
*lockaddr |= lockaddr_size_log2 - 1;
+#if MALI_USE_CSF
+ if (mmu_has_flush_skip_pgd_levels(gpu_props))
+ *lockaddr =
+ AS_LOCKADDR_FLUSH_SKIP_LEVELS_SET(*lockaddr, op_param->flush_skip_levels);
+#endif
+
return 0;
}
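
/* Editor's note: the following standalone sketch is not part of the patch.
 * It reproduces the lockaddr arithmetic of lock_region() in plain userspace C
 * so the encoding can be experimented with in isolation. The minimum region
 * size below is an assumption for illustration; the real driver derives its
 * limits from the GPU properties and the KBASE_LOCK_REGION_*_SIZE_LOG2
 * definitions, and it also rejects regions above the maximum size.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12
#define EXAMPLE_MIN_SIZE_LOG2 15 /* assumed 32 kB minimum lock region */

/* Assumes nr > 0, as enforced by lock_region() itself. */
static uint64_t example_lockaddr(uint64_t vpfn, uint32_t nr)
{
	const uint64_t base = vpfn << EXAMPLE_PAGE_SHIFT;
	const uint64_t end = ((vpfn + nr) << EXAMPLE_PAGE_SHIFT) - 1;
	/* Size (as a logarithm) is 1 + the index of the highest bit that
	 * differs between the first and last byte of the region, i.e.
	 * fls64(base ^ end) in kernel terms.
	 */
	unsigned int size_log2 = 64 - (unsigned int)__builtin_clzll(base ^ end);

	if (size_log2 < EXAMPLE_MIN_SIZE_LOG2)
		size_log2 = EXAMPLE_MIN_SIZE_LOG2;

	/* Self-aligned base, with (size_log2 - 1) encoded in the low bits */
	return (base & ~((1ull << size_log2) - 1)) | (size_log2 - 1);
}

int main(void)
{
	/* 65 pages starting at PFN 16 span 0x10000..0x50FFF, so the lock
	 * region is 512 kB (log2 = 19), self-aligned down to 0x0.
	 * Prints "lockaddr = 0x12" (base 0x0, size field 19 - 1 = 18).
	 */
	printf("lockaddr = %#llx\n", (unsigned long long)example_lockaddr(16, 65));
	return 0;
}
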
-static int wait_ready(struct kbase_device *kbdev,
- unsigned int as_nr)
+/**
+ * wait_ready() - Wait for previously issued MMU command to complete.
+ *
+ * @kbdev: Kbase device to wait for a MMU command to complete.
+ * @as_nr: Address space to wait for a MMU command to complete.
+ *
+ * Reset GPU if the wait for previously issued command fails.
+ *
+ * Return: 0 on successful completion, or a negative error code on failure.
+ */
+static int wait_ready(struct kbase_device *kbdev, unsigned int as_nr)
{
- unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+ const ktime_t wait_loop_start = ktime_get_raw();
+ const u32 mmu_as_inactive_wait_time_ms = kbdev->mmu_or_gpu_cache_op_wait_time_ms;
+ s64 diff;
- /* Wait for the MMU status to indicate there is no active command. */
- while (--max_loops &&
- kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS)) &
- AS_STATUS_AS_ACTIVE) {
- ;
- }
+ if (unlikely(kbdev->mmu_unresponsive))
+ return -EBUSY;
- if (WARN_ON_ONCE(max_loops == 0)) {
- dev_err(kbdev->dev,
- "AS_ACTIVE bit stuck for as %u, might be caused by slow/unstable GPU clock or possible faulty FPGA connector",
- as_nr);
- return -1;
- }
+ do {
+ unsigned int i;
- return 0;
+ for (i = 0; i < 1000; i++) {
+ /* Wait for the MMU status to indicate there is no active command */
+ if (!(kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_STATUS))) &
+ AS_STATUS_AS_ACTIVE))
+ return 0;
+ }
+
+ diff = ktime_to_ms(ktime_sub(ktime_get_raw(), wait_loop_start));
+ } while (diff < mmu_as_inactive_wait_time_ms);
+
+ dev_err(kbdev->dev,
+ "AS_ACTIVE bit stuck for as %u. Might be caused by unstable GPU clk/pwr or faulty system",
+ as_nr);
+ kbdev->mmu_unresponsive = true;
+ if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
+ kbase_reset_gpu_locked(kbdev);
+
+ return -ETIMEDOUT;
}
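
/* Editor's note: standalone sketch, not part of the patch. It shows the
 * batched poll-with-deadline pattern that wait_ready() (and the power
 * transition helpers added below) use: the clock is sampled only once per
 * 1000 polls so the common, fast path stays cheap, while the overall wait
 * is bounded by a millisecond budget rather than a fixed loop count.
 */
#include <stdbool.h>
#include <time.h>

static long long example_elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC_RAW, &now);
	return (now.tv_sec - start->tv_sec) * 1000LL +
	       (now.tv_nsec - start->tv_nsec) / 1000000LL;
}

/* Returns 0 once condition() reports true, -1 on timeout. */
static int example_poll_until(bool (*condition)(void), long long timeout_ms)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC_RAW, &start);
	do {
		unsigned int i;

		for (i = 0; i < 1000; i++) {
			if (condition())
				return 0;
		}
	} while (example_elapsed_ms(&start) < timeout_ms);

	return -1;
}
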
static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
{
- int status;
-
/* write AS_COMMAND when MMU is ready to accept another command */
- status = wait_ready(kbdev, as_nr);
- if (status == 0)
- kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);
- else {
+ const int status = wait_ready(kbdev, as_nr);
+
+ if (likely(status == 0))
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_COMMAND)), cmd);
+ else if (status == -EBUSY) {
+ dev_dbg(kbdev->dev,
+ "Skipped the wait for AS_ACTIVE bit for as %u, before sending MMU command %u",
+ as_nr, cmd);
+ } else {
dev_err(kbdev->dev,
"Wait for AS_ACTIVE bit failed for as %u, before sending MMU command %u",
as_nr, cmd);
@@ -165,6 +219,131 @@ static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
return status;
}
+#if MALI_USE_CSF
+static int wait_l2_power_trans_complete(struct kbase_device *kbdev)
+{
+ const ktime_t wait_loop_start = ktime_get_raw();
+ const u32 pwr_trans_wait_time_ms = kbdev->mmu_or_gpu_cache_op_wait_time_ms;
+ s64 diff;
+ u64 value;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ do {
+ unsigned int i;
+
+ for (i = 0; i < 1000; i++) {
+ value = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_PWRTRANS_HI));
+ value <<= 32;
+ value |= kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_PWRTRANS_LO));
+
+ if (!value)
+ return 0;
+ }
+
+ diff = ktime_to_ms(ktime_sub(ktime_get_raw(), wait_loop_start));
+ } while (diff < pwr_trans_wait_time_ms);
+
+ dev_warn(kbdev->dev, "L2_PWRTRANS %016llx set for too long", value);
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
+ kbase_reset_gpu_locked(kbdev);
+
+ return -ETIMEDOUT;
+}
+
+#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
+static int wait_cores_power_trans_complete(struct kbase_device *kbdev)
+{
+#define WAIT_TIMEOUT 50000 /* 50ms timeout */
+#define DELAY_TIME_IN_US 1
+ const int max_iterations = WAIT_TIMEOUT;
+ int loop;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (loop = 0; loop < max_iterations; loop++) {
+ u32 lo =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO));
+ u32 hi =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI));
+
+ if (!lo && !hi)
+ break;
+
+ udelay(DELAY_TIME_IN_US);
+ }
+
+ if (loop == max_iterations) {
+ dev_warn(kbdev->dev, "SHADER_PWRTRANS %08x%08x set for too long",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI)),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO)));
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * apply_hw_issue_GPU2019_3901_wa - Apply WA for the HW issue GPU2019_3901
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @mmu_cmd: Pointer to the variable containing the value of the MMU command
+ * that needs to be sent to flush the L2 cache and do an
+ * implicit unlock.
+ * @as_nr: Address space number for which MMU command needs to be
+ * sent.
+ *
+ * This function ensures that the flush of LSC is not missed for the pages that
+ * were unmapped from the GPU, due to the power down transition of shader cores.
+ *
+ * Return: 0 if the WA was successfully applied, non-zero otherwise.
+ */
+static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_cmd,
+ unsigned int as_nr)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Check if L2 is OFF. The cores also must be OFF if L2 is not up, so
+ * the workaround can be safely skipped.
+ */
+ if (kbdev->pm.backend.l2_state != KBASE_L2_OFF) {
+ if (unlikely(*mmu_cmd != AS_COMMAND_FLUSH_MEM)) {
+ dev_warn(kbdev->dev, "Unexpected MMU command(%u) received", *mmu_cmd);
+ return -EINVAL;
+ }
+
+ /* Wait for the LOCK MMU command to complete, issued by the caller */
+ ret = wait_ready(kbdev, as_nr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = kbase_gpu_cache_flush_and_busy_wait(kbdev,
+ GPU_COMMAND_CACHE_CLN_INV_LSC);
+ if (unlikely(ret))
+ return ret;
+
+ ret = wait_cores_power_trans_complete(kbdev);
+ if (unlikely(ret)) {
+ if (kbase_prepare_to_reset_gpu_locked(kbdev,
+ RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
+ kbase_reset_gpu_locked(kbdev);
+ return ret;
+ }
+
+ /* As LSC is guaranteed to have been flushed we can use FLUSH_PT
+ * MMU command to only flush the L2.
+ */
+ *mmu_cmd = AS_COMMAND_FLUSH_PT;
+ }
+
+ return ret;
+}
+#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
+#endif /* MALI_USE_CSF */
+
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
{
struct kbase_mmu_setup *current_setup = &as->current_setup;
@@ -195,19 +374,18 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
}
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
- transcfg);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSCFG_LO)), transcfg);
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSCFG_HI)),
(transcfg >> 32) & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSTAB_LO)),
current_setup->transtab & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSTAB_HI)),
(current_setup->transtab >> 32) & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_MEMATTR_LO)),
current_setup->memattr & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_MEMATTR_HI)),
(current_setup->memattr >> 32) & 0xFFFFFFFFUL);
KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(kbdev, as,
@@ -222,93 +400,302 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
#endif
}
-int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_mmu_hw_op_param *op_param)
+/**
+ * mmu_command_instr - Record an MMU command for instrumentation purposes.
+ *
+ * @kbdev: Kbase device used to issue MMU operation on.
+ * @kctx_id: Kernel context ID for MMU command tracepoint.
+ * @cmd: Command issued to the MMU.
+ * @lock_addr: Address of memory region locked for the operation.
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
+ */
+static void mmu_command_instr(struct kbase_device *kbdev, u32 kctx_id, u32 cmd, u64 lock_addr,
+ enum kbase_caller_mmu_sync_info mmu_sync_info)
+{
+ u64 lock_addr_base = AS_LOCKADDR_LOCKADDR_BASE_GET(lock_addr);
+ u32 lock_addr_size = AS_LOCKADDR_LOCKADDR_SIZE_GET(lock_addr);
+
+ bool is_mmu_synchronous = (mmu_sync_info == CALLER_MMU_SYNC);
+
+ KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, kctx_id, cmd, is_mmu_synchronous, lock_addr_base,
+ lock_addr_size);
+}
+
+/* Helper function to program the LOCKADDR register before LOCK/UNLOCK command
+ * is issued.
+ */
+static int mmu_hw_set_lock_addr(struct kbase_device *kbdev, int as_nr, u64 *lock_addr,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ int ret;
+
+ ret = lock_region(&kbdev->gpu_props, lock_addr, op_param);
+
+ if (!ret) {
+ /* Set the region that needs to be updated */
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_LOCKADDR_LO)),
+ *lock_addr & 0xFFFFFFFFUL);
+ kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_LOCKADDR_HI)),
+ (*lock_addr >> 32) & 0xFFFFFFFFUL);
+ }
+ return ret;
+}
+
+/**
+ * mmu_hw_do_lock_no_wait - Issue LOCK command to the MMU and return without
+ * waiting for its completion.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @lock_addr: Address of memory region locked for this operation.
+ * @op_param: Pointer to a struct containing information about the MMU operation.
+ *
+ * Return: 0 if issuing the command was successful, otherwise an error code.
+ */
+static int mmu_hw_do_lock_no_wait(struct kbase_device *kbdev, struct kbase_as *as, u64 *lock_addr,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ int ret;
+
+ ret = mmu_hw_set_lock_addr(kbdev, as->number, lock_addr, op_param);
+
+ if (likely(!ret))
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
+
+ return ret;
+}
+
+/**
+ * mmu_hw_do_lock - Issue LOCK command to the MMU and wait for its completion.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to a struct containing information about the MMU operation.
+ *
+ * Return: 0 if issuing the LOCK command was successful, otherwise an error code.
+ */
+static int mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
{
int ret;
u64 lock_addr = 0x0;
- if (WARN_ON(kbdev == NULL) ||
- WARN_ON(as == NULL) ||
- WARN_ON(op_param == NULL))
+ if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
return -EINVAL;
- lockdep_assert_held(&kbdev->mmu_hw_mutex);
+ ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);
+
+ if (!ret)
+ ret = wait_ready(kbdev, as->number);
+
+ if (!ret)
+ mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_LOCK, lock_addr,
+ op_param->mmu_sync_info);
+ else
+ dev_err(kbdev->dev, "AS_ACTIVE bit stuck after sending UNLOCK command");
- if (op_param->op == KBASE_MMU_OP_UNLOCK) {
- /* Unlock doesn't require a lock first */
- ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+ return ret;
+}
+
+int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
- /* Wait for UNLOCK command to complete */
+ return mmu_hw_do_lock(kbdev, as, op_param);
+}
+
+int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ int ret = 0;
+
+ if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+ return -EINVAL;
+
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+
+ /* Wait for UNLOCK command to complete */
+ if (likely(!ret))
ret = wait_ready(kbdev, as->number);
- if (!ret) {
- /* read MMU_AS_CONTROL.LOCKADDR register */
- lock_addr |= (u64)kbase_reg_read(kbdev,
- MMU_AS_REG(as->number, AS_LOCKADDR_HI)) << 32;
- lock_addr |= (u64)kbase_reg_read(kbdev,
- MMU_AS_REG(as->number, AS_LOCKADDR_LO));
+ if (likely(!ret)) {
+ u64 lock_addr = 0x0;
+ /* read MMU_AS_CONTROL.LOCKADDR register */
+ lock_addr |= (u64)kbase_reg_read(
+ kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_LOCKADDR_HI)))
+ << 32;
+ lock_addr |= (u64)kbase_reg_read(
+ kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_LOCKADDR_LO)));
+
+ mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_UNLOCK,
+ lock_addr, op_param->mmu_sync_info);
+ }
+
+ return ret;
+}
+
+int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ int ret = 0;
+ u64 lock_addr = 0x0;
+
+ if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+ return -EINVAL;
+
+ ret = mmu_hw_set_lock_addr(kbdev, as->number, &lock_addr, op_param);
+
+ if (!ret)
+ ret = kbase_mmu_hw_do_unlock_no_addr(kbdev, as,
+ op_param);
+
+ return ret;
+}
+
+/**
+ * mmu_hw_do_flush - Flush MMU and wait for its completion.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to a struct containing information about the MMU operation.
+ * @hwaccess_locked: Flag to indicate if the hwaccess_lock is already held by the caller.
+ *
+ * Return: 0 if flushing MMU was successful, otherwise an error code.
+ */
+static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param, bool hwaccess_locked)
+{
+ int ret;
+ u64 lock_addr = 0x0;
+ u32 mmu_cmd = AS_COMMAND_FLUSH_MEM;
+ const enum kbase_mmu_op_type flush_op = op_param->op;
+
+ if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+ return -EINVAL;
+
+ /* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
+ * this point would be unexpected.
+ */
+ if (flush_op != KBASE_MMU_OP_FLUSH_PT && flush_op != KBASE_MMU_OP_FLUSH_MEM) {
+ dev_err(kbdev->dev, "Unexpected flush operation received");
+ return -EINVAL;
+ }
+
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+ if (flush_op == KBASE_MMU_OP_FLUSH_PT)
+ mmu_cmd = AS_COMMAND_FLUSH_PT;
+
+ /* Lock the region that needs to be updated */
+ ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);
+ if (ret)
+ return ret;
+
+#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_NO_MALI)
+ /* WA for the BASE_HW_ISSUE_GPU2019_3901. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3901) &&
+ mmu_cmd == AS_COMMAND_FLUSH_MEM) {
+ if (!hwaccess_locked) {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ } else {
+ ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
}
- } else if (op_param->op >= KBASE_MMU_OP_FIRST &&
- op_param->op < KBASE_MMU_OP_COUNT) {
- ret = lock_region(&kbdev->gpu_props, op_param->vpfn, op_param->nr, &lock_addr);
-
- if (!ret) {
- /* Lock the region that needs to be updated */
- kbase_reg_write(kbdev,
- MMU_AS_REG(as->number, AS_LOCKADDR_LO),
- lock_addr & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev,
- MMU_AS_REG(as->number, AS_LOCKADDR_HI),
- (lock_addr >> 32) & 0xFFFFFFFFUL);
- write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
-
- /* Translate and send operation to HW */
- switch (op_param->op) {
- case KBASE_MMU_OP_FLUSH_PT:
- write_cmd(kbdev, as->number,
- AS_COMMAND_FLUSH_PT);
- break;
- case KBASE_MMU_OP_FLUSH_MEM:
- write_cmd(kbdev, as->number,
- AS_COMMAND_FLUSH_MEM);
- break;
- case KBASE_MMU_OP_LOCK:
- /* No further operation. */
- break;
- default:
- dev_warn(kbdev->dev,
- "Unsupported MMU operation (op=%d).\n",
- op_param->op);
- return -EINVAL;
- };
-
- /* Wait for the command to complete */
- ret = wait_ready(kbdev, as->number);
+
+ if (ret) {
+ dev_warn(
+ kbdev->dev,
+ "Failed to apply WA for HW issue when doing MMU flush op on VA range %llx-%llx for AS %u",
+ op_param->vpfn << PAGE_SHIFT,
+ ((op_param->vpfn + op_param->nr) << PAGE_SHIFT) - 1, as->number);
+ /* Continue with the MMU flush operation */
}
- } else {
- /* Code should not reach here. */
- dev_warn(kbdev->dev, "Invalid mmu operation (op=%d).\n",
- op_param->op);
+ }
+#endif
+
+ ret = write_cmd(kbdev, as->number, mmu_cmd);
+
+ /* Wait for the command to complete */
+ if (likely(!ret))
+ ret = wait_ready(kbdev, as->number);
+
+ if (likely(!ret)) {
+ mmu_command_instr(kbdev, op_param->kctx_id, mmu_cmd, lock_addr,
+ op_param->mmu_sync_info);
+#if MALI_USE_CSF
+ if (flush_op == KBASE_MMU_OP_FLUSH_MEM &&
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa &&
+ kbdev->pm.backend.l2_state == KBASE_L2_PEND_OFF)
+ ret = wait_l2_power_trans_complete(kbdev);
+#endif
+ }
+
+ return ret;
+}
+
+int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ return mmu_hw_do_flush(kbdev, as, op_param, true);
+}
+
+int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ return mmu_hw_do_flush(kbdev, as, op_param, false);
+}
+
+int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
+{
+ int ret, ret2;
+ u32 gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2_LSC;
+ const enum kbase_mmu_op_type flush_op = op_param->op;
+
+ if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+ return -EINVAL;
+
+ /* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
+ * this point would be unexpected.
+ */
+ if (flush_op != KBASE_MMU_OP_FLUSH_PT && flush_op != KBASE_MMU_OP_FLUSH_MEM) {
+ dev_err(kbdev->dev, "Unexpected flush operation received");
return -EINVAL;
}
- /* MMU command instrumentation */
- if (!ret) {
- u64 lock_addr_base = AS_LOCKADDR_LOCKADDR_BASE_GET(lock_addr);
- u32 lock_addr_size = AS_LOCKADDR_LOCKADDR_SIZE_GET(lock_addr);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+ if (flush_op == KBASE_MMU_OP_FLUSH_PT)
+ gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2;
+
+ /* 1. Issue MMU_AS_CONTROL.COMMAND.LOCK operation. */
+ ret = mmu_hw_do_lock(kbdev, as, op_param);
+ if (ret)
+ return ret;
- bool is_mmu_synchronous = false;
+ /* 2. Issue GPU_CONTROL.COMMAND.FLUSH_CACHES operation */
+ ret = kbase_gpu_cache_flush_and_busy_wait(kbdev, gpu_cmd);
- if (op_param->mmu_sync_info == CALLER_MMU_SYNC)
- is_mmu_synchronous = true;
+ /* 3. Issue MMU_AS_CONTROL.COMMAND.UNLOCK operation. */
+ ret2 = kbase_mmu_hw_do_unlock_no_addr(kbdev, as, op_param);
- KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, op_param->kctx_id,
- op_param->op, is_mmu_synchronous,
- lock_addr_base, lock_addr_size);
+#if MALI_USE_CSF
+ if (!ret && !ret2) {
+ if (flush_op == KBASE_MMU_OP_FLUSH_MEM &&
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa &&
+ kbdev->pm.backend.l2_state == KBASE_L2_PEND_OFF)
+ ret = wait_l2_power_trans_complete(kbdev);
}
+#endif
- return ret;
+ return ret ?: ret2;
}
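
/* Editor's note: hypothetical caller-side sketch, not part of the patch. It
 * illustrates how the new op_param-based flush interface might be invoked;
 * every field and symbol below is taken from uses visible in this diff, but
 * the helper itself (example_flush_range) does not exist in the driver. Per
 * the lockdep assertions above, the caller holds kbdev->mmu_hw_mutex and,
 * for kbase_mmu_hw_do_flush(), does not hold the hwaccess_lock.
 */
static int example_flush_range(struct kbase_device *kbdev, struct kbase_as *as,
			       u64 vpfn, u32 nr_pages, u32 kctx_id)
{
	const struct kbase_mmu_hw_op_param op_param = {
		.vpfn = vpfn,
		.nr = nr_pages,
		.op = KBASE_MMU_OP_FLUSH_PT, /* flush L2 only; FLUSH_MEM also flushes LSC */
		.kctx_id = kctx_id,
		.mmu_sync_info = CALLER_MMU_SYNC,
#if MALI_USE_CSF
		.flush_skip_levels = 0, /* do not skip any page table levels */
#endif
	};

	return kbase_mmu_hw_do_flush(kbdev, as, &op_param);
}
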
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
@@ -333,7 +720,7 @@ void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
pf_bf_mask |= MMU_BUS_ERROR(as->number);
#endif
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask);
+ kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_CLEAR), pf_bf_mask);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
@@ -357,15 +744,15 @@ void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
if (kbdev->irq_reset_flush)
goto unlock;
- irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)) |
- MMU_PAGE_FAULT(as->number);
+ irq_mask =
+ kbase_reg_read(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK)) | MMU_PAGE_FAULT(as->number);
#if !MALI_USE_CSF
if (type == KBASE_MMU_FAULT_TYPE_BUS ||
type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
irq_mask |= MMU_BUS_ERROR(as->number);
#endif
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask);
+ kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK), irq_mask);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);