summary refs log tree commit diff
path: root/mali_kbase/mmu/mali_kbase_mmu_hw.h
diff options
context:
space:
mode:
Diffstat (limited to 'mali_kbase/mmu/mali_kbase_mmu_hw.h')
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw.h  155
1 file changed, 120 insertions(+), 35 deletions(-)
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw.h b/mali_kbase/mmu/mali_kbase_mmu_hw.h
index 31658e0..49e050e 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2014-2015, 2018-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -55,32 +55,14 @@ enum kbase_mmu_fault_type {
};
/**
- * enum kbase_mmu_op_type - enum for MMU operations
- * @KBASE_MMU_OP_NONE: To help catch uninitialized struct
- * @KBASE_MMU_OP_FIRST: The lower boundary of enum
- * @KBASE_MMU_OP_LOCK: Lock memory region
- * @KBASE_MMU_OP_UNLOCK: Unlock memory region
- * @KBASE_MMU_OP_FLUSH_PT: Flush page table (CLN+INV L2 only)
- * @KBASE_MMU_OP_FLUSH_MEM: Flush memory (CLN+INV L2+LSC)
- * @KBASE_MMU_OP_COUNT: The upper boundary of enum
- */
-enum kbase_mmu_op_type {
- KBASE_MMU_OP_NONE = 0, /* Must be zero */
- KBASE_MMU_OP_FIRST, /* Must be the first non-zero op */
- KBASE_MMU_OP_LOCK = KBASE_MMU_OP_FIRST,
- KBASE_MMU_OP_UNLOCK,
- KBASE_MMU_OP_FLUSH_PT,
- KBASE_MMU_OP_FLUSH_MEM,
- KBASE_MMU_OP_COUNT /* Must be the last in enum */
-};
-
-/**
- * struct kbase_mmu_hw_op_param - parameters for kbase_mmu_hw_do_operation()
- * @vpfn: MMU Virtual Page Frame Number to start the operation on.
- * @nr: Number of pages to work on.
- * @op: Operation type (written to ASn_COMMAND).
- * @kctx_id: Kernel context ID for MMU command tracepoint
- * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
+ * struct kbase_mmu_hw_op_param - parameters for kbase_mmu_hw_do_* functions
+ * @vpfn: MMU Virtual Page Frame Number to start the operation on.
+ * @nr: Number of pages to work on.
+ * @op: Operation type (written to AS_COMMAND).
+ * @kctx_id: Kernel context ID for MMU command tracepoint.
+ * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
+ * @flush_skip_levels: Page table levels to skip flushing. (Only
+ * applicable if GPU supports feature)
*/
struct kbase_mmu_hw_op_param {
u64 vpfn;
@@ -88,6 +70,7 @@ struct kbase_mmu_hw_op_param {
enum kbase_mmu_op_type op;
u32 kctx_id;
enum kbase_caller_mmu_sync_info mmu_sync_info;
+ u64 flush_skip_levels;
};
/**
@@ -102,18 +85,120 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev,
struct kbase_as *as);
/**
- * kbase_mmu_hw_do_operation - Issue an operation to the MMU.
- * @kbdev: kbase device to issue the MMU operation on.
- * @as: address space to issue the MMU operation on.
- * @op_param: parameters for the operation.
+ * kbase_mmu_hw_do_lock - Issue LOCK command to the MMU and program
+ * the LOCKADDR register.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * hwaccess_lock needs to be held when calling this function.
+ *
+ * Return: 0 if issuing the command was successful, otherwise an error code.
+ */
+int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
+
+/**
+ * kbase_mmu_hw_do_unlock_no_addr - Issue UNLOCK command to the MMU without
+ * programming the LOCKADDR register and wait
+ * for it to complete before returning.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * This function should be called for GPU where GPU command is used to flush
+ * the cache(s) instead of MMU command.
+ *
+ * Return: 0 if issuing the command was successful, otherwise an error code.
+ */
+int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
+
+/**
+ * kbase_mmu_hw_do_unlock - Issue UNLOCK command to the MMU and wait for it
+ * to complete before returning.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * Return: 0 if issuing the command was successful, otherwise an error code.
+ */
+int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
+/**
+ * kbase_mmu_hw_do_lock - Issue a LOCK operation to the MMU.
*
- * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that
- * is associated with the provided kbase_context over the specified range
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * Context: Acquires the hwaccess_lock, expects the caller to hold the mmu_hw_mutex
*
* Return: Zero if the operation was successful, non-zero otherwise.
*/
-int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_mmu_hw_op_param *op_param);
+int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
+
+/**
+ * kbase_mmu_hw_do_flush - Issue a flush operation to the MMU.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * Issue a flush operation on the address space as per the information
+ * specified inside @op_param. This function should not be called for
+ * GPUs where MMU command to flush the cache(s) is deprecated.
+ * mmu_hw_mutex needs to be held when calling this function.
+ *
+ * Return: 0 if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
+
+/**
+ * kbase_mmu_hw_do_flush_locked - Issue a flush operation to the MMU.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * Issue a flush operation on the address space as per the information
+ * specified inside @op_param. This function should not be called for
+ * GPUs where MMU command to flush the cache(s) is deprecated.
+ * Both mmu_hw_mutex and hwaccess_lock need to be held when calling this
+ * function.
+ *
+ * Return: 0 if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
+
+/**
+ * kbase_mmu_hw_do_flush_on_gpu_ctrl - Issue a flush operation to the MMU.
+ *
+ * @kbdev: Kbase device to issue the MMU operation on.
+ * @as: Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ * operation to perform.
+ *
+ * Issue a flush operation on the address space as per the information
+ * specified inside @op_param. GPU command is used to flush the cache(s)
+ * instead of the MMU command.
+ *
+ * Return: 0 if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param);
/**
* kbase_mmu_hw_clear_fault - Clear a fault that has been previously reported by