summaryrefslogtreecommitdiff
path: root/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
diff options
context:
space:
mode:
Diffstat (limited to 'common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h')
-rw-r--r--common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h164
1 files changed, 159 insertions, 5 deletions
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h b/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
index 1794ddc..c9de5fd 100644
--- a/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -56,10 +56,44 @@
* - Added new Base memory allocation interface
* 1.10:
* - First release of new HW performance counters interface.
+ * 1.11:
+ * - Dummy model (no mali) backend will now clear HWC values after each sample
+ * 1.12:
+ * - Added support for incremental rendering flag in CSG create call
+ * 1.13:
+ * - Added ioctl to query a register of USER page.
+ * 1.14:
+ * - Added support for passing down the buffer descriptor VA in tiler heap init
+ * 1.15:
+ * - Enable new sync_wait GE condition
+ * 1.16:
+ * - Remove legacy definitions:
+ * - base_jit_alloc_info_10_2
+ * - base_jit_alloc_info_11_5
+ * - kbase_ioctl_mem_jit_init_10_2
+ * - kbase_ioctl_mem_jit_init_11_5
+ * 1.17:
+ * - Fix kinstr_prfcnt issues:
+ * - Missing implicit sample for CMD_STOP when HWCNT buffer is full.
+ * - Race condition when stopping periodic sampling.
+ * - prfcnt_block_metadata::block_idx gaps.
+ * - PRFCNT_CONTROL_CMD_SAMPLE_ASYNC is removed.
+ * 1.18:
+ * - Relax the requirement to create a mapping with BASE_MEM_MAP_TRACKING_HANDLE
+ * before allocating GPU memory for the context.
+ * - CPU mappings of USER_BUFFER imported memory handles must be cached.
+ * 1.19:
+ * - Add NE support in queue_group_create IOCTL fields
+ * - Previous version retained as KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18 for
+ * backward compatibility.
+ * 1.20:
+ * - Restrict child process from doing supported file operations (like mmap, ioctl,
+ * read, poll) on the file descriptor of mali device file that was inherited
+ * from the parent process.
*/
#define BASE_UK_VERSION_MAJOR 1
-#define BASE_UK_VERSION_MINOR 10
+#define BASE_UK_VERSION_MINOR 20
/**
* struct kbase_ioctl_version_check - Check version compatibility between
@@ -232,6 +266,56 @@ union kbase_ioctl_cs_queue_group_create_1_6 {
_IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create_1_6)
/**
+ * union kbase_ioctl_cs_queue_group_create_1_18 - Create a GPU command queue group
+ * @in: Input parameters
+ * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use.
+ * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use.
+ * @in.compute_mask: Mask of compute endpoints the group is allowed to use.
+ * @in.cs_min: Minimum number of CSs required.
+ * @in.priority: Queue group's priority within a process.
+ * @in.tiler_max: Maximum number of tiler endpoints the group is allowed
+ * to use.
+ * @in.fragment_max: Maximum number of fragment endpoints the group is
+ * allowed to use.
+ * @in.compute_max: Maximum number of compute endpoints the group is allowed
+ * to use.
+ * @in.csi_handlers: Flags to signal that the application intends to use CSI
+ * exception handlers in some linear buffers to deal with
+ * the given exception types.
+ * @in.padding: Currently unused, must be zero
+ * @out: Output parameters
+ * @out.group_handle: Handle of a newly created queue group.
+ * @out.padding: Currently unused, must be zero
+ * @out.group_uid: UID of the queue group available to base.
+ */
+union kbase_ioctl_cs_queue_group_create_1_18 {
+ struct {
+ __u64 tiler_mask;
+ __u64 fragment_mask;
+ __u64 compute_mask;
+ __u8 cs_min;
+ __u8 priority;
+ __u8 tiler_max;
+ __u8 fragment_max;
+ __u8 compute_max;
+ __u8 csi_handlers;
+ __u8 padding[2];
+ /**
+ * @in.dvs_buf: buffer for deferred vertex shader
+ */
+ __u64 dvs_buf;
+ } in;
+ struct {
+ __u8 group_handle;
+ __u8 padding[3];
+ __u32 group_uid;
+ } out;
+};
+
+#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18 \
+ _IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create_1_18)
+
+/**
* union kbase_ioctl_cs_queue_group_create - Create a GPU command queue group
* @in: Input parameters
* @in.tiler_mask: Mask of tiler endpoints the group is allowed to use.
@@ -245,6 +329,9 @@ union kbase_ioctl_cs_queue_group_create_1_6 {
* allowed to use.
* @in.compute_max: Maximum number of compute endpoints the group is allowed
* to use.
+ * @in.csi_handlers: Flags to signal that the application intends to use CSI
+ * exception handlers in some linear buffers to deal with
+ * the given exception types.
* @in.padding: Currently unused, must be zero
* @out: Output parameters
* @out.group_handle: Handle of a newly created queue group.
@@ -261,11 +348,16 @@ union kbase_ioctl_cs_queue_group_create {
__u8 tiler_max;
__u8 fragment_max;
__u8 compute_max;
- __u8 padding[3];
+ __u8 csi_handlers;
+ /**
+ * @in.reserved: Reserved, currently unused, must be zero.
+ */
+ __u16 reserved;
/**
- * @reserved: Reserved
+ * @in.dvs_buf: buffer for deferred vertex shader
*/
- __u64 reserved;
+ __u64 dvs_buf;
+ __u64 padding[9];
} in;
struct {
__u8 group_handle;
@@ -353,6 +445,7 @@ struct kbase_ioctl_kcpu_queue_enqueue {
* allowed.
* @in.group_id: Group ID to be used for physical allocations.
* @in.padding: Padding
+ * @in.buf_desc_va: Buffer descriptor GPU VA for tiler heap reclaims.
* @out: Output parameters
* @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up
* for the heap.
@@ -368,6 +461,7 @@ union kbase_ioctl_cs_tiler_heap_init {
__u16 target_in_flight;
__u8 group_id;
__u8 padding;
+ __u64 buf_desc_va;
} in;
struct {
__u64 gpu_heap_va;
@@ -379,6 +473,43 @@ union kbase_ioctl_cs_tiler_heap_init {
_IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init)
/**
+ * union kbase_ioctl_cs_tiler_heap_init_1_13 - Initialize chunked tiler memory heap,
+ * earlier version up to 1.13
+ * @in: Input parameters
+ * @in.chunk_size: Size of each chunk.
+ * @in.initial_chunks: Initial number of chunks that heap will be created with.
+ * @in.max_chunks: Maximum number of chunks that the heap is allowed to use.
+ * @in.target_in_flight: Number of render-passes that the driver should attempt to
+ * keep in flight for which allocation of new chunks is
+ * allowed.
+ * @in.group_id: Group ID to be used for physical allocations.
+ * @in.padding: Padding
+ * @out: Output parameters
+ * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up
+ * for the heap.
+ * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap,
+ * actually points to the header of heap chunk and not to
+ * the low address of free memory in the chunk.
+ */
+union kbase_ioctl_cs_tiler_heap_init_1_13 {
+ struct {
+ __u32 chunk_size;
+ __u32 initial_chunks;
+ __u32 max_chunks;
+ __u16 target_in_flight;
+ __u8 group_id;
+ __u8 padding;
+ } in;
+ struct {
+ __u64 gpu_heap_va;
+ __u64 first_chunk_va;
+ } out;
+};
+
+#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 \
+ _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init_1_13)
+
+/**
* struct kbase_ioctl_cs_tiler_heap_term - Terminate a chunked tiler heap
* instance
*
@@ -479,6 +610,29 @@ union kbase_ioctl_mem_alloc_ex {
#define KBASE_IOCTL_MEM_ALLOC_EX _IOWR(KBASE_IOCTL_TYPE, 59, union kbase_ioctl_mem_alloc_ex)
+/**
+ * union kbase_ioctl_read_user_page - Read a register of USER page
+ *
+ * @in: Input parameters.
+ * @in.offset: Register offset in USER page.
+ * @in.padding: Padding to round up to a multiple of 8 bytes, must be zero.
+ * @out: Output parameters.
+ * @out.val_lo: Value of 32bit register or the 1st half of 64bit register to be read.
+ * @out.val_hi: Value of the 2nd half of 64bit register to be read.
+ */
+union kbase_ioctl_read_user_page {
+ struct {
+ __u32 offset;
+ __u32 padding;
+ } in;
+ struct {
+ __u32 val_lo;
+ __u32 val_hi;
+ } out;
+};
+
+#define KBASE_IOCTL_READ_USER_PAGE _IOWR(KBASE_IOCTL_TYPE, 60, union kbase_ioctl_read_user_page)
+
/***************
* test ioctls *
***************/