summaryrefslogtreecommitdiff
path: root/mali_kbase/hwcnt/backend/mali_kbase_hwcnt_backend_csf_if.h
blob: 382a3adaa1273f4efe2c249e16dcef5bad440cc4 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Virtual interface for CSF hardware counter backend.
 */

#ifndef _KBASE_HWCNT_BACKEND_CSF_IF_H_
#define _KBASE_HWCNT_BACKEND_CSF_IF_H_

#include <linux/types.h>

/* Opaque context for a CSF interface instance; defined by the implementation. */
struct kbase_hwcnt_backend_csf_if_ctx;

/* Opaque handle to a HWC sample ring buffer; defined by the implementation. */
struct kbase_hwcnt_backend_csf_if_ring_buf;

/**
 * struct kbase_hwcnt_backend_csf_if_enable - enable hardware counter collection
 *                                            structure.
 * @fe_bm:          Front End counters selection bitmask.
 * @shader_bm:      Shader counters selection bitmask.
 * @tiler_bm:       Tiler counters selection bitmask.
 * @mmu_l2_bm:      MMU_L2 counters selection bitmask.
 * @counter_set:    The performance counter set to enable.
 * @clk_enable_map: A u64 bitfield, each bit of which enables the cycle
 *                  counter for a given clock domain.
 */
struct kbase_hwcnt_backend_csf_if_enable {
	u32 fe_bm;
	u32 shader_bm;
	u32 tiler_bm;
	u32 mmu_l2_bm;
	u8 counter_set;
	u64 clk_enable_map;
};

/**
 * struct kbase_hwcnt_backend_csf_if_prfcnt_info - Performance counter
 *                                                 information.
 * @prfcnt_hw_size:    Total length in bytes of all the hardware counters data. The hardware
 *                     counters are sub-divided into 4 classes: front-end, shader, tiler, and
 *                     memory system (l2 cache + MMU).
 * @prfcnt_fw_size:    Total length in bytes of all the firmware counters data.
 * @dump_bytes:        Bytes of GPU memory required to perform one performance
 *                     counter dump. dump_bytes = prfcnt_hw_size + prfcnt_fw_size.
 * @prfcnt_block_size: Bytes of each individual performance counter block.
 * @l2_count:          The MMU L2 cache count.
 * @core_mask:         Shader core mask.
 * @clk_cnt:           Clock domain count in the system.
 * @clearing_samples:  True if counters are cleared after each sample is taken,
 *                     false otherwise.
 */
struct kbase_hwcnt_backend_csf_if_prfcnt_info {
	size_t prfcnt_hw_size;
	size_t prfcnt_fw_size;
	size_t dump_bytes;
	size_t prfcnt_block_size;
	size_t l2_count;
	u64 core_mask;
	u8 clk_cnt;
	bool clearing_samples;
};

/**
 * typedef kbase_hwcnt_backend_csf_if_assert_lock_held_fn - Assert that the
 *                                                          backend spinlock is
 *                                                          held by the caller.
 * @ctx: Non-NULL pointer to a CSF context.
 */
typedef void
kbase_hwcnt_backend_csf_if_assert_lock_held_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx);

/**
 * typedef kbase_hwcnt_backend_csf_if_lock_fn - Acquire backend spinlock.
 *
 * @ctx:   Non-NULL pointer to a CSF context.
 * @flags: Non-NULL pointer to the memory location that will store the previous
 *         interrupt state, to be passed back to the matching unlock call.
 */
typedef void kbase_hwcnt_backend_csf_if_lock_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
						unsigned long *flags);

/**
 * typedef kbase_hwcnt_backend_csf_if_unlock_fn - Release backend spinlock.
 *
 * @ctx:   Non-NULL pointer to a CSF context.
 * @flags: Interrupt state that was previously stored when the backend
 *         spinlock was acquired.
 */
typedef void kbase_hwcnt_backend_csf_if_unlock_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
						  unsigned long flags);

/**
 * typedef kbase_hwcnt_backend_csf_if_get_prfcnt_info_fn - Get performance
 *                                                         counter information.
 * @ctx:          Non-NULL pointer to a CSF context.
 * @prfcnt_info:  Non-NULL pointer to the struct where performance counter
 *                information will be stored.
 */
typedef void kbase_hwcnt_backend_csf_if_get_prfcnt_info_fn(
	struct kbase_hwcnt_backend_csf_if_ctx *ctx,
	struct kbase_hwcnt_backend_csf_if_prfcnt_info *prfcnt_info);

/**
 * typedef kbase_hwcnt_backend_csf_if_ring_buf_alloc_fn - Allocate a ring buffer
 *                                                        for the CSF interface.
 * @ctx:           Non-NULL pointer to a CSF context.
 * @buf_count:     Number of buffers in the ring buffer to be allocated,
 *                 MUST be a power of 2.
 * @cpu_dump_base: Non-NULL pointer to where the ring buffer CPU base address is
 *                 stored on success.
 * @ring_buf:      Non-NULL pointer to where the ring buffer handle is stored on
 *                 success.
 *
 * A ring buffer is needed by the CSF interface to do manual HWC samples and
 * automatic HWC samples; the buffer count in the ring buffer MUST be a power
 * of 2 to meet the hardware requirement.
 *
 * Return: 0 on success, else error code.
 */
typedef int
kbase_hwcnt_backend_csf_if_ring_buf_alloc_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
					     u32 buf_count, void **cpu_dump_base,
					     struct kbase_hwcnt_backend_csf_if_ring_buf **ring_buf);

/**
 * typedef kbase_hwcnt_backend_csf_if_ring_buf_sync_fn - Sync HWC dump buffers
 *                                                       memory.
 * @ctx:             Non-NULL pointer to a CSF context.
 * @ring_buf:        Non-NULL pointer to the ring buffer.
 * @buf_index_first: The first buffer index in the ring buffer to be synced,
 *                   inclusive.
 * @buf_index_last:  The last buffer index in the ring buffer to be synced,
 *                   exclusive.
 * @for_cpu:         Direction of the sync: set to true to invalidate the CPU
 *                   cache before the CPU reads buffers written by the GPU, and
 *                   set to false to flush CPU writes before the memory is
 *                   overwritten by the GPU.
 *
 * Flush cached HWC dump buffer data to ensure that all writes from GPU and CPU
 * are correctly observed.
 */
typedef void
kbase_hwcnt_backend_csf_if_ring_buf_sync_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
					    struct kbase_hwcnt_backend_csf_if_ring_buf *ring_buf,
					    u32 buf_index_first, u32 buf_index_last, bool for_cpu);

/**
 * typedef kbase_hwcnt_backend_csf_if_ring_buf_free_fn - Free a ring buffer for
 *                                                       the CSF interface.
 *
 * @ctx:      Non-NULL pointer to a CSF interface context.
 * @ring_buf: Non-NULL pointer to the ring buffer to be freed.
 */
typedef void
kbase_hwcnt_backend_csf_if_ring_buf_free_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
					    struct kbase_hwcnt_backend_csf_if_ring_buf *ring_buf);

/**
 * typedef kbase_hwcnt_backend_csf_if_timestamp_ns_fn - Get the current
 *                                                      timestamp of the CSF
 *                                                      interface.
 * @ctx: Non-NULL pointer to a CSF interface context.
 *
 * Return: CSF interface timestamp in nanoseconds.
 */
typedef u64 kbase_hwcnt_backend_csf_if_timestamp_ns_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx);

/**
 * typedef kbase_hwcnt_backend_csf_if_dump_enable_fn - Set up and enable hardware
 *                                                     counters in the CSF
 *                                                     interface.
 * @ctx:      Non-NULL pointer to a CSF interface context.
 * @ring_buf: Non-NULL pointer to the ring buffer used to set up the HWC.
 * @enable:   Non-NULL pointer to the HWC enable configuration.
 *
 * Requires the backend spinlock to be taken before calling.
 */
typedef void
kbase_hwcnt_backend_csf_if_dump_enable_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
					  struct kbase_hwcnt_backend_csf_if_ring_buf *ring_buf,
					  struct kbase_hwcnt_backend_csf_if_enable *enable);

/**
 * typedef kbase_hwcnt_backend_csf_if_dump_disable_fn - Disable hardware counters
 *                                                      in the CSF interface.
 * @ctx: Non-NULL pointer to a CSF interface context.
 *
 * Requires the backend spinlock to be taken before calling.
 */
typedef void kbase_hwcnt_backend_csf_if_dump_disable_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx);

/**
 * typedef kbase_hwcnt_backend_csf_if_dump_request_fn - Request a HWC dump.
 *
 * @ctx: Non-NULL pointer to the interface context.
 *
 * Requires the backend spinlock to be taken before calling.
 */
typedef void kbase_hwcnt_backend_csf_if_dump_request_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx);

/**
 * typedef kbase_hwcnt_backend_csf_if_get_indexes_fn - Get the current extract
 *                                                     and insert indexes of the
 *                                                     ring buffer.
 *
 * @ctx:           Non-NULL pointer to a CSF interface context.
 * @extract_index: Non-NULL pointer where the current extract index is saved.
 * @insert_index:  Non-NULL pointer where the current insert index is saved.
 *
 * Requires the backend spinlock to be taken before calling.
 */
typedef void kbase_hwcnt_backend_csf_if_get_indexes_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
						       u32 *extract_index, u32 *insert_index);

/**
 * typedef kbase_hwcnt_backend_csf_if_set_extract_index_fn - Update the extract
 *                                                           index of the ring
 *                                                           buffer.
 *
 * @ctx:            Non-NULL pointer to a CSF interface context.
 * @extract_index:  New extract index to be set.
 *
 * Requires the backend spinlock to be taken before calling.
 */
typedef void
kbase_hwcnt_backend_csf_if_set_extract_index_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
						u32 extract_index);

/**
 * typedef kbase_hwcnt_backend_csf_if_get_gpu_cycle_count_fn - Get the current
 *                                                             GPU cycle count.
 * @ctx:            Non-NULL pointer to a CSF interface context.
 * @cycle_counts:   Non-NULL pointer to an array where cycle counts are saved;
 *                  the array size should be at least as big as the number of
 *                  clock domains returned by the get_prfcnt_info interface.
 * @clk_enable_map: A u64 bitfield, each bit of which specifies an enabled
 *                  clock domain.
 *
 * Requires the backend spinlock to be taken before calling.
 */
typedef void
kbase_hwcnt_backend_csf_if_get_gpu_cycle_count_fn(struct kbase_hwcnt_backend_csf_if_ctx *ctx,
						  u64 *cycle_counts, u64 clk_enable_map);

/**
 * struct kbase_hwcnt_backend_csf_if - Hardware counter backend CSF virtual
 *                                     interface, a table of function pointers
 *                                     provided by the concrete implementation.
 * @ctx:                 CSF interface context passed to every function pointer.
 * @assert_lock_held:    Function ptr to assert the backend spinlock is held.
 * @lock:                Function ptr to acquire the backend spinlock.
 * @unlock:              Function ptr to release the backend spinlock.
 * @get_prfcnt_info:     Function ptr to get performance counter related
 *                       information.
 * @ring_buf_alloc:      Function ptr to allocate a ring buffer for CSF HWC.
 * @ring_buf_sync:       Function ptr to sync ring buffer memory between CPU
 *                       and GPU.
 * @ring_buf_free:       Function ptr to free a ring buffer for CSF HWC.
 * @timestamp_ns:        Function ptr to get the current CSF interface
 *                       timestamp.
 * @dump_enable:         Function ptr to enable dumping.
 * @dump_disable:        Function ptr to disable dumping.
 * @dump_request:        Function ptr to request a dump.
 * @get_indexes:         Function ptr to get extract and insert indexes of the
 *                       ring buffer.
 * @set_extract_index:   Function ptr to set the extract index of the ring
 *                       buffer.
 * @get_gpu_cycle_count: Function ptr to get the GPU cycle count.
 */
struct kbase_hwcnt_backend_csf_if {
	struct kbase_hwcnt_backend_csf_if_ctx *ctx;
	kbase_hwcnt_backend_csf_if_assert_lock_held_fn *assert_lock_held;
	kbase_hwcnt_backend_csf_if_lock_fn *lock;
	kbase_hwcnt_backend_csf_if_unlock_fn *unlock;
	kbase_hwcnt_backend_csf_if_get_prfcnt_info_fn *get_prfcnt_info;
	kbase_hwcnt_backend_csf_if_ring_buf_alloc_fn *ring_buf_alloc;
	kbase_hwcnt_backend_csf_if_ring_buf_sync_fn *ring_buf_sync;
	kbase_hwcnt_backend_csf_if_ring_buf_free_fn *ring_buf_free;
	kbase_hwcnt_backend_csf_if_timestamp_ns_fn *timestamp_ns;
	kbase_hwcnt_backend_csf_if_dump_enable_fn *dump_enable;
	kbase_hwcnt_backend_csf_if_dump_disable_fn *dump_disable;
	kbase_hwcnt_backend_csf_if_dump_request_fn *dump_request;
	kbase_hwcnt_backend_csf_if_get_indexes_fn *get_indexes;
	kbase_hwcnt_backend_csf_if_set_extract_index_fn *set_extract_index;
	kbase_hwcnt_backend_csf_if_get_gpu_cycle_count_fn *get_gpu_cycle_count;
};

#endif /* _KBASE_HWCNT_BACKEND_CSF_IF_H_ */