/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
 *
 * Copyright (C) 2022 Google LLC
 */

#ifndef __GCIP_KCI_H__
#define __GCIP_KCI_H__

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <gcip/gcip-mailbox.h>

/*
 * Command/response sequence numbers are capped at half of the 64-bit value range. The second
 * half is reserved for incoming requests from firmware, which are tagged with the MSB set.
 */
#define GCIP_KCI_REVERSE_FLAG (0x8000000000000000ull)
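
/*
 * Illustrative sketch (not part of this header): since reverse KCI requests from firmware are
 * tagged with the MSB of the sequence number, a hypothetical helper could distinguish them from
 * responses to outgoing commands like this:
 *
 *	static bool example_is_reverse_kci_seq(u64 seq)
 *	{
 *		return seq & GCIP_KCI_REVERSE_FLAG;
 *	}
 */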

/* Command/response queue elements for KCI. */

struct gcip_kci_dma_descriptor {
	u64 address;
	u32 size;
	u32 flags;
};

struct gcip_kci_command_element {
	/*
	 * Set by gcip_kci_push_cmd() in case of KCI cmd and copied from the RKCI cmd in case of
	 * RKCI response.
	 */
	u64 seq;
	u16 code;
	u16 reserved[3]; /* Explicit padding, does not affect alignment. */
	struct gcip_kci_dma_descriptor dma;
} __packed;

struct gcip_kci_response_element {
	u64 seq;
	u16 code;
	/*
	 * Firmware can set some data according to the type of the response.
	 * TODO(b/279386960): as we don't manage the status of responses using this field anymore,
	 *                    rename this field to a more reasonable name.
	 */
	u16 status;
	/*
	 * Return value is not currently needed by KCI command responses.
	 * For reverse KCI commands this is set as value2.
	 */
	u32 retval;
} __packed;
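
/*
 * Given the __packed layouts above, a command element occupies 32 bytes (8 + 2 + 6 + 16) and a
 * response element 16 bytes (8 + 2 + 2 + 4) on the queues. Compile-time checks such as the
 * following are not present in this header, but illustrate that assumption:
 *
 *	static_assert(sizeof(struct gcip_kci_command_element) == 32);
 *	static_assert(sizeof(struct gcip_kci_response_element) == 16);
 */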

/*
 * Definition of code in command elements.
 * Code for KCI is a 16-bit unsigned integer.
 */
enum gcip_kci_code {
	GCIP_KCI_CODE_ACK = 0,
	GCIP_KCI_CODE_UNMAP_BUFFER = 1,
	GCIP_KCI_CODE_MAP_LOG_BUFFER = 2,
	GCIP_KCI_CODE_JOIN_GROUP = 3,
	GCIP_KCI_CODE_LEAVE_GROUP = 4,
	GCIP_KCI_CODE_MAP_TRACE_BUFFER = 5,
	GCIP_KCI_CODE_SHUTDOWN = 7,
	GCIP_KCI_CODE_GET_DEBUG_DUMP = 8,
	GCIP_KCI_CODE_OPEN_DEVICE = 9,
	GCIP_KCI_CODE_CLOSE_DEVICE = 10,
	GCIP_KCI_CODE_FIRMWARE_INFO = 11,
	/* TODO(b/271372136): remove v1 when v1 firmware no longer in use. */
	GCIP_KCI_CODE_GET_USAGE_V1 = 12,
	/* Backward compatible define, also update when v1 firmware no longer in use. */
	GCIP_KCI_CODE_GET_USAGE = 12,
	GCIP_KCI_CODE_NOTIFY_THROTTLING = 13,
	GCIP_KCI_CODE_BLOCK_BUS_SPEED_CONTROL = 14,
	GCIP_KCI_CODE_ALLOCATE_VMBOX = 15,
	GCIP_KCI_CODE_RELEASE_VMBOX = 16,
	GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX = 17,
	GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX = 18,
	GCIP_KCI_CODE_FIRMWARE_TRACING_LEVEL = 19,
	GCIP_KCI_CODE_THERMAL_CONTROL = 20,
	GCIP_KCI_CODE_GET_USAGE_V2 = 21,
	GCIP_KCI_CODE_SET_DEVICE_PROPERTIES = 22,

	GCIP_KCI_CODE_RKCI_ACK = 256,
};

/*
 * Definition of reverse KCI request code ranges.
 * Code for reverse KCI is a 16-bit unsigned integer.
 * The first half is reserved for chip-specific codes and the second half for generic codes.
 */
enum gcip_reverse_kci_code {
	GCIP_RKCI_CHIP_CODE_FIRST = 0,
	GCIP_RKCI_PM_QOS_REQUEST,
	GCIP_RKCI_CHANGE_BTS_SCENARIO,
	GCIP_RKCI_PM_QOS_BTS_REQUEST,
	GCIP_RKCI_DSP_CORE_TELEMETRY_TRY_READ,
	GCIP_RKCI_CLIENT_FATAL_ERROR_NOTIFY,
	GCIP_RKCI_CHIP_CODE_LAST = 0x7FFF,
	GCIP_RKCI_GENERIC_CODE_FIRST = 0x8000,
	GCIP_RKCI_FIRMWARE_CRASH = GCIP_RKCI_GENERIC_CODE_FIRST + 0,
	GCIP_RKCI_JOB_LOCKUP = GCIP_RKCI_GENERIC_CODE_FIRST + 1,
	GCIP_RKCI_GENERIC_CODE_LAST = 0xFFFF,
};
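
/*
 * Illustrative sketch (hypothetical, not part of this header): a chip driver's
 * reverse_kci_handle_response() op can dispatch on these ranges, handling its chip-specific
 * codes first and then the generic ones. The example_* handlers below are placeholders.
 *
 *	static void example_rkci_handler(struct gcip_kci *kci,
 *					 struct gcip_kci_response_element *resp)
 *	{
 *		if (resp->code < GCIP_RKCI_GENERIC_CODE_FIRST) {
 *			example_handle_chip_rkci(kci, resp);
 *			return;
 *		}
 *		switch (resp->code) {
 *		case GCIP_RKCI_FIRMWARE_CRASH:
 *			example_handle_fw_crash(kci, resp->retval);
 *			break;
 *		case GCIP_RKCI_JOB_LOCKUP:
 *			example_handle_job_lockup(kci, resp->retval);
 *			break;
 *		}
 *	}
 */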

/*
 * Definition of code in response elements.
 * It is a 16-bit unsigned integer.
 */
enum gcip_kci_error {
	GCIP_KCI_ERROR_OK = 0, /* Not an error; returned on success. */
	GCIP_KCI_ERROR_CANCELLED = 1,
	GCIP_KCI_ERROR_UNKNOWN = 2,
	GCIP_KCI_ERROR_INVALID_ARGUMENT = 3,
	GCIP_KCI_ERROR_DEADLINE_EXCEEDED = 4,
	GCIP_KCI_ERROR_NOT_FOUND = 5,
	GCIP_KCI_ERROR_ALREADY_EXISTS = 6,
	GCIP_KCI_ERROR_PERMISSION_DENIED = 7,
	GCIP_KCI_ERROR_RESOURCE_EXHAUSTED = 8,
	GCIP_KCI_ERROR_FAILED_PRECONDITION = 9,
	GCIP_KCI_ERROR_ABORTED = 10,
	GCIP_KCI_ERROR_OUT_OF_RANGE = 11,
	GCIP_KCI_ERROR_UNIMPLEMENTED = 12,
	GCIP_KCI_ERROR_INTERNAL = 13,
	GCIP_KCI_ERROR_UNAVAILABLE = 14,
	GCIP_KCI_ERROR_DATA_LOSS = 15,
	GCIP_KCI_ERROR_UNAUTHENTICATED = 16,
};
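
/*
 * Illustrative sketch (hypothetical, not part of this header): callers that need to propagate a
 * KCI error as a Linux errno could map the most common codes, for example:
 *
 *	static int example_kci_error_to_errno(enum gcip_kci_error err)
 *	{
 *		switch (err) {
 *		case GCIP_KCI_ERROR_OK:
 *			return 0;
 *		case GCIP_KCI_ERROR_INVALID_ARGUMENT:
 *			return -EINVAL;
 *		case GCIP_KCI_ERROR_DEADLINE_EXCEEDED:
 *			return -ETIMEDOUT;
 *		case GCIP_KCI_ERROR_UNIMPLEMENTED:
 *			return -EOPNOTSUPP;
 *		case GCIP_KCI_ERROR_UNAVAILABLE:
 *			return -EAGAIN;
 *		default:
 *			return -EIO;
 *		}
 *	}
 */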

/* Chip type of the offload vmbox to be linked. */
enum gcip_kci_offload_chip_type {
	GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU = 0,
};

/*
 * Reason for triggering the CMD doorbell.
 * The CMD doorbell is triggered either when a CMD is pushed or when a RESP that might block the FW
 * is consumed.
 */
enum gcip_kci_doorbell_reason {
	GCIP_KCI_PUSH_CMD,
	GCIP_KCI_CONSUME_RESP,
};

/* Struct to hold a circular buffer for incoming KCI responses. */
struct gcip_reverse_kci {
	/* Reverse kci buffer head. */
	unsigned long head;
	/* Reverse kci buffer tail. */
	unsigned long tail;
	/*
	 * Maximum number of outstanding KCI requests from firmware.
	 * This is used to size a circular buffer, so it must be a power of 2.
	 */
	u32 buffer_size;
	struct gcip_kci_response_element *buffer;
	/* Lock to push elements into the buffer from the interrupt handler. */
	spinlock_t producer_lock;
	/* Lock to pop elements from the buffer in the worker. */
	spinlock_t consumer_lock;
	/* Worker to handle responses. */
	struct work_struct work;
};
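
/*
 * Illustrative sketch (hypothetical, not part of this header): because buffer_size is a power of
 * 2, slots can be addressed by masking the free-running head/tail counters. Real code would also
 * need appropriate memory ordering between producer and consumer.
 *
 *	// Producer side, e.g. from the interrupt handler:
 *	spin_lock(&rkci->producer_lock);
 *	if (rkci->head - rkci->tail < rkci->buffer_size)
 *		rkci->buffer[rkci->head++ & (rkci->buffer_size - 1)] = *resp;
 *	spin_unlock(&rkci->producer_lock);
 *
 *	// Consumer side, from the worker:
 *	spin_lock(&rkci->consumer_lock);
 *	while (rkci->tail != rkci->head)
 *		example_handle_rkci(kci, &rkci->buffer[rkci->tail++ & (rkci->buffer_size - 1)]);
 *	spin_unlock(&rkci->consumer_lock);
 */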

struct gcip_kci;

/*
 * KCI operators.
 * For in_interrupt() context, see the implementation of gcip_kci_handle_irq for details.
 */
struct gcip_kci_ops {
	/* Mandatory. */
	/*
	 * Gets the head of mailbox command queue.
	 * Context: normal.
	 */
	u32 (*get_cmd_queue_head)(struct gcip_kci *kci);
	/*
	 * Gets the tail of mailbox command queue.
	 * Context: normal.
	 */
	u32 (*get_cmd_queue_tail)(struct gcip_kci *kci);
	/*
	 * Increases the tail of mailbox command queue by @inc.
	 * Context: normal.
	 */
	void (*inc_cmd_queue_tail)(struct gcip_kci *kci, u32 inc);

	/*
	 * Gets the size of mailbox response queue.
	 * Context: normal.
	 */
	u32 (*get_resp_queue_size)(struct gcip_kci *kci);
	/*
	 * Gets the head of mailbox response queue.
	 * Context: normal and in_interrupt().
	 */
	u32 (*get_resp_queue_head)(struct gcip_kci *kci);
	/*
	 * Gets the tail of mailbox response queue.
	 * Context: normal and in_interrupt().
	 */
	u32 (*get_resp_queue_tail)(struct gcip_kci *kci);
	/*
	 * Increases the head of mailbox response queue by @inc.
	 * Context: normal and in_interrupt().
	 */
	void (*inc_resp_queue_head)(struct gcip_kci *kci, u32 inc);
	/*
	 * Rings the doorbell.
	 * Context: normal.
	 */
	void (*trigger_doorbell)(struct gcip_kci *kci, enum gcip_kci_doorbell_reason);

	/* Optional. */
	/*
	 * Reverse KCI handler called by the worker. Only required if reverse kci is enabled.
	 * Context: normal.
	 */
	void (*reverse_kci_handle_response)(struct gcip_kci *kci,
					    struct gcip_kci_response_element *resp);
	/*
	 * Usage updater called by the worker.
	 * Context: normal.
	 */
	int (*update_usage)(struct gcip_kci *kci);
};
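
/*
 * Illustrative sketch (hypothetical, not part of this header): a chip driver typically backs the
 * mandatory ops with its mailbox register accessors and keeps them in a constant table. All
 * example_* names below are placeholders.
 *
 *	static u32 example_get_cmd_queue_head(struct gcip_kci *kci)
 *	{
 *		struct example_mailbox *mbx = gcip_kci_get_data(kci);
 *
 *		return example_mailbox_read_cmd_queue_head(mbx);
 *	}
 *
 *	static const struct gcip_kci_ops example_kci_ops = {
 *		.get_cmd_queue_head = example_get_cmd_queue_head,
 *		.get_cmd_queue_tail = example_get_cmd_queue_tail,
 *		.inc_cmd_queue_tail = example_inc_cmd_queue_tail,
 *		.get_resp_queue_size = example_get_resp_queue_size,
 *		.get_resp_queue_head = example_get_resp_queue_head,
 *		.get_resp_queue_tail = example_get_resp_queue_tail,
 *		.inc_resp_queue_head = example_inc_resp_queue_head,
 *		.trigger_doorbell = example_trigger_doorbell,
 *	};
 */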

struct gcip_kci {
	/* Device used for logging and memory allocation. */
	struct device *dev;
	/* Mailbox used by KCI. */
	struct gcip_mailbox mailbox;
	/* Protects cmd_queue. */
	struct mutex cmd_queue_lock;
	/* Protects resp_queue. */
	spinlock_t resp_queue_lock;
	/* Queue for waiting for the response doorbell to be rung. */
	wait_queue_head_t resp_doorbell_waitq;
	/* Protects wait_list. */
	spinlock_t wait_list_lock;
	/* Worker for consuming responses. */
	struct work_struct work;
	/* Handler for reverse (firmware -> kernel) requests. */
	struct gcip_reverse_kci rkci;
	/* Worker that sends update usage KCI. */
	struct work_struct usage_work;
	/* KCI operators. */
	const struct gcip_kci_ops *ops;
	/* Private data. */
	void *data;
};

/*
 * Arguments for gcip_kci_init.
 *
 * For the arguments `dev`, `rkci_buffer_size`, `ops` and `data`, see struct gcip_kci and
 * struct gcip_reverse_kci for details.
 *
 * For the arguments `dev`, `cmd_queue`, `resp_queue`, `queue_wrap_bit` and `timeout`, see
 * struct gcip_mailbox for details. They will be passed to the struct gcip_mailbox using
 * struct gcip_mailbox_args internally.
 */
struct gcip_kci_args {
	struct device *dev;
	void *cmd_queue;
	void *resp_queue;
	u32 queue_wrap_bit;
	u32 rkci_buffer_size;
	u32 timeout;
	const struct gcip_kci_ops *ops;
	void *data;
};
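
/*
 * Illustrative sketch (hypothetical, not part of this header): filling the arguments and
 * initializing the KCI object. The concrete values and the example_* / mbx names are
 * placeholders; rkci_buffer_size must be a power of 2 and timeout is in milliseconds.
 *
 *	const struct gcip_kci_args args = {
 *		.dev = dev,
 *		.cmd_queue = cmd_queue_vaddr,
 *		.resp_queue = resp_queue_vaddr,
 *		.queue_wrap_bit = 10,
 *		.rkci_buffer_size = 32,
 *		.timeout = 1000,
 *		.ops = &example_kci_ops,
 *		.data = mbx,
 *	};
 *	int ret = gcip_kci_init(kci, &args);
 *	if (ret)
 *		return ret;
 */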

/* Initializes a KCI object. */
int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args);

/* Cancels KCI and reverse KCI workers and workers that may send KCIs. */
void gcip_kci_cancel_work_queues(struct gcip_kci *kci);

/*
 * Releases KCI.
 * Caller must call gcip_kci_cancel_work_queues before calling gcip_kci_release.
 */
void gcip_kci_release(struct gcip_kci *kci);
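
/*
 * Illustrative sketch: the required teardown order is to cancel the workers first, then release.
 *
 *	gcip_kci_cancel_work_queues(kci);
 *	gcip_kci_release(kci);
 */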

/*
 * Pushes an element to cmd queue and waits for the response.
 * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
 *
 * Returns the code of the response, or a negative errno on error.
 */
int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd);

/*
 * Pushes an element to cmd queue and waits for the response.
 * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
 *
 * Returns the code of the response, or a negative errno on error.
 * @resp is updated with the response, so that the caller can retrieve the returned retval field.
 */
int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
				  struct gcip_kci_response_element *resp);
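
/*
 * Illustrative sketch (hypothetical, not part of this header): sending a command and checking
 * both the transport result and the firmware-reported code. fw_info_daddr and fw_info_size are
 * placeholders; whether a command uses the DMA descriptor is command-specific.
 *
 *	struct gcip_kci_command_element cmd = {
 *		.code = GCIP_KCI_CODE_FIRMWARE_INFO,
 *		.dma = {
 *			.address = fw_info_daddr,
 *			.size = fw_info_size,
 *		},
 *	};
 *	struct gcip_kci_response_element resp;
 *	int ret = gcip_kci_send_cmd_return_resp(kci, &cmd, &resp);
 *
 *	if (ret < 0)
 *		return ret;		// e.g. -ETIMEDOUT
 *	if (ret != GCIP_KCI_ERROR_OK)
 *		return -EIO;		// firmware reported an error code
 *	// On success, resp.retval may carry a command-specific value.
 */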

/*
 * Interrupt handler.
 * This function should be called when the interrupt of the KCI mailbox fires.
 */
void gcip_kci_handle_irq(struct gcip_kci *kci);
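
/*
 * Illustrative sketch (hypothetical, not part of this header): the chip driver's mailbox ISR
 * simply forwards to gcip_kci_handle_irq().
 *
 *	static irqreturn_t example_mailbox_irq_handler(int irq, void *arg)
 *	{
 *		struct gcip_kci *kci = arg;
 *
 *		gcip_kci_handle_irq(kci);
 *		return IRQ_HANDLED;
 *	}
 */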

/*
 * Schedules a usage update worker.
 *
 * For functions that don't require the usage to be updated immediately, use this function instead
 * of update_usage in struct gcip_kci_ops.
 */
void gcip_kci_update_usage_async(struct gcip_kci *kci);

/* Gets the KCI private data. */
static inline void *gcip_kci_get_data(struct gcip_kci *kci)
{
	return kci->data;
}

/* Returns the element size according to @type. */
static inline u32 gcip_kci_queue_element_size(enum gcip_mailbox_queue_type type)
{
	if (type == GCIP_MAILBOX_CMD_QUEUE)
		return sizeof(struct gcip_kci_command_element);
	else
		return sizeof(struct gcip_kci_response_element);
}

static inline u64 gcip_kci_get_cur_seq(struct gcip_kci *kci)
{
	return gcip_mailbox_get_cur_seq(&kci->mailbox);
}

static inline struct gcip_kci_command_element *gcip_kci_get_cmd_queue(struct gcip_kci *kci)
{
	return (struct gcip_kci_command_element *)gcip_mailbox_get_cmd_queue(&kci->mailbox);
}

static inline struct gcip_kci_response_element *gcip_kci_get_resp_queue(struct gcip_kci *kci)
{
	return (struct gcip_kci_response_element *)gcip_mailbox_get_resp_queue(&kci->mailbox);
}

static inline u64 gcip_kci_get_queue_wrap_bit(struct gcip_kci *kci)
{
	return gcip_mailbox_get_queue_wrap_bit(&kci->mailbox);
}

static inline struct list_head *gcip_kci_get_wait_list(struct gcip_kci *kci)
{
	return gcip_mailbox_get_wait_list(&kci->mailbox);
}

static inline u32 gcip_kci_get_timeout(struct gcip_kci *kci)
{
	return gcip_mailbox_get_timeout(&kci->mailbox);
}

static inline unsigned long gcip_rkci_get_head(struct gcip_kci *kci)
{
	return kci->rkci.head;
}

static inline unsigned long gcip_rkci_get_tail(struct gcip_kci *kci)
{
	return kci->rkci.tail;
}

#endif /* __GCIP_KCI_H__ */