/*
 * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>

/* Size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE		12

#ifdef __aarch64__

/* 8-bytes aligned size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)

#if ENABLE_RME
/* Size of cpu_context array */
#define CPU_DATA_CONTEXT_NUM		3
/* Offset of cpu_ops_ptr, size 8 bytes */
#define CPU_DATA_CPU_OPS_PTR		0x18
#else /* ENABLE_RME */
#define CPU_DATA_CONTEXT_NUM		2
#define CPU_DATA_CPU_OPS_PTR		0x10
#endif /* ENABLE_RME */

#if ENABLE_PAUTH
/* 8-bytes aligned offset of apiakey[2], size 16 bytes */
#define CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
#else /* ENABLE_PAUTH */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#endif /* ENABLE_PAUTH */

/* Need enough space in the crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE		64
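
/*
 * Worked example (illustrative only): with ENABLE_RME=0 and ENABLE_PAUTH=1
 * the definitions above resolve as follows:
 *   cpu_context[2]     occupies 0x00 - 0x0f
 *   cpu_ops_ptr        at CPU_DATA_CPU_OPS_PTR      = 0x10
 *   psci_svc_cpu_data  at 0x18, 12 bytes padded to
 *                      PSCI_CPU_DATA_SIZE_ALIGNED   = 16
 *   apiakey[2]         at CPU_DATA_APIAKEY_OFFSET   = 0x8 + 16 + 0x10 = 0x28
 *   crash_buf          at CPU_DATA_CRASH_BUF_OFFSET = 0x10 + 0x28     = 0x38
 * The CASSERTs later in this file check these hand-computed offsets against
 * the C structure for the actual build configuration.
 */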

#else	/* !__aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR		0x0
#define CPU_DATA_CRASH_BUF_OFFSET	(0x4 + PSCI_CPU_DATA_SIZE)

#endif	/* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
						CPU_DATA_CRASH_BUF_SIZE)
#else
#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
#endif

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE			(((CPU_DATA_CRASH_BUF_END + \
					CACHE_WRITEBACK_GRANULE - 1) / \
						CACHE_WRITEBACK_GRANULE) * \
							CACHE_WRITEBACK_GRANULE)
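
/*
 * For example (values are platform dependent and purely illustrative):
 * continuing the ENABLE_RME=0, ENABLE_PAUTH=1 case above with CRASH_REPORTING
 * enabled, CPU_DATA_CRASH_BUF_END = 0x38 + 64 = 0x78 (120 bytes), so a
 * typical CACHE_WRITEBACK_GRANULE of 64 rounds CPU_DATA_SIZE up to 128.
 */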

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_CRASH_BUF_END
#define CPU_DATA_PMF_TS0_IDX		0
#endif

#ifndef __ASSEMBLER__

#include <assert.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>

#include <platform_def.h>

/* Offsets for the cpu_data structure */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
		(cpu_data_t, platform_cpu_data)
#endif

typedef enum context_pas {
	CPU_CONTEXT_SECURE = 0,
	CPU_CONTEXT_NS,
#if ENABLE_RME
	CPU_CONTEXT_REALM,
#endif
	CPU_CONTEXT_NUM
} context_pas_t;

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to the secure, non-secure and (with RME) realm contexts
 *   Per-cpu crash buffer used by the crash reporting framework
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different cpus.
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members; the member access macros defined
 * below should be used instead.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[CPU_DATA_CONTEXT_NUM];
#endif /* __aarch64__ */
	uintptr_t cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#ifdef __aarch64__
CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
		assert_cpu_data_context_num_mismatch);
#endif

#if ENABLE_PAUTH
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
		(cpu_data_t, cpu_ops_ptr),
		assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
		(cpu_data_t, cpu_data_pmf_ts[0]),
		assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);

#ifdef __aarch64__
/* Return a pointer to the cpu_data structure for the calling CPU. */
static inline struct cpu_data *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
struct cpu_data *_cpu_data(void);
#endif
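
/*
 * Note: _cpu_data() relies on TPIDR_EL3 holding the address of the calling
 * core's cpu_data entry. init_cpu_data_ptr() is expected to program it
 * during early initialisation, conceptually (sketch only, the actual
 * implementation lives in the runtime library sources):
 *
 *   write_tpidr_el3((uintptr_t)&percpu_data[plat_my_core_pos()]);
 */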

/*
 * Returns the index into the cpu_context array for the given security state.
 * All accesses to cpu_context should go through this helper to ensure that
 * accesses are never out-of-bounds. The function assumes security_state is
 * valid.
 */
static inline context_pas_t get_cpu_context_index(uint32_t security_state)
{
	if (security_state == SECURE) {
		return CPU_CONTEXT_SECURE;
	} else {
#if ENABLE_RME
		if (security_state == NON_SECURE) {
			return CPU_CONTEXT_NS;
		} else {
			assert(security_state == REALM);
			return CPU_CONTEXT_REALM;
		}
#else
		assert(security_state == NON_SECURE);
		return CPU_CONTEXT_NS;
#endif
	}
}
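
/*
 * Illustrative use only (a sketch, not part of this interface): context
 * management code typically fetches the saved context for a security state
 * through this helper and the accessor macros defined below, e.g.
 *
 *   void *ctx = get_cpu_data(cpu_context[get_cpu_context_index(NON_SECURE)]);
 *
 * rather than indexing cpu_context[] with a raw security state value.
 */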

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void init_cpu_data_ptr(void);
void init_cpu_ops(void);

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)	  	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))
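
/*
 * Example usage (illustrative sketch only; the member shown is defined by the
 * PSCI library, not by this header): a BL31 component can update and then
 * flush a per-cpu member for another core, identified by its linear index
 * cpu_idx, as follows:
 *
 *   set_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.target_pwrlvl, lvl);
 *   flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.target_pwrlvl);
 *
 * The flush is required whenever the member may be read by a core whose data
 * cache is disabled, e.g. a core that is in the process of being powered up.
 */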


#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */