/* xref: /netbsd-src/sys/arch/aarch64/aarch64/vectors.S (revision a87250117ad8929d62ddf579163d64e532dfc524) */
/*	$NetBSD: vectors.S,v 1.29 2022/06/26 11:14:36 jmcneill Exp $	*/

#include <aarch64/asm.h>
#include <aarch64/locore.h>

#include "assym.h"

#include "opt_compat_netbsd32.h"
#include "opt_cpuoptions.h"
#include "opt_ddb.h"
#include "opt_dtrace.h"

RCSID("$NetBSD: vectors.S,v 1.29 2022/06/26 11:14:36 jmcneill Exp $")

	ARMV8_DEFINE_OPTIONS	/* assembler option directives; see <aarch64/asm.h> */

#ifdef KDTRACE_HOOKS
/*
 * dtrace needs to emulate  stp x29,x30,[sp,#-FRAMESIZE]!   where
 * FRAMESIZE can be as large as 512, so create a 512-byte buffer
 * between the interrupted code's frame and our struct trapframe.
 */
#define	TRAP_FRAMESIZE	(TF_SIZE + 512)
#else
/* Without dtrace only the trapframe itself is needed on the EL1 stack. */
#define	TRAP_FRAMESIZE	TF_SIZE
#endif
27
/*
 * Template for the handler functions.
 *
 * Arguments:
 *	func	name of the generated handler entry point
 *	el	exception level the exception was taken from (0 or 1)
 *	intr_p	1 for interrupt (irq/fiq) entries; esr/far are not
 *		meaningful for those, so marker values are stored instead
 *	label	trap/interrupt dispatcher branched to at the end; it is
 *		entered with x0 = trapframe pointer and lr set up so its
 *		return lands in el[01]_trap_exit
 *	tpidr	empty for native (AArch64) entries, "ro" for AArch32
 *		entries, selecting tpidr_el0 vs. tpidrro_el0 as the
 *		user TLS register to capture
 */
.macro	vector_func, func, el, intr_p, label, tpidr
	.align 7	/* cacheline-aligned */

ENTRY_NBTI(\func)

	.if \el == 1
	/* need to allocate stack on el1 */
	sub	sp, sp, #TRAP_FRAMESIZE
	.endif

	/* save all general purpose registers into the trapframe */
	stp	x0, x1, [sp, #TF_X0]
	stp	x2, x3, [sp, #TF_X2]
	stp	x4, x5, [sp, #TF_X4]
	stp	x6, x7, [sp, #TF_X6]
	stp	x8, x9, [sp, #TF_X8]
	stp	x10, x11, [sp, #TF_X10]
	stp	x12, x13, [sp, #TF_X12]
	stp	x14, x15, [sp, #TF_X14]
	stp	x16, x17, [sp, #TF_X16]
	str	x18, [sp, #TF_X18]
	stp	x19, x20, [sp, #TF_X19]
	stp	x21, x22, [sp, #TF_X21]
	stp	x23, x24, [sp, #TF_X23]
	stp	x25, x26, [sp, #TF_X25]
	stp	x27, x28, [sp, #TF_X27]
	stp	x29, x30, [sp, #TF_X29]

	/* get sp and elr */
	.if \el == 0
	mrs	x20, sp_el0
	.else
	/* sp was already adjusted, so adjust x20 back */
	add	x20, sp, #TRAP_FRAMESIZE
	.endif
	mrs	x21, elr_el1

	/* store sp and elr */
	.if TF_SP + 8 == TF_PC
	/* the two slots are adjacent: store them as a pair */
	stp	x20, x21, [sp, #TF_SP]
	.else
	str	x20, [sp, #TF_SP]
	str	x21, [sp, #TF_PC]
	.endif

	mrs	x22, spsr_el1
	str	x22, [sp, #TF_SPSR]

	.if \intr_p == 1
	/* irq/fiq: esr/far are stale, store -1/0 markers instead */
	mov	x23, #-1
	mov	x24, xzr
	.else
	mrs	x23, esr_el1
	mrs	x24, far_el1
	.endif

	.if TF_ESR + 8 == TF_FAR
	stp	x23, x24, [sp, #TF_ESR]
	.else
	str	x23, [sp, #TF_ESR]
	str	x24, [sp, #TF_FAR]
	.endif

	.if \el == 0
	/* curlwp->l_private = tpidr{,ro}_el0 */
	mrs	x1, tpidr_el1		/* x1 = curlwp */
	mrs	x0, tpidr\tpidr\()_el0
	str	x0, [x1, #L_PRIVATE]	/* curlwp->l_private = tpidr{,ro}_el0 */

#ifdef ARMV83_PAC
	/* Switch to the kern PAC key. */
	adrl	x4, _C_LABEL(aarch64_pac_enabled)
	ldr	w4, [x4]
	cbz	w4, 1f
	ldp	x5, x6, [x1, #L_MD_IA_KERN]
	msr	APIAKeyLo_EL1, x5
	msr	APIAKeyHi_EL1, x6
	isb
1:
#endif
	.endif

	adr	x30, el\el\()_trap_exit	/* el[01]_trap_exit */
	mov	x0, sp
#ifdef DDB
	mov	x29, sp			/* for backtrace */
#endif
	b	\label
END(\func)
.endm
120
/*
 * The vector_entry macro must be small enough to fit 0x80 bytes! We just jump
 * into the proper function, so this constraint is always respected.
 * (Each VBAR_EL1 vector slot is exactly 0x80 bytes wide.)
 */
.macro	vector_entry, func
	.align 7	/* aligned 0x80 */
	b	\func
.endm
129
/*
 * The functions.
 *
 * Arguments are: name, source EL, interrupt-entry flag, dispatch target
 * [, tpidr suffix].  The trailing "ro" on the AArch32 entries makes
 * vector_func capture the user TLS pointer from tpidrro_el0 rather
 * than tpidr_el0.
 */
vector_func	el1t_sync_handler,  1, 0, trap_el1t_sync
vector_func	el1t_irq_handler,   1, 1, trap_el1t_irq
vector_func	el1t_fiq_handler,   1, 1, trap_el1t_fiq
vector_func	el1t_error_handler, 1, 0, trap_el1t_error

vector_func	el1h_sync_handler,  1, 0, trap_el1h_sync
vector_func	el1h_intr_handler,  1, 1, cpu_irq
vector_func	el1h_fiq_handler,   1, 1, cpu_fiq
vector_func	el1h_error_handler, 1, 0, trap_el1h_error

vector_func	el0_sync_handler,  0, 0, trap_el0_sync
vector_func	el0_intr_handler,  0, 1, cpu_irq
vector_func	el0_fiq_handler,   0, 1, cpu_fiq
vector_func	el0_error_handler, 0, 0, trap_el0_error

vector_func	el0_32sync_handler,  0, 0, trap_el0_32sync, ro
vector_func	el0_32intr_handler,  0, 1, cpu_irq, ro
vector_func	el0_32fiq_handler,   0, 1, cpu_fiq, ro
vector_func	el0_32error_handler, 0, 0, trap_el0_32error, ro
152
/*
 * The vector table. Must be aligned to 2048.
 * (This is the value programmed into VBAR_EL1; the architecture defines
 * four groups of four 0x80-byte slots: sync/irq/fiq/serror for each of
 * SP_EL0, SP_ELx, lower-EL AArch64, lower-EL AArch32.)
 */
	.align 11
ENTRY_NBTI(el1_vectors)
	/*
	 * Exception taken from current Exception Level with SP_EL0.
	 * (These shouldn't happen)
	 */
	vector_entry	el1t_sync_handler
	vector_entry	el1t_irq_handler
	vector_entry	el1t_fiq_handler
	vector_entry	el1t_error_handler

	/*
	 * Exception taken from current Exception Level with SP_EL1.
	 * There are entries for exceptions caused in EL1 (kernel exceptions).
	 */
	vector_entry	el1h_sync_handler
	vector_entry	el1h_intr_handler
	vector_entry	el1h_fiq_handler
	vector_entry	el1h_error_handler

	/*
	 * Exception taken from lower Exception Level which is using AArch64.
	 * There are entries for exceptions caused in EL0 (native user exceptions).
	 */
	vector_entry	el0_sync_handler
	vector_entry	el0_intr_handler
	vector_entry	el0_fiq_handler
	vector_entry	el0_error_handler

	/*
	 * Exception taken from lower Exception Level which is using AArch32.
	 * There are entries for exceptions caused in EL0 (compat user exceptions).
	 */
	vector_entry	el0_32sync_handler
	vector_entry	el0_32intr_handler
	vector_entry	el0_32fiq_handler
	vector_entry	el0_32error_handler
END(el1_vectors)
194
	/* Restore x0-x2 from the trapframe pointed to by sp. */
	.macro unwind_x0_x2
	ldp	x0, x1, [sp, #TF_X0]
	ldr	x2, [sp, #TF_X2]
	.endm
199
	/*
	 * Restore x3-x30 (everything except x0-x2 and sp/pc/pstate)
	 * from the trapframe pointed to by sp.
	 */
	.macro unwind_x3_x30
	ldp	x3, x4, [sp, #TF_X3]
	ldp	x5, x6, [sp, #TF_X5]
	ldp	x7, x8, [sp, #TF_X7]
	ldp	x9, x10, [sp, #TF_X9]
	ldp	x11, x12, [sp, #TF_X11]
	ldp	x13, x14, [sp, #TF_X13]
	ldp	x15, x16, [sp, #TF_X15]
	ldp	x17, x18, [sp, #TF_X17]
	ldp	x19, x20, [sp, #TF_X19]
	ldp	x21, x22, [sp, #TF_X21]
	ldp	x23, x24, [sp, #TF_X23]
	ldp	x25, x26, [sp, #TF_X25]
	ldp	x27, x28, [sp, #TF_X27]
	ldp	x29, x30, [sp, #TF_X29]
	.endm
216
/*
 * EL1 exception return for trap and interrupt.
 * Entered with sp = trapframe; restores all state and ERETs back
 * into the interrupted kernel code.
 */
#ifdef DDB
ENTRY_NP(el1_trap)
	nop				/* dummy for DDB backtrace (for lr-4) */
#endif
ENTRY_NP(el1_trap_exit)
	DISABLE_INTERRUPT		/* make sure I|F marked */

	unwind_x3_x30

#if TF_PC + 8 == TF_SPSR
	/* adjacent slots: load pc/pstate as a pair */
	ldp	x0, x1, [sp, #TF_PC]
#else
	ldr	x0, [sp, #TF_PC]
	ldr	x1, [sp, #TF_SPSR]
#endif
	msr	elr_el1, x0		/* exception pc */
	msr	spsr_el1, x1		/* exception pstate */

	/*
	 * cpu_jump_onfault() modifies tf->tf_sp, therefore
	 * we need to restore sp from trapframe,
	 * and unwind x0-x2 without sp.
	 */
	mov	x0, sp			/* x0 = trapframe, survives sp switch */
	ldr	x1, [x0, #TF_SP]
	mov	sp, x1			/* sp restored; trapframe still via x0 */
	ldp	x1, x2, [x0, #TF_X1]
	ldr	x0, [x0, #TF_X0]	/* last: x0 itself */

	ERET
END(el1_trap_exit)
#ifdef DDB
END(el1_trap)
#endif
254
/*
 * EL0 exception return for trap, interrupt and syscall with
 * possible AST processing.
 * Entered with sp = trapframe.  Loops calling trap_doast() until no
 * AST is pending, then restores user state and ERETs back to EL0.
 */
#ifdef DDB
ENTRY_NP(el0_trap)
	nop				/* dummy for DDB backtrace (for lr-4) */
#endif
ENTRY_NP(el0_trap_exit)

	adr	lr, 1f			/* return address from trap_doast */
1:
	/* while (curlwp->l_md.md_astpending != 0) { */
	DISABLE_INTERRUPT		/* make sure I|F marked */
	mrs	x9, tpidr_el1		/* x9 = curlwp */
	ldr	w8, [x9, #L_MD_ASTPENDING]
	cbz	w8, 9f

	/* curlwp->l_md.md_astpending = 0; */
	str	wzr, [x9, #L_MD_ASTPENDING]

	/*  trap_doast(tf); */
	ENABLE_INTERRUPT
	mov	x0, sp
	b	_C_LABEL(trap_doast)	/* tail call (return to 1b) */
	/* } */
9:

	/* x9 is tpidr_el1 */
	ldr	x23, [x9, #L_MD_CPACR]
	msr	cpacr_el1, x23		/* FP unit EL0 handover */
	isb				/* necessary? */

	ldr	x0, [x9, #L_PRIVATE]	/* tpidr_el0 = curlwp->l_private */
	msr	tpidr_el0, x0
#ifdef COMPAT_NETBSD32
	/* AArch32 processes read their TLS pointer from tpidrro_el0 */
	msr	tpidrro_el0, x0
#endif

#ifdef ARMV83_PAC
	/* Switch to the user PAC key. */
	adrl	x4, _C_LABEL(aarch64_pac_enabled)
	ldr	w4, [x4]
	cbz	w4, 1f
	ldp	x5, x6, [x9, #L_MD_IA_USER]
	msr	APIAKeyLo_EL1, x5
	msr	APIAKeyHi_EL1, x6
	isb
1:
#endif

	unwind_x3_x30

#if TF_PC + 8 == TF_SPSR
	/* adjacent slots: load pc/pstate as a pair */
	ldp	x0, x1, [sp, #TF_PC]
#else
	ldr	x0, [sp, #TF_PC]
	ldr	x1, [sp, #TF_SPSR]
#endif
	ldr	x2, [sp, #TF_SP]
	msr	elr_el1, x0		/* exception pc */
	msr	spsr_el1, x1		/* exception pstate */
	msr	sp_el0, x2		/* restore EL0 stack */

	/* if the process is traced, enable MDSCR_EL1.SS */
	tbz	x1, #SPSR_SS_SHIFT, 1f	/* skip unless SPSR.SS is set */
	mrs	x0, mdscr_el1
	orr	x0, x0, #MDSCR_SS	/* arm single-step for this return */
#ifdef DDB
	bic	x0, x0, #MDSCR_KDE	/* route debug exception to EL0 handler, not the kernel debugger */
#endif
	msr	mdscr_el1, x0
1:
	unwind_x0_x2

	/* leave sp at l_md.md_utf, return back to EL0 user process */
	ERET
END(el0_trap_exit)
#ifdef DDB
END(el0_trap)
#endif
336