/* $NetBSD: locore.S,v 1.1 2014/08/10 05:47:37 matt Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <aarch64/asm.h>
#include "assym.h"

#include "opt_ddb.h"

RCSID("$NetBSD: locore.S,v 1.1 2014/08/10 05:47:37 matt Exp $")

/*
 * At IPL_SCHED:
 *	x0 = oldlwp (may be NULL)
 *	x1 = newlwp
 *	x2 = returning
 * returns x0-x2 unchanged
 */
ENTRY_NP(cpu_switchto)
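	/* No old lwp to save state for; just switch in the new one. */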
	cbz	x0, .Lrestore_lwp

	/*
	 * Store the callee saved registers on the stack in a trapframe
	 */
	sub	sp, sp, #TF_SIZE
	stp	x19, x20, [sp, #TF_X19]
	stp	x21, x22, [sp, #TF_X21]
	stp	x23, x24, [sp, #TF_X23]
	stp	x25, x26, [sp, #TF_X25]
	stp	x27, x28, [sp, #TF_X27]
	stp	x29, x30, [sp, #TF_X29]

	/*
	 * Get the previous trapframe pointer and the user writeable Thread ID
	 * register and save them in the trap frame.
	 */
	ldr	x5, [x0, #L_MD_KTF]
	mrs	x4, tpidr_el0
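	/* When the two fields are adjacent, a single stp stores both. */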
#if TF_TPIDR + 8 == TF_CHAIN
	stp	x4, x5, [sp, #TF_TPIDR]
#else
	str	x4, [sp, #TF_TPIDR]
	str	x5, [sp, #TF_CHAIN]
#endif

	/*
	 * Get the current stack pointer and the CPACR and save them in
	 * the old lwp's MD area.
	 */
	mov	x4, sp
	mrs	x5, cpacr_el1
#if L_MD_KTF + 8 == L_MD_CPACR
	stp	x4, x5, [x0, #L_MD_KTF]
#else
	str	x4, [x0, #L_MD_KTF]
	str	x5, [x0, #L_MD_CPACR]
#endif

	/* We are done with the old lwp */

.Lrestore_lwp:
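	/*
	 * Pick up the new lwp's saved kernel SP (its trapframe pointer)
	 * and its cpacr_el1, which gates FP/SIMD access.
	 */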
#if L_MD_KTF + 8 == L_MD_CPACR
	ldp	x4, x5, [x1, #L_MD_KTF]	// get trapframe ptr and cpacr_el1
#else
	ldr	x4, [x1, #L_MD_KTF]	// get trapframe ptr (aka SP)
	ldr	x5, [x1, #L_MD_CPACR]	// get cpacr_el1
#endif
	mov	sp, x4			// restore stack pointer
	msr	cpacr_el1, x5		// restore cpacr_el1

	ldr	x4, [sp, #TF_TPIDR]	// load user writeable thread ID reg
	msr	tpidr_el0, x4		// restore it
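	/* (tpidr_el0 is EL0-writable and conventionally holds the TLS pointer) */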

	mrs	x3, tpidr_el1		// get curcpu
	str	x1, [x3, #CI_CURLWP]	// show as curlwp

	/*
	 * Restore callee save registers
	 */
	ldp	x19, x20, [sp, #TF_X19]
	ldp	x21, x22, [sp, #TF_X21]
	ldp	x23, x24, [sp, #TF_X23]
	ldp	x25, x26, [sp, #TF_X25]
	ldp	x27, x28, [sp, #TF_X27]
	ldp	x29, x30, [sp, #TF_X29]
	add	sp, sp, #TF_SIZE	/* pop trapframe from stack */

	ret
END(cpu_switchto)
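
/*
 * For reference, the MI-facing C prototype of the routine above (declared
 * by machine-independent code, quoted here only as a sketch):
 *
 *	lwp_t *cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning);
 */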

/*
 * Called at IPL_SCHED
 *	x0 = old lwp (from cpu_switchto)
 *	x1 = new lwp (from cpu_switchto)
 *	x27 = func
 *	x28 = arg
 */
ENTRY_NP(lwp_trampoline)
#if defined(MULTIPROCESSOR)
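	/*
	 * x0/x1 are caller-saved under the AAPCS64, so preserve the old
	 * and new lwp pointers in callee-saved registers across the call.
	 */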
	mov	x19, x0
	mov	x20, x1
	bl	_C_LABEL(proc_trampoline_mp)
	mov	x1, x20
	mov	x0, x19
#endif
	bl	_C_LABEL(lwp_startup)

	/*
	 * If the function returns, have it return to the exception trap return
	 * handler which will restore all user state before returning to EL0.
	 */
	adr	x30, exception_trap_exit	// set function return address
	mov	x0, x28				// mov arg into place
	br	x27				// call function with arg
END(lwp_trampoline)
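
/*
 * Note: the (func, arg) pair in x27/x28 is presumably set up by the
 * fork path (cpu_lwp_fork()); func is invoked with arg in x0 and, if
 * it returns, control falls into exception_trap_exit via the
 * pre-loaded x30.
 */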

/*
 * Return from exception.  There's a trap return, an intr return, and
 * a syscall return.
 */
ENTRY_NP(exception_trap_exit)
	ldp	x0, x1, [sp, #TF_X0]
	ldp	x2, x3, [sp, #TF_X2]
	ldp	x4, x5, [sp, #TF_X4]
	ldp	x6, x7, [sp, #TF_X6]
	ldp	x8, x9, [sp, #TF_X8]
	ldp	x10, x11, [sp, #TF_X10]
	ldp	x12, x13, [sp, #TF_X12]
	ldp	x14, x15, [sp, #TF_X14]
exception_syscall_exit:
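	/*
	 * Syscall returns enter here and skip the x0-x15 reloads above;
	 * the syscall path is assumed to load the return-value registers
	 * itself before branching here.
	 */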
	ldp	x16, x17, [sp, #TF_X16]
	ldr	x18, [sp, #TF_X18]

#if TF_SP + 8 == TF_PC
	ldp	x20, x21, [sp, #TF_SP]
#else
	ldr	x20, [sp, #TF_SP]
	ldr	x21, [sp, #TF_PC]
#endif
	ldr	x22, [sp, #TF_SPSR]
	msr	sp_el0, x20
	msr	elr_el1, x21
	msr	spsr_el1, x22

	ldp	x19, x20, [sp, #TF_X19]
	ldp	x21, x22, [sp, #TF_X21]
	ldp	x23, x24, [sp, #TF_X23]
	ldp	x25, x26, [sp, #TF_X25]
	ldp	x27, x28, [sp, #TF_X27]
	ldp	x29, x30, [sp, #TF_X29]

	/*
	 * Don't adjust the stack to pop the trapframe since exception
	 * entry would just subtract it again.
	 */
	eret
END(exception_trap_exit)

#ifdef DDB
ENTRY(cpu_Debugger)
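	/*
	 * brk raises a breakpoint exception; the 0xffff immediate is
	 * presumably how the trap handler recognizes a deliberate entry
	 * into the debugger.
	 */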
	brk	#0xffff
	ret
END(cpu_Debugger)
#endif /* DDB */

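/*
 * Kernel setjmp/longjmp.  The jmp_buf layout used below is thirteen
 * 64-bit words: x19-x28 at offsets 0-72, x29/x30 at 80/88, and sp
 * at offset 96.
 */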
ENTRY(setjmp)
	stp	x19, x20, [x0, #0]
	stp	x21, x22, [x0, #16]
	stp	x23, x24, [x0, #32]
	stp	x25, x26, [x0, #48]
	stp	x27, x28, [x0, #64]
	stp	x29, x30, [x0, #80]
	mov	x1, sp
	str	x1, [x0, #96]
	mov	x0, #0
	ret
END(setjmp)

ENTRY(longjmp)
	ldp	x19, x20, [x0, #0]
	ldp	x21, x22, [x0, #16]
	ldp	x23, x24, [x0, #32]
	ldp	x25, x26, [x0, #48]
	ldp	x27, x28, [x0, #64]
	ldp	x29, x30, [x0, #80]
	ldr	x1, [x0, #96]
	mov	sp, x1
	mov	x0, #1
	ret
END(longjmp)