/*	$NetBSD: cpuswitch.S,v 1.107 2023/03/01 08:17:53 riastradh Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_cpuoptions.h"
#include "opt_kasan.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.107 2023/03/01 08:17:53 riastradh Exp $")

/* LINTSTUB: include <sys/param.h> */

#ifdef FPU_VFP
	.fpu vfpv2
#endif

#undef IRQdisable
#undef IRQenable

/*
 * Definitions of IRQdisable and IRQenable.  These keep FIQs enabled,
 * since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
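
/*
 * For reference, a hedged C-level sketch of what the pre-v6 variants
 * above do (cf. disable_interrupts()/restore_interrupts() in
 * <arm/cpufunc.h>; this helper is illustrative, not a kernel API):
 *
 *	static inline uint32_t
 *	irq_disable_sketch(void)
 *	{
 *		uint32_t cpsr;
 *
 *		__asm volatile("mrs %0, cpsr" : "=r"(cpsr));
 *		__asm volatile("msr cpsr_c, %0" :: "r"(cpsr | I32_bit));
 *		return cpsr;		// old state, for a later restore
 *	}
 */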

	.text

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning; true when returning from a softint
 *		(not examined by this implementation)
 */
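
/*
 * Hedged sketch of the MI call site (abridged from mi_switch() in
 * sys/kern/kern_synch.c):
 *
 *	prevlwp = cpu_switchto(l, newl, returning);
 */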
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into callee-saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r5)
#else
	ldr	r5, [r6, #L_CPU]		/* get cpu from new lwp */
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the pcb of the old lwp. */
	ldr	r7, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r7, #(PCB_R8)]
	strd	r10, r11, [r7, #(PCB_R10)]
	strd	r12, r13, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r7, #(PCB_USER_PID_RW)]
#endif
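
	/*
	 * C-level sketch of the TPIDRURW save above; the accessor is in
	 * the style of <arm/armreg.h> and the pcb field is named after
	 * the assym.h offset used here (both illustrative):
	 *
	 *	pcb->pcb_user_pid_rw = armreg_tpidrurw_read();
	 */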
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Restore saved context */

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */

	/* We have a new curlwp now so make a note of it */
#ifdef _ARM_ARCH_7
	dmb				/* store-before-store */
#endif
	str	r6, [r5, #(CI_CURLWP)]
#ifdef _ARM_ARCH_7
	dmb				/* store-before-load */
#endif
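
	/*
	 * Illustrative C equivalent of the two barriers above, using
	 * the MI membar ops from <sys/atomic.h> (sketch only; on ARMv7
	 * both are dmb, as coded above):
	 *
	 *	membar_producer();		// store-before-store
	 *	ci->ci_curlwp = newlwp;
	 *	membar_sync();			// store-before-load
	 */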

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* make sure we are using the new lwp's stack */
	ldr	sp, [r7, #(PCB_KSP)]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If we are switching to a system lwp, don't bother restoring
	 * thread or vfp registers and skip the ras check.
	 */
	ldr	r0, [r6, #(L_FLAG)]
	tst	r0, #(LW_SYSTEM)
	bne	.Lswitch_do_restore

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif
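
	/*
	 * C sketch of the restore above (accessor and field names are
	 * illustrative, in the armreg_*_write() style): TPIDRURW comes
	 * back from the pcb, and TPIDRURO is pointed at the new lwp's
	 * TLS pointer:
	 *
	 *	armreg_tpidrurw_write(pcb->pcb_user_pid_rw);
	 *	armreg_tpidruro_write((uint32_t)l->l_private);
	 */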

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC.
	 */
	ldr	r0, [r5, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0
#endif
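
	/*
	 * Hedged C sketch of the VFP check above (ci_vfp_id and the pcb
	 * field are named after the assym.h offsets used here; the
	 * accessor name is illustrative):
	 *
	 *	if (ci->ci_vfp_id != 0)
	 *		armreg_fpexc_write(pcb->pcb_vfp_fpexc);
	 */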

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
	ldr	r0, [r6, #(L_PROC)]	/* fetch the proc for ras_lookup */
	ldr	r2, [r0, #(P_RASLIST)]
	cmp	r2, #0			/* p->p_raslist == NULL? */
	beq	.Lswitch_do_restore

	/* we can use r8 since we haven't restored saved registers yet. */
	ldr	r8, [r6, #(L_MD_TF)]	/* r8 = trapframe (used below) */
	ldr	r1, [r8, #(TF_PC)]	/* second ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r8, #(TF_PC)]
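
	/*
	 * The RAS check above corresponds to this C logic (see
	 * sys/kern/kern_ras.c for ras_lookup()):
	 *
	 *	if (p->p_raslist != NULL) {
	 *		void *pc = ras_lookup(p, (void *)tf->tf_pc);
	 *		if (pc != (void *)-1)
	 *			tf->tf_pc = (register_t)pc;	// restart it
	 *	}
	 */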

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

.Lswitch_do_restore:
	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldr	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r12}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
	str	r4, [r5, #CI_LASTLWP]
#endif

	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif
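
	/*
	 * Why the clrex matters: without it a stale exclusive
	 * reservation could let the incoming lwp's first strex pair
	 * with the outgoing lwp's ldrex and succeed spuriously, e.g.
	 *
	 *	ldrex	r3, [r0]	// old lwp, preempted after this
	 *	...			// context switch occurs here
	 *	strex	r2, r3, [r0]	// new lwp: must fail and retry
	 */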

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

END(cpu_switchto)

ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * set up by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
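
	/*
	 * Hedged sketch of the switchframe seeding that cpu_lwp_fork()
	 * performs (abridged from sys/arch/arm/arm32/vm_machdep.c;
	 * treat the exact field names as illustrative):
	 *
	 *	sf->sf_r4 = (u_int)func;
	 *	sf->sf_r5 = (u_int)arg;
	 *	sf->sf_r7 = PSR_USR32_MODE;
	 *	sf->sf_pc = (u_int)lwp_trampoline;
	 */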
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc
	mov	pc, r4
#endif

	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* disable IRQs */

	/* for DO_AST */
	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 *	Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
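/*
 * Hypothetical caller sketch (the real caller is the MD softint
 * machinery; the cpu_info field name below is illustrative):
 *
 *	lwp_t *softlwp = curcpu()->ci_softlwps[SOFTINT_NET];
 *
 *	softint_switch(softlwp, IPL_SOFTNET);
 */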
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	push	{r2-r3}
	push	{r4-r7}
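
	/*
	 * The two pushes above hand-build a frame with the same layout
	 * cpu_switchto() pushes ({r4-r7, ip, lr}), so that if the soft
	 * lwp blocks, resuming the pinned lwp pops this frame and
	 * "returns" into softint_tramp with the saved sp in ip.
	 */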

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register in case it was
	 * set in userland.
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r2, #(PCB_USER_PID_RW)]
#endif

	/* the new lwp's pcb is invariant, so load it before disabling IRQs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
	/*
	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */

#ifdef KASAN
	mov	r0, r5
	bl	_C_LABEL(kasan_softint)
#endif

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQenable
					/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
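	/*
	 * softint_dispatch() is the MI half; its C prototype (from
	 * sys/kern/kern_softint.c) is
	 *
	 *	void softint_dispatch(lwp_t *pinned, int s);
	 */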
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	msr	cpsr_c, r6			/* restore interrupts */
	pop	{r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */