/*	$NetBSD: rmixl_spl.S,v 1.7 2021/09/01 14:17:46 andvar Exp $	*/

/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"	/* which mips CPU levels do we support? */

#include <sys/cdefs.h>

#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_spl.S,v 1.7 2021/09/01 14:17:46 andvar Exp $");

#include "assym.h"

#define MAP_SCALESHIFT		3
#define RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
#define RMIXL_EIRR_PRESERVE	(RMIXL_INT_MASK_5 | RMIXL_SOFT_INT_MASK)
#define RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)

	.set noreorder

/*
 * Array of masks of the bits to set in the EIMR when we go to a
 * given interrupt priority level.
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] must remain constant;
 * hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */
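
/*
 * C-side view of the table above, as a sketch; the declaration and
 * accessor shown are illustrative assumptions, not verbatim from the
 * port's C code:
 *
 *	extern uint64_t ipl_eimr_map[];		// one 8-byte mask per IPL
 *
 *	eimr = ipl_eimr_map[ipl];		// EIMR bits enabled at 'ipl'
 */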

	.text

/*
 * initialize cp0 interrupt control for this cpu
 * - set STATUS[IE]
 * - clear EIRR and EIMR
 * on return, all interrupts are disabled by EIMR
 *
 * henceforth STATUS[IE] is expected to remain normally set
 * but may be cleared and restored for temporary interrupt disablement
 *
 * call before the first call to spl0 on this cpu
 */
LEAF_NOPROFILE(rmixl_spl_init_cpu)
	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
	ori	t0, MIPS_SR_INT_IE		# set IE
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	COP0_SYNC
	dmtc0	zero, MIPS_COP_0_EIMR		##  "       "   "   "  EIMR
	COP0_SYNC
	dmtc0	zero, MIPS_COP_0_EIRR		## clear EIRR
	COP0_SYNC
	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE
	JR_HB_RA
END(rmixl_spl_init_cpu)
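
/*
 * Hypothetical C-side usage sketch; the function name and call site
 * are assumptions, only the ordering (init before the first spl0) is
 * what the comment above requires:
 *
 *	void
 *	cpu_startup_intr(void)			// name assumed
 *	{
 *		rmixl_spl_init_cpu();	// EIRR/EIMR cleared, STATUS[IE] set
 *		spl0();			// first lowering to IPL_NONE
 *	}
 */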

/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for the spl(9) kernel interface.
 */

_splraise:
	/*
	 * a0 = EIMR bits requested to be set for this IPL
	 * a1 = this IPL (IPL_*)
	 * Can only use a0-a3 and v0-v1
	 * old IPL is returned in v0
	 */
	dmfc0	a2, MIPS_COP_0_EIMR		# save EIMR
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info from curlwp
	INT_L	v0, CPU_INFO_CPL(a3)		## get current IPL from cpu_info
	sltu	v1, a1, v0			## newipl < curipl?
	bnez	v1, 1f				## yes, don't change.
	 nop
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a0, MIPS_COP_0_EIMR		## set new EIMR
	JR_HB_RA
1:
	dmtc0	a2, MIPS_COP_0_EIMR		## restore saved EIMR
	JR_HB_RA
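
/*
 * C-level sketch of the _splraise protocol above; illustrative only,
 * the eimr_read/eimr_write accessors are hypothetical names and
 * ci_cpl stands in for CPU_INFO_CPL:
 *
 *	int
 *	_splraise(uint64_t eimr_bits, int newipl)
 *	{
 *		uint64_t saved = eimr_read();
 *		eimr_write(0);			// block everything first
 *		int oldipl = curcpu()->ci_cpl;
 *		if (newipl < oldipl)
 *			eimr_write(saved);	// would lower: no change
 *		else {
 *			curcpu()->ci_cpl = newipl;
 *			eimr_write(eimr_bits);
 *		}
 *		return oldipl;
 *	}
 */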

STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL

	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	v1, MIPS_COP_0_EIMR		## set new EIMR
	JR_HB_RA
END(_splsw_splx)
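
/*
 * Unlike _splraise, splx installs its argument unconditionally and so
 * may lower the IPL; as a sketch (eimr_write hypothetical):
 *
 *	curcpu()->ci_cpl = ipl;
 *	eimr_write(ipl_eimr_map[ipl]);
 */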

STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## save IPL in cpu_info
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, MIPS_COP_0_EIMR		## set new EIMR
	JR_HB_RA
END(_splsw_spl0)

STATIC_LEAF(_splsw_setsoftintr)
	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
	dmfc0	v1, MIPS_COP_0_EIMR		# save EIMR register
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	dmfc0	v0, MIPS_COP_0_EIRR		## load EIRR
	and	v0, RMIXL_EIRR_PRESERVE		## preserve clock & softints
	or	v0, a0				## set new softint bit
	dmtc0	v0, MIPS_COP_0_EIRR		## store EIRR
	COP0_SYNC
	dmtc0	v1, MIPS_COP_0_EIMR		## restore EIMR
	JR_HB_RA
END(_splsw_setsoftintr)

STATIC_LEAF(_splsw_clrsoftintr)
	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
	xor	a0, RMIXL_EIRR_PRESERVE		# clear our bit from preserve mask
	dmfc0	v1, MIPS_COP_0_EIMR		# save EIMR register
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	dmfc0	v0, MIPS_COP_0_EIRR		## load EIRR
	and	v0, a0				## apply preserve mask
	dmtc0	v0, MIPS_COP_0_EIRR		## store EIRR
	COP0_SYNC
	dmtc0	v1, MIPS_COP_0_EIMR		## restore EIMR
	JR_HB_RA
END(_splsw_clrsoftintr)
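
/*
 * The two routines above are a masked read-modify-write of EIRR; as a
 * C sketch (illustrative only):
 *
 *	// setsoftintr:	eirr = (eirr & RMIXL_EIRR_PRESERVE) | bit;
 *	// clrsoftintr:	eirr = eirr & (RMIXL_EIRR_PRESERVE ^ bit);
 *
 * Only the clock and softint bits are ever written back; every other
 * EIRR bit is written as zero.
 */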

STATIC_LEAF(_splsw_splraise)
	move	a1, a0				# a1 = requested IPL
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	a0, (v1)			# a0 = EIMR bits for this IPL
	b	_splraise
	 nop
END(_splsw_splraise)

STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info from curlwp
	li	a1, IPL_HIGH			## new IPL is IPL_HIGH
	INT_L	v0, CPU_INFO_CPL(a3)		## old IPL for return value
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
						## interrupts remain disabled!
	j	ra				# return
	 nop
END(_splsw_splhigh)

STATIC_LEAF(_splsw_splddb)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB
	li	a1, IPL_DDB
	b	_splraise
	 nop
END(_splsw_splddb)

STATIC_LEAF(_splsw_splsched)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED
	li	a1, IPL_SCHED
	b	_splraise
	 nop
END(_splsw_splsched)

STATIC_LEAF(_splsw_splvm)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	li	a1, IPL_VM
	b	_splraise
	 nop
END(_splsw_splvm)

STATIC_LEAF(_splsw_splsoftserial)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL
	li	a1, IPL_SOFTSERIAL
	b	_splraise
	 nop
END(_splsw_splsoftserial)

STATIC_LEAF(_splsw_splsoftnet)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET
	li	a1, IPL_SOFTNET
	b	_splraise
	 nop
END(_splsw_splsoftnet)

STATIC_LEAF(_splsw_splsoftbio)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO
	li	a1, IPL_SOFTBIO
	b	_splraise
	 nop
END(_splsw_splsoftbio)

STATIC_LEAF(_splsw_splsoftclock)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK
	li	a1, IPL_SOFTCLOCK
	b	_splraise
	 nop
END(_splsw_splsoftclock)

STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, MIPS_COP_0_EIRR		# get active interrupts
						# restrict to hard int bits:
	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 = ta1 & RMIXL_SOFT_INT_MASK
	xor	v1, ta1				#  v1 ^= ta1 (== ta1 & ~mask)

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2				# apply to pending bits
	beq	v1, zero, 4f			# if nothing pending...
	 nop					# ... return IPL_NONE

	li	v0, IPL_VM			# ipl=IPL_VM
1:
	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
	and	ta2, v1				# any match to pending intrs?
	beq	ta2, zero, 2f			#  no, return ipl
	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT	#  point to next entry
	addiu	v0, 1				# ipl++
	move	v1, ta2				# update highest pending
	b	1b				# loop
	 nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit;
	 *   the RMI evbmips_iointr function will sort through
	 *   individual EIRR requests
	 */
	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
	and	t1, v1, t2			# save count/compare intr request value
	nor	t0, zero, t2			# invert the mask
	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
	 li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
						#  for non-clock hw intrs
3:
	or	v1, t1				# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)			# set a (fake) new pending mask
	j	ra				# and return highest ipl pending
	 nop
END(_splsw_splintr)
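
/*
 * C sketch of the priority walk above; illustrative only, and it
 * assumes (as the code does) that ipl_eimr_map[IPL_VM - 1] is the
 * IPL_SOFTSERIAL entry:
 *
 *	int ipl = IPL_NONE;
 *	uint64_t pend = eirr & ~RMIXL_SOFT_INT_MASK;	// hard ints only
 *	if ((pend &= ipl_eimr_map[IPL_VM - 1]) != 0)
 *		for (ipl = IPL_VM; (pend & ipl_eimr_map[ipl]) != 0; ipl++)
 *			pend &= ipl_eimr_map[ipl];
 *	// 'pend' is then folded into MIPS_INT_MASK_5 / MIPS_INT_MASK_1,
 *	// shifted into CAUSE 'IM' position and stored through a0;
 *	// 'ipl' is the return value.
 */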

STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level

	dmfc0	t0, MIPS_COP_0_EIMR		# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT		# shift cpl to array index
	PTR_ADDU t2, t1
	REG_L	t3, (t2)			# load value
1:	bne	t0, t3, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splcheck)
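
/*
 * The PARANOIA check above is, as a sketch (eimr_read hypothetical):
 *
 *	while (eimr_read() != ipl_eimr_map[curcpu()->ci_cpl])
 *		continue;	// spin so the inconsistency is catchable
 */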

	.rdata
	.globl _C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)
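
/*
 * The entries above must stay in one-to-one correspondence with the
 * function-pointer members of the mips 'struct splsw' switch this
 * table initializes on the C side; reordering either side silently
 * breaks spl(9).
 */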