xref: /netbsd-src/sys/arch/mips/rmi/rmixl_spl.S (revision e8cc649d653c72cfa524ddb16240b9a56aa8cc36)
/*	$NetBSD: rmixl_spl.S,v 1.7 2021/09/01 14:17:46 andvar Exp $	*/

/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31
#include "opt_cputype.h"	/* which mips CPU levels do we support? */

#include <sys/cdefs.h>

#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_spl.S,v 1.7 2021/09/01 14:17:46 andvar Exp $");

#include "assym.h"

/*
 * ipl_eimr_map entries are .dword (8 bytes), so an IPL is converted
 * to a byte offset into the table by a left shift of MAP_SCALESHIFT.
 */
#define MAP_SCALESHIFT		3

/*
 * EIRR/EIMR interrupt bit positions are the CP0 CAUSE/STATUS
 * interrupt bit positions shifted right by 8.
 * (The duplicate definition of RMIXL_INT_MASK_5 has been dropped.)
 */
#define RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)

/* EIRR bits preserved across read-modify-write: clock intr + softints */
#define RMIXL_EIRR_PRESERVE	(RMIXL_INT_MASK_5 | RMIXL_SOFT_INT_MASK)

	.set noreorder
52
/*
 * Array of mask of bits to set in the EIMR when we go to a
 * given hardware interrupt priority level.
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant.
 * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
 *
 * Indexed by IPL, one .dword per entry (hence MAP_SCALESHIFT scaling
 * in the code below).  The entry comments suggest paired soft IPLs
 * share a numeric value on this port -- NOTE(review): confirm against
 * this port's IPL numbering in assym.h.
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */

	.text
73
/*
 * initialize cp0 interrupt control for this cpu
 * - set STATUS[IE]
 * - clear EIRR and EIMR
 * on return, all interrupts are disabled by EIMR
 *
 * henceforth STATUS[IE] is expected to remain normally set
 * but may be cleared and restored for temporary interrupt disablement
 *
 * call before the first call to spl0 on this cpu
 *
 * clobbers: t0.  '##' comments mark instructions executed while
 * interrupts are disabled.
 */
LEAF_NOPROFILE(rmixl_spl_init_cpu)
	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
	ori	t0, MIPS_SR_INT_IE		# set IE
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	COP0_SYNC
	dmtc0	zero, MIPS_COP_0_EIMR		##  "       "   "   "  EIMR
	COP0_SYNC
	dmtc0	zero, MIPS_COP_0_EIRR		## clear EIRR
	COP0_SYNC
	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE
	JR_HB_RA
END(rmixl_spl_init_cpu)
97
/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for spl(9) kernel interface.
 */

/*
 * _splraise: common tail for the _splsw_spl*() raise routines.
 * Raises the IPL only -- a request below the current IPL leaves
 * both cpl and EIMR unchanged.
 */
_splraise:
	/*
	 * a0 = EIMR bits requested to be set for this IPL
	 * a1 = this IPL (IPL_*)
	 * Can only use a0-a3 and v0-v1 (no stack frame; reached via 'b')
	 * old IPL is returned in v0
	 * '##' marks instructions executed with all interrupts masked.
	 */
	dmfc0	a2, MIPS_COP_0_EIMR		# save EIMR
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## a3 = curlwp->l_cpu
	INT_L	v0, CPU_INFO_CPL(a3)		## get current IPL from cpu_info
	sltu	v1, a1, v0			## newipl < curipl
	bnez	v1, 1f				## yes, don't change.
	 nop
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a0, MIPS_COP_0_EIMR		## set new EIMR
	JR_HB_RA
1:
	dmtc0	a2, MIPS_COP_0_EIMR		## restore saved EIMR
	JR_HB_RA
125
/*
 * _splsw_splx: set the IPL to a0 unconditionally (may raise or lower),
 * loading the matching EIMR mask from ipl_eimr_map.  No return value.
 */
STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL

	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	v1, MIPS_COP_0_EIMR		## set new EIMR
	JR_HB_RA
END(_splsw_splx)
140
/*
 * _splsw_spl0: lower the IPL to IPL_NONE and load its EIMR mask
 * (softints enabled).  No return value.
 */
STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## save IPL in cpu_info
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, MIPS_COP_0_EIMR		## set new EIMR
	JR_HB_RA
END(_splsw_spl0)
154
/*
 * _splsw_setsoftintr: post a software interrupt.
 * a0 = softint bit in CAUSE position (converted to EIRR position below).
 * The EIRR read-modify-write writes back only the RMIXL_EIRR_PRESERVE
 * bits (clock + softints) plus the new bit.
 * NOTE(review): assumes writing 0 to the other EIRR bits leaves those
 * hardware interrupt requests intact -- confirm in the XLR/XLS PRM.
 */
STATIC_LEAF(_splsw_setsoftintr)
	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
	dmfc0	v1, MIPS_COP_0_EIMR		# save EIMR register
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	dmfc0	v0, MIPS_COP_0_EIRR		## load EIRR
	and	v0, RMIXL_EIRR_PRESERVE		## preserve clock & softints
	or	v0, a0				## set new softint bit
	dmtc0	v0, MIPS_COP_0_EIRR		## store EIRR
	COP0_SYNC
	dmtc0	v1, MIPS_COP_0_EIMR		## restore EIMR
	JR_HB_RA
END(_splsw_setsoftintr)
169
/*
 * _splsw_clrsoftintr: clear a pending software interrupt.
 * a0 = softint bit in CAUSE position (converted to EIRR position below).
 * Builds (RMIXL_EIRR_PRESERVE & ~bit) and ANDs it into EIRR, clearing
 * the requested softint while keeping the other preserved bits pending.
 */
STATIC_LEAF(_splsw_clrsoftintr)
	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
	xor	a0, RMIXL_EIRR_PRESERVE		# a0 = preserve mask minus this bit
	dmfc0	v1, MIPS_COP_0_EIMR		# save EIMR register
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	dmfc0	v0, MIPS_COP_0_EIRR		## load EIRR
	and	v0, a0				## apply preserve mask
	dmtc0	v0, MIPS_COP_0_EIRR		## store EIRR
	COP0_SYNC
	dmtc0	v1, MIPS_COP_0_EIMR		## restore EIMR
	JR_HB_RA
END(_splsw_clrsoftintr)
184
STATIC_LEAF(_splsw_splraise)
	/* splraise(ipl): look up the EIMR mask for 'ipl', defer to _splraise. */
	move	a1, a0				# a1 = requested IPL
	sll	a2, a0, MAP_SCALESHIFT		# scale IPL to table byte offset
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# v1 = table base
	PTR_ADDU v1, a2				# v1 = &ipl_eimr_map[ipl]
	b	_splraise			# common tail returns old IPL in v0
	 REG_L	a0, (v1)			# (delay slot) a0 = EIMR mask
END(_splsw_splraise)
194
/*
 * _splsw_splhigh: raise the IPL to IPL_HIGH.
 * Returns the old IPL in v0.  EIMR is left all-zero, so every
 * interrupt stays masked until a later splx()/spl0() reloads it.
 */
STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	dmtc0	zero, MIPS_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info from curlwp
	li	a1, IPL_HIGH			##
	INT_L	v0, CPU_INFO_CPL(a3)		## old IPL for return value
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
						## interrupts remain disabled!
	j	ra				# return
	 nop
END(_splsw_splhigh)
207
STATIC_LEAF(_splsw_splddb)
	/* splddb: raise to IPL_DDB via the common _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_DDB			# (delay slot) a1 = target IPL
END(_splsw_splddb)
214
STATIC_LEAF(_splsw_splsched)
	/* splsched: raise to IPL_SCHED via the common _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_SCHED			# (delay slot) a1 = target IPL
END(_splsw_splsched)
221
STATIC_LEAF(_splsw_splvm)
	/* splvm: raise to IPL_VM via the common _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_VM			# (delay slot) a1 = target IPL
END(_splsw_splvm)
228
STATIC_LEAF(_splsw_splsoftserial)
	/* splsoftserial: raise to IPL_SOFTSERIAL via the _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_SOFTSERIAL		# (delay slot) a1 = target IPL
END(_splsw_splsoftserial)
235
STATIC_LEAF(_splsw_splsoftnet)
	/* splsoftnet: raise to IPL_SOFTNET via the _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_SOFTNET			# (delay slot) a1 = target IPL
END(_splsw_splsoftnet)
242
STATIC_LEAF(_splsw_splsoftbio)
	/* splsoftbio: raise to IPL_SOFTBIO via the _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_SOFTBIO			# (delay slot) a1 = target IPL
END(_splsw_splsoftbio)
249
STATIC_LEAF(_splsw_splsoftclock)
	/* splsoftclock: raise to IPL_SOFTCLOCK via the _splraise tail. */
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK	# a0 = EIMR mask
	b	_splraise			# returns old IPL in v0
	 li	a1, IPL_SOFTCLOCK		# (delay slot) a1 = target IPL
END(_splsw_splsoftclock)
256
/*
 * _splsw_splintr: find the highest IPL with a pending, enabled hard
 * interrupt and synthesize a CAUSE-style 'IM' pending word.
 * a0 = pointer to the word receiving the (fake) pending mask
 * v0 = highest pending IPL (IPL_NONE if nothing pending)
 * clobbers: v1, t0-t2, ta1-ta3
 */
STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, MIPS_COP_0_EIRR		# get active interrupts
						# restrict to hard int bits:
	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 = ta1 & ~RMIXL_SOFT_INT_MASK
	xor	v1, ta1				#   "       "

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2				# apply to pending bits
	beq	v1, zero, 4f			# if nothing pending...
	 nop					# ... return IPL_NONE

	li	v0, IPL_VM			# ipl=IPL_VM
1:
	/* scan upward while the pending set still intersects the
	 * 'enabled' mask of the next-higher IPL */
	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
	and	ta2, v1				# any match to pending intrs?
	beq	ta2, zero, 2f			#  no, return ipl
	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT	#  (delay slot) point to next entry
	addiu	v0, 1				# ipl++
	move	v1, ta2				# update highest pending
	b	1b				# loop
	 nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit
	 *   the RMI evbmips_iointr function will sort through
	 *   individual EIRR requests
	 */
	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
	and	t1, v1, t2			# save count/compare intr request value
	nor	t0, zero, t2			# invert the mask
	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
	 li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
						#  for non-clock hw intrs
3:
	or	v1, t1				# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)			# set a (fake) new pending mask
	j	ra				# and return highest ipl pending
	 nop
END(_splsw_splintr)
304
/*
 * _splsw_splcheck: sanity check (PARANOIA kernels only) that the live
 * EIMR equals ipl_eimr_map[cpl]; spins forever on a mismatch so the
 * inconsistency is caught in the debugger.  No-op otherwise.
 * clobbers: t0-t3 (PARANOIA only)
 */
STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level

	dmfc0	t0, MIPS_COP_0_EIMR		# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT		# shift cpl to array index
	PTR_ADDU t2, t1
	REG_L	t3, (t2)			# load value
1:	bne	t0, t3, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splcheck)
322
	.rdata
	/*
	 * spl switch table: pointers to the routines above.
	 * NOTE(review): entry order must match 'struct splsw'
	 * (declared elsewhere in the tree) -- do not reorder.
	 */
	.globl _C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
        PTR_WORD _C_LABEL(_splsw_splhigh)
        PTR_WORD _C_LABEL(_splsw_splsched)
        PTR_WORD _C_LABEL(_splsw_splvm)
        PTR_WORD _C_LABEL(_splsw_splsoftserial)
        PTR_WORD _C_LABEL(_splsw_splsoftnet)
        PTR_WORD _C_LABEL(_splsw_splsoftbio)
        PTR_WORD _C_LABEL(_splsw_splsoftclock)
        PTR_WORD _C_LABEL(_splsw_splraise)
        PTR_WORD _C_LABEL(_splsw_spl0)
        PTR_WORD _C_LABEL(_splsw_splx)
        PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
        PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)
342