/*	$NetBSD: rmixl_spl.S,v 1.3 2011/04/14 05:16:54 cliff Exp $	*/

/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"	/* which mips CPU levels do we support? */

#include <sys/cdefs.h>

#include <machine/param.h>
#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_spl.S,v 1.3 2011/04/14 05:16:54 cliff Exp $");

#include "assym.h"

#define	MAP_SCALESHIFT		3
#define	RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define	RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define	RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define	RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
#define	RMIXL_EIRR_PRESERVE	(RMIXL_INT_MASK_5 | RMIXL_SOFT_INT_MASK)
#define	RMIXL_COP_0_EIRR	_(9), 6
#define	RMIXL_COP_0_EIMR	_(9), 7
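
/*
 * Worked example of the mask arithmetic (CAUSE bit values from
 * <mips/cpuregs.h>): MIPS_SOFT_INT_MASK covers CAUSE[9:8] = 0x0300,
 * so RMIXL_SOFT_INT_MASK = 0x0300 >> 8 = 0x3 (EIRR bits 1:0), and
 * MIPS_INT_MASK_5 = 0x8000 (CAUSE[15]) becomes 0x80 (EIRR bit 7,
 * the count/compare timer).  The ">> 8" thus re-bases the familiar
 * CAUSE-style IP masks onto EIRR/EIMR bit positions.
 */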

	.set	noreorder

/*
 * Array of EIMR masks, one per interrupt priority level (IPL): each
 * entry holds the EIMR bits to load when the CPU moves to that IPL.
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay
 * constant; hardware interrupt bits are managed by
 * rmixl_vec_establish and rmixl_vec_disestablish.
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */

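/*
 * C-equivalent view of the table above (an illustrative sketch, not
 * real code; on this port IPL_SOFTBIO and IPL_SOFTSERIAL share slots
 * with IPL_SOFTCLOCK and IPL_SOFTNET, as the entry comments note):
 *
 *	uint64_t ipl_eimr_map[] = {
 *		[IPL_NONE]      = RMIXL_SOFT_INT_MASK,
 *		[IPL_SOFTCLOCK] = RMIXL_SOFT_INT_MASK_1,
 *		[IPL_SOFTNET]   = 0,
 *		[IPL_VM]        = 0,
 *		[IPL_SCHED]     = 0,
 *		[IPL_DDB]       = 0,
 *		[IPL_HIGH]      = 0,
 *	};
 *
 * Entries are 8 bytes wide, so the code below indexes the table with
 * (ipl << MAP_SCALESHIFT), i.e. ipl * 8.
 */
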
	.text

/*
 * Initialize cp0 interrupt control for this CPU:
 * - set STATUS[IE]
 * - clear EIRR and EIMR
 * On return, all interrupts are disabled by EIMR.
 *
 * Henceforth STATUS[IE] is expected to remain set; it may be cleared
 * and restored only for temporary interrupt disablement.
 *
 * Call this before the first call to spl0 on this CPU.
 */
LEAF_NOPROFILE(rmixl_spl_init_cpu)
	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
	ori	t0, MIPS_SR_INT_IE		# set IE
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	COP0_SYNC
	dmtc0	zero, RMIXL_COP_0_EIMR		##  "       "   "   "  EIMR
	COP0_SYNC
	dmtc0	zero, RMIXL_COP_0_EIRR		## clear EIRR
	COP0_SYNC
	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE
	JR_HB_RA
END(rmixl_spl_init_cpu)
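
/*
 * Rough C equivalent of the sequence above (a sketch only; the
 * *_read/*_write accessor names are hypothetical):
 *
 *	void rmixl_spl_init_cpu(void)
 *	{
 *		uint32_t status = mips_cp0_status_read() | MIPS_SR_INT_IE;
 *
 *		mips_cp0_status_write(0);	// all ints off in STATUS
 *		rmixl_cp0_eimr_write(0);	// mask everything in EIMR
 *		rmixl_cp0_eirr_write(0);	// no stale requests in EIRR
 *		mips_cp0_status_write(status);	// IE on; EIMR still masks all
 *	}
 */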

/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for the spl(9) kernel interface.
 */

_splraise:
	/*
	 * a0 = EIMR bits requested to be set for this IPL
	 * a1 = this IPL (IPL_*)
	 * Can only use a0-a3 and v0-v1.
	 * The old IPL is returned in v0.
	 */
	dmfc0	a2, RMIXL_COP_0_EIMR		# save EIMR
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		##
	INT_L	v0, CPU_INFO_CPL(a3)		## get current IPL from cpu_info
	sltu	v1, a1, v0			## newipl < curipl?
	bnez	v1, 1f				## yes, don't lower; restore
	 nop
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a0, RMIXL_COP_0_EIMR		## set new EIMR
	JR_HB_RA
1:
	dmtc0	a2, RMIXL_COP_0_EIMR		## restore saved EIMR
	JR_HB_RA

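/*
 * What _splraise does, as a C sketch (hypothetical accessor names;
 * the real code is the assembly above):
 *
 *	int _splraise(uint64_t eimr_bits, int newipl)
 *	{
 *		const uint64_t saved = rmixl_cp0_eimr_read();
 *
 *		rmixl_cp0_eimr_write(0);	// block all while we decide
 *		struct cpu_info * const ci = curlwp->l_cpu;
 *		const int oldipl = ci->ci_cpl;
 *		if (newipl < oldipl) {
 *			rmixl_cp0_eimr_write(saved);	// never lower
 *		} else {
 *			ci->ci_cpl = newipl;
 *			rmixl_cp0_eimr_write(eimr_bits);
 *		}
 *		return oldipl;
 *	}
 */
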
STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL

	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	JR_HB_RA
END(_splsw_splx)

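/*
 * splx in C terms (a sketch with hypothetical accessors).  Unlike
 * _splraise, splx moves the IPL in either direction and returns
 * nothing:
 *
 *	void _splsw_splx(int ipl)
 *	{
 *		const uint64_t eimr = ipl_eimr_map[ipl];
 *
 *		rmixl_cp0_eimr_write(0);	// quiesce while switching
 *		curlwp->l_cpu->ci_cpl = ipl;	// publish the new level
 *		rmixl_cp0_eimr_write(eimr);	// unmask for that level
 *	}
 *
 * _splsw_spl0 below is the same sequence hard-wired to IPL_NONE.
 */
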
STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## save IPL in cpu_info
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	JR_HB_RA
END(_splsw_spl0)

STATIC_LEAF(_splsw_setsoftintr)
	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	dmfc0	v0, RMIXL_COP_0_EIRR		## load EIRR
	and	v0, RMIXL_EIRR_PRESERVE		## preserve clock & softints
	or	v0, a0				## set new softint bit
	dmtc0	v0, RMIXL_COP_0_EIRR		## store EIRR
	COP0_SYNC
	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
	JR_HB_RA
END(_splsw_setsoftintr)

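/*
 * The EIRR update above is a read-modify-write, sketched in C below
 * (hypothetical accessor names).  Note that only the timer and softint
 * bits (RMIXL_EIRR_PRESERVE) are written back; the other pending bits
 * are deliberately not rewritten, per the "preserve clock & softints"
 * comment above:
 *
 *	void _splsw_setsoftintr(uint32_t cause_bit)
 *	{
 *		const uint64_t bit = (cause_bit >> 8) & RMIXL_SOFT_INT_MASK;
 *		const uint64_t eimr = rmixl_cp0_eimr_read();
 *
 *		rmixl_cp0_eimr_write(0);	// block everything briefly
 *		rmixl_cp0_eirr_write((rmixl_cp0_eirr_read() &
 *		    RMIXL_EIRR_PRESERVE) | bit);
 *		rmixl_cp0_eimr_write(eimr);	// restore previous mask
 *	}
 *
 * _splsw_clrsoftintr below is the same dance with the target bit
 * removed from the preserve mask instead of OR'd in.
 */
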
STATIC_LEAF(_splsw_clrsoftintr)
	dsrl	a0, 8				# convert CAUSE bit to EIRR bit
	and	a0, RMIXL_SOFT_INT_MASK		# restrict to softint bits
	xor	a0, RMIXL_EIRR_PRESERVE		# a0 = preserve mask minus our bit
	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	dmfc0	v0, RMIXL_COP_0_EIRR		## load EIRR
	and	v0, a0				## apply preserve mask
	dmtc0	v0, RMIXL_COP_0_EIRR		## store EIRR
	COP0_SYNC
	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
	JR_HB_RA
END(_splsw_clrsoftintr)

STATIC_LEAF(_splsw_splraise)
	move	a1, a0
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)
	sll	a2, a0, MAP_SCALESHIFT
	PTR_ADDU v1, a2
	REG_L	a0, (v1)
	b	_splraise
	 nop
END(_splsw_splraise)

STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	COP0_SYNC
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## get cpu_info from curlwp
	li	a1, IPL_HIGH			##
	INT_L	v0, CPU_INFO_CPL(a3)		## old IPL for return value
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
						## interrupts remain disabled!
	j	ra				# return
	 nop
END(_splsw_splhigh)

STATIC_LEAF(_splsw_splddb)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB
	li	a1, IPL_DDB
	b	_splraise
	 nop
END(_splsw_splddb)

STATIC_LEAF(_splsw_splsched)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED
	li	a1, IPL_SCHED
	b	_splraise
	 nop
END(_splsw_splsched)

STATIC_LEAF(_splsw_splvm)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	li	a1, IPL_VM
	b	_splraise
	 nop
END(_splsw_splvm)

STATIC_LEAF(_splsw_splsoftserial)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL
	li	a1, IPL_SOFTSERIAL
	b	_splraise
	 nop
END(_splsw_splsoftserial)

STATIC_LEAF(_splsw_splsoftnet)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET
	li	a1, IPL_SOFTNET
	b	_splraise
	 nop
END(_splsw_splsoftnet)

STATIC_LEAF(_splsw_splsoftbio)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO
	li	a1, IPL_SOFTBIO
	b	_splraise
	 nop
END(_splsw_splsoftbio)

STATIC_LEAF(_splsw_splsoftclock)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK
	li	a1, IPL_SOFTCLOCK
	b	_splraise
	 nop
END(_splsw_splsoftclock)

STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, RMIXL_COP_0_EIRR		# get active interrupts
						# restrict to hard int bits:
	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 = ta1 & ~RMIXL_SOFT_INT_MASK
	xor	v1, ta1				#   "       "

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2				# apply to pending bits
	beq	v1, zero, 4f			# if nothing pending...
	 nop					# ... return IPL_NONE

	li	v0, IPL_VM			# ipl=IPL_VM
1:
	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
	and	ta2, v1				# any match to pending intrs?
	beq	ta2, zero, 2f			#  no, return ipl
	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT	#  point to next entry
	addiu	v0, 1				# ipl++
	move	v1, ta2				# update highest pending
	b	1b				# loop
	 nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending':
	 * - if a clock intr is requested, set MIPS_INT_MASK_5
	 * - if any other HW intr is requested, set MIPS_INT_MASK_1 as a
	 *   summary bit; the RMI evbmips_iointr function will sort through
	 *   the individual EIRR requests.
	 */
	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
	and	t1, v1, t2			# save count/compare intr request value
	nor	t0, zero, t2			# invert the mask
	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
	 li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
						#  for non-clock hw intrs
3:
	or	v1, t1				# combine clock and non-clock summary
	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)			# set a (fake) new pending mask
	j	ra				# and return highest ipl pending
	 nop
END(_splsw_splintr)

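/*
 * The scan above in C terms (a sketch; accessor names hypothetical).
 * It returns the highest IPL at which some pending hard interrupt is
 * still enabled, and synthesizes CAUSE-style IM bits for the caller:
 *
 *	int _splsw_splintr(uint32_t *imaskp)
 *	{
 *		uint64_t pend = rmixl_cp0_eirr_read() & ~RMIXL_SOFT_INT_MASK;
 *		uint32_t im = 0;
 *		int ipl = IPL_NONE;
 *
 *		pend &= ipl_eimr_map[IPL_SOFTSERIAL];
 *		if (pend != 0) {
 *			const uint64_t *ep = &ipl_eimr_map[IPL_VM];
 *			for (ipl = IPL_VM; (pend & *ep) != 0; ep++, ipl++)
 *				pend &= *ep;	// narrow to higher levels
 *			im = pend & RMIXL_INT_MASK_5;		// clock
 *			if (pend & ~(uint64_t)RMIXL_INT_MASK_5)
 *				im |= RMIXL_INT_MASK_1;		// summary
 *			im <<= MIPS_INT_MASK_SHIFT;
 *		}
 *		*imaskp = im;		// the (fake) pending mask
 *		return ipl;
 *	}
 */
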
STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level

	dmfc0	t0, RMIXL_COP_0_EIMR		# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT		# shift cpl to array index
	PTR_ADDU t2, t1
	REG_L	t3, (t2)			# load value
1:	bne	t0, t3, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splcheck)

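/*
 * Dispatch table consumed by the MIPS spl(9) machinery.  The entry
 * order must match the struct splsw function-pointer layout (believed
 * to be declared in <mips/locore.h>); a mismatch here would silently
 * wire spl calls to the wrong handlers.
 */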
	.rdata
	.globl _C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)