/*	$NetBSD: rmixl_spl.S,v 1.2 2011/02/20 07:48:37 matt Exp $	*/

/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"	/* which mips CPU levels do we support? */

#include <sys/cdefs.h>

#include <machine/param.h>
#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_spl.S,v 1.2 2011/02/20 07:48:37 matt Exp $");

#include "assym.h"


#define MAP_SCALESHIFT		3
#define RMIXL_SOFT_INT_MASK_1	(MIPS_SOFT_INT_MASK_1 >> 8)
#define RMIXL_SOFT_INT_MASK	(MIPS_SOFT_INT_MASK >> 8)
#define RMIXL_INT_MASK_1	(MIPS_INT_MASK_1 >> 8)
#define RMIXL_INT_MASK_5	(MIPS_INT_MASK_5 >> 8)
#define RMIXL_COP_0_EIRR	_(9), 6
#define RMIXL_COP_0_EIMR	_(9), 7

	.set noreorder

/*
 * Array of masks of bits to set in the EIMR when we go to a
 * given hardware interrupt priority level.
 * The softint bits in [IPL_NONE] and [IPL_SOFTCLOCK] should stay constant.
 * Hard intr bits are managed by rmixl_vec_establish and rmixl_vec_disestablish.
 */
	.data
	.globl	_C_LABEL(ipl_eimr_map)
	.type	_C_LABEL(ipl_eimr_map),@object
	.p2align MAP_SCALESHIFT
_C_LABEL(ipl_eimr_map):
	.dword	RMIXL_SOFT_INT_MASK	/* IPL_NONE */
	.dword	RMIXL_SOFT_INT_MASK_1	/* IPL_SOFT{CLOCK,BIO} */
	.dword	0			/* IPL_SOFT{NET,SERIAL} */
	.dword	0			/* IPL_VM */
	.dword	0			/* IPL_SCHED */
	.dword	0			/* IPL_DDB */
	.dword	0			/* IPL_HIGH */
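
/*
 * The map is indexed by IPL, scaled to 8 bytes per entry
 * (1 << MAP_SCALESHIFT), e.g. ipl_eimr_map + 8*IPL_VM.
 *
 * Illustrative sketch only (not the actual rmixl_vec_establish code,
 * and the helper name is made up): a hard-interrupt EIRR bit would be
 * enabled in every entry below the vector's own IPL, so that the
 * source stays masked once the CPU is at or above that level:
 *
 *	void
 *	ipl_eimr_map_add(uint64_t vecbit, int ipl)	// hypothetical
 *	{
 *		for (int i = 0; i < ipl; i++)
 *			ipl_eimr_map[i] |= vecbit;
 *	}
 */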

	.text

/*
 * initialize cp0 interrupt control for this cpu
 * - set STATUS[IE]
 * - clear EIRR and EIMR
 * on return, all interrupts are disabled by EIMR
 *
 * henceforth STATUS[IE] is expected to remain normally set
 * but may be cleared and restored for temporary interrupt disablement
 *
 * call before the first call to spl0 on this cpu
 */
LEAF_NOPROFILE(rmixl_spl_init_cpu)
	mfc0	t0, MIPS_COP_0_STATUS		# get STATUS
	ori	t0, MIPS_SR_INT_IE		# set IE
	mtc0	zero, MIPS_COP_0_STATUS		## disable all ints in STATUS
	dmtc0	zero, RMIXL_COP_0_EIMR		##  "       "   "   "  EIMR
	dmtc0	zero, RMIXL_COP_0_EIRR		## clear EIRR
	mtc0	t0, MIPS_COP_0_STATUS		## set STATUS | IE
	j	ra
	 nop
END(rmixl_spl_init_cpu)
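
/*
 * A minimal usage sketch, assuming a typical per-cpu bring-up order
 * (the actual MD call site is not shown in this file):
 *
 *	rmixl_spl_init_cpu();	// IE set, EIRR/EIMR cleared, all masked
 *	spl0();			// loads ipl_eimr_map[IPL_NONE] into the EIMR
 */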

/*
 * RMIXL processor interrupt control
 *
 * Used as building blocks for the spl(9) kernel interface.
 */
_splraise:
	/*
	 * a0 = EIMR bits to be set for this IPL
	 * a1 = this IPL (IPL_*)
	 * Can only use a0-a3 and v0-v1
	 */
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)		# get current IPL from cpu_info
	sltu	v1, a1, v0			# newipl < curipl
	bnez	v1, 2f				# yes, don't change.
	 nop
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## reload L_CPU in case we were
						##  preempted and moved...
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	a0, RMIXL_COP_0_EIMR		## set new EIMR
#ifdef PARANOIA
	j	ra
	 nop
#endif /* PARANOIA */
#ifdef PARANOIA
	dmfc0	v0, RMIXL_COP_0_EIMR		# get EIMR
1:	bne	a0, v0, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
2:	j	ra
	 nop
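
/*
 * C-level equivalent of _splraise, as a reference sketch only (the real
 * entry points are the _splsw_* stubs below; write_eimr() is a made-up
 * name standing in for the dmtc0 to the EIMR):
 *
 *	int
 *	splraise_sketch(uint64_t eimr_bits, int ipl)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		const int oipl = ci->ci_cpl;
 *		if (ipl >= oipl) {
 *			write_eimr(0);		// mask everything first
 *			ci = curcpu();		// re-fetch: we may have been
 *						//  preempted and moved
 *			ci->ci_cpl = ipl;
 *			write_eimr(eimr_bits);	// allow only this IPL's sources
 *		}
 *		return oipl;			// previous level, per spl(9)
 *	}
 */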

STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)		# does not get mcount hooks
	PTR_L	a3, L_CPU(MIPS_CURLWP)		# get cpu_info
	INT_L	a2, CPU_INFO_CPL(a3)		# get IPL from cpu_info
	beq	a0, a2, 2f			# if same, nothing to do
	 nop
#ifdef PARANOIA
	sltu	v0, a0, a2			# v0 = a0 < a2
99:	beqz	v0, 99b				# loop forever if false
	 nop
#endif /* PARANOIA */
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v1, (v1)			# load EIMR bits for this IPL
1:
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	INT_S	a0, CPU_INFO_CPL(a3)		## save IPL in cpu_info
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
#ifdef PARANOIA
	j	ra
	 nop
#endif /* PARANOIA */
2:
#ifdef PARANOIA
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	a1, (v1)			# load EIMR bits for this IPL
	dmfc0	v1, RMIXL_COP_0_EIMR		# get EIMR
3:	bne	a1, v1, 3b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splx)

STATIC_LEAF(_splsw_spl0)
	REG_L	v1, _C_LABEL(ipl_eimr_map) + 8*IPL_NONE
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)		## set ipl to 0
#else
#error IPL_NONE != 0
#endif
	dmtc0	v1, RMIXL_COP_0_EIMR		## set new EIMR
	j	ra
	 nop
END(_splsw_spl0)

LEAF_NOPROFILE(rmixl_spln)
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)	# get address of table
	sll	a2, a0, MAP_SCALESHIFT		# convert IPL to array offset
	PTR_ADDU v1, a2				# add to table addr
	REG_L	v0, (v1)			# load EIMR bits for this IPL
	j	ra
	 nop
END(rmixl_spln)

STATIC_LEAF(_splsw_setsoftintr)
	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	mfc0	v0, MIPS_COP_0_CAUSE		## load cause register
	or	v0, v0, a0			## set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		## store back
	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
	j	ra
	 nop
END(_splsw_setsoftintr)

STATIC_LEAF(_splsw_clrsoftintr)
	dmfc0	v1, RMIXL_COP_0_EIMR		# save EIMR register
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	mfc0	v0, MIPS_COP_0_CAUSE		## load cause register
	nor	a0, zero, a0			## bitwise inverse of a0
	and	v0, v0, a0			## clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		## store back
	dmtc0	v1, RMIXL_COP_0_EIMR		## restore EIMR
	j	ra
	 nop
END(_splsw_clrsoftintr)

STATIC_LEAF(_splsw_splraise)
	move	a1, a0
	PTR_LA	v1, _C_LABEL(ipl_eimr_map)
	sll	a2, a0, MAP_SCALESHIFT
	PTR_ADDU v1, a2
	REG_L	a0, (v1)
	b	_splraise
	 nop
END(_splsw_splraise)

STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	INT_L	v0, CPU_INFO_CPL(a3)		# get current IPL from cpu_info
	li	a1, IPL_HIGH			#
	beq	v0, a1, 1f			# don't do anything if IPL_HIGH
	 nop
	dmtc0	zero, RMIXL_COP_0_EIMR		## disable all interrupts
	PTR_L	a3, L_CPU(MIPS_CURLWP)		## reload L_CPU in case we were
						##  preempted and moved...
	INT_S	a1, CPU_INFO_CPL(a3)		## save IPL in cpu_info
						## interrupts remain disabled!
#ifdef PARANOIA
	j	ra				# return
	 nop
#endif /* PARANOIA */
1:
#ifdef PARANOIA
	dmfc0	v1, RMIXL_COP_0_EIMR		# load EIMR
2:	bnez	v1, 2b				# loop forever if not 0.
	 nop
#endif /* PARANOIA */
	j	ra				## return
	 nop
END(_splsw_splhigh)

STATIC_LEAF(_splsw_splddb)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_DDB
	li	a1, IPL_DDB
	b	_splraise
	 nop
END(_splsw_splddb)

STATIC_LEAF(_splsw_splsched)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SCHED
	li	a1, IPL_SCHED
	b	_splraise
	 nop
END(_splsw_splsched)

STATIC_LEAF(_splsw_splvm)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	li	a1, IPL_VM
	b	_splraise
	 nop
END(_splsw_splvm)

STATIC_LEAF(_splsw_splsoftserial)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTSERIAL
	li	a1, IPL_SOFTSERIAL
	b	_splraise
	 nop
END(_splsw_splsoftserial)

STATIC_LEAF(_splsw_splsoftnet)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTNET
	li	a1, IPL_SOFTNET
	b	_splraise
	 nop
END(_splsw_splsoftnet)

STATIC_LEAF(_splsw_splsoftbio)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTBIO
	li	a1, IPL_SOFTBIO
	b	_splraise
	 nop
END(_splsw_splsoftbio)

STATIC_LEAF(_splsw_splsoftclock)
	REG_L	a0, _C_LABEL(ipl_eimr_map) + 8*IPL_SOFTCLOCK
	li	a1, IPL_SOFTCLOCK
	b	_splraise
	 nop
END(_splsw_splsoftclock)

STATIC_LEAF(_splsw_splintr)
	dmfc0	ta1, RMIXL_COP_0_EIRR		# get active interrupts
						# restrict to hard int bits:
	and	v1, ta1, RMIXL_SOFT_INT_MASK	#  v1 = ta1 & ~RMIXL_SOFT_INT_MASK
	xor	v1, ta1				#   "       "

	li	v0, IPL_NONE
	PTR_LA	ta3, _C_LABEL(ipl_eimr_map) + 8*IPL_VM
	REG_L	ta2, -8(ta3)			# load 'enabled' bits for IPL_SOFTSERIAL
	and	v1, ta2				# apply to pending bits
	beq	v1, zero, 4f			# if nothing pending...
	 nop					# ... return IPL_NONE

	li	v0, IPL_VM			# ipl=IPL_VM
1:
	REG_L	ta2, (ta3)			# load 'enabled' bits for ipl
	and	ta2, v1				# any match to pending intrs?
	beq	ta2, zero, 2f			#  no, return ipl
	 PTR_ADDI ta3, 1 << MAP_SCALESHIFT	#  point to next entry
	addiu	v0, 1				# ipl++
	move	v1, ta2				# update highest pending
	b	1b				# loop
	 nop

2:
	/*
	 * Emulate the CP0_SR 'IM' bits in 'pending'
	 * - if clock intr is requested, set MIPS_INT_MASK_5
	 * - if other HW intr is requested, set MIPS_INT_MASK_1 as summary bit;
	 *   the RMI evbmips_iointr function will sort through
	 *   individual EIRR requests
	 */
	li	t2, RMIXL_INT_MASK_5		# load RMIXL_INT_MASK_5
	and	t1, v1, t2			# save count/compare intr request value
	nor	t0, zero, t2			# invert the mask
	and	v1, t0				# v1 &= ~RMIXL_INT_MASK_5
	beq	v1, zero, 3f			# no non-clock intrs? skip ahead
	 li	v1, RMIXL_INT_MASK_1		# use INT_MASK_1 as 'summary' bit
						#  for non-clock hw intrs
3:
	or	v1, t1				# combine clock and non-clock-summary
	sll	v1, MIPS_INT_MASK_SHIFT		# shift to emulate COP0_SR 'IM' bits
4:
	INT_S	v1, (a0)			# set a (fake) new pending mask
	j	ra				# and return highest ipl pending
	 nop
END(_splsw_splintr)
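
/*
 * Worked example of the 'IM' emulation above, as a C sketch (local names
 * are assumptions, not taken from the MI code); 'a0' is the pointer
 * argument the pending mask is stored through:
 *
 *	uint32_t im = 0;
 *	if (eirr_pending & RMIXL_INT_MASK_5)	// count/compare (clock)
 *		im |= MIPS_INT_MASK_5;
 *	if (eirr_pending & ~RMIXL_INT_MASK_5)	// any other hard intr
 *		im |= MIPS_INT_MASK_1;		// single summary bit; the RMI
 *						//  evbmips_iointr handler reads
 *						//  the EIRR for the real sources
 *	*imaskp = im;
 *
 * The sll by MIPS_INT_MASK_SHIFT in the assembly converts the RMIXL_*
 * (EIRR-relative) masks back into CP0 Cause/Status 'IM' bit positions,
 * since RMIXL_INT_MASK_* == MIPS_INT_MASK_* >> 8 by the defines above.
 */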

STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level

	dmfc0	t0, RMIXL_COP_0_EIMR		# get current EIMR

	PTR_LA	t2, _C_LABEL(ipl_eimr_map)
	sll	t1, MAP_SCALESHIFT		# shift cpl to array index
	PTR_ADDU t2, t1
	REG_L	t3, (t2)			# load value
1:	bne	t0, t3, 1b			# loop forever if not equal
	 nop
#endif /* PARANOIA */
	j	ra
	 nop
END(_splsw_splcheck)

	.rdata
	.globl _C_LABEL(rmixl_splsw)
_C_LABEL(rmixl_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)
