xref: /netbsd-src/sys/arch/mips/mips/mipsX_subr.S (revision ff23aff6ad91ceda282e396451a03e2d9c996146)
/*	$NetBSD: mipsX_subr.S,v 1.115 2022/05/31 08:43:14 andvar Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Jonathan R. Stone for
 *      the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Digital Equipment Corporation and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
 *
 *	@(#)locore.s	8.5 (Berkeley) 1/4/94
 */
118
#include <mips/asm.h>
RCSID("$NetBSD: mipsX_subr.S,v 1.115 2022/05/31 08:43:14 andvar Exp $")

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_mips3_wired.h"
#include "opt_multiprocessor.h"
#include "opt_vmswap.h"

#include <sys/cdefs.h>
#include <sys/endian.h>

#include <mips/cpuregs.h>
#if defined(MIPS3)
#include <mips/cache_r4k.h>
#endif
#include <mips/trap.h>

#include "assym.h"

#if defined(MIPS64_OCTEON)
#include "cpunode.h"			/* for NWDOG */
#else
#define NWDOG 0
#endif

#if defined(MIPS1) || defined(MIPS2)
#error use locore_mips1.S
#endif
149
/*
 * Status-register manipulation helpers.  On 64-bit ABIs MIPS_SR_KX must
 * stay set so XKSEG/XKPHYS addressing keeps working; on o32 the whole SR
 * can simply be cleared.
 */
#if defined(__mips_o32)
#define	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(reg) \
	mtc0 zero, MIPS_COP_0_STATUS
#define	SET_EXCEPTION_LEVEL(reg) \
	li reg, MIPS_SR_EXL; mtc0 reg, MIPS_COP_0_STATUS
#else
#define	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(reg) \
	li reg, MIPS_SR_KX; mtc0 reg, MIPS_COP_0_STATUS
#define	SET_EXCEPTION_LEVEL(reg) \
	li reg, MIPS_SR_EXL | MIPS_SR_KX; mtc0 reg, MIPS_COP_0_STATUS
#endif

/* Loongson2 kernel-entry workaround: clear BTB / disable RAS via CP0 diag. */
#ifdef MIPS3_LOONGSON2
#define KERN_ENTRY_ERRATA \
	li k0, MIPS_DIAG_BTB_CLEAR|MIPS_DIAG_RAS_DISABLE; mtc0 k0, MIPS_COP_0_DIAG
#else
#define KERN_ENTRY_ERRATA /* nothing */
#endif

#if MIPS1
#error This file can not be compiled with MIPS1 defined
#endif

#if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) != 1
# error  Only one of MIPS{3,32,32R2,64,64R2} can be defined
#endif

/* MIPSNN: any MIPS32/MIPS64 style CPU; MIPSNNR2: release-2 variants. */
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
#define MIPSNN
#if (MIPS32R2 + MIPS64R2) > 0
#define MIPSNNR2
#endif
#endif
183
/*
 * Use 64bit cp0 instructions?
 */
#if (MIPS3 + MIPS64 + MIPS64R2) > 0
#define	USE_64BIT_INSTRUCTIONS
#define	USE_64BIT_CP0_FUNCTIONS
#elif (MIPS32 + MIPS32R2) > 0
#ifdef _LP64
#error	MIPS32 and MIPS32R2 can't run 64-bit kernels.
#endif
#undef	USE_64BIT_INSTRUCTIONS
#undef	USE_64BIT_CP0_FUNCTIONS
#else
#error	One of MIPS{3,32,32R2,64,64R2} must be defined
#endif

#ifdef _LP64
#define	_SLLV		dsllv
#else
#define	_SLLV		sllv
#endif

/*
 * Width-neutral shift/extract/insert mnemonics plus the shift counts used
 * to strip the software-only (wired) top bits from a PTE before it is
 * written to EntryLo.
 */
#if defined(USE_64BIT_CP0_FUNCTIONS)
#define	_SLL		dsll
#define	_SRL		dsrl
#define	_SRA		dsra
#define	_EXT		dext
#define	_INS		dins
#define	WIRED_SHIFT	34
#define WIRED_POS	30
#define	PG_V_LSHIFT	(63 - V_MIPS3_PG_V)
#define	PG_V_RSHIFT	63
#else
#define	_SLL		sll
#define	_SRL		srl
#define	_SRA		sra
#define	_EXT		ext
#define	_INS		ins
#define	WIRED_SHIFT	2
#define WIRED_POS	30
#define	PG_V_LSHIFT	(31 - V_MIPS3_PG_V)
#define	PG_V_RSHIFT	31
#endif

/*
 * Use correct-sized m?c0/dm?c0 opcodes.
 */
#if defined(USE_64BIT_CP0_FUNCTIONS)
#define	_MFC0	dmfc0
#define	_MTC0	dmtc0
#else
#define	_MFC0	mfc0
#define	_MTC0	mtc0
#endif
238
239
/*
 * Set ISA level for the assembler.
 */
#if defined(MIPS3)
	.set	mips3
#endif

#if defined(MIPS32)
	.set	mips32
#endif

#if defined(MIPS32R2)
	.set	mips32r2
#endif

#if defined(MIPS64)
	.set	mips64
#endif

#if defined(MIPS64R2)
	.set	mips64r2
#endif
262
263
/*
 * CPP function renaming macros.  MIPSX(foo) expands to a per-CPU-family
 * symbol (e.g. mips32_foo), so this one source file can be assembled once
 * per supported ISA variant without symbol clashes.
 */

#if defined(MIPS3_LOONGSON2)
#define	MIPSX(name)	__CONCAT(loongson2_,name)
#elif defined(MIPS3)
#define	MIPSX(name)	__CONCAT(mips3_,name)
#endif

#if defined(MIPS32)
#define	MIPSX(name)	__CONCAT(mips32_,name)
#endif

#if defined(MIPS32R2)
#define	MIPSX(name)	__CONCAT(mips32r2_,name)
#endif

#if defined(MIPS64)
#define	MIPSX(name)	__CONCAT(mips64_,name)
#endif

#if defined(MIPS64R2)
#define	MIPSX(name)	__CONCAT(mips64r2_,name)
#endif

/* Indirection so VECTOR_END sees the expanded MIPSX() name. */
#define	_VECTOR_END(name)	VECTOR_END(name)
291
/*
 * XXX We need a cleaner way of handling the instruction hazards of
 * the various processors.  Here are the relevant rules for the QED 52XX:
 *	tlbw[ri]	-- two integer ops beforehand
 *	tlbr		-- two integer ops beforehand
 *	tlbp		-- two integer ops beforehand
 *	mtc0	[PageMask,EntryHi,Cp0] -- two integer ops afterwards
 *	changing JTLB	-- two integer ops afterwards
 *	mtc0	[EPC,ErrorEPC,Status] -- two int ops afterwards before eret
 *	config.k0	-- five int ops before kseg0, ckseg0 memref
 *
 * For the IDT R4000, some hazards are:
 *	mtc0/mfc0	one integer op before and after
 *	tlbp		-- one integer op afterwards
 * Obvious solution is to take least common denominator.
 *
 * For the Toshiba R5900, TX79:
 *	mtc0		following sync.p
 *	tlbw[ri], tlbp	following sync.p or eret
 * for those CPUs, define COP0_SYNC as sync.p
 */
313
314
/*
 *============================================================================
 *
 *  MIPS III ISA support, part 1: locore exception vectors.
 *  The following code is copied to the vector locations to which
 *  the CPU jumps in response to an exception or a TLB miss.
 *
 *============================================================================
 */
	.set	noreorder

/*
 * TLB handling data.   'CPUVAR(PMAP_SEG0TAB)' points to the base of the segment
 * table.   This is read and written by C code in mips_machdep.c.
 *
 * XXX: use linear mapped PTs at fixed VA in kseg2 in the future?
 */
	.text


/*
 * Some useful absolute labels for debugging: each equates a symbol to the
 * start of a MIPS address-space segment so they show up in backtraces/nm.
 */
.global	mips_kseg0
.equiv	mips_kseg0,		MIPS_KSEG0_START
.global	mips_kseg1
.equiv	mips_kseg1,		MIPS_KSEG1_START
.global	mips_kseg2
.equiv	mips_kseg2,		MIPS_KSEG2_START
.global	mips_xkphys
.equiv	mips_xkphys,		MIPS_XKPHYS_START
.global	mips_xkphys_u
.equiv	mips_xkphys_u,		MIPS_XKPHYS_UNCACHED
.global	mips_xkphys_cca3
.equiv	mips_xkphys_cca3,	MIPS_XKPHYS_CCA3
.global	mips_xkphys_cca4
.equiv	mips_xkphys_cca4,	MIPS_XKPHYS_CCA4
.global	mips_xkseg
.equiv	mips_xkseg,		MIPS_XKSEG_START
354
355
/*
 *----------------------------------------------------------------------------
 *
 * mipsN_tlb_miss --
 *
 *	Vector code for the TLB-miss exception vector 0x80000000
 *	on an r4000.
 *
 * This code is copied to the TLB exception vector address to
 * handle TLB translation misses.
 * NOTE: This code should be relocatable and max 32 instructions!!!
 *
 * Don't check for invalid pte's here. We load them as well and
 * let the processor trap to load the correct value after service.
 *
 * Loongson2 processors don't have separate tlbmiss and xtlbmiss handlers;
 * so we have to check for useg addresses in tlb_miss. The good news is that
 * we can use 64 instructions from tlbmiss instead of 32.
 *
 *----------------------------------------------------------------------------
 */
#ifdef MIPS3_LOONGSON2
/* this loongson2-specific part is almost a copy of xtlb_miss */
VECTOR(MIPSX(tlb_miss), unknown)
	.set	noat
	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
#ifdef _LP64
	nop					#01: nop
	PTR_SRL k1, k0, 31			#02: clear useg bits
	beqz	k1, 2f				#03: k1==0 -> useg address
	 PTR_SLL k1, k0, 2			#0x: clear top bits
	PTR_SRL k1, XSEGSHIFT+XSEGLENGTH+2	#04: clear valid bits
	bnez	k1, MIPSX(nopagetable)		#05: not legal address
	 PTR_SRA k0, XSEGSHIFT - PTR_SCALESHIFT #06: k0=seg offset (almost)
	bgez	k0, 1f				#07: k0<0 -> kernel fault
	 lui	k1, %hi(CPUVAR(PMAP_SEGTAB))	#08: k1=hi of segtab
	PTR_ADDI k1,  1 << PTR_SCALESHIFT	#09: kernel segtab entry
1:
	andi	k0, (NSEGPG-1)<<PTR_SCALESHIFT	#0a: k0=seg offset (mask 0x3)
	PTR_L	k1, %lo(CPUVAR(PMAP_SEGTAB))(k1)#0b: k1=segment tab
	PTR_ADDU k1, k0				#0c: k1=seg entry address
	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#0d: k0=bad address (again)
	PTR_L	k1, 0(k1)			#0e: k1=seg entry
	b	MIPSX(tlb_miss_common)		#0f
	 PTR_SRL k0, SEGSHIFT - PTR_SCALESHIFT	#10: k0=seg offset (almost)
#endif /* LP64 */
2: /* handle useg addresses */
	lui	k1, %hi(CPUVAR(PMAP_SEG0TAB))	#11: k1=hi of seg0tab
	dsrl	k0, 31				#12: clear low 31 bits
	bnez	k0, MIPSX(nopagetable)		#13: not legal address
	 PTR_L	k1, %lo(CPUVAR(PMAP_SEG0TAB))(k1)#14: k1=segment tab base
	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#15: k0=bad address (again)
	nop					#16
	b	MIPSX(tlb_miss_common)		#17
	 PTR_SRL k0, 1*(PGSHIFT-PTR_SCALESHIFT)+(PGSHIFT-2) #18: k0=seg offset (almost)
_VECTOR_END(MIPSX(tlb_miss))
/* dummy xtlb_miss (also a placeholder for tlb_miss_common) */
VECTOR(MIPSX(xtlb_miss), unknown)
	lui	k0, %hi(_C_LABEL(panic))	#00
	addiu	k0, %lo(_C_LABEL(panic))	#01
	lui	a0, %hi(loongson2_xtlb_miss_str) #02
	jr	k0				#03
	 addiu	a0, %lo(loongson2_xtlb_miss_str) #04
#else /* !MIPS3_LOONGSON2 */
VECTOR(MIPSX(tlb_miss), unknown)
	.set	noat
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
	lui	k1, %hi(CPUVAR(PMAP_SEG0TAB))	#01: k1=hi of seg0tab
#ifdef _LP64
	bltz	k0, MIPSX(nopagetable)		#02: k0<0 -> kernel access
#ifdef MIPSNNR2
	 _EXT	k0, k0, SEGSHIFT, SEGLENGTH	#03: k0=seg index
#else
	 PTR_SRA k0, SEGSHIFT - PTR_SCALESHIFT	#03: k0=seg offset (almost)
#endif
#else
	bgez	k0, 1f				#02: k0<0 -> kernel access
#ifdef MIPSNNR2
	 _EXT	k0, k0, SEGSHIFT, SEGLENGTH	#03: k0=seg index
#else
	 PTR_SRA k0, SEGSHIFT - PTR_SCALESHIFT	#03: k0=seg offset (almost)
#endif
	PTR_ADDU k1, 1 << PTR_SCALESHIFT	#04: fetch kernel segtab
1:
#endif
	PTR_L	k1, %lo(CPUVAR(PMAP_SEG0TAB))(k1)#05: k1=seg0tab
#endif /* !MIPS3_LOONGSON2 */
MIPSX(tlb_miss_common):
#ifdef _LP64
	beqz	k1, MIPSX(nopagetable)		#06: is there a pagetable?
#endif
	/* the next instruction might be in a delay slot */
#ifdef MIPSNNR2
	_INS	k1, k0, PTR_SCALESHIFT, SEGLENGTH #07: k1=seg entry address
#else
	andi	k0, (NSEGPG-1)<<PTR_SCALESHIFT	#07: k0=seg offset (mask 0x3)
	PTR_ADDU k1, k0				#08: k1=seg entry address
#endif
	PTR_L	k1, 0(k1)			#09: k1=seg entry
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	#0a: k0=bad address (again)
	beqz	k1, MIPSX(nopagetable)		#0b: ==0 -- no page table
	 # delay slot varies
#if (PGSHIFT & 1)
#ifdef MIPSNNR2
	 _EXT	k0, k0, PGSHIFT, PTPLENGTH	#0c: delay slot: page index
	_INS	k1, k0, PTPSHIFT, PTPLENGTH	#0d: k1=pte address
#else
	 PTR_SRL k0, PGSHIFT - PTPSHIFT		#0c: k0=VPN (aka va>>10)
	andi	k0, (NPTEPG-1) << PTPSHIFT	#0d: k0=page table offset
	PTR_ADDU k1, k0				#0e: k1=pte address
#endif
	INT_L	k0, 0(k1)			#0f: k0=lo0 pte
#ifdef MIPSNNR2
	_EXT	k0, k0, 0, WIRED_POS		#10: chop top 2 bits
#else
	_SLL	k0, WIRED_SHIFT			#10: chop top 2 bits (part 1a)
	_SRL	k0, WIRED_SHIFT			#11: chop top 2 bits (part 1b)
#endif
	INT_ADDU k1, k0, MIPS3_PG_NEXT		#12: k1=lo1 pte
#else /* (PGSHIFT & 1) == 0 */
	 PTR_SRL k0, PGSHIFT - PTPSHIFT		#0c: k0=VPN (aka va>>10) --ds--
	andi	k0, (NPTEPG/2-1) << (PTPSHIFT+1)#0d: k0=page table offset
	PTR_ADDU k1, k0				#0e: k1=pte address
#ifdef USE_64BIT_CP0_FUNCTIONS
	ld	k0, 0(k1)			#0f: load both ptes
#ifdef MIPSNNR2
	_EXT	k1, k0, 32*_QUAD_HIGHWORD, WIRED_POS	#10: get lo1 pte
	_EXT	k0, k0, 32*_QUAD_LOWWORD, WIRED_POS	#11: get lo0 pte
#else
	_SLL	k1, k0, WIRED_SHIFT - 32*_QUAD_HIGHWORD	#10: get lo1 pte (1a)
	_SLL	k0, k0, WIRED_SHIFT - 32*_QUAD_LOWWORD	#11: get lo0 pte (2a)
	_SRL	k0, WIRED_SHIFT			#12: chopped top 2 bits (1b)
	_SRL	k1, WIRED_SHIFT			#13: chopped top 2 bits (2b)
#endif
#else
	INT_L	k0, 0(k1)			#0f: k0=lo0 pte
	INT_L	k1, 4(k1)			#10: k1=lo1 pte
	_SLL	k0, WIRED_SHIFT			#11: chop top 2 bits (part 1a)
	_SLL	k1, WIRED_SHIFT			#12: chop top 2 bits (part 2a)
	_SRL	k0, WIRED_SHIFT			#13: chop top 2 bits (part 1b)
	_SRL	k1, WIRED_SHIFT			#14: chop top 2 bits (part 2b)
#endif
#endif /* PGSHIFT & 1 */
	_MTC0	k0, MIPS_COP_0_TLB_LO0		#15: lo0 is loaded
	_MTC0	k1, MIPS_COP_0_TLB_LO1		#16: lo1 is loaded
	sll	$0, $0, 3			#17: standard nop (ehb)
#ifdef MIPS3
	nop					#18: extra nop for QED5230
#endif
	tlbwr					#19: write to tlb
	sll	$0, $0, 3			#1a: standard nop (ehb)
#if (MIPS3 + MIPS64 + MIPS64R2) > 0
	lui	k1, %hi(CPUVAR(EV_TLBMISSES))	#1b: k1=hi of tlbmisses
	REG_L	k0, %lo(CPUVAR(EV_TLBMISSES))(k1) #1c
	REG_ADDU k0, 1				#1d
	REG_S	k0, %lo(CPUVAR(EV_TLBMISSES))(k1) #1e
#endif
	eret					#1f: return from exception
	.set	at
#ifdef MIPS3_LOONGSON2
_VECTOR_END(MIPSX(xtlb_miss))
#else
_VECTOR_END(MIPSX(tlb_miss))
#endif
520
#ifndef MIPS3_LOONGSON2
#if defined(USE_64BIT_CP0_FUNCTIONS)
/*
 * mipsN_xtlb_miss routine
 *
 *	Vector code for the XTLB-miss exception vector 0x80000080 on an r4000.
 *
 * This code is copied to the XTLB exception vector address to
 * handle TLB translation misses while in 64-bit mode.
 * NOTE: This code should be relocatable and max 32 instructions!!!
 *
 * Note that we do not support the full size of the PTEs, relying
 * on appropriate truncation/sign extension.
 *
 * Don't check for invalid pte's here. We load them as well and
 * let the processor trap to load the correct value after service.
 *
 * Loongson2 CPUs don't have separate tlbmiss and xtlbmiss, so we have
 * to check the address size here and branch to tlb_miss if needed.
 */
VECTOR(MIPSX(xtlb_miss), unknown)
	.set	noat
	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
#ifdef _LP64
	MFC0_HAZARD				#01: nop
	PTR_SLL	k1, k0, 2			#02: clear top bits
	PTR_SRL k1, XSEGSHIFT+XSEGLENGTH+2	#03: clear valid bits
	bnez	k1, MIPSX(nopagetable)		#04: not legal address
	 PTR_SRA k0, XSEGSHIFT - PTR_SCALESHIFT	#05: k0=seg offset (almost)
	bgez	k0, 1f				#06: k0<0 -> kernel fault
	 lui	k1, %hi(CPUVAR(PMAP_SEGTAB))	#07: k1=hi of segtab
	PTR_ADDU k1, 1 << PTR_SCALESHIFT	#08: advance to kernel segtab
1:
	PTR_L	k1, %lo(CPUVAR(PMAP_SEGTAB))(k1)#09: k1=segment tab
	andi	k0, (NSEGPG-1)<<PTR_SCALESHIFT	#0a: k0=seg offset (mask 0x3)
	PTR_ADDU k1, k0				#0b: k1=seg entry address
	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#0c: k0=bad address (again)
	PTR_L	k1, 0(k1)			#0d: k1=seg entry address
#else
	lui	k1, %hi(CPUVAR(PMAP_SEG0TAB))	#02: k1=hi of seg0tab
	bgez	k0, 1f				#03: k0<0 -> kernel access
	 dsra	k0, 31				#04: clear low 31 bits
	PTR_ADDU k1, 1 << PTR_SCALESHIFT	#05
1:
	PTR_ADDU k0, 1				#06
	sltiu	k0, k0, 2			#07
	beqz	k0, MIPSX(nopagetable)		#08: not legal address
	 nop					#09
	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#0a: k0=bad address (again)
	PTR_L	k1, %lo(CPUVAR(PMAP_SEG0TAB))(k1)#0b: k1=segment tab base
#endif /* _LP64 */
	b	MIPSX(tlb_miss_common)		#0e/0c
#ifdef MIPSNNR2
	 _EXT	k0, k0, SEGSHIFT, SEGLENGTH	#0f/0d: k0=seg index
#else
	 PTR_SRL k0, SEGSHIFT - PTR_SCALESHIFT	#0f/0d: k0=seg offset (almost)
#endif
	.set	at
_VECTOR_END(MIPSX(xtlb_miss))
#else
	.space	128
#endif /* USE_64BIT_CP0_FUNCTIONS */
#endif /* !MIPS3_LOONGSON2 */
584
/*
 * Vector to real handler in KSEG1 (uncached), since a cache error means
 * cached kseg0 fetches cannot be trusted: convert the handler's address
 * to its KSEG1 alias before jumping to it.
 */
VECTOR(MIPSX(cache), unknown)
	PTR_LA	k0, _C_LABEL(MIPSX(cache_exception))	#00
	li	k1, MIPS_PHYS_MASK			#01
	and	k0, k1					#02: k0=physical address
	li	k1, MIPS_KSEG1_START			#03
	or	k0, k1					#04: k0=KSEG1 alias
	lui	k1, %hi(CPUVAR(CURLWP))			#05: k1=hi of curlwp
	jr	k0					#06
	 PTR_L	k1, %lo(CPUVAR(CURLWP))(k1)		#07: k1=lo of curlwp
_VECTOR_END(MIPSX(cache))
598
/*
 *----------------------------------------------------------------------------
 *
 * mipsN_exception
 *
 *	Vector code for the general exception vector 0x80000180
 *	on an r4000 or r4400.
 *
 * This code is copied to the general exception vector address to
 * handle most exceptions.
 * NOTE: This code should be relocatable and max 32 instructions!!!
 *----------------------------------------------------------------------------
 */
VECTOR(MIPSX(exception), unknown)
/*
 * Find out what mode we came from and jump to the proper handler.
 */
	.set	noat
	mfc0	k0, MIPS_COP_0_STATUS		#00: get the status register
	mfc0	k1, MIPS_COP_0_CAUSE		#01: get the cause register
	and	k0, MIPS3_SR_KSU_USER		#02: test for user mode
						#    sneaky but the bits are
						#    with us........
	sll	k0, 3				#03: shift user bit for cause index
	and	k1, MIPS3_CR_EXC_CODE		#04: mask out the cause bits.
	or	k1, k0				#05: change index to user table
#if PTR_SCALESHIFT > MIPS_CR_EXC_CODE_SHIFT
	PTR_SLL	k1, PTR_SCALESHIFT - MIPS_CR_EXC_CODE_SHIFT
#endif
	PTR_LA	k0, MIPSX(excpt_sw)		#06: get base of the jump table
	PTR_ADDU k0, k1				#08: get the address of the
						#  function entry.  Note that
						#  the cause is already
						#  shifted left by 2 bits so
						#  we don't have to shift.
	PTR_L	k0, 0(k0)			#09: get the function address
	lui	k1, %hi(CPUVAR(CURLWP))		#0a: k1=hi of curlwp
	jr	k0				#0b: jump to the function
	 PTR_L	k1, %lo(CPUVAR(CURLWP))(k1)	#0c: k1=lo of curlwp
	nop					#0d
	nop					#0e
#ifndef _LP64
	nop					#0f
#endif
	.p2align 4
MIPSX(nopagetable):
	lui	k1, %hi(CPUVAR(CURLWP))		#10: k1=hi of curlwp
	j	MIPSX(slowfault)		#11: no page table present
	 PTR_L	k1, %lo(CPUVAR(CURLWP))(k1)	#12: k1=lo of curlwp
	nop					#13: branch delay slot
	.set	at
_VECTOR_END(MIPSX(exception))
651
/*
 * Handle MIPS32/MIPS64 style interrupt exception vector.
 */
VECTOR(MIPSX(intr), unknown)
	.set	noat
	mfc0	k0, MIPS_COP_0_STATUS		#00: get the status register
	MFC0_HAZARD				#01: stall
	and	k0, k0, MIPS3_SR_KSU_USER	#02: test for user mode
	bnez	k0, 1f				#03: yep, do it
	 nop					#04: branch delay
	j	MIPSX(kern_intr)		#05: nope, kernel intr
1:
	lui	k1, %hi(CPUVAR(CURLWP))		#06: k1=hi of curlwp
	j	MIPSX(user_intr)		#07: user intr
	 PTR_L	k1, %lo(CPUVAR(CURLWP))(k1)	#08: k1=lo of curlwp
	.set	at
_VECTOR_END(MIPSX(intr))
669
/*----------------------------------------------------------------------------
 *
 * mipsN_slowfault
 *
 * Alternate entry point into the mipsN_user_gen_exception or
 * mipsN_kern_gen_exception, when the UTLB miss handler couldn't
 * find a TLB entry.
 *
 * Find out what mode we came from and call the appropriate handler.
 *
 *----------------------------------------------------------------------------
 */
	.org	((. + 31) & ~31) + 12
MIPSX(slowfault):
	.set	noat
	mfc0	k0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	and	k0, MIPS3_SR_KSU_USER
	bnez	k0, _C_LABEL(MIPSX(user_gen_exception))
	 nop
	.set	at
/*
 * Fall through ...
 */
694
/*
 * mipsN_kern_gen_exception
 *
 * Handle an exception during kernel mode.
 * Build trapframe on stack to hold interrupted kernel context, then
 * call trap() to process the condition.
 *
 * trapframe is pointed to by the 5th arg and a dummy sixth argument is used
 * to avoid alignment problems
 * {
 *	register_t cf_args[4 + 1];
 *	register_t cf_pad;		(for 8 word alignment)
 *	register_t cf_sp;
 *	register_t cf_ra;
 *	struct reg cf_tf;
 * };
 */
NESTED_NOPROFILE(MIPSX(kern_gen_exception), KERNFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
#if defined(PARANOIA)
	/* Sanity: sp must lie inside the current lwp's kernel stack. */
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	slt	k0, k0, sp		# k0 = L_PCB(MIPS_CURLWP) < sp
1:	beqz	k0, 1b			# loop forever if false
	 nop
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	PTR_ADDU k0, USPACE
	slt	k0, sp, k0		# k0 = sp < L_PCB(MIPS_CURLWP) + USPACE
2:	beqz	k0, 2b			# loop forever if false
	 nop
#endif /* PARANOIA */
/*
 * Save the relevant kernel registers onto the stack.
 * We don't need to save s0 - s8, sp and gp because
 * the compiler does it for us.
 */
	PTR_SUBU sp, KERNFRAME_SIZ
	REG_S	AT, TF_BASE+TF_REG_AST(sp)
	REG_S	v0, TF_BASE+TF_REG_V0(sp)
	REG_S	v1, TF_BASE+TF_REG_V1(sp)
	mflo	v0
	mfhi	v1
	REG_S	a0, TF_BASE+TF_REG_A0(sp)
	REG_S	a1, TF_BASE+TF_REG_A1(sp)
	REG_S	a2, TF_BASE+TF_REG_A2(sp)
	REG_S	a3, TF_BASE+TF_REG_A3(sp)
	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
	REG_S	t0, TF_BASE+TF_REG_T0(sp)
	REG_S	t1, TF_BASE+TF_REG_T1(sp)
	REG_S	t2, TF_BASE+TF_REG_T2(sp)
	REG_S	t3, TF_BASE+TF_REG_T3(sp)
	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
#ifdef DDB
	REG_S	t8, TF_BASE+TF_REG_T8(sp)	# is MIPS_CURLWP
#endif
	REG_S	t9, TF_BASE+TF_REG_T9(sp)
	REG_S	ra, TF_BASE+TF_REG_RA(sp)
	REG_S	a0, TF_BASE+TF_REG_SR(sp)
	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
	REG_S	a3, TF_BASE+TF_REG_EPC(sp)
	REG_S	a1, TF_BASE+TF_REG_CAUSE(sp)
#if defined(DDB) || defined(KGDB)
	REG_S	s0, TF_BASE+TF_REG_S0(sp)
	REG_S	s1, TF_BASE+TF_REG_S1(sp)
	REG_S	s2, TF_BASE+TF_REG_S2(sp)
	REG_S	s3, TF_BASE+TF_REG_S3(sp)
	REG_S	s4, TF_BASE+TF_REG_S4(sp)
	REG_S	s5, TF_BASE+TF_REG_S5(sp)
	REG_S	s6, TF_BASE+TF_REG_S6(sp)
	REG_S	s7, TF_BASE+TF_REG_S7(sp)
	PTR_ADDU v0, sp, KERNFRAME_SIZ
	REG_S	v0, TF_BASE+TF_REG_SP(sp)
	REG_S	s8, TF_BASE+TF_REG_S8(sp)
	REG_S	gp, TF_BASE+TF_REG_GP(sp)
#endif
#if defined(__mips_o32) || defined(__mips_o64)
	PTR_ADDU v0, sp, TF_BASE
	REG_S	v0, KERNFRAME_ARG5(sp)		# 5th arg is p. to trapframe
#endif
#if defined(__mips_n32) || defined(__mips_n64)
	PTR_ADDU a4, sp, TF_BASE		# 5th arg is p. to trapframe
#endif
#ifdef PARANOIA
	/*
	 * save PPL in trapframe
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level
	INT_S	t1, TF_BASE+TF_PPL(sp)		# save priority level
#endif /* PARANOIA */

#if defined(__mips_o32) && (defined(DDB) || defined(DEBUG) || defined(KGDB))
	PTR_ADDU v0, sp, KERNFRAME_SIZ
	REG_S	v0, KERNFRAME_SP(sp)
#endif

#ifdef PARANOIA
	/*
	 * Verify our existing interrupt level.
	 */
	jal	_C_LABEL(splcheck)
	 nop
#endif /* PARANOIA */

	/*
	 * We need to find out if this was due to a T_BREAK and if so
	 * turn off interrupts in addition to clearing the exception level.
	 */
	li	v1, MIPS_SR_INT_IE << T_BREAK	# make a mask of T_BREAK
	srl	t0, a1, MIPS_CR_EXC_CODE_SHIFT	# shift exc code to low 5 bits
	srl	v1, t0				# shift break mask using it
	and	v1, MIPS_SR_INT_IE		# restrict to IE bit
	or	v1, MIPS_SR_EXL			# or in EXL bit
	and	v1, a0				# extract bits from status
	xor	v0, a0, v1			# generate new status
	mtc0	v0, MIPS_COP_0_STATUS		# update.
	COP0_SYNC
#ifdef MIPS3
	nop
	nop
	nop
#endif
	/*
	 * Call the trap handler.
	 */
	jal	_C_LABEL(trap)
	 REG_S	a3, KERNFRAME_RA(sp)		# for debugging

	/*
	 * Restore registers and return from the exception.
	 */
	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
	COP0_SYNC
#ifdef MIPS3
	nop					# 3 nop delay
	nop
	nop
#endif
	REG_L	a0, TF_BASE+TF_REG_SR(sp)	# get SR with EXL set
	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
	COP0_SYNC

	/*
	 * Start of common kernel exception return code for both
	 * mipsN_kern_gen_exception and mipsN_kern_intr.
	 */
MIPSX(kern_return):
	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
	REG_L	k1, TF_BASE+TF_REG_EPC(sp)	# might be changed inside trap
	mtlo	t0
	mthi	t1

#ifdef PARANOIA
	INT_L	t2, TF_BASE+TF_PPL(sp)		# get saved priority level
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level
11:	bne	t2, t1, 11b			# loop forever if unequal
	 nop

	/*
	 * Verify our existing interrupt level.
	 */
	jal	_C_LABEL(splcheck)
	 nop
#endif /* PARANOIA */

	/*
	 * Check for kernel restartable atomic sequences.
	 */
	PTR_LA	t0, _C_LABEL(_lock_ras_start)
	li	t1, -MIPS_LOCK_RAS_SIZE
	and	t1, k1
	bne	t1, t0, 1f			# exception PC in RAS area?
	 nop
	jal	_C_LABEL(_restart_lock_ras)	# fix the pc (k1)
	 nop
1:

	_MTC0	k1, MIPS_COP_0_EXC_PC		# set return address
	COP0_SYNC

	REG_L	AT, TF_BASE+TF_REG_AST(sp)
	REG_L	v0, TF_BASE+TF_REG_V0(sp)
	REG_L	v1, TF_BASE+TF_REG_V1(sp)
	REG_L	a0, TF_BASE+TF_REG_A0(sp)
	REG_L	a1, TF_BASE+TF_REG_A1(sp)
	REG_L	a2, TF_BASE+TF_REG_A2(sp)
	REG_L	a3, TF_BASE+TF_REG_A3(sp)
	REG_L	t0, TF_BASE+TF_REG_T0(sp)
	REG_L	t1, TF_BASE+TF_REG_T1(sp)
	REG_L	t2, TF_BASE+TF_REG_T2(sp)
	REG_L	t3, TF_BASE+TF_REG_T3(sp)
	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
	#REG_L	t8, TF_BASE+TF_REG_T8(sp)	# is MIPS_CURLWP
	REG_L	t9, TF_BASE+TF_REG_T9(sp)
	REG_L	ra, TF_BASE+TF_REG_RA(sp)
#ifdef DDBnotyet
	REG_L	s0, TF_BASE+TF_REG_S0(sp)
	REG_L	s1, TF_BASE+TF_REG_S1(sp)
	REG_L	s2, TF_BASE+TF_REG_S2(sp)
	REG_L	s3, TF_BASE+TF_REG_S3(sp)
	REG_L	s4, TF_BASE+TF_REG_S4(sp)
	REG_L	s5, TF_BASE+TF_REG_S5(sp)
	REG_L	s6, TF_BASE+TF_REG_S6(sp)
	REG_L	s7, TF_BASE+TF_REG_S7(sp)
	REG_L	s8, TF_BASE+TF_REG_S8(sp)
#endif
	PTR_ADDU sp, KERNFRAME_SIZ
	eret					# return to interrupted point
	.set	at
END(MIPSX(kern_gen_exception))
917
918#if NWDOG > 0 || defined(DDB)
919/*
920 * mipsN_kern_nonmaskable_intr
921 *
922 * Handle a NMI during kernel mode.
923 * Build trapframe on stack to hold interrupted kernel context, then
924 * call trap() to process the condition.
925 *
926 * trapframe is pointed to by the 5th arg and a dummy sixth argument is used
927 * to avoid alignment problems
928 * {
929 *	register_t cf_args[4 + 1];
930 *	register_t cf_pad;		(for 8 word alignment)
931 *	register_t cf_sp;
932 *	register_t cf_ra;
933 *	struct reg cf_tf;
934 * };
935 */
NESTED_NOPROFILE(MIPSX(kern_nonmaskable_intr), KERNFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
#if defined(PARANOIA)
	/*
	 * Sanity checks: hang (endless loop) if sp is not within the
	 * current lwp's USPACE so the wedged state can be inspected
	 * from a debugger rather than corrupting memory.
	 */
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	slt	k0, k0, sp		# k0 = L_PCB(MIPS_CURLWP) < sp
1:	beqz	k0, 1b			# loop forever if false
	 nop

	PTR_L	k0, L_PCB(MIPS_CURLWP)
	PTR_ADDU k0, USPACE
	slt	k0, sp, k0		# k0 = sp < L_PCB(MIPS_CURLWP) + USPACE
2:	beqz	k0, 2b			# loop forever if false
	 nop
#endif /* PARANOIA */

/*
 * Save the relevant kernel registers onto the NMI stack.
 * We save s0 - s8, sp and gp so DDB can see them.
 */
	/*
	 * NOTE(review): k0 is used below as the cpu_info pointer
	 * (CPU_INFO_NMI_STACK, CPU_INFO_CURLWP), but under PARANOIA it
	 * was just clobbered by the slt tests above -- presumably the
	 * NMI vector establishes k0 = curcpu() before jumping here;
	 * confirm against the vector code.
	 */
	move	k1, sp				# save for later
	PTR_L	sp, CPU_INFO_NMI_STACK(k0)	# get NMI stack
	REG_S	k1, TF_BASE+TF_REG_SP(sp)	# interrupted sp
	REG_S	AT, TF_BASE+TF_REG_AST(sp)
	REG_S	v0, TF_BASE+TF_REG_V0(sp)
	REG_S	v1, TF_BASE+TF_REG_V1(sp)
	mflo	v0
	mfhi	v1

	/* CP0 reads are interleaved with the stores to hide hazards. */
	REG_S	a0, TF_BASE+TF_REG_A0(sp)
	REG_S	a1, TF_BASE+TF_REG_A1(sp)
	REG_S	a2, TF_BASE+TF_REG_A2(sp)
	REG_S	a3, TF_BASE+TF_REG_A3(sp)
	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
	REG_S	t0, TF_BASE+TF_REG_T0(sp)
	REG_S	t1, TF_BASE+TF_REG_T1(sp)
	REG_S	t2, TF_BASE+TF_REG_T2(sp)
	REG_S	t3, TF_BASE+TF_REG_T3(sp)
	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
	REG_S	t8, TF_BASE+TF_REG_T8(sp)	# is MIPS_CURLWP
	REG_S	t9, TF_BASE+TF_REG_T9(sp)
	REG_S	ra, TF_BASE+TF_REG_RA(sp)
	REG_S	a0, TF_BASE+TF_REG_SR(sp)
	_MFC0	a3, MIPS_COP_0_ERROR_PC		# 4th arg is exception PC
	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
	REG_S	a3, TF_BASE+TF_REG_EPC(sp)
	REG_S	a1, TF_BASE+TF_REG_CAUSE(sp)
	REG_S	s0, TF_BASE+TF_REG_S0(sp)
	REG_S	s1, TF_BASE+TF_REG_S1(sp)
	REG_S	s2, TF_BASE+TF_REG_S2(sp)
	REG_S	s3, TF_BASE+TF_REG_S3(sp)
	REG_S	s4, TF_BASE+TF_REG_S4(sp)
	REG_S	s5, TF_BASE+TF_REG_S5(sp)
	REG_S	s6, TF_BASE+TF_REG_S6(sp)
	REG_S	s7, TF_BASE+TF_REG_S7(sp)
	//PTR_ADDU v0, sp, KERNFRAME_SIZ
	REG_S	s8, TF_BASE+TF_REG_S8(sp)
	REG_S	gp, TF_BASE+TF_REG_GP(sp)
	PTR_L	t8, CPU_INFO_CURLWP(k0)		# t8 = MIPS_CURLWP
#if defined(__mips_o32) || defined(__mips_o64)
	/* O32/O64 pass the 5th argument on the stack. */
	PTR_ADDU v0, sp, TF_BASE
	REG_S	v0, KERNFRAME_ARG5(sp)		# 5th arg is p. to trapframe
#endif
#if defined(__mips_n32) || defined(__mips_n64)
	PTR_ADDU a4, sp, TF_BASE		# 5th arg is p. to trapframe
#endif

	/*
	 * save PPL in trapframe
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)		# get current priority level
	INT_S	t1, TF_BASE+TF_PPL(sp)		# save priority level

#if defined(__mips_o32) && (defined(DDB) || defined(DEBUG) || defined(KGDB))
	PTR_ADDU v0, sp, KERNFRAME_SIZ
	REG_S	v0, KERNFRAME_SP(sp)
#endif

#if defined(PARANOIA_SPL)
	/*
	 * Verify our existing interrupt level.
	 */
	jal	_C_LABEL(splcheck)
	 nop
#endif /* PARANOIA_SPL */

	/*
	 * Clear exception level.
	 */
	li	v0, ~(MIPS_SR_EXL|MIPS3_SR_NMI)
	and	v0, a0				# zero NMI/EXL bits
	mtc0	v0, MIPS_COP_0_STATUS		# update.
	COP0_SYNC
#ifdef MIPS3
	nop					# MIPS3 needs extra CP0 hazard padding
	nop
	nop
#endif

	/*
	 * Call the trap handler.
	 */
	jal	_C_LABEL(trap)
	 REG_S	a3, KERNFRAME_RA(sp)		# for debugging

	/*
	 * Wait for a reset
	 */
1:	wait
	b	1b
	 nop
	.set	at
END(MIPSX(kern_nonmaskable_intr))
1056#endif /* NWDOG > 0 || DDB */
1057
1058/*
1059 * mipsN_kern_intr
1060 *
1061 * Handle an interrupt from kernel mode.
1062 * Build kernframe on stack to hold interrupted kernel context, then
1063 * call cpu_intr() to process it.
1064 *
1065 */
1066	.p2align 5
NESTED_NOPROFILE(MIPSX(kern_intr), KERNFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
#ifdef PARANOIA
	/*
	 * Sanity checks: hang (endless loop) if sp is outside the
	 * current lwp's USPACE or the interrupt depth is implausible,
	 * so the state can be inspected from a debugger.
	 */
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	slt	k0, k0, sp			# k0 = L_PCB(MIPS_CURLWP) < sp
1:	beqz	k0, 1b				# loop forever if false
	 nop
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	PTR_ADDU k0, USPACE
	slt	k0, sp, k0			# k0 = sp < L_PCB(MIPS_CURLWP) + USPACE
2:	beqz	k0, 2b				# loop forever if false
	 nop
	PTR_L	k0, L_CPU(MIPS_CURLWP)
	INT_L	k0, CPU_INFO_IDEPTH(k0)		# grab interrupt depth
	sltu	k0, k0, 3			# must be < 3
3:	beqz	k0, 3b				# loop forever if false
	 nop
#endif
	/*
	 * Save the relevant kernel registers onto the stack.  We don't need
	 * to save s0 - s8, sp, and gp because the compiler does it for us.
	 * But we use s0-s2 so need to save them.
	 */
	PTR_SUBU sp, KERNFRAME_SIZ
	REG_S	AT, TF_BASE+TF_REG_AST(sp)
	REG_S	v0, TF_BASE+TF_REG_V0(sp)
	REG_S	v1, TF_BASE+TF_REG_V1(sp)
	mflo	v0
	mfhi	v1
	REG_S	a0, TF_BASE+TF_REG_A0(sp)
	REG_S	a1, TF_BASE+TF_REG_A1(sp)
	REG_S	a2, TF_BASE+TF_REG_A2(sp)
	REG_S	a3, TF_BASE+TF_REG_A3(sp)
	REG_S	t0, TF_BASE+TF_REG_T0(sp)
	REG_S	t1, TF_BASE+TF_REG_T1(sp)
	REG_S	t2, TF_BASE+TF_REG_T2(sp)
	REG_S	t3, TF_BASE+TF_REG_T3(sp)
	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
	REG_S	s0, TF_BASE+TF_REG_S0(sp)	# used for saved ipl/idepth
	REG_S	s1, TF_BASE+TF_REG_S1(sp)	# used for initial status
	mfc0	s1, MIPS_COP_0_STATUS
	REG_S	s2, TF_BASE+TF_REG_S2(sp)	# used for cpu_info
#ifdef DDB
	REG_S	t8, TF_BASE+TF_REG_T8(sp)	# already contains MIPS_CURLWP
#endif
	REG_S	t9, TF_BASE+TF_REG_T9(sp)
	REG_S	ra, TF_BASE+TF_REG_RA(sp)
	REG_S	s1, TF_BASE+TF_REG_SR(sp)
	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
/*
 * Call the interrupt handler.
 */
	_MFC0	ta0, MIPS_COP_0_EXC_PC		# grab exception PC
	PTR_L	s2, L_CPU(MIPS_CURLWP)		# delay slot
	REG_S	ta0, TF_BASE+TF_REG_EPC(sp)	# and save it

#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	REG_S	ta0, KERNFRAME_RA(sp)		# for debugging
#endif

#ifdef PARANOIA
	INT_L	s0, CPU_INFO_CPL(s2)
	INT_S	s0, TF_BASE+TF_PPL(sp)		# save priority level

	/*
	 * Verify the current interrupt level
	 */
	jal	_C_LABEL(splcheck)
	 nop
#endif /* PARANOIA */

	/*
	 * We first need to get to IPL_HIGH so that interrupts are masked.
	 */
	jal	_C_LABEL(splhigh_noprof)
	 nop

#ifdef PARANOIA
	/* splhigh's return value (old ipl) must match the saved cpl. */
1:	bne	s0, v0, 1b
	 nop
#endif /* PARANOIA */

	/*
	 * Pack the previous ipl and the previous interrupt depth into
	 * s0 so both survive the calls below in a single saved register.
	 */
	sll	s0, v0, 8			# remember previous priority
						# low 8 bits used for idepth

#ifdef PARANOIA
	/*
	 * Interrupts at IPL_HIGH are not allowed.
	 */
	li	v1, IPL_HIGH
	sltu	t0, v0, v1
2:	beqz	t0, 2b
	 nop
#endif /* PARANOIA */

	INT_L	t1, CPU_INFO_IDEPTH(s2)		# we need to inc. intr depth
	or	s0, t1				#   save old interrupt depth
	INT_ADDU t1, 1
	INT_S	t1, CPU_INFO_IDEPTH(s2)		#   store new interrupt depth

	/*
	 * Now we can clear exception level since no interrupts can be delivered
	 */
	mfc0	v1, MIPS_COP_0_STATUS
	MFC0_HAZARD
	and	v0, v1, MIPS_SR_EXL		# grab exception level bit
	xor	v0, v1				# clear it
	mtc0	v0, MIPS_COP_0_STATUS		# write new status
	COP0_SYNC

	/*
	 * Now hard interrupts can be processed.
	 */
	move	a1, ta0				# 2nd arg is exception PC
	move	a2, s1				# 3rd arg is status
	jal	_C_LABEL(cpu_intr)		# cpu_intr(ppl, pc, status)
	 srl	a0, s0, 8			# 1st arg is previous pri level

	and	t1, s0, 0xff			# get previous interrupt depth
	INT_S	t1, CPU_INFO_IDEPTH(s2)		# restore its previous value

#if defined(PARANOIA)
	mfc0	t0, MIPS_COP_0_STATUS		# verify INT_IE is still set
	MFC0_HAZARD
	and	t0, MIPS_SR_INT_IE
#if defined(MIPSNN)
	teqi	t0, 0
#else
3:	beqz	t0, 3b
	 nop
#endif
#endif /* PARANOIA */

#ifdef __HAVE_FAST_SOFTINTS
	and	a0, s1, MIPS_SOFT_INT_MASK	# were softints enabled?
	beqz	a0, 4f				#   nope
	 nop
	mfc0	v0, MIPS_COP_0_CAUSE		# grab the pending softints
	MFC0_HAZARD
	and	a0, v0				# are softints pending
	beqz	a0, 4f				#   nope
	 nop

	jal	_C_LABEL(softint_process)	# softint_process(pending)
	 nop

#ifdef __HAVE_PREEMPTION
	/* Kernel preemption is only considered when returning to IPL_NONE. */
	srl	v1, s0, 8			# get saved priority level
	bnez	v1, 4f				# branch if not at IPL_NONE
	 nop
	INT_L	t0, CPU_INFO_SOFTINTS(s2)	# get pending softints
	and	v0, t0, 1 << SOFTINT_KPREEMPT	# do we need a kernel preempt?
	beqz	v0, 4f				#   nope
	 nop
	xor	t0, v0				# clear preempt bit
	INT_S	t0, CPU_INFO_SOFTINTS(s2)	# and save it.

	jal	_C_LABEL(splx_noprof)		# drop to IPL_SCHED
	 li	a0, IPL_SCHED

	jal	_C_LABEL(kpreempt)		# kpreempt(pc)
	 li	a0, -2
#endif /* __HAVE_PREEMPTION */
4:
#endif /* __HAVE_FAST_SOFTINTS */
	/*
	 * Interrupts handled, restore registers and return from the interrupt.
	 * First, clear interrupt enable
	 */
#ifdef MIPSNNR2
	di	v0				# disable interrupts
#else
	mfc0	v0, MIPS_COP_0_STATUS		# read it
	MFC0_HAZARD
	xor	v0, MIPS_SR_INT_IE		# disable interrupts
	mtc0	v0, MIPS_COP_0_STATUS		# write it
#endif
	COP0_SYNC

	or	v0, MIPS_SR_EXL			# set exception mode
	mtc0	v0, MIPS_COP_0_STATUS		# write it
	COP0_SYNC

	srl	a0, s0, 8			# get previous priority level
#ifdef PARANOIA
	INT_L	t0, TF_BASE+TF_PPL(sp)		# get saved priority level
9:	bne	t0, a0, 9b			# should still match
	 nop

	li	t0, IPL_HIGH
	sltu	v0, a0, t0
8:	beqz	v0, 8b
	 nop
#endif /* PARANOIA */

	/*
	 * Restore IPL knowing interrupts are disabled
	 */
	jal	_C_LABEL(splx_noprof)		# splx(ppl)
	 nop

#ifdef PARANOIA
	mfc0	v0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	or	v0, MIPS_SR_INT_IE
5:	bne	v0, s1, 5b
	 nop
#endif /* PARANOIA */

	/*
	 * Restore SR
	 */
	mtc0	s1, MIPS_COP_0_STATUS
	COP0_SYNC

	/*
	 * Restore s0-s2 and goto common kernel return code.
	 */
	REG_L	s0, TF_BASE+TF_REG_S0(sp)
	REG_L	s1, TF_BASE+TF_REG_S1(sp)
	b	MIPSX(kern_return)
	 REG_L	s2, TF_BASE+TF_REG_S2(sp)
	.set	at
END(MIPSX(kern_intr))
1296
1297/*
1298 *
1299 */
1300	.p2align 5
NESTED_NOPROFILE(MIPSX(user_reserved_insn), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
	/*
	 * Save a minimum of registers to see if this is rdhwr $3,$29
	 */
	KERN_ENTRY_ERRATA
	/* K1 already has CURLWP */
	PTR_L	k0, L_PCB(k1)			# XXXuvm_lwp_getuarea
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ

	/* Need two working registers */
	REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)

	/* If this was in a branch delay slot, take the slow path. */
	mfc0	v0, MIPS_COP_0_CAUSE
	MFC0_HAZARD
	bltz	v0, MIPSX(user_gen_exception_common)	# CAUSE_BD is the sign bit
	 nop

	/*
	 * Get exception PC and fetch the instruction.  We know we can do
	 * this since the instruction actually got read.
	 */
	_MFC0	v0, MIPS_COP_0_EXC_PC
	MFC0_HAZARD
	INT_L	AT, 0(v0)

	/*
	 * Was this rdhwr $3,$29?
	 */
	lui	v0, %hi(0x7c03e83b)	# 0x7c03e83b => rdhwr $3,$29
	addiu	v0, %lo(0x7c03e83b)	#        or ... rdhwr v1,ulr
	bne	AT, v0, MIPSX(user_gen_exception_common)
	 nop

	/*
	 * Advance the PC (don't want to restart at the rdhwr).
	 */
	_MFC0	v0, MIPS_COP_0_EXC_PC
	MFC0_HAZARD
	PTR_ADDIU v0, 4
	_MTC0	v0, MIPS_COP_0_EXC_PC
	COP0_SYNC

	PTR_L	v1, L_PRIVATE(k1)		# rdhwr $3,$29 updates v1

	/* Restore the two scratch registers and return to user mode. */
	REG_L	AT, CALLFRAME_SIZ+TF_REG_AST(k0)# restore reg
	REG_L	v0, CALLFRAME_SIZ+TF_REG_V0(k0) # restore reg
	eret
END(MIPSX(user_reserved_insn))
1353
1354/*
1355 * mipsN_user_gen_exception
1356 *
1357 * Handle an exception during user mode.
1358 * Save user context atop the kernel stack, then call trap() to process
1359 * the condition.  The context can be manipulated alternatively via
1360 * curlwp->l_md.md_regs.
1361 */
1362	.p2align 5
NESTED_NOPROFILE(MIPSX(user_gen_exception), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
	/*
	 * Save all the registers except the kernel temporaries onto the stack.
	 */
	KERN_ENTRY_ERRATA
	/* K1 already has CURLWP */
	PTR_L	k0, L_PCB(k1)			# XXXuvm_lwp_getuarea
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ	# k0 = trapframe base
	REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)
MIPSX(user_gen_exception_common):
	/* Also entered from the rdhwr fast path (AT/v0 already saved). */
	REG_S	v1, CALLFRAME_SIZ+TF_REG_V1(k0)
	mflo	v0
	REG_S	a0, CALLFRAME_SIZ+TF_REG_A0(k0)
	REG_S	a1, CALLFRAME_SIZ+TF_REG_A1(k0)
	REG_S	a2, CALLFRAME_SIZ+TF_REG_A2(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_A3(k0)
	mfhi	v1
	REG_S	t0, CALLFRAME_SIZ+TF_REG_T0(k0)
	REG_S	t1, CALLFRAME_SIZ+TF_REG_T1(k0)
	REG_S	t2, CALLFRAME_SIZ+TF_REG_T2(k0)
	REG_S	t3, CALLFRAME_SIZ+TF_REG_T3(k0)
	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_TA0(k0)
	REG_S	ta1, CALLFRAME_SIZ+TF_REG_TA1(k0)
	REG_S	ta2, CALLFRAME_SIZ+TF_REG_TA2(k0)
	REG_S	ta3, CALLFRAME_SIZ+TF_REG_TA3(k0)
	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
	REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(k0)
	REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(k0)
	REG_S	s2, CALLFRAME_SIZ+TF_REG_S2(k0)
	REG_S	s3, CALLFRAME_SIZ+TF_REG_S3(k0)
	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
	REG_S	s4, CALLFRAME_SIZ+TF_REG_S4(k0)
	REG_S	s5, CALLFRAME_SIZ+TF_REG_S5(k0)
	REG_S	s6, CALLFRAME_SIZ+TF_REG_S6(k0)
	REG_S	s7, CALLFRAME_SIZ+TF_REG_S7(k0)
	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
	REG_S	t8, CALLFRAME_SIZ+TF_REG_T8(k0)	# will be MIPS_CURLWP
	REG_S	t9, CALLFRAME_SIZ+TF_REG_T9(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_MULLO(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_MULHI(k0)
	REG_S	gp, CALLFRAME_SIZ+TF_REG_GP(k0)
	REG_S	sp, CALLFRAME_SIZ+TF_REG_SP(k0)
	REG_S	s8, CALLFRAME_SIZ+TF_REG_S8(k0)
	REG_S	ra, CALLFRAME_SIZ+TF_REG_RA(k0)
	REG_S	a0, CALLFRAME_SIZ+TF_REG_SR(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_EPC(k0)
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
#endif
	move	sp, k0				# switch to kernel SP
	move	MIPS_CURLWP, k1
#ifdef NOFPU
	/*
	 * enter kernel mode
	 */
	and	t0, a0, MIPS_SR_EXL|MIPS_SR_KSU_MASK # bits to clear
	xor	t0, a0				# clear them.
#else
	/*
	 * Turn off FPU and enter kernel mode
	 */
	lui	t0, %hi(~(MIPS_SR_COP_1_BIT|MIPS_SR_EXL|MIPS_SR_KSU_MASK))
	addiu	t0, %lo(~(MIPS_SR_COP_1_BIT|MIPS_SR_EXL|MIPS_SR_KSU_MASK))
	and	t0, a0
#endif
/*
 * Call the trap handler.
 */
	mtc0	t0, MIPS_COP_0_STATUS
	COP0_SYNC

	jal	_C_LABEL(trap)
	 REG_S	a3, CALLFRAME_RA(sp)		# for debugging
/*
 * Check pending asynchronous traps.
 */
	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)# any pending ast?
	beqz	v0, MIPSX(user_return)		# if no, skip ast processing
	 nop
/*
 * We have pending asynchronous traps; all the state is already saved.
 */
	lui	ra, %hi(MIPSX(user_return))	# return directly to user return
	j	_C_LABEL(ast)
	 PTR_ADDIU ra, %lo(MIPSX(user_return))	# return directly to user return
	.set	at
END(MIPSX(user_gen_exception))
1454
1455/*----------------------------------------------------------------------------
1456 *
1457 * mipsN_user_intr
1458 *
1459 *	Handle an interrupt from user mode.
1460 *	We save partial state onto the kernel stack since we know there will
 *	always be a kernel stack and chances are we won't need the registers we
1462 *	don't save.  If there is a pending asynchronous system trap, then save
1463 *	the remaining state and call ast().
1464 *
1465 * Results:
1466 * 	None.
1467 *
1468 * Side effects:
1469 *	None.
1470 *
1471 *----------------------------------------------------------------------------
1472 */
1473	.p2align 5
NESTED_NOPROFILE(MIPSX(user_intr), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
/*
 * Save the relevant user registers onto the kernel stack.
 * We don't need to save s0 - s8 because the compiler does it for us.
 */
	KERN_ENTRY_ERRATA
	/* k1 contains curlwp */
	PTR_L	k0, L_PCB(k1)			# XXXuvm_lwp_getuarea
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ	# k0 = trapframe base
	REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)	# $1
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)		# $2
	REG_S	v1, CALLFRAME_SIZ+TF_REG_V1(k0)		# $3
	mflo	v0
	REG_S	a0, CALLFRAME_SIZ+TF_REG_A0(k0)		# $4
	REG_S	a1, CALLFRAME_SIZ+TF_REG_A1(k0)		# $5
	REG_S	a2, CALLFRAME_SIZ+TF_REG_A2(k0)		# $6
	REG_S	a3, CALLFRAME_SIZ+TF_REG_A3(k0)		# $7
	mfhi	v1
	REG_S	t0, CALLFRAME_SIZ+TF_REG_T0(k0)		# $12
	REG_S	t1, CALLFRAME_SIZ+TF_REG_T1(k0)		# $13
	REG_S	t2, CALLFRAME_SIZ+TF_REG_T2(k0)		# $14
	REG_S	t3, CALLFRAME_SIZ+TF_REG_T3(k0)		# $15
	mfc0	t0, MIPS_COP_0_CAUSE
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_TA0(k0)	# $8
	REG_S	ta1, CALLFRAME_SIZ+TF_REG_TA1(k0)	# $9
	REG_S	ta2, CALLFRAME_SIZ+TF_REG_TA2(k0)	# $10
	REG_S	ta3, CALLFRAME_SIZ+TF_REG_TA3(k0)	# $11
	REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(k0)		# $16
	REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(k0)		# $17
	mfc0	s1, MIPS_COP_0_STATUS		# s1 = entry STATUS (kept live)
	REG_S	t8, CALLFRAME_SIZ+TF_REG_T8(k0)		# $24 MIPS_CURLWP
	REG_S	t9, CALLFRAME_SIZ+TF_REG_T9(k0)		# $25
	REG_S	gp, CALLFRAME_SIZ+TF_REG_GP(k0)		# $28
	REG_S	sp, CALLFRAME_SIZ+TF_REG_SP(k0)		# $29
	REG_S	ra, CALLFRAME_SIZ+TF_REG_RA(k0)		# $31
	REG_S	s1, CALLFRAME_SIZ+TF_REG_SR(k0)
	_MFC0	ta0, MIPS_COP_0_EXC_PC
	REG_S	v0, CALLFRAME_SIZ+TF_REG_MULLO(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_MULHI(k0)
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_EPC(k0)
	REG_S	t0, CALLFRAME_SIZ+TF_REG_CAUSE(k0)
	move	sp, k0				# switch to kernel SP
	move	MIPS_CURLWP, k1			# set curlwp reg (t8)
#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	REG_S	ta0, CALLFRAME_RA(sp)		# for debugging
#endif
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
#endif

	/*
	 * We first need to get to IPL_HIGH so that interrupts are masked.
	 */
	jal	_C_LABEL(splhigh_noprof)	# splhigh()
	 nop
	move	s0, v0				# remember previous priority

	/*
	 * Now we can turn off the FPU, clear exception level, and enter
	 * kernel mode since no interrupts can be delivered
	 */
	mfc0	v1, MIPS_COP_0_STATUS
#ifdef NOFPU
	/*
	 * enter kernel mode
	 */
	MFC0_HAZARD
	and	v0, v1, MIPS_SR_EXL|MIPS_SR_KSU_MASK # bits to clear
	xor	v0, v1				# clear them.
#else
	/*
	 * Turn off FPU and enter kernel mode
	 */
	lui	v0, %hi(~(MIPS_SR_COP_1_BIT|MIPS_SR_EXL|MIPS_SR_KSU_MASK))
	addiu	v0, %lo(~(MIPS_SR_COP_1_BIT|MIPS_SR_EXL|MIPS_SR_KSU_MASK))
	and	v0, v1
#endif
	mtc0	v0, MIPS_COP_0_STATUS		# write new status
	COP0_SYNC

	/*
	 * Since we interrupted user mode, the new interrupt depth must be 1.
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	li	t1, 1
	INT_S	t1, CPU_INFO_IDEPTH(t0)		# store new interrupt depth (1)

	/*
	 * Now hard interrupts can be processed.
	 */
	move	a1, ta0				# 2nd arg is exception pc
	move	a2, s1				# 3rd arg is status
	jal	_C_LABEL(cpu_intr)		# cpu_intr(ppl, pc, status)
	 move	a0, s0				# 1st arg is previous pri level

	/*
	 * Interrupt depth is now back to 0.
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_S	zero, CPU_INFO_IDEPTH(t0)

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * This is an interrupt from user mode so both softints must be
	 * enabled.  No need to check (unless we're being paranoid).
	 */
#ifdef PARANOIA
	and	a0, s1, MIPS_SOFT_INT_MASK	# get softints enabled bits
	xor	a0, MIPS_SOFT_INT_MASK		# invert them.
1:	bnez	a0, 1b				# loop forever if disabled
	 nop
#endif
	mfc0	a0, MIPS_COP_0_CAUSE		# grab the pending softints
	MFC0_HAZARD
	and	a0, MIPS_SOFT_INT_MASK		# are there softints pending
	beqz	a0, 4f				#   nope
	 nop
	jal	_C_LABEL(softint_process)	# softint_process(pending)
	 nop
4:
#endif
	/*
	 * Disable interrupts
	 */
#ifdef MIPSNNR2
	di	v1				# disable interrupts
#else
	mfc0	v1, MIPS_COP_0_STATUS
	MFC0_HAZARD
	and	v0, v1, MIPS_SR_INT_IE		# clear interrupt enable
	xor	v0, v1
	mtc0	v0, MIPS_COP_0_STATUS		# interrupts are disabled
#endif
	COP0_SYNC

	/*
	 * Restore IPL knowing interrupts are off
	 */
	jal	_C_LABEL(splx_noprof)
	 move	a0, s0				# fetch previous priority level

	/*
	 * Check pending asynchronous traps.
	 */
	REG_L	s0, CALLFRAME_SIZ+TF_REG_S0(sp)	# restore
	REG_L	s1, CALLFRAME_SIZ+TF_REG_S1(sp)	# restore
	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)# any pending ast?
	beqz	v0, MIPSX(user_intr_return)	# if no, skip ast processing
	 nop

	/*
	 * We have a pending asynchronous trap; save remaining user state into
	 * trapframe.
	 */
	#REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(sp)	# $16 (saved above)
	#REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(sp)	# $17 (saved above)
	REG_S	s2, CALLFRAME_SIZ+TF_REG_S2(sp)	# $18
	REG_S	s3, CALLFRAME_SIZ+TF_REG_S3(sp)	# $19
	REG_S	s4, CALLFRAME_SIZ+TF_REG_S4(sp)	# $20
	REG_S	s5, CALLFRAME_SIZ+TF_REG_S5(sp)	# $21
	REG_S	s6, CALLFRAME_SIZ+TF_REG_S6(sp)	# $22
	REG_S	s7, CALLFRAME_SIZ+TF_REG_S7(sp)	# $23
	REG_S	s8, CALLFRAME_SIZ+TF_REG_S8(sp)	# $30
#if !defined(MIPS_DYNAMIC_STATUS_MASK) && defined(MIPSNNR2)
	ei					# enable interrupts
#else
	mfc0	t0, MIPS_COP_0_STATUS		#
	MFC0_HAZARD
	or	t0, MIPS_SR_INT_IE 		# enable interrupts
	DYNAMIC_STATUS_MASK(t0, t1)		# machine dependent masking
	mtc0	t0, MIPS_COP_0_STATUS		# enable interrupts (spl0)
#endif
	COP0_SYNC

	PTR_LA	ra, MIPSX(user_return)		# ast() returns to user_return
	j	_C_LABEL(ast)			# ast()
	 nop
	.set	at
END(MIPSX(user_intr))
1655
1656/*
1657 * mipsN_systemcall
1658 *
1659 * Save user context atop of kernel stack, then call syscall() to process
1660 * a system call.  The context can be manipulated alternatively via
1661 * curlwp->l_md.md_utf->tf_regs.
1662 */
1663	.p2align 5
NESTED_NOPROFILE(MIPSX(systemcall), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
	/*
	 * Save all the registers but kernel temporaries onto the stack.
	 */
	KERN_ENTRY_ERRATA
	/* k1 already contains curlwp */
	PTR_L	k0, L_PCB(k1)			# XXXuvm_lwp_getuarea
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ	# k0 = trapframe base
	#REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)
	#.set	at
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)		# syscall #
	REG_S	v1, CALLFRAME_SIZ+TF_REG_V1(k0)		# used by syscall()
	mflo	v0
	REG_S	a0, CALLFRAME_SIZ+TF_REG_A0(k0)
	REG_S	a1, CALLFRAME_SIZ+TF_REG_A1(k0)
	REG_S	a2, CALLFRAME_SIZ+TF_REG_A2(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_A3(k0)
	mfhi	v1
	mfc0	a1, MIPS_COP_0_STATUS			# 2nd arg is STATUS
	REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(k0)
	REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(k0)
	REG_S	s2, CALLFRAME_SIZ+TF_REG_S2(k0)
	REG_S	s3, CALLFRAME_SIZ+TF_REG_S3(k0)
	mfc0	a2, MIPS_COP_0_CAUSE			# 3rd arg is CAUSE
	REG_S	s4, CALLFRAME_SIZ+TF_REG_S4(k0)
	REG_S	s5, CALLFRAME_SIZ+TF_REG_S5(k0)
	REG_S	s6, CALLFRAME_SIZ+TF_REG_S6(k0)
	REG_S	s7, CALLFRAME_SIZ+TF_REG_S7(k0)
	_MFC0	a3, MIPS_COP_0_EXC_PC			# 4th arg is PC
	REG_S	t0, CALLFRAME_SIZ+TF_REG_T0(k0)
	REG_S	t1, CALLFRAME_SIZ+TF_REG_T1(k0)
	REG_S	t2, CALLFRAME_SIZ+TF_REG_T2(k0)
	REG_S	t3, CALLFRAME_SIZ+TF_REG_T3(k0)		# syscall saved gp for fork
#if defined(__mips_n32) || defined(__mips_n64)
	/* N32/N64 pass up to 8 syscall arguments in registers. */
	REG_S	a4, CALLFRAME_SIZ+TF_REG_A4(k0)
	REG_S	a5, CALLFRAME_SIZ+TF_REG_A5(k0)
	REG_S	a6, CALLFRAME_SIZ+TF_REG_A6(k0)
	REG_S	a7, CALLFRAME_SIZ+TF_REG_A7(k0)
#else
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_TA0(k0)
	REG_S	ta1, CALLFRAME_SIZ+TF_REG_TA1(k0)
	REG_S	ta2, CALLFRAME_SIZ+TF_REG_TA2(k0)
	REG_S	ta3, CALLFRAME_SIZ+TF_REG_TA3(k0)
#endif
	REG_S	t8, CALLFRAME_SIZ+TF_REG_T8(k0)		# will be MIPS_CURLWP
	REG_S	t9, CALLFRAME_SIZ+TF_REG_T9(k0)
	REG_S	gp, CALLFRAME_SIZ+TF_REG_GP(k0)
	REG_S	sp, CALLFRAME_SIZ+TF_REG_SP(k0)
	REG_S	s8, CALLFRAME_SIZ+TF_REG_S8(k0)
	REG_S	ra, CALLFRAME_SIZ+TF_REG_RA(k0)
	REG_S	a1, CALLFRAME_SIZ+TF_REG_SR(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_MULLO(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_MULHI(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_EPC(k0)
	move	MIPS_CURLWP, k1			# set curlwp reg
	move	sp, k0				# switch to kernel SP
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
#endif
#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	move	ra, a3
	REG_S	ra, CALLFRAME_RA(sp)
#endif
	/* Dispatch through the emulation's syscall entry point. */
	PTR_L	s0, L_PROC(MIPS_CURLWP)		# curlwp->l_proc
	PTR_L	t9, P_MD_SYSCALL(s0)		# t9 = syscall
#ifdef NOFPU
	/*
	 * enter kernel mode
	 */
	and	t0, a1, MIPS_SR_EXL|MIPS_SR_KSU_MASK # bits to clear
	xor	t0, a1				# clear them.
#else
	/*
	 * Turn off FPU and enter kernel mode
	 */
	lui	t0, %hi(~(MIPS_SR_COP_1_BIT|MIPS_SR_EXL|MIPS_SR_KSU_MASK))
	addiu	t0, %lo(~(MIPS_SR_COP_1_BIT|MIPS_SR_EXL|MIPS_SR_KSU_MASK))
	and	t0, a1
#endif
	mtc0	t0, MIPS_COP_0_STATUS
	COP0_SYNC
/*
 * Call the system call handler.
 */
	.set	at
	jalr	t9
	 move	a0, MIPS_CURLWP			# 1st arg is curlwp

/*
 * Check pending asynchronous traps.
 */
	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)# any pending ast?
	beqz	v0, MIPSX(user_return)		# no, skip ast processing
	 nop
/*
 * We have pending asynchronous traps; all the state is already saved.
 */
	lui	ra, %hi(MIPSX(user_return))	# return directly to user return
	j	_C_LABEL(ast)
	 PTR_ADDIU ra, %lo(MIPSX(user_return))	# return directly to user return
END(MIPSX(systemcall))
1767
1768/*
1769 * Panic on cache errors.  A lot more could be done to recover
1770 * from some types of errors but it is tricky.
1771 */
1772	.p2align 5
NESTED_NOPROFILE(MIPSX(cache_exception), KERNFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
#ifdef sbmips	/* XXX!  SB-1 needs a real cache error handler */
	eret
	nop
#endif
	/*
	 * Load panic()'s arguments, then point ERROR_PC at panic itself
	 * so the eret below "returns" straight into panic(fmt, ...).
	 * The "9f" format string is emitted by the MSG macro below.
	 */
	PTR_LA	k0, panic			# return to panic
	PTR_LA	a0, 9f				# panicstr
	_MFC0	a1, MIPS_COP_0_ERROR_PC
#if defined(MIPS64_XLS) && defined(MIPS64)
	.set	push
	.set	arch=xlr
	li	k1, 0x309	/* L1D_CACHE_ERROR_LOG */
	mfcr	a2, k1
	li	k1, 0x30b	/* L1D_CACHE_INTERRUPT */
	mfcr	a3, k1
	.set	pop
#if defined(__mips_o32)
#error O32 not supported.
#endif
	mfc0	a4, MIPS_COP_0_STATUS
	mfc0	a5, MIPS_COP_0_CAUSE
#else
	mfc0	a2, MIPS_COP_0_ECC
	mfc0	a3, MIPS_COP_0_CACHE_ERR
#endif

	_MTC0	k0, MIPS_COP_0_ERROR_PC		# set return address
	COP0_SYNC

	mfc0	k0, MIPS_COP_0_STATUS		# restore status
	li	k1, MIPS3_SR_DIAG_PE		# ignore further errors
	or	k0, k1
	mtc0	k0, MIPS_COP_0_STATUS		# restore status
	COP0_SYNC

	eret					# "returns" into panic()

#if defined(MIPS64_XLS)
	MSG("cache error @ EPC %#lx\nL1D_CACHE_ERROR_LOG %#lx\nL1D_CACHE_INTERRUPT %#lx\nstatus %#x, cause %#x");
#else
	MSG("cache error @ EPC 0x%x ErrCtl 0x%x CacheErr 0x%x");
#endif
	.set	at
END(MIPSX(cache_exception))
1819
1820
1821/*----------------------------------------------------------------------------
1822 *
1823 *	R4000 TLB exception handlers
1824 *
1825 *----------------------------------------------------------------------------
1826 */
1827
1828#if (PGSHIFT & 1) == 0
1829/*----------------------------------------------------------------------------
1830 *
1831 * mipsN_tlb_invalid_exception --
1832 *
1833 *	Handle a TLB invalid exception from kernel mode in kernel space.
 *	The BadVAddr, Context, and EntryHi registers contain the failed
1835 *	virtual address.
1836 *
 *	If we are using page sizes which use both TLB LO entries, either both
1838 *	are valid or neither are.  So this exception should never happen.
1839 *
1840 * Results:
1841 *	None.
1842 *
1843 * Side effects:
1844 *	None.
1845 *
1846 *----------------------------------------------------------------------------
1847 */
1848LEAF_NOPROFILE(MIPSX(kern_tlb_invalid_exception))
1849	.set	noat
1850	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address
1851#if !defined(_LP64) && (MIPS64 + MIPS64R2) > 0
1852#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
1853	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
1854	slt	k1, k0, k1
1855	bnez	k1, _C_LABEL(MIPSX(kern_gen_exception)) # full trap processing
1856	 nop
1857#elif VM_MIN_KERNEL_ADDRESS > MIPS_XKSEG_START
1858	li	k1, VM_MIN_KERNEL_ADDRESS>>32	# compute index
1859	dsll32	k1, k1, 0
1860	slt	k1, k0, k1
1861	bnez	k1, _C_LABEL(MIPSX(kern_gen_exception)) # full trap processing
1862	 nop
1863#endif
1864#endif /* !_LP64 && (MIPS64 + MIPS64R2) > 0 */
1865	PTR_LA	k1, _C_LABEL(pmap_limits)
1866	PTR_L	k1, PMAP_LIMITS_VIRTUAL_END(k1)
1867	PTR_SUBU k1, k0
1868	blez	k1, _C_LABEL(MIPSX(kern_gen_exception)) # full trap processing
1869	 nop
1870	PTR_LA	k1, _C_LABEL(pmap_kern_segtab)
1871#ifdef _LP64
1872#ifdef MIPSNNR2
1873	_EXT	k0, k0, XSEGSHIFT, XSEGLENGTH
1874	_INS	k1, k0, PTR_SCALESHIFT, XSEGLENGTH
1875#else
1876	PTR_SRL	k0, XSEGSHIFT - PTR_SCALESHIFT
1877	andi	k0, (NXSEGPG-1) << PTR_SCALESHIFT
1878	PTR_ADDU k1, k0
1879#endif
1880	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address (again)
1881	PTR_L	k1, (k1)			# load segtab address
1882	beqz	k1, _C_LABEL(MIPSX(kern_gen_exception))
1883	 nop
1884#endif /* _LP64 */
1885#ifdef MIPSNNR2
1886	_EXT	k0, k0, SEGSHIFT, SEGLENGTH
1887	_INS	k1, k0, PTR_SCALESHIFT, SEGLENGTH
1888#else
1889	PTR_SRL k0, SEGSHIFT - PTR_SCALESHIFT
1890	andi	k0, (NSEGPG-1) << PTR_SCALESHIFT
1891	PTR_ADDU k1, k0
1892#endif
1893	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address (again)
1894	PTR_L	k1, (k1)			# load page table address
1895	beqz	k1, _C_LABEL(MIPSX(kern_gen_exception))
1896	 nop
1897#ifdef MIPSNNR2
1898	_EXT	k0, k0, PGSHIFT, PTPLENGTH
1899	_INS	k1, k0, PTPSHIFT, PTPLENGTH
1900#else
1901	PTR_SRL k0, PTPLENGTH
1902	andi	k0, (NPTEPG-1) << PTPSHIFT
1903	PTR_ADDU k1, k0
1904#endif
1905	tlbp					# Probe the invalid entry
1906	COP0_SYNC
1907
1908	mfc0	k0, MIPS_COP_0_TLB_INDEX
1909	MFC0_HAZARD
1910	bltz	k0, _C_LABEL(MIPSX(kern_gen_exception)) # ASSERT(TLB entry exists)
1911	 nop					# - delay slot -
1912
1913	and	k0, k1, 4			# check even/odd page
1914#ifdef MIPS3
1915	nop					# required for QED 5230
1916#endif
1917	bnez	k0, MIPSX(kern_tlbi_odd)
1918	 nop
1919
1920	INT_L	k0, 0(k1)			# get PTE entry
1921#ifdef MIPSNNR2
1922	_EXT	k0, k0, 0, WIRED_POS		# get rid of "wired" bit
1923#else
1924	_SLL	k0, k0, WIRED_SHIFT		# get rid of "wired" bit
1925	_SRL	k0, k0, WIRED_SHIFT
1926#endif
1927	_MTC0	k0, MIPS_COP_0_TLB_LO0		# load PTE entry
1928	and	k0, k0, MIPS3_PG_V		# check for valid entry
1929#ifdef MIPS3
1930	nop					# required for QED5230
1931#endif
1932	beqz	k0, _C_LABEL(MIPSX(kern_gen_exception))	# PTE invalid
1933	 nop					# - delay slot -
1934
1935	INT_L	k0, 4(k1)			# get odd PTE entry
1936	mfc0	k1, MIPS_COP_0_TLB_INDEX
1937#ifdef MIPSNNR2
1938	_EXT	k0, k0, 0, WIRED_POS
1939#else
1940	_SLL	k0, k0, WIRED_SHIFT
1941	_SRL	k0, k0, WIRED_SHIFT
1942#endif
1943#if UPAGES == 1
1944	sltiu	k1, k1, MIPS3_TLB_WIRED_UPAGES	# Luckily this is MIPS3_PG_G
1945	or	k1, k1, k0
1946#endif
1947	_MTC0	k0, MIPS_COP_0_TLB_LO1		# load PTE entry
1948	COP0_SYNC
1949#ifdef MIPS3
1950	nop
1951	nop					# required for QED5230
1952#endif
1953	tlbwi					# write TLB
1954	COP0_SYNC
1955#ifdef MIPS3_LOONGSON2
1956	li	k0, MIPS_DIAG_ITLB_CLEAR
1957	mtc0	k0, MIPS_COP_0_DIAG		# invalidate ITLB
1958#elif defined(MIPS3)
1959	nop
1960	nop
1961#endif
1962	eret
1963
1964MIPSX(kern_tlbi_odd):
1965	INT_L	k0, 0(k1)			# get PTE entry
1966#ifdef MIPSNNR2
1967	_EXT	k0, k0, 0, WIRED_POS
1968#else
1969	_SLL	k0, k0, WIRED_SHIFT		# get rid of wired bit
1970	_SRL	k0, k0, WIRED_SHIFT
1971#endif
1972	_MTC0	k0, MIPS_COP_0_TLB_LO1		# load PTE entry
1973	COP0_SYNC
1974	and	k0, k0, MIPS3_PG_V		# check for valid entry
1975#ifdef MIPS3
1976	nop					# required for QED5230
1977#endif
1978	beqz	k0, _C_LABEL(MIPSX(kern_gen_exception))	# PTE invalid
1979	 nop					# - delay slot -
1980
1981	INT_L	k0, -4(k1)			# get even PTE entry
1982	mfc0	k1, MIPS_COP_0_TLB_INDEX
1983#ifdef MIPSNNR2
1984	_EXT	k0, k0, 0, WIRED_POS
1985#else
1986	_SLL	k0, k0, WIRED_SHIFT
1987	_SRL	k0, k0, WIRED_SHIFT
1988#endif
1989#if UPAGES == 1
1990	sltiu	k1, k1, MIPS3_TLB_WIRED_UPAGES	# Luckily this is MIPS3_PG_G
1991	or	k1, k1, k0
1992#endif
1993	_MTC0	k0, MIPS_COP_0_TLB_LO0		# load PTE entry
1994	COP0_SYNC
1995#ifdef MIPS3
1996	nop					# required for QED5230
1997#endif
1998	tlbwi					# update TLB
1999	COP0_SYNC
2000#ifdef MIPS3_LOONGSON2
2001	li	k0, MIPS_DIAG_ITLB_CLEAR
2002	mtc0	k0, MIPS_COP_0_DIAG		# invalidate ITLB
2003#elif defined(MIPS3)
2004	nop
2005	nop
2006#endif
2007	eret
2008END(MIPSX(kern_tlb_invalid_exception))
2009#endif /* (PGSHIFT & 1) == 0 */
2010
2011/*
2012 * Mark where code entered from exception handler jumptable
2013 * ends, for stack traceback code.
2014 */
2015
2016	.globl	_C_LABEL(MIPSX(exceptionentry_end))
2017_C_LABEL(MIPSX(exceptionentry_end)):
2018
2019	.set	at
2020
2021/*--------------------------------------------------------------------------
2022 *
2023 * mipsN_tlb_get_asid --
2024 *
2025 *	Return the current ASID
2026 *
2027 *	tlb_asid_t mipsN_tlb_get_asid(void)
2028 *
2029 * Results:
2030 *	Return the current ASID.
2031 *
2032 * Side effects:
2033 *	None.
2034 *
2035 *--------------------------------------------------------------------------
2036 */
LEAF(MIPSX(tlb_get_asid))
	_MFC0	v0, MIPS_COP_0_TLB_HI		# read the hi reg value
	MFC0_HAZARD				# wait out the mfc0 hazard
	jr	ra
	 and	v0, v0, MIPS3_TLB_ASID		# mask off ASID (delay slot)
END(MIPSX(tlb_get_asid))
2043
2044/*--------------------------------------------------------------------------
2045 *
2046 * mipsN_tlb_set_asid --
2047 *
2048 *	Write the given pid into the TLB pid reg.
2049 *
2050 *	void mipsN_tlb_set_asid(tlb_asid_t pid)
2051 *
2052 * Results:
2053 *	None.
2054 *
2055 * Side effects:
2056 *	ASID set in the entry hi register.
2057 *
2058 *--------------------------------------------------------------------------
2059 */
LEAF(MIPSX(tlb_set_asid))
	_MFC0	v0, MIPS_COP_0_TLB_HI		# read the hi reg value
#ifdef MIPSNNR2
	_INS	v0, a0, V_MIPS3_PG_ASID, S_MIPS3_PG_ASID # insert new ASID field
#else
	li	t0, MIPS3_PG_ASID		# t0 = ASID field mask
	not	t1, t0				# t1 = ~ASID mask
	and	v0, v0, t1			# clear old ASID out of EntryHi
	and	a0, a0, t0			# keep only the ASID bits of arg
	or	v0, v0, a0			# merge the new ASID in
#endif
	_MTC0	v0, MIPS_COP_0_TLB_HI		# Write the hi reg value
	JR_HB_RA				# return with hazard barrier
END(MIPSX(tlb_set_asid))
2074
2075/*--------------------------------------------------------------------------
2076 *
2077 * mipsN_tlb_update_addr --
2078 *
2079 *	Update the TLB if found; otherwise insert randomly if requested
2080 *
 *	bool mipsN_tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte,
 *	    bool insert)
2083 *
2084 * Results:
2085 *	false (0) if skipped, true (1) if updated.
2086 *
2087 * Side effects:
2088 *	None.
2089 *
2090 *--------------------------------------------------------------------------
2091 */
LEAF(MIPSX(tlb_update_addr))
	# a0 = va, a1 = asid, a2 = new pte, a3 = insert-if-missing flag
	# (see the block comment above); runs with interrupts disabled.
#ifdef MIPSNNR2
	di	ta0				# Disable interrupts
#else
	mfc0	ta0, MIPS_COP_0_STATUS		# Save the status register.
	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
#endif
	COP0_SYNC
#if (PGSHIFT & 1) == 0
	and	t1, a0, MIPS3_PG_ODDPG		# t1 = Even/Odd flag
#endif
	and	a1, a1, MIPS3_PG_ASID
#ifdef MIPSNNR2
	_INS	a0, a1, 0, V_MIPS3_PG_HVPN	# insert ASID + clear other bits
#else
	li	v0, MIPS3_PG_HVPN
	and	a0, a0, v0			# keep only VPN2 bits of va
	or	a0, a0, a1			# Merge ASID
#endif
	_MFC0	ta1, MIPS_COP_0_TLB_HI		# Save current ASID
	_MTC0	a0, MIPS_COP_0_TLB_HI		# Init high reg
	COP0_SYNC
#if (PGSHIFT & 1) == 0
	and	t0, a2, MIPS3_PG_G		# Copy global bit
#endif
	tlbp					# Probe for the entry.
	COP0_SYNC
#ifdef MIPSNNR2
	_EXT	a1, a2, 0, WIRED_POS		# clear top bits of new pte
#else
	_SLL	a2, WIRED_SHIFT			# clear top bits of new pte
	_SRL	a1, a2, WIRED_SHIFT
#endif
	mfc0	v1, MIPS_COP_0_TLB_INDEX	# See what we got
#ifdef MIPS3
	nop
	nop					# required for QED5230
#endif

#if (PGSHIFT & 1)
	# Odd PGSHIFT: even/odd pages are contiguous, derive lo1 from lo0.
	bgez	v1, 1f				# index < 0 => !present
	 REG_ADDU a2, a1, MIPS3_PG_NEXT
	beqz	a3, 7f				# not found and no insert: fail
	 li	v0, 0
1:	_MTC0	a1, MIPS_COP_0_TLB_LO0		# init low reg0.
	_MTC0	a2, MIPS_COP_0_TLB_LO1		# init low reg1.
	bltz	v1, 5f				# index < 0 => !present
	 nop
	COP0_SYNC
	tlbwi					# overwrite entry
	b	6f
	 nop
#else /* (PGSHIFT & 1) == 0 */
	bltz	v1, 3f				# index < 0 => !found
	 nop
	tlbr					# update, read entry first
	COP0_SYNC
	bnez	t1, 1f				# Decide even odd
	 nop
# EVEN
	_MTC0	a1, MIPS_COP_0_TLB_LO0		# init low reg0.
	b	2f
	 nop
1:
# ODD
	_MTC0	a1, MIPS_COP_0_TLB_LO1		# init low reg1.
2:
	COP0_SYNC
	tlbwi					# update slot found
	b	6f
	 nop
3:
	# Entry not in the TLB; insert only when the caller asked for it.
	beqz	a3, 7f				# not found and no insert
	 li	v0, 0				# assume failure
	bnez	t1, 4f				# Decide even odd
	 nop
	move	t3, a1				# swap a1 and t0
	move	a1, t0				#
	move	t0, t3				#
4:
	_MTC0	t0, MIPS_COP_0_TLB_LO0		# init low reg0.
	_MTC0	a1, MIPS_COP_0_TLB_LO1		# init low reg1.
#endif /* PGSHIFT & 1 */
5:
	COP0_SYNC
	tlbwr					# enter randomly
6:
	COP0_SYNC
#ifdef MIPS3_LOONGSON2
	li	t1, MIPS_DIAG_ITLB_CLEAR
	mtc0	t1, MIPS_COP_0_DIAG		# invalidate ITLB
#elif defined(MIPS3)
	nop					# required for QED5230
	nop					# required for QED5230
#endif
	li	v0, 1				# found or inserted
#ifdef MIPS3
	nop					# Make sure pipeline
	nop					# advances before we
	nop					# use the TLB.
	nop
#endif
7:
	# Common exit: v0 holds the result (0 = skipped, 1 = updated).
	_MTC0	ta1, MIPS_COP_0_TLB_HI		# restore ASID
	COP0_SYNC
#ifdef MIPS3
	nop					# required for QED5230
	nop					# required for QED5230
#endif
	mtc0	ta0, MIPS_COP_0_STATUS		# Restore the status register
	JR_HB_RA
END(MIPSX(tlb_update_addr))
2204
2205/*--------------------------------------------------------------------------
2206 *
2207 * mipsN_tlb_read_entry --
2208 *
2209 *	Read the TLB entry.
2210 *
2211 *	void mipsN_tlb_read_entry(size_t tlb_index, struct tlbmask *tlb);
2212 *
2213 * Results:
2214 *	None.
2215 *
2216 * Side effects:
2217 *	tlb will contain the TLB entry found.
2218 *
2219 *--------------------------------------------------------------------------
2220 */
LEAF(MIPSX(tlb_read_entry))
	# a0 = tlb index, a1 = struct tlbmask * to fill in.
	mfc0	v1, MIPS_COP_0_STATUS		# Save the status register.
	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
	COP0_SYNC
#ifdef MIPS3
	nop
#endif
	mfc0	ta2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
#ifdef MIPS3
	nop
#endif
	_MFC0	t0, MIPS_COP_0_TLB_HI		# Get current ASID

	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index register
	COP0_SYNC
#ifdef MIPS3
	nop
	nop					# required for QED5230
#endif
	tlbr					# Read from the TLB
	COP0_SYNC
#ifdef MIPS3
	nop
	nop
	nop
#endif
	mfc0	t2, MIPS_COP_0_TLB_PG_MASK	# fetch the pgMask
	_MFC0	t3, MIPS_COP_0_TLB_HI		# fetch the hi entry
	_MFC0	ta0, MIPS_COP_0_TLB_LO0		# See what we got
	_MFC0	ta1, MIPS_COP_0_TLB_LO1		# See what we got
	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore ASID
	mtc0	ta2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
	COP0_SYNC
	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
	COP0_SYNC
	PTR_S	t3, TLBMASK_HI(a1)		# tlb->tlb_hi = EntryHi
	REG_S	ta0, TLBMASK_LO0(a1)		# tlb->tlb_lo0 = EntryLo0
	REG_S	ta1, TLBMASK_LO1(a1)		# tlb->tlb_lo1 = EntryLo1
	jr	ra
	 INT_S	t2, TLBMASK_MASK(a1)		# tlb->tlb_mask (delay slot)
END(MIPSX(tlb_read_entry))
2262
2263/*--------------------------------------------------------------------------
2264 *
2265 * void mipsN_tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
2266 *
2267 * Invalidate a TLB entry which has the given vaddr and ASID if found.
2268 *--------------------------------------------------------------------------
2269 */
LEAF_NOPROFILE(MIPSX(tlb_invalidate_addr))
	# a0 = va, a1 = asid.  Probe for (va, asid); if present, replace the
	# entry with an invalid KSEG0-based EntryHi (unique per slot so no
	# duplicate VPN2s are created).
	mfc0	ta0, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
	COP0_SYNC

#if (PGSHIFT & 1) == 0
	_SRL	a2, a0, V_MIPS3_PG_ODDPG - V_MIPS3_PG_V
	and	a2, MIPS3_PG_V			# lo0 V bit
	xor	a3, a2, MIPS3_PG_V		# lo1 V bit
#endif
	and	a1, a1, MIPS3_PG_ASID
#ifdef MIPSNNR2
	_INS	a0, a1, 0, V_MIPS3_PG_HVPN	# merge ASID into VPN2
#else
	_SRA	a0, V_MIPS3_PG_HVPN		# clear bottom bits of VA
	_SLL	a0, V_MIPS3_PG_HVPN		# clear bottom bits of VA
	or	a0, a0, a1
#endif
	_MFC0	ta1, MIPS_COP_0_TLB_HI		# save current ASID
	mfc0	ta2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
	_MTC0	a0, MIPS_COP_0_TLB_HI		# look for the vaddr & ASID
	COP0_SYNC
	tlbp					# probe the entry in question
	COP0_SYNC
	mfc0	v0, MIPS_COP_0_TLB_INDEX	# see what we got
	MFC0_HAZARD
	bltz	v0, 2f				# index < 0 then skip
	 li	t2, MIPS_KSEG0_START		# invalid address
	PTR_SLL	v0, PGSHIFT | 1			# PAGE_SHIFT | 1
	PTR_ADDU t2, v0				# t2 = unique invalid VPN2
	move	t0, zero
	move	t1, zero
#if (PGSHIFT & 1) == 0
	# Only one half of the even/odd pair may match; keep the other half.
	tlbr					# read entry
	COP0_SYNC
	_MFC0	t0, MIPS_COP_0_TLB_LO0		# fetch entryLo0
	_MFC0	t1, MIPS_COP_0_TLB_LO1		# fetch entryLo1
#ifdef MIPS3
	nop
#endif
	and	a2, t0				# a2=valid entryLo0 afterwards
	and	a3, t1				# a3=valid entryLo1 afterwards
	or	v0, a2, a3			# will one be valid?
#ifdef MIPSNNX
#error Global bit is lost here when V==0 and it needs to be preserved
	movz	t0, zero, a2			# zero lo0 if V would not be set
	movz	t1, zero, a3			# zero lo1 if V would not be set
	movn	t2, a0, v0			# yes, keep VA the same
#else
	_SLL	a2, a2, PG_V_LSHIFT		# move V to MSB
	_SRA	a2, a2, PG_V_RSHIFT		# fill with MSB
	or	a2, MIPS3_PG_G			# mask needs to preserve G
	and	t0, t0, a2			# zero lo0 if V would not be set
	_SLL	a3, a3, PG_V_LSHIFT		# move V to MSB
	_SRA	a3, a3, PG_V_RSHIFT		# fill with MSB
	or	a3, MIPS3_PG_G			# mask needs to preserve G
	beqz	v0, 1f				# no valid entry
	 and	t1, t1, a3			# zero lo1 if V would not be set
	move	t2, a0				# we need entryHi to be valid
1:
#endif /* MIPSNNX */
#endif /* (PGSHIFT & 1) == 0 */
	_MTC0	t0, MIPS_COP_0_TLB_LO0		# zero out entryLo0
	_MTC0	t1, MIPS_COP_0_TLB_LO1		# zero out entryLo1
	_MTC0	t2, MIPS_COP_0_TLB_HI		# make entryHi invalid
#if 0
	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out pageMask
#endif
	COP0_SYNC
#ifdef MIPS3
	nop
	nop
#endif

	tlbwi					# write back the gutted entry
	COP0_SYNC
#ifdef MIPS3_LOONGSON2
	li	v0, MIPS_DIAG_ITLB_CLEAR
	mtc0	v0, MIPS_COP_0_DIAG		# invalidate ITLB
#elif defined(MIPS3)
	nop
	nop
#endif
2:
	mtc0	ta2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
	_MTC0	ta1, MIPS_COP_0_TLB_HI		# restore current ASID
	COP0_SYNC
	mtc0	ta0, MIPS_COP_0_STATUS		# restore status register
	JR_HB_RA
END(MIPSX(tlb_invalidate_addr))
2360
2361/*
2362 * void mipsN_tlb_invalidate_asids(uint32_t base, uint32_t limit);
2363 *
 * Invalidate TLB entries belonging to per-process user spaces with
2365 * base <= ASIDs <= limit while leaving entries for kernel space
2366 * marked global intact.
2367 */
LEAF_NOPROFILE(MIPSX(tlb_invalidate_asids))
	# a0 = base ASID, a1 = limit ASID (inclusive).  Walk the non-wired
	# TLB entries and invalidate any non-global entry whose ASID lies
	# in [base, limit].
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
	COP0_SYNC

	_MFC0	t0, MIPS_COP_0_TLB_HI		# Save the current ASID.
	mfc0	t1, MIPS_COP_0_TLB_WIRED	# first non-wired slot
	li	v0, MIPS_KSEG0_START		# invalid address
	INT_L	t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask

	# do {} while (t1 < t2)
1:
	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set index
	COP0_SYNC
	sll	ta0, t1, PGSHIFT | 1		# PAGE_SHIFT | 1
	tlbr					# obtain an entry
	COP0_SYNC
	_MFC0	ta1, MIPS_COP_0_TLB_LO1
	MFC0_HAZARD
	and	ta1, MIPS3_PG_G			# check to see it has G bit
	bnez	ta1, 2f				# yep, skip this one.
	 nop
	_MFC0	ta1, MIPS_COP_0_TLB_HI		# get VA and ASID
	MFC0_HAZARD
	and	ta1, MIPS3_PG_ASID		# focus on ASID
	sltu	a3, ta1, a0			# asid < base?
	bnez	a3, 2f				# yes, skip this entry.
	 nop
	sltu	a3, a1, ta1			# limit < asid?
	bnez	a3, 2f				# yes, skip this entry.
	 nop
	PTR_ADDU ta0, v0			# unique invalid VPN2 per slot

	_MTC0	ta0, MIPS_COP_0_TLB_HI		# make entryHi invalid
	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out mask entry
	COP0_SYNC
	tlbwi					# invalidate the TLB entry
	COP0_SYNC
2:
	addu	t1, 1				# next TLB slot
	bne	t1, t2, 1b
	 nop

	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore ASID.
	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
	COP0_SYNC

#ifdef MIPS3_LOONGSON2
	li	v0, MIPS_DIAG_ITLB_CLEAR
	mtc0	v0, MIPS_COP_0_DIAG		# invalidate ITLB
#endif

	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
	JR_HB_RA				# new ASID will be set soon
END(MIPSX(tlb_invalidate_asids))
2426
2427#ifdef MULTIPROCESSOR
2428/*
2429 * void mipsN_tlb_invalidate_globals(void);
2430 *
2431 * Invalidate the non-wired TLB entries belonging to kernel space while
2432 * leaving entries for user space (not marked global) intact.
2433 */
LEAF_NOPROFILE(MIPSX(tlb_invalidate_globals))
	# Walk the non-wired TLB slots and invalidate every entry with the
	# G (global) bit set, i.e. kernel mappings, leaving user entries.
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
	COP0_SYNC

	_MFC0	t0, MIPS_COP_0_TLB_HI		# save current ASID
	mfc0	t1, MIPS_COP_0_TLB_WIRED	# first non-wired slot
	li	v0, MIPS_KSEG0_START		# invalid address
	INT_L	t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask

	# do {} while (t1 < t2)
1:
	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set index
	COP0_SYNC
	sll	ta0, t1, PGSHIFT | 1		# PAGE_SHIFT | 1
	tlbr					# obtain an entry
	COP0_SYNC
	_MFC0	a0, MIPS_COP_0_TLB_LO1
	MFC0_HAZARD
	and	a0, MIPS3_PG_G			# check to see it has G bit
	beqz	a0, 2f				# no, skip this entry
	 nop
	PTR_ADDU ta0, v0			# unique invalid VPN2 per slot

	_MTC0	ta0, MIPS_COP_0_TLB_HI		# make entryHi invalid
	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out mask entry
	COP0_SYNC
	tlbwi					# invalidate the TLB entry
	COP0_SYNC
2:
	addu	t1, 1				# next TLB slot
	bne	t1, t2, 1b
	 nop

	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore current ASID
	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
	COP0_SYNC

#ifdef MIPS3_LOONGSON2
	li	v0, MIPS_DIAG_ITLB_CLEAR
	mtc0	v0, MIPS_COP_0_DIAG		# invalidate ITLB
#endif

	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
	JR_HB_RA
END(MIPSX(tlb_invalidate_globals))
2483#endif /* MULTIPROCESSOR */
2484
2485/*
2486 * void mipsN_tlb_invalidate_all(void);
2487 *
2488 * Invalidate all of non-wired TLB entries.
2489 */
LEAF_NOPROFILE(MIPSX(tlb_invalidate_all))
	# Clear every non-wired TLB slot, giving each a distinct invalid
	# KSEG0-based EntryHi so no duplicate VPN2s are written.
	mfc0	ta0, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
	COP0_SYNC

	INT_L	a0, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES

	mfc0	t0, MIPS_COP_0_TLB_WIRED	# first non-wired slot
	_MFC0	ta1, MIPS_COP_0_TLB_HI		# save current ASID
	mfc0	ta2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
	COP0_SYNC
	li	v0, MIPS_KSEG0_START		# invalid address
	sll	v1, t0, PGSHIFT | 1		# PAGE_SHIFT | 1
	PTR_ADDU v0, v1				# start at slot t0's invalid VA
#if (1 << (PGSHIFT|1)) >= 0x8000
	# constant too big for a 16-bit immediate; build it with a shift
	li	v1, 1
	sll	v1, PGSHIFT | 1			# PAGE_SHIFT | 1
#else
	li	v1, 1 << (PGSHIFT | 1)
#endif

	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out pageMask

	# do {} while (t0 < a0)
1:
	mtc0	t0, MIPS_COP_0_TLB_INDEX	# set TLBindex
	_MTC0	v0, MIPS_COP_0_TLB_HI		# make entryHi invalid
	COP0_SYNC
	tlbwi					# clear the entry
	COP0_SYNC
	addu	t0, 1				# increment index
	bne	t0, a0, 1b
	 PTR_ADDU v0, v1			# next invalid VA (delay slot)

	mtc0	ta2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
	_MTC0	ta1, MIPS_COP_0_TLB_HI		# restore ASID
	COP0_SYNC

#ifdef MIPS3_LOONGSON2
	li	v0, MIPS_DIAG_ITLB_CLEAR
	mtc0	v0, MIPS_COP_0_DIAG		# invalidate ITLB
#endif

	mtc0	ta0, MIPS_COP_0_STATUS		# restore status register
	JR_HB_RA
END(MIPSX(tlb_invalidate_all))
2538
2539/*
2540 * u_int mipsN_tlb_record_asids(u_long *bitmap, tlb_asid_t asid_max)
2541 *
2542 * Record all the ASIDs in use in the TLB and return the number of different
2543 * ASIDs present.
2544 */
LEAF_NOPROFILE(MIPSX(tlb_record_asids))
	# a0 = bitmap (array of longs), a1 = asid_max.  Scan the non-wired
	# TLB entries, set the bit for each non-global ASID <= asid_max, and
	# return in v0 the number of distinct ASIDs seen.  A one-long write
	# cache (ta2 = address, t2 = value) avoids re-reading the bitmap on
	# every hit.
	_MFC0	a3, MIPS_COP_0_TLB_HI		# Save the current ASID.
	mfc0	ta0, MIPS_COP_0_TLB_WIRED
	INT_L	ta1, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
	move	ta2, zero			# no bitmap cell cached yet
	li	ta3, 1
	move	v0, zero			# start at zero ASIDs

#ifdef MIPSNNR2
	di	v1				# disable interrupts
#else
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
#ifdef _LP64
	and	t0, v1, MIPS_SR_INT_IE
	xor	t0, v1				# t0 = status with IE cleared
	mtc0	t0, MIPS_COP_0_STATUS		# disable interrupts
#else
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
#endif
#endif
	COP0_SYNC

	# do {} while (ta0 < ta1)
1:
	mtc0	ta0, MIPS_COP_0_TLB_INDEX	# set index
	COP0_SYNC
	tlbr					# obtain an entry
	COP0_SYNC
	_MFC0	t0, MIPS_COP_0_TLB_LO1
	MFC0_HAZARD
	and	t0, MIPS3_PG_G			# check to see it has G bit
	bnez	t0, 4f				# yep, skip this one.
	 nop

	_MFC0	t0, MIPS_COP_0_TLB_HI		# get VA and ASID
	MFC0_HAZARD
	and	t0, t0, MIPS3_PG_ASID		# focus on ASID
	bgt	t0, a1, 4f			# > ASID max? skip
	 nop

	srl	a2, t0, 3 + LONG_SCALESHIFT	# drop low 5 or 6 bits
	sll	a2, LONG_SCALESHIFT		# make an index for the bitmap
	_SLLV	t0, ta3, t0			# t0 is mask (ta3 == 1)

	PTR_ADDU a2, a0				# index into the bitmap
	beq	a2, ta2, 3f			# is the desired cell loaded?
	 nop					#   yes, don't reload it
	beqz	ta2, 2f				# have we ever loaded it?
	 nop					#   nope, so don't save it

	LONG_S	t2, 0(ta2)			# save the updated value.
2:
	move	ta2, a2				# remember the new cell's addr
	LONG_L	t2, 0(ta2)			# and load it
3:
	and	t1, t2, t0			# see if this asid was recorded
	sltu	t1, t1, ta3			# t1 = t1 < 1 (aka t1 == 0)
	addu	v0, t1				# v0 += t1
	or	t2, t0				# or in the new ASID bits

4:
	addu	ta0, 1				# increment TLB entry #
	bne	ta0, ta1, 1b			# keep lookup if not limit
	 nop

	beqz	ta2, 5f				# do we have a cell to write?
	 nop					#   nope, nothing.

	LONG_S	t2, 0(ta2)			# save the updated value.
5:
	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
	_MTC0	a3, MIPS_COP_0_TLB_HI		# restore ASID
	COP0_SYNC

	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
	JR_HB_RA
END(MIPSX(tlb_record_asids))
2623
2624/*
2625 * mipsN_lwp_trampoline()
2626 *
2627 * Arrange for a function to be invoked neatly, after a cpu_switch().
2628 * Call the service function with one argument, specified by the s0
2629 * and s1 respectively.  There is no need register save operation.
2630 * XXX - Not profiled because we pass an arg in with v0 which isn't
2631 *       preserved by _mcount()
2632 */
LEAF_NOPROFILE(MIPSX(lwp_trampoline))
	# Entry point of a newly forked LWP: v0 = previous lwp (from
	# cpu_switchto), s0/s1 = function/argument set by cpu_lwp_fork.
	PTR_ADDU sp, -CALLFRAME_SIZ

	# Call lwp_startup(), with args from cpu_switchto()/cpu_lwp_fork()
	move	a0, v0
	jal	_C_LABEL(lwp_startup)
	 move	a1, MIPS_CURLWP

	# Call the routine specified by cpu_lwp_fork()
	jalr	s0
	 move	a0, s1

	#
	# Return to user (won't happen if a kernel thread)
	#
	# Make sure to disable interrupts here, as otherwise
	# we can take an interrupt *after* EXL is set, and
	# end up returning to a bogus PC since the PC is not
	# saved if EXL=1.
	#
	.set	noat
MIPSX(user_return):
	# Restore callee-saved registers from the trapframe above our
	# call frame, then fall into the common interrupt-return path.
	REG_L	s0, CALLFRAME_SIZ+TF_REG_S0(sp)		# $16
	REG_L	s1, CALLFRAME_SIZ+TF_REG_S1(sp)		# $17
	REG_L	s2, CALLFRAME_SIZ+TF_REG_S2(sp)		# $18
	REG_L	s3, CALLFRAME_SIZ+TF_REG_S3(sp)		# $19
	REG_L	s4, CALLFRAME_SIZ+TF_REG_S4(sp)		# $20
	REG_L	s5, CALLFRAME_SIZ+TF_REG_S5(sp)		# $21
	REG_L	s6, CALLFRAME_SIZ+TF_REG_S6(sp)		# $22
	REG_L	s7, CALLFRAME_SIZ+TF_REG_S7(sp)		# $23
	REG_L	s8, CALLFRAME_SIZ+TF_REG_S8(sp)		# $30
MIPSX(user_intr_return):
#ifdef PARANOIA
	# Spin forever if returning to user with a non-zero IPL.
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	INT_L	t1, CPU_INFO_CPL(t0)			# get curcpu()->ci_cpl
2:	bnez	t1, 2b
	 nop
#endif
	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
	COP0_SYNC
	SET_EXCEPTION_LEVEL(v0)			# set exception level
	COP0_SYNC
	REG_L	t0, CALLFRAME_SIZ+TF_REG_MULLO(sp)
	REG_L	t1, CALLFRAME_SIZ+TF_REG_MULHI(sp)
	REG_L	v0, CALLFRAME_SIZ+TF_REG_EPC(sp)
	mtlo	t0
	mthi	t1
	_MTC0	v0, MIPS_COP_0_EXC_PC		# return PC for eret
	COP0_SYNC
	move	k1, sp				# k1 survives the sp restore
	REG_L	AT, CALLFRAME_SIZ+TF_REG_AST(k1)	# $1
	REG_L	v0, CALLFRAME_SIZ+TF_REG_V0(k1)		# $2
	REG_L	v1, CALLFRAME_SIZ+TF_REG_V1(k1)		# $3
	REG_L	a0, CALLFRAME_SIZ+TF_REG_A0(k1)		# $4
	REG_L	a1, CALLFRAME_SIZ+TF_REG_A1(k1)		# $5
	REG_L	a2, CALLFRAME_SIZ+TF_REG_A2(k1)		# $6
	REG_L	a3, CALLFRAME_SIZ+TF_REG_A3(k1)		# $7
	REG_L	t0, CALLFRAME_SIZ+TF_REG_T0(k1)		# $12 /  $8
	REG_L	t1, CALLFRAME_SIZ+TF_REG_T1(k1)		# $13 /  $9
	REG_L	t2, CALLFRAME_SIZ+TF_REG_T2(k1)		# $14 / $10
	REG_L	t3, CALLFRAME_SIZ+TF_REG_T3(k1)		# $15 / $11
	REG_L	ta0, CALLFRAME_SIZ+TF_REG_TA0(k1)	#  $8 / $12
	REG_L	ta1, CALLFRAME_SIZ+TF_REG_TA1(k1)	#  $9 / $13
	REG_L	ta2, CALLFRAME_SIZ+TF_REG_TA2(k1)	# $10 / $14
	REG_L	ta3, CALLFRAME_SIZ+TF_REG_TA3(k1)	# $11 / $15
	REG_L	t8, CALLFRAME_SIZ+TF_REG_T8(k1)		# $24 MIPS_CURLWP
	REG_L	t9, CALLFRAME_SIZ+TF_REG_T9(k1)		# $25
	REG_L	k0, CALLFRAME_SIZ+TF_REG_SR(k1)		# status register
	DYNAMIC_STATUS_MASK(k0, sp)		# machine dependent masking
	REG_L	gp, CALLFRAME_SIZ+TF_REG_GP(k1)		# $28
	REG_L	sp, CALLFRAME_SIZ+TF_REG_SP(k1)		# $29
	REG_L	ra, CALLFRAME_SIZ+TF_REG_RA(k1)		# $31
	mtc0	k0, MIPS_COP_0_STATUS
	COP0_SYNC
	eret
	.set	at
END(MIPSX(lwp_trampoline))
2710
2711/*
2712 * void mipsN_cpu_switch_resume(struct lwp *newlwp)
2713 *
 * Wire down the USPACE of newlwp in TLB entry #0.  Before doing so,
 * check whether the target USPACE is already present elsewhere in the
 * TLB, and if it is, invalidate (TBIS) that entry.
2717 *
2718 * Disable the optimisation for PGSHIFT == 14 (aka ENABLE_MIPS_16KB_PAGE)
2719 * as the code needs fixing for this case
2720 *
2721 * A TLB entry isn't used for the following cases:
2722 *  - 16kB USPACE
2723 *  - LP64 - USPACE is always accessed directly via XKPHYS
2724 */
2725
LEAF_NOPROFILE(MIPSX(cpu_switch_resume))
	# a0 = newlwp.  See the block comment above for when the wired
	# USPACE TLB entry is (not) used.
#if !defined(_LP64)
#if (PAGE_SIZE < 16384)
#if (USPACE > PAGE_SIZE) || !defined(_LP64)
	INT_L	a1, L_MD_UPTE_0(a0)		# a1 = upte[0]
#if (PGSHIFT & 1)
#if (USPACE > PAGE_SIZE)
#error Unsupported
#else
	/* even/odd are contiguous */
	INT_ADD a2, a1, MIPS3_PG_NEXT		# a2 = upper half
#endif
#else
	INT_L	a2, L_MD_UPTE_1(a0)		# a2 = upte[1]
#endif /* (PGSHIFT & 1) */
	PTR_L	v0, L_PCB(a0)			# va = l->l_addr
	# If the USPACE is directly mapped (not in TLB-mapped kernel VA),
	# there is nothing to wire; skip to the common exit.
#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
	li	t0, VM_MIN_KERNEL_ADDRESS	# compute index
	blt	v0, t0, MIPSX(resume)
	 nop
#else
	li	t0, MIPS_KSEG0_START		# below KSEG0?
	blt	t0, v0, MIPSX(resume)
	 nop
	li	t0, VM_MIN_KERNEL_ADDRESS>>32	# below XKSEG?
	dsll32	t0, t0, 0
	blt	v0, t0, MIPSX(resume)
	 nop
#endif

#if (PGSHIFT & 1) == 0
	and	t0, v0, MIPS3_PG_ODDPG
	beqz	t0, MIPSX(entry0)
	 nop

	break

	PANIC("USPACE sat on odd page boundary")

MIPSX(entry0):
#endif /* (PGSHIFT & 1) == 0 */
	# Evict any stale mapping of this VA elsewhere in the TLB before
	# wiring it into the dedicated slot below.
	_MFC0	ta1, MIPS_COP_0_TLB_HI		# save TLB_HI
	_MTC0	v0, MIPS_COP_0_TLB_HI		# VPN = va

	COP0_SYNC
	tlbp					# probe VPN
	COP0_SYNC
	mfc0	t0, MIPS_COP_0_TLB_INDEX
	MFC0_HAZARD
	bltz	t0, MIPSX(entry0set)		# not found, nothing to evict
	 sll	t0, t0, PGSHIFT | 1		# PAGE_SHIFT | 1
	PTR_LA	t0, MIPS_KSEG0_START(t0)	# unique invalid VPN2
	_MTC0	t0, MIPS_COP_0_TLB_HI
	_MTC0	zero, MIPS_COP_0_TLB_LO0
	_MTC0	zero, MIPS_COP_0_TLB_LO1
	COP0_SYNC
	tlbwi					# invalidate the stale entry
	COP0_SYNC
	_MTC0	v0, MIPS_COP_0_TLB_HI		# set VPN again
	COP0_SYNC
MIPSX(entry0set):

#ifdef MULTIPROCESSOR
	PTR_L	t0, L_CPU(a0)			# get cpu_info
	INT_L	t1, CPU_INFO_KSP_TLB_SLOT(t0)	# get TLB# for KSP
	mtc0	t1, MIPS_COP_0_TLB_INDEX	# TLB entry (virtual)
#else
	mtc0	zero, MIPS_COP_0_TLB_INDEX	# TLB entry #0 (virtual)
#endif /* MULTIPROCESSOR */
	COP0_SYNC
	_MTC0	a1, MIPS_COP_0_TLB_LO0		# upte[0] | PG_G
	_MTC0	a2, MIPS_COP_0_TLB_LO1		# upte[1] | PG_G
	COP0_SYNC
	tlbwi					# set TLB entry #0
	COP0_SYNC
	_MTC0	ta1, MIPS_COP_0_TLB_HI		# restore TLB_HI
	COP0_SYNC
MIPSX(resume):
#endif /* (USPACE > PAGE_SIZE) || !defined(_LP64) */
#endif /* PAGE_SIZE < 16384 */
#endif /* ! LP64 */
#ifdef MIPSNNR2
	PTR_L	v0, L_PRIVATE(a0)		# get lwp private
	_MTC0	v0, MIPS_COP_0_USERLOCAL	# make available for rdhwr
#endif
	jr	ra
	 nop
END(MIPSX(cpu_switch_resume))
2814
2815/*--------------------------------------------------------------------------
2816 *
2817 * mipsN_tlb_write_entry --
2818 *
2819 *      Write the given entry into the TLB at the given index.
2820 *      Pass full R4000 style TLB info including variable page size mask.
2821 *
2822 *      void mipsN_tlb_write_entry(size_t tlb_index, const struct tlbmask *tlb)
2823 *
2824 * Results:
2825 *      None.
2826 *
2827 * Side effects:
2828 *      TLB entry set.
2829 *
2830 *--------------------------------------------------------------------------
2831 */
LEAF(MIPSX(tlb_write_entry))
	# a0 = tlb index, a1 = const struct tlbmask *.
	mfc0	ta0, MIPS_COP_0_STATUS		# Save the status register.
	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
	COP0_SYNC
	REG_L	a2, TLBMASK_LO0(a1)		# fetch tlb->tlb_lo0
	REG_L	a3, TLBMASK_LO1(a1)		# fetch tlb->tlb_lo1
	mfc0	ta2, MIPS_COP_0_TLB_PG_MASK	# Save current page mask.
	_MFC0	ta1, MIPS_COP_0_TLB_HI		# Save the current ASID.

	_MTC0	a2, MIPS_COP_0_TLB_LO0		# Set up entry low0.
	_MTC0	a3, MIPS_COP_0_TLB_LO1		# Set up entry low1.
	COP0_SYNC
	INT_L	a2, TLBMASK_MASK(a1)		# fetch tlb->tlb_mask
	INT_ADD	v0, a0, 1			# add 1 to it
	# NOTE(review): the two paths below test different values (v0 =
	# a0+1 in the R2 path, a2 itself in the other), and the "!= -1"
	# comment does not match the bnez (which tests != 0) -- verify
	# the intended sentinel against the upstream revision.
#ifdef MIPSNNR2
	movz	a2, ta2, v0			# a2 = ta2 if v0 is 0
#else
	bnez	a2, 1f				# branch if tlb_mask != -1
	 nop					# --delay-slot--
	move	a2, ta2				# use existing tlb_mask
1:
#endif
	PTR_L	a3, TLBMASK_HI(a1)		# fetch tlb->tlb_hi
	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index.
	mtc0	a2, MIPS_COP_0_TLB_PG_MASK	# Set up entry pagemask.
	_MTC0	a3, MIPS_COP_0_TLB_HI		# Set up entry high.
	COP0_SYNC
	tlbwi					# Write the TLB
	COP0_SYNC
#ifdef MIPS3
	nop
#endif

	_MTC0	ta1, MIPS_COP_0_TLB_HI		# Restore the ASID.
	mtc0	ta2, MIPS_COP_0_TLB_PG_MASK	# Restore page mask.
	COP0_SYNC

#ifdef MIPS3_LOONGSON2
	li	v0, MIPS_DIAG_ITLB_CLEAR
	mtc0	v0, MIPS_COP_0_DIAG		# invalidate ITLB
#endif

	mtc0	ta0, MIPS_COP_0_STATUS		# Restore the status register
	JR_HB_RA
END(MIPSX(tlb_write_entry))
2877
2878#if defined(MIPS3)
2879/*----------------------------------------------------------------------------
2880 *
2881 * mipsN_VCED --
2882 *
2883 *	Handle virtual coherency exceptions.
2884 *	Called directly from the mips3 exception-table code.
2885 *	only k0, k1 are available on entry
2886 *
2887 * Results:
2888 *	None.
2889 *
2890 * Side effects:
2891 *	Remaps the conflicting address as uncached and returns
2892 *	from the exception.
2893 *
2894 *	NB: cannot be profiled, all registers are user registers on entry.
2895 *
2896 *----------------------------------------------------------------------------
2897 */
LEAF_NOPROFILE(MIPSX(VCED))
	.set	noat
	# Virtual coherency (data) exception: write back and invalidate
	# the conflicting secondary and primary data cache lines, then
	# return.  Only k0/k1 may be used.
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# fault addr.
	li	k1, -16
	and	k0, k1				# round down to 16-byte line
	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
	cache	(CACHE_R4K_D | CACHEOP_R4K_HIT_INV), 0(k0)
#ifdef DEBUG
	# Record fault VA/EPC and bump a per-VA-region counter (indexed
	# by the top 4 bits of the fault address).
	_MFC0	k0, MIPS_COP_0_BAD_VADDR
	PTR_LA	k1, MIPSX(VCED_vaddr)
	PTR_S	k0, 0(k1)
	_MFC0	k0, MIPS_COP_0_EXC_PC
	PTR_LA	k1, MIPSX(VCED_epc)
	PTR_S	k0, 0(k1)
	PTR_LA	k1, MIPSX(VCED_count)	# count number of exceptions
	PTR_SRL	k0, k0, 26		# position upper 4 bits of VA
	and	k0, k0, 0x3c		# mask it off
	PTR_ADDU k1, k0			# get address of count table
	LONG_L	k0, 0(k1)
	LONG_ADDU k0, 1
	LONG_S	k0, 0(k1)
#endif
	eret
	.set	at

#ifdef DEBUG
	.data
	.globl	_C_LABEL(MIPSX(VCED_count))
_C_LABEL(MIPSX(VCED_count)):
	LONG_WORD	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	.globl	_C_LABEL(MIPSX(VCED_epc))
_C_LABEL(MIPSX(VCED_epc)):
	PTR_WORD	0
	.globl	_C_LABEL(MIPSX(VCED_vaddr))
_C_LABEL(MIPSX(VCED_vaddr)):
	PTR_WORD	0
	.text
#endif
END(MIPSX(VCED))
2937
LEAF_NOPROFILE(MIPSX(VCEI))
	.set	noat
	# Virtual coherency (instruction) exception: write back the
	# secondary line and invalidate the primary I-cache line.
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# fault addr.
	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
	cache	(CACHE_R4K_I | CACHEOP_R4K_HIT_INV), 0(k0)
#ifdef DEBUG
	# Record fault VA and bump a per-VA-region counter, as in VCED.
	_MFC0	k0, MIPS_COP_0_BAD_VADDR
	PTR_LA	k1, MIPSX(VCEI_vaddr)
	PTR_S	k0, 0(k1)
	PTR_LA	k1, MIPSX(VCEI_count)	# count number of exceptions
	PTR_SRL	k0, k0, 26		# position upper 4 bits of VA
	and	k0, k0, 0x3c		# mask it off
	PTR_ADDU k1, k0			# get address of count table
	LONG_L	k0, 0(k1)
	PTR_ADDU k0, 1			# NOTE(review): VCED uses LONG_ADDU
					# for this counter; confirm intended
	LONG_S	k0, 0(k1)
#endif
	eret
	.set	at

#ifdef DEBUG
	.data
	.globl	_C_LABEL(MIPSX(VCEI_count))
_C_LABEL(MIPSX(VCEI_count)):
	LONG_WORD	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	.globl	_C_LABEL(MIPSX(VCEI_vaddr))
_C_LABEL(MIPSX(VCEI_vaddr)):
	PTR_WORD	0
	.text
#endif
END(MIPSX(VCEI))
2969#endif /* MIPS3 */
2970
2971#ifdef USE_64BIT_INSTRUCTIONS
LEAF(MIPSX(pagezero))
	# Zero one page at a0 using 64-bit stores, 64 bytes per iteration.
	li	a1, PAGE_SIZE >> 6		# a1 = iterations remaining

1:	sd	zero, 0(a0)			# try to miss cache first
	sd	zero, 32(a0)
	subu	a1, 1
	sd	zero, 16(a0)
	sd	zero, 48(a0)
	sd	zero, 8(a0)			# fill in cache lines
	sd	zero, 40(a0)
	sd	zero, 24(a0)
	sd	zero, 56(a0)
	bgtz	a1, 1b
	addu	a0, 64				# advance pointer (delay slot)

	jr	ra
	nop
END(MIPSX(pagezero))
2990#endif /* USE_64BIT_INSTRUCTIONS */
2991
2992	.rdata
2993
	/*
	 * Table of this CPU variant's locore entry points (context-switch
	 * resume, LWP trampoline, wbflush, and the TLB primitives).
	 * Presumably copied into the generic locore jump vector at boot
	 * so MI code can call the right variant -- TODO confirm against
	 * <mips/locore.h>.
	 */
	.globl _C_LABEL(MIPSX(locore_vec))
_C_LABEL(MIPSX(locore_vec)):
	PTR_WORD _C_LABEL(MIPSX(cpu_switch_resume))
	PTR_WORD _C_LABEL(MIPSX(lwp_trampoline))
	PTR_WORD _C_LABEL(MIPSX(wbflush)) # wbflush
	PTR_WORD _C_LABEL(MIPSX(tlb_get_asid))
	PTR_WORD _C_LABEL(MIPSX(tlb_set_asid))
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_asids))
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_addr))
#ifdef MULTIPROCESSOR
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_globals))
#else
	PTR_WORD _C_LABEL(nullop)	# no global-TLB shootdown slot on UP
#endif
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_all))
	PTR_WORD _C_LABEL(MIPSX(tlb_record_asids))
	PTR_WORD _C_LABEL(MIPSX(tlb_update_addr))
	PTR_WORD _C_LABEL(MIPSX(tlb_read_entry))
	PTR_WORD _C_LABEL(MIPSX(tlb_write_entry))
3013
	/*
	 * Default locore switch table for this CPU variant; every slot
	 * except lsw_wbflush starts as nullop, to be overridden later
	 * (e.g. by platform/MP code) -- TODO confirm which code patches it.
	 */
	.globl _C_LABEL(MIPSX(locoresw))
_C_LABEL(MIPSX(locoresw)):
	PTR_WORD _C_LABEL(MIPSX(wbflush))	# lsw_wbflush
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_idle
	PTR_WORD _C_LABEL(nullop)		# lsw_send_ipi
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_offline_md
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_init
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_run
	PTR_WORD _C_LABEL(nullop)		# lsw_bus_error
3023
MIPSX(excpt_sw):
	####
	#### The kernel exception handlers, one entry per exception
	#### cause code (0..31).
	####
	PTR_WORD _C_LABEL(MIPSX(kern_intr))		#  0 external interrupt
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  1 TLB modification
#if (PGSHIFT & 1) == 0
	PTR_WORD _C_LABEL(MIPSX(kern_tlb_invalid_exception))	#  2 TLB miss (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_tlb_invalid_exception))	#  3 TLB miss (SW)
#else
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  2 TLB miss (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  3 TLB miss (SW)
#endif
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  4 address error (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  5 address error (SW)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  6 bus error (I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  7 bus error (load or store)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  8 system call
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	#  9 breakpoint
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 10 reserved instruction
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 11 coprocessor unusable
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 12 arithmetic overflow
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 13 r4k trap exception
#if defined(MIPS3)
	PTR_WORD _C_LABEL(MIPSX(VCEI))			# 14 r4k virt coherence
#else
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 14 reserved
#endif
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 15 r4k FP exception
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 16 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 17 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 18 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 19 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 20 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 21 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 22 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 23 watch exception
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 24 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 25 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 26 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 27 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 28 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 29 reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 30 reserved
#if defined(MIPS3)
	PTR_WORD _C_LABEL(MIPSX(VCED))			# 31 v. coherence exception data
#else
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 31 reserved
#endif
	#####
	##### The user exception handlers, same cause-code indexing
	##### as the kernel half above.
	#####
	PTR_WORD _C_LABEL(MIPSX(user_intr))		#  0 external interrupt
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  1 TLB modification
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  2 TLB miss (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  3 TLB miss (SW)
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  4 address error (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  5 address error (SW)
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  6 bus error (I-fetch)
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  7 bus error (load or store)
	PTR_WORD _C_LABEL(MIPSX(systemcall))		#  8 system call
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	#  9 breakpoint
	PTR_WORD _C_LABEL(MIPSX(user_reserved_insn))	# 10 reserved instruction
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 11 coprocessor unusable
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 12 arithmetic overflow
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 13 r4k trap exception
#if defined(MIPS3)
	PTR_WORD _C_LABEL(MIPSX(VCEI))			# 14 r4k virt coherence
#else
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 14 reserved
#endif
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 15 r4k FP exception
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 16 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 17 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 18 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 19 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 20 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 21 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 22 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 23 watch exception
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 24 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 25 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 26 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 27 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 28 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 29 reserved
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 30 reserved
#if defined(MIPS3)
	PTR_WORD _C_LABEL(MIPSX(VCED))			# 31 v. coherence exception data
#else
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 31 reserved
#endif
#ifdef MIPS3_LOONGSON2
loongson2_xtlb_miss_str:
	# Diagnostic message text; presumably referenced by the
	# Loongson-2 XTLB-miss path elsewhere in this file -- TODO confirm.
	.string "loongson2_xtlb_miss"
#endif
3120