/*	$NetBSD: locore.S,v 1.231 2023/06/24 05:31:04 msaitoh Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Digital Equipment Corporation and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
 *
 *	@(#)locore.s	8.5 (Berkeley) 1/4/94
 */

#include "opt_cputype.h"	/* which mips CPU levels do we support? */
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
#include <sys/errno.h>

#include <mips/asm.h>
#include <mips/cpuregs.h>
#include <mips/trap.h>
#include <mips/locore.h>

RCSID("$NetBSD: locore.S,v 1.231 2023/06/24 05:31:04 msaitoh Exp $")

#include "assym.h"

	.set	noreorder

EXPORT(start)
EXPORT_OBJECT(kernel_text)			# libkvm refers to this
	/* First disable the interrupts only, for safety */
	mfc0	k0, MIPS_COP_0_STATUS
	MFC0_HAZARD

	and	k0, ~MIPS_SR_INT_IE
	mtc0	k0, MIPS_COP_0_STATUS
	COP0_SYNC

	/* Known state: BEV, coprocessors disabled. */
	/* Leaving TS | RE alone (for emips) */
	and	k0, MIPS_SR_TS | MIPS3_SR_RE
	mtc0	k0, MIPS_COP_0_STATUS
	mtc0	zero, MIPS_COP_0_CAUSE
	COP0_SYNC

#if defined(_LP64)
	/* Enable 64-bit addressing */
	mfc0	k0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	or	k0, MIPS_SR_KX | MIPS_SR_UX
	mtc0	k0, MIPS_COP_0_STATUS
#elif defined(__mips_n32)
	mfc0	k0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	or	k0, MIPS_SR_KX
	mtc0	k0, MIPS_COP_0_STATUS
#endif

#ifdef HPCMIPS_L1CACHE_DISABLE
	mfc0	t0, MIPS_COP_0_CONFIG
	li	t1, 0xfffffff8
	and	t0, t0, t1
	or	t0, 0x00000002			# XXX, KSEG0 is uncached
	mtc0	t0, MIPS_COP_0_CONFIG
	COP0_SYNC
#endif /* HPCMIPS_L1CACHE_DISABLE */

#ifdef MIPS64_OCTEON
	//
	// U-Boot on the ERLite starts all CPUs at the kernel entry point.
	// Use EBASE to find our CPU number; if it's not 0, call
	// octeon_cpu_spinup if MP, or loop using the wait instruction,
	// since non-primary CPUs can't do anything useful.
	//
	mfc0	a0, MIPS_COP_0_EBASE		# EBASE
	COP0_SYNC
	andi	a1, a0, MIPS_EBASE_CPUNUM	# fetch cpunum
	beqz	a1, 2f				# normal startup if 0
	 nop
#ifdef MULTIPROCESSOR
	j	_C_LABEL(octeon_cpu_spinup)
	 nop
#else
1:	wait
	b	1b
	 nop
#endif /* MULTIPROCESSOR */
2:
#endif /* MIPS64_OCTEON */
/*
 * Initialize stack and call machine startup.
 */
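/*
 * A sketch of the test below: if the boot loader handed us a stack
 * pointer above 'start', or within 4KB below it, it is not trusted
 * and sp is repointed at a call frame just below the kernel entry;
 * a stack more than 4KB below 'start' is kept as-is.
 */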
	PTR_LA	v1, start
	slt	v0, v1, sp
	bne	v0, zero, 1f
	PTR_ADDU v0, v1, -CALLFRAME_SIZ
	PTR_SUBU v0, v1, sp
	slt	v0, v0, 4096			# within 4KB of _start
	beq	v0, zero, 2f
	PTR_ADDU v0, v1, -CALLFRAME_SIZ
1:
	move	sp, v0
2:
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)
#endif

	mfc0	t0, MIPS_COP_0_PRID		# read product revision ID
	COP0_SYNC
	nop

#ifdef NOFPU
	li	t1, 0				# Dummy FPU_ID
#else
	/* Enable the FPU to read FPUID */
	mfc0	k0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	or	k0, MIPS_SR_COP_1_BIT
	mtc0	k0, MIPS_COP_0_STATUS
	COP0_HAZARD_FPUENABLE

	cfc1	t1, MIPS_FIR

	/* Disable again, we don't want it on in the kernel */
	and	k0, ~MIPS_SR_COP_1_BIT
	mtc0	k0, MIPS_COP_0_STATUS
#endif

	INT_S	t0, _C_LABEL(mips_options)+MO_CPU_ID # save PRID register
	INT_S	t1, _C_LABEL(mips_options)+MO_FPU_ID # save FPU ID register
	PTR_LA	MIPS_CURLWP, _C_LABEL(lwp0)	# set curlwp
	jal	_C_LABEL(mach_init)		# mach_init(a0, a1, a2, a3)
	 nop

	# XXXuvm_lwp_getuarea
	PTR_L	sp, L_PCB(MIPS_CURLWP)		# switch to lwp0 stack
	NOP_L
	PTR_ADDU sp, USPACE - TF_SIZ - CALLFRAME_SIZ
	/*
	 * Raise to IPLHIGH
	 */
	jal	_C_LABEL(splhigh_noprof)	# go to splhigh
	 nop
	/*
	 * Now enable interrupts (but they are all masked).
	 */
#if __mips_isa_rev >= 2
	ei
#else
	mfc0	v0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	or	v0, MIPS_SR_INT_IE
	mtc0	v0, MIPS_COP_0_STATUS
#endif
	COP0_SYNC

	jal	_C_LABEL(main)			# main(void)
	 nop

	PANIC("main() returned")		# main never returns
	.set	at
	.globl _C_LABEL(verylocore)
_C_LABEL(verylocore):

/*
 * struct lwp *cpu_switchto(struct lwp *cur, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *	a0	the current LWP
 *	a1	the LWP to switch to
 * Returns:
 *	v0	the LWP we have switched from
 *
 * called at IPL_SCHED
 */
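/*
 * In C terms the contract is roughly (an illustrative sketch; the real
 * caller is mi_switch()):
 *
 *	lwp_t *prevlwp = cpu_switchto(curlwp, newlwp);
 */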
NESTED(cpu_switchto, CALLFRAME_SIZ, ra)
#if defined(PARANOIA)
	/*
	 * Make sure we are at IPL_SCHED
	 */
	PTR_L	v0, L_CPU(MIPS_CURLWP)
	INT_L	v1, CPU_INFO_CPL(v0)
#if __mips >= 32
	tnei	v1, IPL_SCHED
#else
	li	v0, IPL_SCHED
10:	bne	v0, v1, 10b
	 nop
#endif

	mfc0	t0, MIPS_COP_0_STATUS
	MFC0_HAZARD
	and	t0, MIPS_SR_INT_IE
#if __mips >= 32
	teqi	t0, 0
#else
11:	beqz	t0, 11b
	 nop
#endif
#endif /* PARANOIA */
	/*
	 * Save old context
	 */
	PTR_L	a2, L_PCB(a0)			# a2 = pcb of old lwp
	mfc0	t0, MIPS_COP_0_STATUS
	REG_PROLOGUE
	REG_S	s0, PCB_CONTEXT+SF_REG_S0(a2)
	REG_S	s1, PCB_CONTEXT+SF_REG_S1(a2)
	REG_S	s2, PCB_CONTEXT+SF_REG_S2(a2)
	REG_S	s3, PCB_CONTEXT+SF_REG_S3(a2)
	REG_S	s4, PCB_CONTEXT+SF_REG_S4(a2)
	REG_S	s5, PCB_CONTEXT+SF_REG_S5(a2)
	REG_S	s6, PCB_CONTEXT+SF_REG_S6(a2)
	REG_S	s7, PCB_CONTEXT+SF_REG_S7(a2)
	#REG_S	t8, PCB_CONTEXT+SF_REG_T8(a2)	# no reason to save MIPS_CURLWP
	REG_S	sp, PCB_CONTEXT+SF_REG_SP(a2)
	REG_S	s8, PCB_CONTEXT+SF_REG_S8(a2)
	REG_S	ra, PCB_CONTEXT+SF_REG_RA(a2)
	REG_S	t0, PCB_CONTEXT+SF_REG_SR(a2)
#if defined(__mips_n32) || defined(__mips_n64)
	REG_S	gp, PCB_CONTEXT+SF_REG_GP(a2)
#endif
	REG_EPILOGUE

#if defined(PARANOID_SPL)
	/*
	 * Verify interrupt configuration matches IPL_SCHED
	 */
	jal	_C_LABEL(splcheck)
	 nop
#endif /* PARANOID_SPL */

	move	s6, a0				# s6 = old lwp
	move	MIPS_CURLWP, a1			# t8 = new lwp
	PTR_SUBU sp, CALLFRAME_SIZ
	REG_S	ra, CALLFRAME_RA(sp)
	.mask	0x80000000, -4
/*
 * Switch to new context.
 */
	jal	_C_LABEL(mips_cpu_switch_resume)
	 move	a0, MIPS_CURLWP

	PTR_L	t2, L_CPU(MIPS_CURLWP)
	nop					# patchable load delay slot

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
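	/*
	 * In pseudo-C, the ordering enforced below is (a sketch):
	 *
	 *	// ... oldlwp's earlier mutex_exit stores ...
	 *	SYNC_PRODUCER;		// store-before-store
	 *	ci->ci_curlwp = newlwp;
	 *	SYNC_DEKKER;		// store-before-load
	 *	// ... newlwp's later mutex_exit loads ...
	 */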
	SYNC_PRODUCER		/* XXX fixup to nop for uniprocessor boot */
	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(t2)
	SYNC_DEKKER		/* XXX fixup to nop for uniprocessor boot */

	/* Check for restartable atomic sequences (RAS) */
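	/*
	 * In C terms the fixup below is (a sketch; ras_lookup() returns
	 * -1 when the PC is in no registered sequence):
	 *
	 *	pc = ras_lookup(l->l_proc, trapframe_pc);
	 *	if (pc != (void *)-1)
	 *		trapframe_pc = pc;
	 */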
	PTR_L	a0, L_PROC(MIPS_CURLWP)		# argument to ras_lookup
	PTR_L	s5, L_PCB(MIPS_CURLWP)		# XXXuvm_lwp_getuarea
	PTR_L	v1, P_RASLIST(a0)		# get raslist
	NOP_L					# load delay
	beqz	v1, 1f				#   skip call if empty
	 nop
	jal	_C_LABEL(ras_lookup)		# ras_lookup(p, pc)
	 PTR_L	a1, (USPACE - TF_SIZ - CALLFRAME_SIZ + TF_REG_EPC)(s5)
	PTR_ADDU v1, v0, 1
	beqz	v1, 1f				# branch if v0 + 1 == 0
	 nop
	PTR_S	v0, (USPACE - TF_SIZ - CALLFRAME_SIZ + TF_REG_EPC)(s5)
1:
	/* New context is now active */
	move	v0, s6				# Save return value (old lwp)
	REG_PROLOGUE
	REG_L	t0, PCB_CONTEXT+SF_REG_SR(s5)
#if defined(PARANOIA) && __mips >= 32
	and	t1, t0, MIPS_SR_INT_IE
	teqi	t1, 0
#elif defined(PARANOID_LOOP)
	and	t1, t0, MIPS_SR_INT_IE
2:	beqz	t1, 2b				# status reg should not differ
	 nop
#endif /* PARANOID_LOOP */
	DYNAMIC_STATUS_MASK(t0,ra)
	move	a0, s5
	REG_L	ra, PCB_CONTEXT+SF_REG_RA(a0)
	REG_L	s0, PCB_CONTEXT+SF_REG_S0(a0)
	REG_L	s1, PCB_CONTEXT+SF_REG_S1(a0)
	REG_L	s2, PCB_CONTEXT+SF_REG_S2(a0)
	REG_L	s3, PCB_CONTEXT+SF_REG_S3(a0)
	REG_L	s4, PCB_CONTEXT+SF_REG_S4(a0)
	REG_L	s5, PCB_CONTEXT+SF_REG_S5(a0)
	REG_L	s6, PCB_CONTEXT+SF_REG_S6(a0)
	REG_L	s7, PCB_CONTEXT+SF_REG_S7(a0)
	#REG_L	t8, PCB_CONTEXT+SF_REG_T8(a0)	# no reason to load MIPS_CURLWP
#if defined(__mips_n32) || defined(__mips_n64)
	REG_L	gp, PCB_CONTEXT+SF_REG_GP(a0)
#endif
	REG_L	sp, PCB_CONTEXT+SF_REG_SP(a0)
	REG_L	s8, PCB_CONTEXT+SF_REG_S8(a0)
	REG_EPILOGUE
	mtc0	t0, MIPS_COP_0_STATUS
#if defined(PARANOID_SPL)
	COP0_SYNC
	/*
	 * Verify interrupt configuration still matches IPL_SCHED
	 */
	j	_C_LABEL(splcheck)
	 nop
#else
	JR_HB_RA
#endif /* PARANOID_SPL */
END(cpu_switchto)

#ifdef __HAVE_FAST_SOFTINTS
/*
 * void softint_fast_dispatch(struct lwp *l, int s);
 *
 * called at IPL_HIGH
 *
 * Arguments:
 *	a0	the LWP to switch to
 *	a1	IPL to execute at
 */
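/*
 * Flow sketch: the MD interrupt code, already at IPL_HIGH, calls
 *
 *	softint_fast_dispatch(softint_lwp, s);
 *
 * which lends the CPU to the softint lwp, runs softint_dispatch()
 * with the pinned (interrupted) lwp as argument on the softint
 * thread's stack, and switches straight back -- unless the softint
 * blocked, in which case softint_cleanup below finishes up instead.
 */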
NESTED(softint_fast_dispatch, CALLFRAME_SIZ, ra)
	PTR_SUBU sp, CALLFRAME_SIZ
	REG_S	a0, CALLFRAME_S0(sp)		# save softint lwp
	REG_S	ra, CALLFRAME_RA(sp)		# save return address
	.mask	0x80000000, -4
	PTR_L	t0, L_PCB(MIPS_CURLWP)		# t0 = curlwp->l_addr

	/*
	 * Save our state in case softint_dispatch blocks and we get
	 * switched back to later.
	 */
	mfc0	t1, MIPS_COP_0_STATUS
#if defined(PARANOIA) && __mips >= 32
	MFC0_HAZARD
	and	v0, t1, MIPS_SR_INT_IE		# assert interrupts are on
	teqi	v0, 0
#elif defined(PARANOID_LOOP)
	MFC0_HAZARD
	and	v0, t1, MIPS_SR_INT_IE		# assert interrupts are on
1:	beqz	v0, 1b
	 nop
#endif /* PARANOID_LOOP */
	PTR_LA	t2, softint_cleanup		# if softint blocks, return here
	REG_PROLOGUE
	REG_S	s0, PCB_CONTEXT+SF_REG_S0(t0)
	REG_S	s1, PCB_CONTEXT+SF_REG_S1(t0)
	REG_S	s2, PCB_CONTEXT+SF_REG_S2(t0)
	REG_S	s3, PCB_CONTEXT+SF_REG_S3(t0)
	REG_S	s4, PCB_CONTEXT+SF_REG_S4(t0)
	REG_S	s5, PCB_CONTEXT+SF_REG_S5(t0)
	REG_S	s6, PCB_CONTEXT+SF_REG_S6(t0)
	REG_S	s7, PCB_CONTEXT+SF_REG_S7(t0)
	#REG_S	t8, PCB_CONTEXT+SF_REG_T8(t0)	# no reason to save MIPS_CURLWP
	REG_S	sp, PCB_CONTEXT+SF_REG_SP(t0)
	REG_S	s8, PCB_CONTEXT+SF_REG_S8(t0)
	REG_S	t2, PCB_CONTEXT+SF_REG_RA(t0)
	REG_S	t1, PCB_CONTEXT+SF_REG_SR(t0)
#if defined(__mips_n32) || defined(__mips_n64)
	REG_S	gp, PCB_CONTEXT+SF_REG_GP(t0)
#endif
	REG_EPILOGUE

	/*
	 * Switch to a fast softint thread.  We don't care about its existing
	 * state and we use a private KSEG0/XKPHYS mapped stack so don't have
	 * to do TLB manipulation.
	 */
	move	s0, MIPS_CURLWP				# remember current lwp
	move	MIPS_CURLWP, a0				# switch to softint lwp
	PTR_L	s1, L_CPU(MIPS_CURLWP)			# get curcpu()
	nop					# patchable load delay slot
	SYNC_PRODUCER /* XXX fixup */	/* for mutex_enter; see cpu_switchto */
	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(s1)	#    ...
	/*
	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */
	move	s2, sp					# remember sp
	move	s3, t0					# remember curpcb

	PTR_L	t2, L_PCB(MIPS_CURLWP)
	move	a0, s0					# wants the pinned lwp
	jal	_C_LABEL(softint_dispatch)
	 PTR_ADDU sp, t2, USPACE - TF_SIZ - CALLFRAME_SIZ

	move	sp, s2					# restore stack
	move	MIPS_CURLWP, s0				# restore curlwp
	SYNC_PRODUCER /* XXX fixup */	/* for mutex_enter; see cpu_switchto */
	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(s1)	#    ....
	SYNC_DEKKER /* XXX fixup */	/* for mutex_enter; see cpu_switchto */

	REG_L	ra, CALLFRAME_RA(sp)		# load early since we use it

	REG_PROLOGUE
	REG_L	s0, PCB_CONTEXT+SF_REG_S0(s3)		# restore the saved
	REG_L	s1, PCB_CONTEXT+SF_REG_S1(s3)		#    registers that we
	REG_L	s2, PCB_CONTEXT+SF_REG_S2(s3)		#    used
	REG_L	s3, PCB_CONTEXT+SF_REG_S3(s3)
	REG_EPILOGUE

	/*
	 * Almost everything (all except sp) is restored so we can return.
	 */
	jr	ra
	 PTR_ADDU sp, CALLFRAME_SIZ

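/*
 * Resume point if the softint lwp blocked: the context saved above has
 * its RA pointing here, so the eventual cpu_switchto back to the softint
 * lwp returns to this code.  Credit ci_mtx_count back (its decrement is
 * matched elsewhere in the switch path) and return to the interrupted
 * lwp's context.
 */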
softint_cleanup:
#ifdef PARANOIA
	mfc0	t1, MIPS_COP_0_STATUS
	MFC0_HAZARD
	and	v0, t1, MIPS_SR_INT_IE
#if __mips >= 32
	teqi	v0, 0
#else
1:	beqz	v0, 1b
	 nop
#endif
#endif /* PARANOIA */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	NOP_L					# load delay
	INT_L	t1, CPU_INFO_MTX_COUNT(t0)
	NOP_L					# load delay
	INT_ADDU t1, 1
	INT_S	t1, CPU_INFO_MTX_COUNT(t0)
	REG_L	ra, CALLFRAME_RA(sp)
	REG_L	v0, CALLFRAME_S0(sp)		# get softint lwp
	NOP_L					# load delay
#if IPL_SCHED != IPL_HIGH
	j	_C_LABEL(splhigh_noprof)
#else
	jr	ra
#endif
	 PTR_ADDU sp, CALLFRAME_SIZ
END(softint_fast_dispatch)
#endif /* __HAVE_FAST_SOFTINTS */

/*
 * int lwp_oncpu(lwp_t *l, struct cpu_info **cip)
 *
 *	If l is some CPU's curlwp, store that CPU's cpu_info through
 *	cip and return 0; otherwise return ESRCH (or EFAULT if the
 *	access faults).
 */
LEAF(lwp_oncpu)
	PTR_L	t0, L_PCB(MIPS_CURLWP)		# get curpcb
	li	v0, EFAULT			# assume failure
	PTR_LA	t1, 1f				# load addr of cleanup
	PTR_S	t1, PCB_ONFAULT(t0)		# save onfault handler
	PTR_L	t2, L_CPU(a0)			# grab cpu of supplied lwp
	NOP_L					# load delay
	PTR_L	t3, CPU_INFO_CURLWP(t2)		# grab curlwp of that cpu
	li	v0, ESRCH			# assume the lwp isn't curlwp
	bne	a0, t3, 1f			# branch if true (not equal)
	 nop
	PTR_S	t2, 0(a1)			# return the cpu_info
	li	v0, 0				# load success
1:
	PTR_S	zero, PCB_ONFAULT(t0)		# reset fault handler
	jr	ra				# and return.
	 nop
END(lwp_oncpu)


/*
 * void savectx(struct pcb *)
 */
LEAF(savectx)
	mfc0	v0, MIPS_COP_0_STATUS
#ifdef PARANOIA
	MFC0_HAZARD
	and	t0, v0, MIPS_SR_INT_IE
#if __mips >= 32
	teqi	t0, 0
#else
1:	beqz	t0, 1b
	 nop
#endif
#endif /* PARANOIA */
	REG_PROLOGUE
	REG_S	s0, PCB_CONTEXT+SF_REG_S0(a0)
	REG_S	s1, PCB_CONTEXT+SF_REG_S1(a0)
	REG_S	s2, PCB_CONTEXT+SF_REG_S2(a0)
	REG_S	s3, PCB_CONTEXT+SF_REG_S3(a0)
	REG_S	s4, PCB_CONTEXT+SF_REG_S4(a0)
	REG_S	s5, PCB_CONTEXT+SF_REG_S5(a0)
	REG_S	s6, PCB_CONTEXT+SF_REG_S6(a0)
	REG_S	s7, PCB_CONTEXT+SF_REG_S7(a0)
	REG_S	t8, PCB_CONTEXT+SF_REG_T8(a0)	# MIPS_CURLWP
#if defined(__mips_n32) || defined(__mips_n64)
	REG_S	gp, PCB_CONTEXT+SF_REG_GP(a0)
#endif
	REG_S	sp, PCB_CONTEXT+SF_REG_SP(a0)
	REG_S	s8, PCB_CONTEXT+SF_REG_S8(a0)
	REG_S	ra, PCB_CONTEXT+SF_REG_RA(a0)
	REG_S	v0, PCB_CONTEXT+SF_REG_SR(a0)
	REG_EPILOGUE
	jr	ra
	move	v0, zero
END(savectx)

#if defined(DDB) || defined(KGDB)
/*
 * setjmp(label_t *)
 * longjmp(label_t *)
 */
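/*
 * Typical use, as in ddb's fault recovery (a sketch):
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		... faultable code; the trap handler does longjmp(&jb) ...
 *	} else {
 *		... recovery: longjmp made setjmp() return 1 ...
 *	}
 */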
LEAF(setjmp)
	mfc0	v0, MIPS_COP_0_STATUS
	REG_PROLOGUE
	REG_S	s0, SF_REG_S0(a0)
	REG_S	s1, SF_REG_S1(a0)
	REG_S	s2, SF_REG_S2(a0)
	REG_S	s3, SF_REG_S3(a0)
	REG_S	s4, SF_REG_S4(a0)
	REG_S	s5, SF_REG_S5(a0)
	REG_S	s6, SF_REG_S6(a0)
	REG_S	s7, SF_REG_S7(a0)
	#REG_S	t8, SF_REG_T8(a0)		# no reason to save MIPS_CURLWP
#if defined(__mips_n32) || defined(__mips_n64)
	REG_S	gp, SF_REG_GP(a0)
#endif
	REG_S	sp, SF_REG_SP(a0)
	REG_S	s8, SF_REG_S8(a0)
	REG_S	ra, SF_REG_RA(a0)
	REG_S	v0, SF_REG_SR(a0)
	REG_EPILOGUE
	jr	ra
	move	v0, zero
END(setjmp)

LEAF(longjmp)
	REG_PROLOGUE
	REG_L	v0, SF_REG_SR(a0)
	DYNAMIC_STATUS_MASK(v0,ra)		# machine dependent masking
	REG_L	ra, SF_REG_RA(a0)
	REG_L	s0, SF_REG_S0(a0)
	REG_L	s1, SF_REG_S1(a0)
	REG_L	s2, SF_REG_S2(a0)
	REG_L	s3, SF_REG_S3(a0)
	REG_L	s4, SF_REG_S4(a0)
	REG_L	s5, SF_REG_S5(a0)
	REG_L	s6, SF_REG_S6(a0)
	REG_L	s7, SF_REG_S7(a0)
	#REG_L	t8, SF_REG_T8(a0)		# no reason to load MIPS_CURLWP
#if defined(__mips_n32) || defined(__mips_n64)
	REG_L	gp, SF_REG_GP(a0)
#endif
	REG_L	sp, SF_REG_SP(a0)
	REG_L	s8, SF_REG_S8(a0)
	REG_EPILOGUE
	mtc0	v0, MIPS_COP_0_STATUS
	COP0_SYNC
	jr	ra
	 li	v0, 1
END(longjmp)
#endif

/*
 * uint32_t mips_cp0_cause_read(void)
 *
 *	Return the current value of the CP0 Cause register.
 *
 *	Note: Not profiled, skews CPU-clock measurement (mips_mcclock.c)
 *	to uselessness.
 */
LEAF_NOPROFILE(mips_cp0_cause_read)
	mfc0	v0, MIPS_COP_0_CAUSE
	jr	ra
	 nop
END(mips_cp0_cause_read)

/*
 * void mips_cp0_cause_write(uint32_t)
 *
 *	Set the value of the CP0 Cause register.
 */
LEAF(mips_cp0_cause_write)
	mtc0	a0, MIPS_COP_0_CAUSE
	JR_HB_RA
END(mips_cp0_cause_write)


/*
 * uint32_t mips_cp0_status_read(void)
 *
 *	Return the current value of the CP0 Status register.
 */
LEAF(mips_cp0_status_read)
	mfc0	v0, MIPS_COP_0_STATUS
	jr	ra
	 nop
END(mips_cp0_status_read)

/*
 * void mips_cp0_status_write(uint32_t)
 *
 *	Set the value of the CP0 Status register.
 *
 *	Note: This is almost certainly not the way you want to write a
 *	"permanent" value to the CP0 Status register, since it gets
 *	saved and restored in trap frames.
 */
LEAF(mips_cp0_status_write)
	mtc0	a0, MIPS_COP_0_STATUS
	JR_HB_RA
END(mips_cp0_status_write)

#if !defined(NOFPU) || defined(FPEMUL)
/*----------------------------------------------------------------------------
 *
 * mips_fpu_intr --
 * mips_fpu_trap --
 *
 *	Handle a floating point interrupt (r3k) or trap (r4k).
 *	The handlers are identical; only the reporting mechanisms differ.
 *
 *	mips_fpu_intr(vaddr_t pc, struct trapframe *tf)
 *
 *	mips_fpu_trap(vaddr_t pc, struct trapframe *tf)
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
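/*
 * Dispatch sketch: if the FCSR "unimplemented operation" cause bit is
 * set, the faulting instruction is fetched; a COP1 opcode goes to
 * mips_emul_fp(), anything else raises SIGILL via mips_fpuillinst().
 * Any other FP cause (the VZOUI bits) raises SIGFPE via
 * mips_fpuexcept().
 */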
NESTED(mips_fpu_intr, CALLFRAME_SIZ, ra)
XNESTED(mips_fpu_trap)
	.mask	0x80000000, -4
	PTR_SUBU	sp, CALLFRAME_SIZ
	mfc0		t0, MIPS_COP_0_STATUS
	REG_S		ra, CALLFRAME_RA(sp)
	or		t0, t0, MIPS_SR_COP_1_BIT
	mtc0		t0, MIPS_COP_0_STATUS
	COP0_HAZARD_FPUENABLE

	REG_PROLOGUE
	REG_L		a2, TF_REG_CAUSE(a1)
	REG_EPILOGUE

	cfc1		t0, MIPS_FCSR		# stall til FP done
	cfc1		t0, MIPS_FCSR		# now get status
	nop
	sll		t2, t0, (31 - 17)	# unimplemented operation?
	bgez		t2, 3f			# no, normal trap
	 nop

/*
 * We received an unimplemented operation trap, so fetch and emulate
 * the instruction.
 *
 * We check whether it's an unimplemented FP instruction here rather
 * than invoking mips_emul_inst(), since it is faster.
 */
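/*
 * In C the fetch below amounts to (a sketch; CAUSE bit 31 is the
 * branch-delay flag):
 *
 *	insn = *(uint32_t *)(pc + ((cause & MIPS_CR_BR_DELAY) ? 4 : 0));
 */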
	srl		v1, a2, 31		# move branch delay bit to LSB
	sll		v1, 2			# shift it left by 2 (mult by 4)
	PTR_ADDU	a0, v1			# add to a0
	INT_L		a0, 0(a0)		# a0 = coproc instruction
	NOP_L					# load delay

/*
 * Check to see if the instruction to be emulated is a floating-point
 * instruction.
 */
	srl		t2, a0, MIPS_OPCODE_SHIFT
	beq		t2, MIPS_OPCODE_C1, 4f
	 nop

/*
 * Send an ILL signal to the current LWP if the instruction can't be emulated.
 */
	srl		a2, 8
	sll		a2, 8
	ori		a2, T_RES_INST << MIPS_CR_EXC_CODE_SHIFT
	REG_PROLOGUE
	REG_S		a2, TF_REG_CAUSE(a1)
	REG_EPILOGUE

	and		t2, t0, ~MIPS_FCSR_CAUSE
	ctc1		t2, MIPS_FCSR

	move		a1, a0				# code = instruction
	jal		_C_LABEL(mips_fpuillinst)
	 move		a0, MIPS_CURLWP			# get current LWP

	b		FPReturn
	 nop

/*
 * Send an FPE signal to the current LWP if it tripped any of
 * the VZOUI bits.
 */
3:
	REG_PROLOGUE
	REG_S		a2, TF_REG_CAUSE(a1)
	REG_EPILOGUE

	and		a0, t0, ~MIPS_FCSR_CAUSE
	ctc1		a0, MIPS_FCSR

	move		a1, t0			# FPU status
	jal		_C_LABEL(mips_fpuexcept)
	 move		a0, MIPS_CURLWP		# get current LWP

	b		FPReturn
	 nop

/*
 * Finally, we can call
 * mips_emul_fp(uint32_t insn, struct trapframe *tf, uint32_t cause).
 */
4:
	jal		_C_LABEL(mips_emul_fp)
	 nop

/*
 * Turn off the floating point coprocessor and return.
 */
FPReturn:
	mfc0		t0, MIPS_COP_0_STATUS
	REG_L		ra, CALLFRAME_RA(sp)
	and		t0, ~MIPS_SR_COP_1_BIT
	mtc0		t0, MIPS_COP_0_STATUS
	COP0_SYNC
	j		ra
	 PTR_ADDU	sp, CALLFRAME_SIZ
END(mips_fpu_intr)
#endif /* !defined(NOFPU) || defined(FPEMUL) */

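/*
 * mips_pagecopy(dst, src) -- a sketch of the contract:
 *
 *	Copy the PAGE_SIZE-byte page at src (a1) to dst (a0), eight
 *	registers per iteration, with the loads grouped ahead of the
 *	stores to overlap memory latency.
 */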
LEAF(mips_pagecopy)
	.set	push
#if defined(__mips_n32) || defined(_LP64)
	.set	mips3
#endif
	li		a2, PAGE_SIZE / (8 * SZREG)

1:	REG_L		t0,  (0*SZREG)(a1)
	REG_L		ta0, (4*SZREG)(a1)
	PTR_SUBU	a2, 1
	REG_L		t1,  (1*SZREG)(a1)
	REG_L		t2,  (2*SZREG)(a1)
	REG_L		t3,  (3*SZREG)(a1)
	REG_L		ta1, (5*SZREG)(a1)
	REG_L		ta2, (6*SZREG)(a1)
	REG_L		ta3, (7*SZREG)(a1)

	REG_S		t0,  (0*SZREG)(a0)
	REG_S		ta0, (4*SZREG)(a0)
	PTR_ADDU	a1, 8*SZREG
	REG_S		t1,  (1*SZREG)(a0)
	REG_S		t2,  (2*SZREG)(a0)
	REG_S		t3,  (3*SZREG)(a0)
	REG_S		ta1, (5*SZREG)(a0)
	REG_S		ta2, (6*SZREG)(a0)
	REG_S		ta3, (7*SZREG)(a0)
	bgtz		a2, 1b
	PTR_ADDU	a0, 8*SZREG
	.set	pop
	jr	ra
	nop
END(mips_pagecopy)

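/*
 * mips_pagezero(dst) -- a sketch of the contract:
 *
 *	Zero the PAGE_SIZE-byte page at dst (a0), eight stores per
 *	iteration.
 */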
LEAF(mips_pagezero)
/* We can always safely store a 64-bit zero on MIPS3,4,64 */
	.set	push
#if (MIPS1 + MIPS32 + MIPS32R2) == 0
	.set	mips3
#endif
	li		a1, PAGE_SIZE / (8*SZREG)

1:	REG_S		zero, (0*SZREG)(a0)	# try to miss cache first
	REG_S		zero, (4*SZREG)(a0)
	subu		a1, 1
	REG_S		zero, (1*SZREG)(a0)	# fill in cache lines
	REG_S		zero, (2*SZREG)(a0)
	REG_S		zero, (3*SZREG)(a0)
	REG_S		zero, (5*SZREG)(a0)
	REG_S		zero, (6*SZREG)(a0)
	REG_S		zero, (7*SZREG)(a0)
	bgtz		a1, 1b
	PTR_ADDU	a0, 8*SZREG
	.set	pop
	jr	ra
	nop
END(mips_pagezero)


#ifndef DDB_TRACE

#if defined(DEBUG) || defined(DDB) || defined(KGDB) || defined(geo)
/*
 * Stacktrace support hooks which use type punning to access
 * the caller's registers.
 */


/*
 * stacktrace() -- print a stack backtrace to the console.
 *	implicitly accesses caller's a0-a3.
 */
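/*
 * The call made below amounts to (a sketch):
 *
 *	stacktrace_subr(a0, a1, a2, a3, pc, sp, fp, 0, printf);
 *
 * with the last five arguments passed on the stack for o32/o64, and in
 * a4-a7 plus one stack slot for n32/n64.
 */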
#if defined(__mips_o32) || defined(__mips_o64)
#define	XCALLFRAME_SIZ		(CALLFRAME_SIZ + 6*SZREG)
#define	XCALLFRAME_RA		(CALLFRAME_RA  + 4*SZREG)
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	XCALLFRAME_SIZ		(CALLFRAME_SIZ + 2*SZREG)
#define	XCALLFRAME_RA		(CALLFRAME_RA  + 2*SZREG)
#endif
NESTED(stacktrace, XCALLFRAME_SIZ, ra)
XNESTED(logstacktrace)
	PTR_SUBU sp, XCALLFRAME_SIZ		# four arg-passing slots
	move	t0, ra				# save caller's PC
	PTR_ADDU t1, sp, XCALLFRAME_SIZ		# save caller's SP
	move	t2, s8				# non-virtual frame pointer

	PTR_LA	v0, _C_LABEL(printf)

	REG_S	ra, XCALLFRAME_RA(sp)		# save return address
#if defined(__mips_o32) || defined(__mips_o64)
	/* a0-a3 are still caller's a0-a3, pass in-place as given. */
	REG_S	t0, 4*SZREG(sp)			# push caller's PC
	REG_S	t1, 5*SZREG(sp)			# push caller's SP
	REG_S	t2, 6*SZREG(sp)			# push caller's FP, in case
	REG_S	zero, 7*SZREG(sp)		# caller's RA on stack
	/* this uses the slot used for saving s0 in the callframe */
	jal	_C_LABEL(stacktrace_subr)
	 REG_S	v0, 8*SZREG(sp)			# push printf
#endif
#if defined(__mips_n32) || defined(__mips_n64)
	move	a4, t0				# pass caller's PC
	move	a5, t1				# pass caller's SP
	move	a6, t2				# pass caller's FP, in case
	move	a7, zero			# caller's RA on stack
	/* this uses the slot used for saving s0 in the callframe */
	jal	_C_LABEL(stacktrace_subr)
	 REG_S	v0, 0(sp)			# push printf
#endif

	REG_L	ra, XCALLFRAME_RA(sp)
	PTR_ADDU sp, XCALLFRAME_SIZ
	jr	ra
	nop
#undef XCALLFRAME_RA
#undef XCALLFRAME_SIZ
END(stacktrace)
#endif	/* DEBUG || DDB || KGDB || geo */
#endif	/* !DDB_TRACE */

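/*
 * Stub that vectors through mips_locore_jumpvec to the tlb_update_addr
 * implementation selected for the CPU family at boot time.
 */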
	.section .stub, "ax"
NESTED_NOPROFILE(tlb_update_addr, 0, ra)
	lui	v0,%hi(_C_LABEL(mips_locore_jumpvec)+LJV_TLB_UPDATE_ADDR)
	PTR_L	t9,%lo(_C_LABEL(mips_locore_jumpvec)+LJV_TLB_UPDATE_ADDR)(v0)
	jr	t9
	 nop
END(tlb_update_addr)

	.sdata
	.globl	_C_LABEL(esym)
_C_LABEL(esym):
	.word 0

#ifdef MIPS_DYNAMIC_STATUS_MASK
	.globl	_C_LABEL(mips_dynamic_status_mask)
_C_LABEL(mips_dynamic_status_mask):
	.word	0xffffffff
#endif