xref: /openbsd-src/sys/arch/alpha/alpha/locore.s (revision f9fed175ef459df9c546cb2689e7978147406130)
1/* $OpenBSD: locore.s,v 1.56 2024/10/10 19:33:05 miod Exp $ */
2/* $NetBSD: locore.s,v 1.94 2001/04/26 03:10:44 ross Exp $ */
3
4/*-
5 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34/*
35 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Author: Chris G. Demetriou
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53 *  School of Computer Science
54 *  Carnegie Mellon University
55 *  Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61.file	1 __FILE__
62
63#include <machine/asm.h>
64
65#include "assym.h"
66
67#if defined(MULTIPROCESSOR)
68
69/*
70 * Get various per-cpu values.  A pointer to our cpu_info structure
71 * is stored in SysValue.  These macros clobber v0, t0, t8..t11.
72 *
73 * All return values are in v0.
74 */
75#define	GET_CPUINFO		call_pal PAL_OSF1_rdval
76
77#else	/* if not MULTIPROCESSOR... */
78
/* Uniprocessor: the only cpu_info is the statically-allocated primary. */
79IMPORT(cpu_info_primary, CPU_INFO_SIZEOF)
80
81#define	GET_CPUINFO		lda v0, cpu_info_primary
82
83#endif
84
85/*
86 * Perform actions necessary to switch to a new context.  The
87 * hwpcb should be in a0.  Clobbers v0, t0, t8..t11, a0.
88 */
/*
 * Records a0 (the new hwpcb) in curcpu()->ci_curpcb, then asks the
 * PALcode to swap hardware context (swpctx switches KSP, PTBR, etc.).
 */
89#define	SWITCH_CONTEXT							\
90	/* Make a note of the context we're running on. */		\
91	GET_CPUINFO						;	\
92	stq	a0, CPU_INFO_CURPCB(v0)				;	\
93									\
94	/* Swap in the new context. */					\
95	call_pal PAL_OSF1_swpctx
96
97
98	/* don't reorder instructions; paranoia. */
99	.set noreorder
100	.text
101
	/*
	 * Readability aliases: bfalse branches when \reg is zero
	 * ("false"), btrue when it is non-zero ("true").  They expand
	 * to plain beq/bne.
	 */
102	.macro	bfalse	reg, dst
103	beq	\reg, \dst
104	.endm
105
106	.macro	btrue	reg, dst
107	bne	\reg, \dst
108	.endm
109
110/**************************************************************************/
111
112/*
113 * Pull in the PROM interface routines; these are needed for
114 * prom printf (while bootstrapping), and for determining the
115 * boot device, etc.
116 */
117#include <alpha/alpha/prom_disp.s>
118
119/**************************************************************************/
120
121/*
122 * Pull in the PALcode function stubs.
123 */
124#include <alpha/alpha/pal.s>
125
126/**************************************************************************/
127
128/**************************************************************************/
129
130#if defined(MULTIPROCESSOR)
131/*
132 * Pull in the multiprocessor glue.
133 */
134#include <alpha/alpha/multiproc.s>
135#endif /* MULTIPROCESSOR */
136
137/**************************************************************************/
138
139/**************************************************************************/
140
141#if defined(DDB)
142/*
143 * Pull in debugger glue.
144 */
145#include <alpha/alpha/debug.s>
146#endif /* DDB */
147
148/**************************************************************************/
149
150/**************************************************************************/
151
152	.text
153.loc	1 __LINE__
154backtolocore1:
155/**************************************************************************/
156
157/*
158 * Signal "trampoline" code. Invoked from RTE setup by sendsig().
159 *
160 * On entry, stack & registers look like:
161 *
162 *      a0	signal number
163 *      a1	signal specific code
164 *      a2	pointer to signal context frame (scp)
165 *      a3	address of handler
166 *      sp+0	saved hardware state
167 *                      .
168 *                      .
169 *      scp+0	beginning of signal context frame
170 */
171
	/*
	 * The trampoline is placed in .rodata: it is never executed here,
	 * but copied out for execution in user context by sendsig().
	 */
172	.section .rodata
173NESTED(sigcode,0,0,ra,0,0)
174	lda	sp, -16(sp)		/* save the sigcontext pointer */
175	stq	a2, 0(sp)
176	jsr	ra, (t12)		/* call the signal handler (t12==pv) */
177	ldq	a0, 0(sp)		/* get the sigcontext pointer */
178	lda	sp, 16(sp)
179	ldiq	v0, SYS_sigreturn	/* and call sigreturn() with it. */
180	.globl	sigcodecall
181sigcodecall:
182	call_pal PAL_OSF1_callsys
183	.globl  sigcoderet
184sigcoderet:
185	mov	v0, a0			/* if that failed, get error code */
186	ldiq	v0, SYS_exit		/* and call exit() with it. */
187	call_pal PAL_OSF1_callsys
188XNESTED(esigcode,0)
189	END(sigcode)
190
	/*
	 * sigfill: a one-instruction fill pattern (a privileged "halt"),
	 * with its byte size exported as sigfillsiz.  NOTE(review):
	 * presumably used to pad the signal trampoline area so stray
	 * execution faults -- confirm against sendsig()/machdep.c.
	 */
191	.globl	sigfill
192sigfill:
193	halt
194esigfill:
195
196	.globl	sigfillsiz
197sigfillsiz:
198	.quad	esigfill - sigfill
199
200	.text
201
202/**************************************************************************/
203
204/*
205 * exception_return: return from trap, exception, or syscall
206 */
207
/* ssir: non-zero when a software interrupt request is pending. */
208BSS(ssir, 8)
209
	/*
	 * Common exit path for traps, interrupts, and syscalls.  If the
	 * saved IPL is 0, dispatch pending soft interrupts; if returning
	 * to user mode, run pending ASTs and set the FP-enable bit from
	 * fpcurproc.  Finally restore the frame and "rti" back.
	 */
210LEAF(exception_return, 1)			/* XXX should be NESTED */
211	br	pv, 1f
2121:	LDGP(pv)
213
214	ldq	s1, (FRAME_PS * 8)(sp)		/* get the saved PS */
215	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
216	bne	t0, 4f				/* != 0: can't do AST or SIR */
217
218	/* see if we can do an SIR */
2192:	ldq	t1, ssir			/* SIR pending? */
220	bne	t1, 5f				/* yes */
221	/* no */
222
223	/* check for AST */
2243:	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
225	beq	t0, 4f				/* no: just return */
226	/* yes */
227
228	/* GET_CPUINFO clobbers v0, t0, t8...t11. */
229	GET_CPUINFO
230	ldq	t1, CPU_INFO_CURPROC(v0)
231	ldl	t2, P_MD_ASTPENDING(t1)		/* AST pending? */
232	bne	t2, 6f				/* yes */
233	/* no: return & deal with FP */
234
235	/*
236	 * We are going back to usermode.  Enable the FPU based on whether
237	 * the current proc is fpcurproc.  v0 already contains the cpu_info
238	 * pointer from above.
239	 */
240	ldq	t2, CPU_INFO_FPCURPROC(v0)
241	cmpeq	t1, t2, t1
242	mov	zero, a0
243	cmovne	t1, 1, a0			/* a0 = (curproc == fpcurproc) */
244	call_pal PAL_OSF1_wrfen
245
246	/* restore the registers, and return */
2474:	bsr	ra, exception_restore_regs	/* jmp/CALL trashes pv/t12 */
248	ldq	ra,(FRAME_RA*8)(sp)
249	.set noat
250	ldq	at_reg,(FRAME_AT*8)(sp)
251
252	lda	sp,(FRAME_SW_SIZE*8)(sp)
253	call_pal PAL_OSF1_rti
254	.set at
255	/* NOTREACHED */
256
257	/* We've got a SIR */
2585:	ldiq	a0, ALPHA_PSL_IPL_SOFT
259	call_pal PAL_OSF1_swpipl
260	mov	v0, s2				/* remember old IPL */
261	CALL(softintr_dispatch)
262
263	/* SIR handled; restore IPL and check again */
264	mov	s2, a0
265	call_pal PAL_OSF1_swpipl
266	br	2b
267
268	/* We've got an AST */
2696:	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
270	call_pal PAL_OSF1_swpipl
271	mov	v0, s2				/* remember old IPL */
272
273	mov	sp, a0				/* only arg is frame */
274	CALL(ast)
275
276	/* AST handled; restore IPL and check again */
277	mov	s2, a0
278	call_pal PAL_OSF1_swpipl
279	br	3b
280
281	END(exception_return)
282
	/*
	 * Store v0, a3-a5, s0-s6 and t0-t12 into the trap frame at sp.
	 * Called via bsr (no gp setup); the caller saves ra/at itself.
	 */
283LEAF(exception_save_regs, 0)
284	stq	v0,(FRAME_V0*8)(sp)
285	stq	a3,(FRAME_A3*8)(sp)
286	stq	a4,(FRAME_A4*8)(sp)
287	stq	a5,(FRAME_A5*8)(sp)
288	stq	s0,(FRAME_S0*8)(sp)
289	stq	s1,(FRAME_S1*8)(sp)
290	stq	s2,(FRAME_S2*8)(sp)
291	stq	s3,(FRAME_S3*8)(sp)
292	stq	s4,(FRAME_S4*8)(sp)
293	stq	s5,(FRAME_S5*8)(sp)
294	stq	s6,(FRAME_S6*8)(sp)
295	stq	t0,(FRAME_T0*8)(sp)
296	stq	t1,(FRAME_T1*8)(sp)
297	stq	t2,(FRAME_T2*8)(sp)
298	stq	t3,(FRAME_T3*8)(sp)
299	stq	t4,(FRAME_T4*8)(sp)
300	stq	t5,(FRAME_T5*8)(sp)
301	stq	t6,(FRAME_T6*8)(sp)
302	stq	t7,(FRAME_T7*8)(sp)
303	stq	t8,(FRAME_T8*8)(sp)
304	stq	t9,(FRAME_T9*8)(sp)
305	stq	t10,(FRAME_T10*8)(sp)
306	stq	t11,(FRAME_T11*8)(sp)
307	stq	t12,(FRAME_T12*8)(sp)
308	RET
309	END(exception_save_regs)
310
	/*
	 * Inverse of exception_save_regs: reload v0, a3-a5, s0-s6 and
	 * t0-t12 from the trap frame at sp.  ra/at/sp are restored by
	 * the caller (exception_return).
	 */
311LEAF(exception_restore_regs, 0)
312	ldq	v0,(FRAME_V0*8)(sp)
313	ldq	a3,(FRAME_A3*8)(sp)
314	ldq	a4,(FRAME_A4*8)(sp)
315	ldq	a5,(FRAME_A5*8)(sp)
316	ldq	s0,(FRAME_S0*8)(sp)
317	ldq	s1,(FRAME_S1*8)(sp)
318	ldq	s2,(FRAME_S2*8)(sp)
319	ldq	s3,(FRAME_S3*8)(sp)
320	ldq	s4,(FRAME_S4*8)(sp)
321	ldq	s5,(FRAME_S5*8)(sp)
322	ldq	s6,(FRAME_S6*8)(sp)
323	ldq	t0,(FRAME_T0*8)(sp)
324	ldq	t1,(FRAME_T1*8)(sp)
325	ldq	t2,(FRAME_T2*8)(sp)
326	ldq	t3,(FRAME_T3*8)(sp)
327	ldq	t4,(FRAME_T4*8)(sp)
328	ldq	t5,(FRAME_T5*8)(sp)
329	ldq	t6,(FRAME_T6*8)(sp)
330	ldq	t7,(FRAME_T7*8)(sp)
331	ldq	t8,(FRAME_T8*8)(sp)
332	ldq	t9,(FRAME_T9*8)(sp)
333	ldq	t10,(FRAME_T10*8)(sp)
334	ldq	t11,(FRAME_T11*8)(sp)
335	ldq	t12,(FRAME_T12*8)(sp)
336	RET
337	END(exception_restore_regs)
338
339/**************************************************************************/
340
341/*
342 * XentArith:
343 * System arithmetic trap entry point.
344 */
345
	/* Build the frame via PALVECT, then trap(a0, a1, a2, ARITH, frame). */
346	PALVECT(XentArith)		/* setup frame, save registers */
347
348	/* a0, a1, & a2 already set up */
349	ldiq	a3, ALPHA_KENTRY_ARITH
350	mov	sp, a4			; .loc 1 __LINE__
351	CALL(trap)
352
353	jmp	zero, exception_return
354	END(XentArith)
355
356/**************************************************************************/
357
358/*
359 * XentIF:
360 * System instruction fault trap entry point.
361 */
362
	/* Build the frame via PALVECT, then trap(a0, a1, a2, IF, frame). */
363	PALVECT(XentIF)			/* setup frame, save registers */
364
365	/* a0, a1, & a2 already set up */
366	ldiq	a3, ALPHA_KENTRY_IF
367	mov	sp, a4			; .loc 1 __LINE__
368	CALL(trap)
369	jmp	zero, exception_return
370	END(XentIF)
371
372/**************************************************************************/
373
374/*
375 * XentInt:
376 * System interrupt entry point.
377 */
378
	/* Build the frame via PALVECT, then interrupt(a0, a1, a2, frame). */
379	PALVECT(XentInt)		/* setup frame, save registers */
380
381	/* a0, a1, & a2 already set up */
382	mov	sp, a3			; .loc 1 __LINE__
383	CALL(interrupt)
384	jmp	zero, exception_return
385	END(XentInt)
386
387/**************************************************************************/
388
389/*
390 * XentMM:
391 * System memory management fault entry point.
392 */
393
	/* Build the frame via PALVECT, then trap(a0, a1, a2, MM, frame). */
394	PALVECT(XentMM)			/* setup frame, save registers */
395
396	/* a0, a1, & a2 already set up */
397	ldiq	a3, ALPHA_KENTRY_MM
398	mov	sp, a4			; .loc 1 __LINE__
399	CALL(trap)
400
401	jmp	zero, exception_return
402	END(XentMM)
403
404/**************************************************************************/
405
406/*
407 * XentSys:
408 * System call entry point.
409 */
410
	/*
	 * Syscall entry: save enough state for syscall restart (v0 holds
	 * the syscall number, a0-a5 the arguments), then call
	 * syscall(number, frame).
	 */
411	ESETUP(XentSys)			; .loc 1 __LINE__
412
413	stq	v0,(FRAME_V0*8)(sp)		/* in case we need to restart */
414	stq	s0,(FRAME_S0*8)(sp)
415	stq	s1,(FRAME_S1*8)(sp)
416	stq	s2,(FRAME_S2*8)(sp)
417	stq	s3,(FRAME_S3*8)(sp)
418	stq	s4,(FRAME_S4*8)(sp)
419	stq	s5,(FRAME_S5*8)(sp)
420	stq	s6,(FRAME_S6*8)(sp)
421	stq	a0,(FRAME_A0*8)(sp)
422	stq	a1,(FRAME_A1*8)(sp)
423	stq	a2,(FRAME_A2*8)(sp)
424	stq	a3,(FRAME_A3*8)(sp)
425	stq	a4,(FRAME_A4*8)(sp)
426	stq	a5,(FRAME_A5*8)(sp)
427	stq	ra,(FRAME_RA*8)(sp)
428
429	/* syscall number, passed in v0, is first arg, frame pointer second */
430	mov	v0,a0
431	mov	sp,a1			; .loc 1 __LINE__
432	CALL(syscall)
433
434	jmp	zero, exception_return
435	END(XentSys)
436
437/**************************************************************************/
438
439/*
440 * XentUna:
441 * System unaligned access entry point.
442 */
443
	/*
	 * Unaligned-access entry.  Unlike the PALVECT vectors, this builds
	 * the switch frame by hand before calling trap(a0, a1, a2, UNA,
	 * frame).
	 */
444LEAF(XentUna, 3)				/* XXX should be NESTED */
445	.set noat
446	lda	sp,-(FRAME_SW_SIZE*8)(sp)
447	stq	at_reg,(FRAME_AT*8)(sp)
448	.set at
449	stq	ra,(FRAME_RA*8)(sp)
450	bsr	ra, exception_save_regs		/* jmp/CALL trashes pv/t12 */
451
452	/* a0, a1, & a2 already set up */
453	ldiq	a3, ALPHA_KENTRY_UNA
454	mov	sp, a4			; .loc 1 __LINE__
455	CALL(trap)
456
457	jmp	zero, exception_return
458	END(XentUna)
459
460/**************************************************************************/
461
462/*
463 * savefpstate: Save a process's floating point state.
464 *
465 * Arguments:
466 *	a0	'struct fpstate *' to save into
467 */
468
/*
 * Save $f0-$f30 and the FPCR into the fpstate pointed to by a0.
 * ($f31 is the architecturally-zero register and is not saved.)
 */
469LEAF(savefpstate, 1)
470	LDGP(pv)
471	/* save all of the FP registers */
472	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
473	stt	$f0,   (0 * 8)(t1)	/* save first register, using hw name */
474	stt	$f1,   (1 * 8)(t1)	/* etc. */
475	stt	$f2,   (2 * 8)(t1)
476	stt	$f3,   (3 * 8)(t1)
477	stt	$f4,   (4 * 8)(t1)
478	stt	$f5,   (5 * 8)(t1)
479	stt	$f6,   (6 * 8)(t1)
480	stt	$f7,   (7 * 8)(t1)
481	stt	$f8,   (8 * 8)(t1)
482	stt	$f9,   (9 * 8)(t1)
483	stt	$f10, (10 * 8)(t1)
484	stt	$f11, (11 * 8)(t1)
485	stt	$f12, (12 * 8)(t1)
486	stt	$f13, (13 * 8)(t1)
487	stt	$f14, (14 * 8)(t1)
488	stt	$f15, (15 * 8)(t1)
489	stt	$f16, (16 * 8)(t1)
490	stt	$f17, (17 * 8)(t1)
491	stt	$f18, (18 * 8)(t1)
492	stt	$f19, (19 * 8)(t1)
493	stt	$f20, (20 * 8)(t1)
494	stt	$f21, (21 * 8)(t1)
495	stt	$f22, (22 * 8)(t1)
496	stt	$f23, (23 * 8)(t1)
497	stt	$f24, (24 * 8)(t1)
498	stt	$f25, (25 * 8)(t1)
499	stt	$f26, (26 * 8)(t1)
500	stt	$f27, (27 * 8)(t1)
501	stt	$f28, (28 * 8)(t1)
502	stt	$f29, (29 * 8)(t1)
503	stt	$f30, (30 * 8)(t1)
504
505	/*
506	 * Then save the FPCR; note that the necessary 'trapb's are taken
507	 * care of on kernel entry and exit.
508	 */
509	mf_fpcr	ft0
510	stt	ft0, FPREG_FPR_CR(a0)	/* store to FPCR save area */
511
512	RET
513	END(savefpstate)
514
515/**************************************************************************/
516
517/*
518 * restorefpstate: Restore a process's floating point state.
519 *
520 * Arguments:
521 *	a0	'struct fpstate *' to restore from
522 */
523
/*
 * Reload the FPCR and $f0-$f30 from the fpstate pointed to by a0.
 * Exact inverse of savefpstate().
 */
524LEAF(restorefpstate, 1)
525	LDGP(pv)
526	/*
527	 * Restore the FPCR; note that the necessary 'trapb's are taken care of
528	 * on kernel entry and exit.
529	 */
530	ldt	ft0, FPREG_FPR_CR(a0)	/* load from FPCR save area */
531	mt_fpcr	ft0
532
533	/* Restore all of the FP registers. */
534	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
535	ldt	$f0,   (0 * 8)(t1)	/* restore first reg., using hw name */
536	ldt	$f1,   (1 * 8)(t1)	/* etc. */
537	ldt	$f2,   (2 * 8)(t1)
538	ldt	$f3,   (3 * 8)(t1)
539	ldt	$f4,   (4 * 8)(t1)
540	ldt	$f5,   (5 * 8)(t1)
541	ldt	$f6,   (6 * 8)(t1)
542	ldt	$f7,   (7 * 8)(t1)
543	ldt	$f8,   (8 * 8)(t1)
544	ldt	$f9,   (9 * 8)(t1)
545	ldt	$f10, (10 * 8)(t1)
546	ldt	$f11, (11 * 8)(t1)
547	ldt	$f12, (12 * 8)(t1)
548	ldt	$f13, (13 * 8)(t1)
549	ldt	$f14, (14 * 8)(t1)
550	ldt	$f15, (15 * 8)(t1)
551	ldt	$f16, (16 * 8)(t1)
552	ldt	$f17, (17 * 8)(t1)
553	ldt	$f18, (18 * 8)(t1)
554	ldt	$f19, (19 * 8)(t1)
555	ldt	$f20, (20 * 8)(t1)
556	ldt	$f21, (21 * 8)(t1)
557	ldt	$f22, (22 * 8)(t1)
558	ldt	$f23, (23 * 8)(t1)
559	ldt	$f24, (24 * 8)(t1)
560	ldt	$f25, (25 * 8)(t1)
561	ldt	$f26, (26 * 8)(t1)
562	ldt	$f27, (27 * 8)(t1)
	/* NOTE(review): noat presumably silences an assembler $at warning
	 * for the $f28 encoding -- confirm with the toolchain. */
563	.set noat
564	ldt	$f28, (28 * 8)(t1)
565	.set at
566	ldt	$f29, (29 * 8)(t1)
567	ldt	$f30, (30 * 8)(t1)
568
569	RET
570	END(restorefpstate)
571
572/**************************************************************************/
573
574/*
575 * savectx: save process context, i.e. callee-saved registers
576 *
577 * Note that savectx() only works for processes other than curproc,
578 * since cpu_switch will copy over the info saved here.  (It _can_
579 * sanely be used for curproc iff cpu_switch won't be called again, e.g.
580 * if called from boot().)
581 *
582 * Arguments:
583 *	a0	'struct user *' of the process that needs its context saved
584 *
585 * Return:
586 *	v0	0.  (note that for child processes, it seems
587 *		like savectx() returns 1, because the return address
588 *		in the PCB is set to the return address from savectx().)
589 */
590
/*
 * Save sp, s0-s6, ra and the current PS into the pcb inside the
 * 'struct user' at a0; returns 0.  See the contract comment above.
 */
591LEAF(savectx, 1)
592	br	pv, 1f
5931:	LDGP(pv)
594	stq	sp, U_PCB_HWPCB_KSP(a0)		/* store sp */
595	stq	s0, U_PCB_CONTEXT+(0 * 8)(a0)	/* store s0 - s6 */
596	stq	s1, U_PCB_CONTEXT+(1 * 8)(a0)
597	stq	s2, U_PCB_CONTEXT+(2 * 8)(a0)
598	stq	s3, U_PCB_CONTEXT+(3 * 8)(a0)
599	stq	s4, U_PCB_CONTEXT+(4 * 8)(a0)
600	stq	s5, U_PCB_CONTEXT+(5 * 8)(a0)
601	stq	s6, U_PCB_CONTEXT+(6 * 8)(a0)
602	stq	ra, U_PCB_CONTEXT+(7 * 8)(a0)	/* store ra */
603	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
604	stq	v0, U_PCB_CONTEXT+(8 * 8)(a0)	/* store ps, for ipl */
605
606	mov	zero, v0
607	RET
608	END(savectx)
609
610/**************************************************************************/
611
612/*
613 * cpu_switchto(struct proc *old, struct proc *new)
614 * Switch from "old" proc to "new".
615 */
616LEAF(cpu_switchto, 2)
617	LDGP(pv)
618
619	/*
620	 * Don't bother saving the old context if oldproc is NULL.
621	 */
622	beq	a0, 1f
623
624	/*
625	 * do an inline savectx(), to save old context
626	 */
627	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
628	ldq	t0, P_ADDR(a0)
629	/* NOTE: ksp is stored by the swpctx */
630	stq	s0, U_PCB_CONTEXT+(0 * 8)(t0)	/* store s0 - s6 */
631	stq	s1, U_PCB_CONTEXT+(1 * 8)(t0)
632	stq	s2, U_PCB_CONTEXT+(2 * 8)(t0)
633	stq	s3, U_PCB_CONTEXT+(3 * 8)(t0)
634	stq	s4, U_PCB_CONTEXT+(4 * 8)(t0)
635	stq	s5, U_PCB_CONTEXT+(5 * 8)(t0)
636	stq	s6, U_PCB_CONTEXT+(6 * 8)(t0)
637	stq	ra, U_PCB_CONTEXT+(7 * 8)(t0)	/* store ra */
638	stq	v0, U_PCB_CONTEXT+(8 * 8)(t0)	/* store ps, for ipl */
639
6401:
641	mov	a0, s0				/* save old proc */
642	mov	a1, s2				/* save new proc */
643	ldq	s3, P_MD_PCBPADDR(s2)		/* save new pcbpaddr */
644
645	/*
646	 * Deactivate the old address space before activating the
647	 * new one.  We need to do this before activating the
648	 * new process's address space in the event that new
649	 * process is using the same vmspace as the old.  If we
650	 * do this after we activate, then we might end up
651	 * incorrectly marking the pmap inactive!
652	 *
653	 * We don't deactivate if we came here from sched_exit
654	 * (old pmap no longer exists; vmspace has been freed).
655	 * oldproc will be NULL in this case.  We have actually
656	 * taken care of calling pmap_deactivate() in cpu_exit(),
657	 * before the vmspace went away.
658	 */
659	beq	s0, 2f
660
661	mov	s0, a0				/* pmap_deactivate(oldproc) */
662	CALL(pmap_deactivate)
663
6642:	/*
665	 * Activate the new process's address space and perform
666	 * the actual context swap.
667	 */
668
669	mov	s2, a0				/* pmap_activate(p) */
670	CALL(pmap_activate)
671
672	mov	s3, a0				/* swap the context */
673	SWITCH_CONTEXT
674
675	/*
676	 * Now that the switch is done, update curproc and other
677	 * globals.  We must do this even if switching to ourselves
678	 * because we might have re-entered cpu_switch() from idle(),
679	 * in which case curproc would be NULL.
680	 *
681	 * Note: GET_CPUINFO clobbers v0, t0, t8...t11.
682	 */
	/*
	 * Byte store without BWX: read-modify-write the quadword that
	 * contains p->p_stat via ldq_u/insbl/mskbl/stq_u.  On BWX-capable
	 * CPUs this span (__bwx_switch0..__bwx_switch1) is overwritten
	 * with the __bwx_switch2 sequence below.
	 */
683EXPORT(__bwx_switch0)
684	addq	s2, P_STAT, t3			/* p->p_stat = SONPROC */
685	ldq_u	t1, 0(t3)
686	ldiq	t0, SONPROC
687	insbl	t0, t3, t0
688	mskbl	t1, t3, t1
689	or	t0, t1, t0
690	stq_u	t0, 0(t3)
691EXPORT(__bwx_switch1)
692
693	GET_CPUINFO
694	/* p->p_cpu initialized in fork1() for single-processor */
695#if defined(MULTIPROCESSOR)
696	stq	v0, P_CPU(s2)			/* p->p_cpu = curcpu() */
697#endif
698	stq	s2, CPU_INFO_CURPROC(v0)	/* curproc = p */
699
700	/*
701	 * Now running on the new u struct.
702	 * Restore registers and return.
703	 */
704	ldq	t0, P_ADDR(s2)
705
706	/* NOTE: ksp is restored by the swpctx */
707	ldq	s0, U_PCB_CONTEXT+(0 * 8)(t0)		/* restore s0 - s6 */
708	ldq	s1, U_PCB_CONTEXT+(1 * 8)(t0)
709	ldq	s2, U_PCB_CONTEXT+(2 * 8)(t0)
710	ldq	s3, U_PCB_CONTEXT+(3 * 8)(t0)
711	ldq	s4, U_PCB_CONTEXT+(4 * 8)(t0)
712	ldq	s5, U_PCB_CONTEXT+(5 * 8)(t0)
713	ldq	s6, U_PCB_CONTEXT+(6 * 8)(t0)
714	ldq	ra, U_PCB_CONTEXT+(7 * 8)(t0)		/* restore ra */
715	ldq	a0, U_PCB_CONTEXT+(8 * 8)(t0)		/* restore ipl */
716	and	a0, ALPHA_PSL_IPL_MASK, a0
717	call_pal PAL_OSF1_swpipl
718
719	ldiq	v0, 1				/* possible ret to savectx() */
720	RET
721	END(cpu_switchto)
722
723#ifndef SMALL_KERNEL
724	/*
725	 * BWX-enhanced version of the p->p_stat assignment, to be copied
726	 * over the __bwx_switch0 area.
727	 *
728	 * Do not put anything between the end of cpu_switch and this!
729	 */
730EXPORT(__bwx_switch2)
731	ldiq	t0, SONPROC			/* p->p_stat = SONPROC */
732	stb	t0, P_STAT(s2)			/* stb requires the BWX extension */
733EXPORT(__bwx_switch3)
734#endif
735
/* Idle-loop hooks: all three are no-ops on alpha. */
736LEAF(cpu_idle_enter, 0)
737	RET
738	END(cpu_idle_enter)
739
740LEAF(cpu_idle_cycle, 0)
741	RET
742	END(cpu_idle_cycle)
743
744LEAF(cpu_idle_leave, 0)
745	RET
746	END(cpu_idle_leave)
747
748/*
749 * proc_trampoline()
750 *
751 * Arrange for a function to be invoked neatly, after a cpu_fork().
752 *
753 * Invokes the function specified by the s0 register with the return
754 * address specified by the s1 register and with one argument specified
755 * by the s2 register.
756 */
/*
 * First code run by a newly-forked process: do MI bookkeeping, then
 * tail-jump to the function in s0 with return address s1 and arg s2
 * (see the contract comment above).
 */
757LEAF(proc_trampoline, 0)
758	CALL(proc_trampoline_mi)
759	mov	s0, pv				/* pv = callee (t12 by convention) */
760	mov	s1, ra
761	mov	s2, a0
762	jmp	zero, (pv)
763	END(proc_trampoline)
764
765/**************************************************************************/
766
767/*
768 * Copy a null-terminated string within the kernel's address space.
769 * If lenp is not NULL, store the number of chars copied in *lenp
770 *
771 * int copystr(char *from, char *to, size_t len, size_t *lenp);
772 */
/*
 * copystr(from=a0, to=a1, len=a2, lenp=a3): byte-at-a-time NUL-
 * terminated copy using ldq_u/extbl + insbl/mskbl/stq_u so neither
 * pointer needs alignment.  Returns 0, or ENAMETOOLONG if no NUL was
 * found within len bytes.
 */
773STATIC_LEAF(copystr, 4)
774	LDGP(pv)
775
776	mov	a2, t0			/* t0 = i = len */
777	bne	a2, 1f			/* if (len != 0), proceed */
778	ldiq	t1, 1			/* else bail */
779	br	zero, 2f
780
7811:	ldq_u	t1, 0(a0)		/* t1 = *from */
782	extbl	t1, a0, t1
783	ldq_u	t3, 0(a1)		/* set up t2 with quad around *to */
784	insbl	t1, a1, t2
785	mskbl	t3, a1, t3
786	or	t3, t2, t3		/* add *from to quad around *to */
787	stq_u	t3, 0(a1)		/* write out that quad */
788
789	subl	a2, 1, a2		/* len-- */
790	beq	t1, 2f			/* if (*from == 0), bail out */
791	addq	a1, 1, a1		/* to++ */
792	addq	a0, 1, a0		/* from++ */
793	bne	a2, 1b			/* if (len != 0) copy more */
794
7952:	beq	a3, 3f			/* if (lenp != NULL) */
796	subl	t0, a2, t0		/* *lenp = (i - len) */
797	stq	t0, 0(a3)
7983:	beq	t1, 4f			/* *from == '\0'; leave quietly */
799
800	ldiq	v0, ENAMETOOLONG	/* *from != '\0'; error. */
801	RET
802
8034:	mov	zero, v0		/* return 0. */
804	RET
805	END(copystr)
806
/*
 * copyinstr(): range-check the user source address, arm the pcb
 * onfault handler, and let copystr() do the work.  Returns copystr()'s
 * value, or EFAULT via copyerr/copyfault.
 */
807NESTED(_copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
808	LDGP(pv)
809	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
810	cmpult	a0, t0, t1			/* is in user space.	     */
811	beq	t1, copyfault			/* if it's not, error out.   */
812	lda	sp, -16(sp)			/* set up stack frame	     */
813	stq	ra, (16-8)(sp)			/* save ra		     */
814	stq	s0, (16-16)(sp)			/* save s0		     */
815	/* Note: GET_CPUINFO clobbers v0, t0, t8...t11. */
816	GET_CPUINFO
817	ldq	t0, CPU_INFO_CURPROC(v0)
818	ldq	s0, P_ADDR(t0)
819	lda	v0, copyerr			/* set up fault handler.     */
820	stq	v0, U_PCB_ONFAULT(s0)
821	CALL(copystr)				/* do the copy.		     */
822	stq	zero, U_PCB_ONFAULT(s0)		/* kill the fault handler.   */
823	ldq	ra, (16-8)(sp)			/* restore ra.		     */
824	ldq	s0, (16-16)(sp)			/* restore s0.		     */
825	lda	sp, 16(sp)			/* kill stack frame.	     */
826	RET					/* v0 left over from copystr */
827	END(_copyinstr)
828
/*
 * copyoutstr(): same as _copyinstr but range-checks the user
 * DESTINATION address (a1) instead of the source.
 */
829NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
830	LDGP(pv)
831	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
832	cmpult	a1, t0, t1			/* is in user space.	     */
833	beq	t1, copyfault			/* if it's not, error out.   */
834	lda	sp, -16(sp)			/* set up stack frame	     */
835	stq	ra, (16-8)(sp)			/* save ra		     */
836	stq	s0, (16-16)(sp)			/* save s0		     */
837	/* Note: GET_CPUINFO clobbers v0, t0, t8...t11. */
838	GET_CPUINFO
839	ldq	t0, CPU_INFO_CURPROC(v0)
840	ldq	s0, P_ADDR(t0)
841	lda	v0, copyerr			/* set up fault handler.     */
842	stq	v0, U_PCB_ONFAULT(s0)
843	CALL(copystr)				/* do the copy.		     */
844	stq	zero, U_PCB_ONFAULT(s0)		/* kill the fault handler.   */
845	ldq	ra, (16-8)(sp)			/* restore ra.		     */
846	ldq	s0, (16-16)(sp)			/* restore s0.		     */
847	lda	sp, 16(sp)			/* kill stack frame.	     */
848	RET					/* v0 left over from copystr */
849	END(copyoutstr)
850
851/*
852 * kcopy(const void *src, void *dst, size_t len);
853 *
854 * Copy len bytes from src to dst, aborting if we encounter a fatal
855 * page fault.
856 *
857 * kcopy() _must_ save and restore the old fault handler since it is
858 * called by uiomove(), which may be in the path of servicing a non-fatal
859 * page fault.
860 */
861NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
862	LDGP(pv)
863	lda	sp, -32(sp)			/* set up stack frame	     */
864	stq	ra, (32-8)(sp)			/* save ra		     */
865	stq	s0, (32-16)(sp)			/* save s0		     */
866	stq	s1, (32-24)(sp)			/* save s1		     */
867	/* Note: GET_CPUINFO clobbers v0, t0, t8...t11. */
868	GET_CPUINFO
869	ldq	t0, CPU_INFO_CURPROC(v0)
870	ldq	s1, P_ADDR(t0)
	/* Unlike copyin/copyout, save and later restore the previous
	 * onfault handler (see the header comment re: uiomove()). */
871	lda	v0, kcopyerr			/* set up fault handler.     */
872	ldq	s0, U_PCB_ONFAULT(s1)		/* save old handler.	     */
873	stq	v0, U_PCB_ONFAULT(s1)
874	CALL(bcopy)				/* do the copy.		     */
875	stq	s0, U_PCB_ONFAULT(s1)		/* restore old handler.	     */
876	ldq	ra, (32-8)(sp)			/* restore ra.		     */
877	ldq	s0, (32-16)(sp)			/* restore s0.		     */
878	ldq	s1, (32-24)(sp)			/* restore s1.		     */
879	lda	sp, 32(sp)			/* kill stack frame.	     */
880	mov	zero, v0			/* return 0. */
881	RET
882	END(kcopy)
883
/*
 * Fault landing pad for kcopy(): entered with kcopy's frame still on
 * the stack (s1 = u-area, s0 = saved onfault handler).  Restores the
 * old handler and returns EFAULT.
 */
884LEAF(kcopyerr, 0)
885	stq	s0, U_PCB_ONFAULT(s1)		/* restore the old handler.  */
886	ldq	ra, (32-8)(sp)			/* restore ra.		     */
887	ldq	s0, (32-16)(sp)			/* restore s0.		     */
888	ldq	s1, (32-24)(sp)			/* restore s1.		     */
889	lda	sp, 32(sp)			/* kill stack frame.	     */
890	ldiq	v0, EFAULT			/* return EFAULT.	     */
891	RET
892END(kcopyerr)
893
/*
 * copyin(): range-check the user source (a0), arm the pcb onfault
 * handler, and copy with bcopy().  Returns 0 or EFAULT.
 */
894NESTED(_copyin, 3, 16, ra, IM_RA|IM_S0, 0)
895	LDGP(pv)
896	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
897	cmpult	a0, t0, t1			/* is in user space.	     */
898	beq	t1, copyfault			/* if it's not, error out.   */
899	lda	sp, -16(sp)			/* set up stack frame	     */
900	stq	ra, (16-8)(sp)			/* save ra		     */
901	stq	s0, (16-16)(sp)			/* save s0		     */
902	/* Note: GET_CPUINFO clobbers v0, t0, t8...t11. */
903	GET_CPUINFO
904	ldq	t0, CPU_INFO_CURPROC(v0)
905	ldq	s0, P_ADDR(t0)
906	lda	v0, copyerr			/* set up fault handler.     */
907	stq	v0, U_PCB_ONFAULT(s0)
908	CALL(bcopy)				/* do the copy.		     */
909	stq	zero, U_PCB_ONFAULT(s0)		/* kill the fault handler.   */
910	ldq	ra, (16-8)(sp)			/* restore ra.		     */
911	ldq	s0, (16-16)(sp)			/* restore s0.		     */
912	lda	sp, 16(sp)			/* kill stack frame.	     */
913	mov	zero, v0			/* return 0. */
914	RET
915	END(_copyin)
916
/*
 * copyout(): same as _copyin but range-checks the user destination
 * (a1).  Returns 0 or EFAULT.
 */
917NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
918	LDGP(pv)
919	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
920	cmpult	a1, t0, t1			/* is in user space.	     */
921	beq	t1, copyfault			/* if it's not, error out.   */
922	lda	sp, -16(sp)			/* set up stack frame	     */
923	stq	ra, (16-8)(sp)			/* save ra		     */
924	stq	s0, (16-16)(sp)			/* save s0		     */
925	/* Note: GET_CPUINFO clobbers v0, t0, t8...t11. */
926	GET_CPUINFO
927	ldq	t0, CPU_INFO_CURPROC(v0)
928	ldq	s0, P_ADDR(t0)
929	lda	v0, copyerr			/* set up fault handler.     */
930	stq	v0, U_PCB_ONFAULT(s0)
931	CALL(bcopy)				/* do the copy.		     */
932	stq	zero, U_PCB_ONFAULT(s0)		/* kill the fault handler.   */
933	ldq	ra, (16-8)(sp)			/* restore ra.		     */
934	ldq	s0, (16-16)(sp)			/* restore s0.		     */
935	lda	sp, 16(sp)			/* kill stack frame.	     */
936	mov	zero, v0			/* return 0. */
937	RET
938	END(copyout)
939
/*
 * copyin32(): fetch one 32-bit word from user space.  Requires the
 * source to be 4-byte aligned; the single ldl/stl pair runs under the
 * copyerr onfault handler.  Returns 0 or EFAULT.
 */
940NESTED(copyin32, 2, 16, ra, IM_RA|IM_S0, 0)
941	LDGP(pv)
942	and	a0, 0x3, t0			/* make sure that src addr   */
943	bne	t0, copyfault			/* is properly aligned.	     */
944	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
945	cmpult	a0, t0, t1			/* is in user space.	     */
946	beq	t1, copyfault			/* if it's not, error out.   */
947	lda	sp, -16(sp)			/* set up stack frame	     */
948	stq	ra, (16-8)(sp)			/* save ra		     */
949	stq	s0, (16-16)(sp)			/* save s0		     */
950	/* Note: GET_CPUINFO clobbers v0, t0, t8...t11. */
951	GET_CPUINFO
952	ldq	t0, CPU_INFO_CURPROC(v0)
953	ldq	s0, P_ADDR(t0)
954	lda	v0, copyerr			/* set up fault handler.     */
955	stq	v0, U_PCB_ONFAULT(s0)
956	ldl	t0, 0(a0)			/* may fault -> copyerr	     */
957	stl	t0, 0(a1)
958	stq	zero, U_PCB_ONFAULT(s0)		/* kill the fault handler.   */
959	ldq	ra, (16-8)(sp)			/* restore ra.		     */
960	ldq	s0, (16-16)(sp)			/* restore s0.		     */
961	lda	sp, 16(sp)			/* kill stack frame.	     */
962	mov	zero, v0			/* return 0. */
963	RET
964	END(copyin32)
965
/*
 * Fault landing pad for the copyin/copyout family: entered with the
 * caller's 16-byte frame still on the stack (s0 = u-area).  copyfault
 * is the early-exit target for the address range checks, which jump
 * here BEFORE any frame is built, so it touches nothing but v0.
 */
966LEAF(copyerr, 0)
967	LDGP(pv)
968	stq	zero, U_PCB_ONFAULT(s0)		/* kill the fault handler.   */
969	ldq	ra, (16-8)(sp)			/* restore ra.		     */
970	ldq	s0, (16-16)(sp)			/* restore s0.		     */
971	lda	sp, 16(sp)			/* kill stack frame.	     */
972copyfault:
973	ldiq	v0, EFAULT			/* return EFAULT.	     */
974	RET
975END(copyerr)
976
977/**************************************************************************/
978
979/*
980 * console 'restart' routine to be placed in HWRPB.
981 */
/*
 * Console "restart" entry: save the full register set into a frame,
 * hand it to console_restart(), then halt.  Installed in the HWRPB.
 */
982LEAF(XentRestart, 1)			/* XXX should be NESTED */
983	.set noat
984	lda	sp,-(FRAME_SIZE*8)(sp)
985	stq	at_reg,(FRAME_AT*8)(sp)
986	.set at
987	stq	v0,(FRAME_V0*8)(sp)
988	stq	a0,(FRAME_A0*8)(sp)
989	stq	a1,(FRAME_A1*8)(sp)
990	stq	a2,(FRAME_A2*8)(sp)
991	stq	a3,(FRAME_A3*8)(sp)
992	stq	a4,(FRAME_A4*8)(sp)
993	stq	a5,(FRAME_A5*8)(sp)
994	stq	s0,(FRAME_S0*8)(sp)
995	stq	s1,(FRAME_S1*8)(sp)
996	stq	s2,(FRAME_S2*8)(sp)
997	stq	s3,(FRAME_S3*8)(sp)
998	stq	s4,(FRAME_S4*8)(sp)
999	stq	s5,(FRAME_S5*8)(sp)
1000	stq	s6,(FRAME_S6*8)(sp)
1001	stq	t0,(FRAME_T0*8)(sp)
1002	stq	t1,(FRAME_T1*8)(sp)
1003	stq	t2,(FRAME_T2*8)(sp)
1004	stq	t3,(FRAME_T3*8)(sp)
1005	stq	t4,(FRAME_T4*8)(sp)
1006	stq	t5,(FRAME_T5*8)(sp)
1007	stq	t6,(FRAME_T6*8)(sp)
1008	stq	t7,(FRAME_T7*8)(sp)
1009	stq	t8,(FRAME_T8*8)(sp)
1010	stq	t9,(FRAME_T9*8)(sp)
1011	stq	t10,(FRAME_T10*8)(sp)
1012	stq	t11,(FRAME_T11*8)(sp)
1013	stq	t12,(FRAME_T12*8)(sp)
1014	stq	ra,(FRAME_RA*8)(sp)
1015
1016	br	pv,1f
10171:	LDGP(pv)
1018
1019	mov	sp,a0				/* arg is the saved frame */
1020	CALL(console_restart)
1021
1022	call_pal PAL_halt
1023	END(XentRestart)
1024
1025/**************************************************************************/
1026
1027#ifdef DDB
1028/*
1029 * Kernel setjmp and longjmp.  Rather minimalist.
1030 *
1031 *	longjmp(label_t *a)
1032 * will generate a "return (1)" from the last call to
1033 *	setjmp(label_t *a)
1034 * by restoring registers from the stack,
1035 */
1036
1037	.set	noreorder
1038
/*
 * Kernel setjmp: save ra, s0-s6 and sp into the label_t at a0, tag it
 * with a magic number (checked by longjmp), and return 0.
 */
1039LEAF(setjmp, 1)
1040	LDGP(pv)
1041
1042	stq	ra, (0 * 8)(a0)			/* return address */
1043	stq	s0, (1 * 8)(a0)			/* callee-saved registers */
1044	stq	s1, (2 * 8)(a0)
1045	stq	s2, (3 * 8)(a0)
1046	stq	s3, (4 * 8)(a0)
1047	stq	s4, (5 * 8)(a0)
1048	stq	s5, (6 * 8)(a0)
1049	stq	s6, (7 * 8)(a0)
1050	stq	sp, (8 * 8)(a0)
1051
1052	ldiq	t0, 0xbeeffedadeadbabe		/* set magic number */
1053	stq	t0, (9 * 8)(a0)
1054
1055	mov	zero, v0			/* return zero */
1056	RET
1057END(setjmp)
1058
/*
 * Kernel longjmp: verify the label_t magic, restore ra, s0-s6 and sp,
 * and return 1 from the matching setjmp.  A bad magic panics.
 */
1059LEAF(longjmp, 1)
1060	LDGP(pv)
1061
1062	ldiq	t0, 0xbeeffedadeadbabe		/* check magic number */
1063	ldq	t1, (9 * 8)(a0)
1064	cmpeq	t0, t1, t0
1065	beq	t0, longjmp_botch		/* if bad, punt */
1066
1067	ldq	ra, (0 * 8)(a0)			/* return address */
1068	ldq	s0, (1 * 8)(a0)			/* callee-saved registers */
1069	ldq	s1, (2 * 8)(a0)
1070	ldq	s2, (3 * 8)(a0)
1071	ldq	s3, (4 * 8)(a0)
1072	ldq	s4, (5 * 8)(a0)
1073	ldq	s5, (6 * 8)(a0)
1074	ldq	s6, (7 * 8)(a0)
1075	ldq	sp, (8 * 8)(a0)
1076
1077	ldiq	v0, 1				/* "return" 1 from setjmp */
1078	RET
1079
1080longjmp_botch:
1081	lda	a0, longjmp_botchmsg
1082	mov	ra, a1				/* report the caller's pc */
1083	CALL(panic)
1084	call_pal PAL_bugchk
1085
1086	.data
1087longjmp_botchmsg:
1088	.asciz	"longjmp botch from %p"
1089	.text
1090END(longjmp)
1091
1092#endif /* DDB */
1093
1094/*
1095 * void sts(int rn, u_int32_t *rval);
1096 * void stt(int rn, u_int64_t *rval);
1097 * void lds(int rn, u_int32_t *rval);
1098 * void ldt(int rn, u_int64_t *rval);
1099 */
1100
1101#ifndef NO_IEEE
/*
 * Generate alpha_sts/stt/lds/ldt: access FP register number a0 (masked
 * to 0..31) through the memory word at a1.  Implemented as a computed
 * jump into a table of 32 two-instruction (\op + RET = 8 byte)
 * entries, hence the s8addq scaling.
 */
1102.macro make_freg_util name, op
1103	LEAF(alpha_\name, 2)
1104	and	a0, 0x1f, a0
1105	s8addq	a0, pv, pv
1106	addq	pv, 1f - alpha_\name, pv
1107	jmp	(pv)
11081:
1109	rn = 0
1110	.rept   32
1111	\op     $f0 + rn, 0(a1)
1112	RET
1113	rn = rn + 1
1114	.endr
1115	END(alpha_\name)
1116.endm
1117/*
1118LEAF(alpha_sts, 2)
1119LEAF(alpha_stt, 2)
1120LEAF(alpha_lds, 2)
1121LEAF(alpha_ldt, 2)
1122 */
1123	make_freg_util sts, sts
1124	make_freg_util stt, stt
1125	make_freg_util lds, lds
1126	make_freg_util ldt, ldt
1127
/*
 * Return the FPCR as an integer in v0.  mf_fpcr only targets an FP
 * register, so bounce the value through a stack slot; $f30 is saved
 * and restored around its use as the temporary.
 */
1128LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16
1129	lda	sp, -framesz(sp)
1130	stt	$f30, f30save(sp)
1131	mf_fpcr	$f30
1132	stt	$f30, rettmp(sp)
1133	ldt	$f30, f30save(sp)
1134	ldq	v0, rettmp(sp)
1135	lda	sp, framesz(sp)
1136	RET
1137END(alpha_read_fpcr)
1138
/*
 * Load the FPCR from the integer in a0.  Mirror of alpha_read_fpcr:
 * the value is staged through a stack slot into $f30, which is saved
 * and restored around its use as the temporary.
 */
1139LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16
1140	lda	sp, -framesz(sp)
1141	stq	a0, fpcrtmp(sp)
1142	stt	$f30, f30save(sp)
1143	ldt	$f30, fpcrtmp(sp)
1144	mt_fpcr	$f30
1145	ldt	$f30, f30save(sp)
1146	lda	sp, framesz(sp)
1147	RET
1148END(alpha_write_fpcr)
1149#endif
1150
1151#if 0
1152NESTED(transfer_check,0,0,ra,0,0)
1153	CALL(U_need_2_run_config)
1154	END(transfer_check)
1155#endif
1156
1157/* Random data that shouldn't be necessary. */
1158	.data
1159EXPORT(cold)
1160	.long 1			/* cold start flag (.long -> _4_ bytes) */
1161	.align 3
1162EXPORT(esym)
1163	.quad 1			/* store end of kernel symbol table here */
1164
1165
1166/**************************************************************************/
1167