xref: /netbsd-src/sys/arch/mips/rmi/rmixl_subr.S (revision b05a0344bfc40c54d872fa48b973cfcde0f47f7c)
1/*	$NetBSD: rmixl_subr.S,v 1.7 2020/07/26 07:48:07 simonb Exp $	*/
2
3/*-
4 * Copyright (c) 2010 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Cliff Neighbors.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#include "opt_cputype.h"
33#include "opt_multiprocessor.h"
34
35#include <sys/cdefs.h>
36
37#include <mips/asm.h>
38#include <mips/cpuregs.h>
39
40RCSID("$NetBSD: rmixl_subr.S,v 1.7 2020/07/26 07:48:07 simonb Exp $");
41
42#include "assym.h"
43
44	.set    noreorder
45	.set	arch=xlr
46	.text
47
/*
 * uint64_t rmixl_mfcr(u_int cr);
 *
 *	Read the RMI XLR/XLS Processor Control register selected by 'cr',
 *	using the XLR-specific 'mfcr' instruction.  The read is issued in
 *	the delay slot of the return jump; the 64-bit value comes back in v0.
 */
LEAF(rmixl_mfcr)
#if defined(__mips_o32)
#error O32 not supported
#endif
	j	ra
	 mfcr	v0, a0			/* v0 = CR[cr] (delay slot) */
END(rmixl_mfcr)
60
/*
 * void rmixl_mtcr(u_int cr, uint64_t val);
 *
 *	Write 'val' to the RMI XLR/XLS Processor Control register selected
 *	by 'cr', using the XLR-specific 'mtcr' instruction issued in the
 *	delay slot of the return jump.
 */
LEAF(rmixl_mtcr)
#if defined(__mips_o32)
#error O32 not supported
#endif
	j	ra
	 mtcr	a1, a0			/* CR[cr] = val (delay slot) */
END(rmixl_mtcr)
73
/*
 * void rmixl_eirr_ack(uint64_t eimr, uint64_t vecbit, uint64_t preserve)
 *
 *	Acknowledge in the EIRR the interrupt we are about to handle.
 *
 *	All interrupts are masked (EIMR = 0) for the duration of the EIRR
 *	read-modify-write; otherwise a bit set concurrently by a higher
 *	priority interrupt (e.g. a softint request) could be clobbered by
 *	our write-back.  The caller's mask is restored from 'eimr' before
 *	returning.  Instruction order here is load-bearing: each CP0 write
 *	is followed by the required hazard barrier.
 */
LEAF(rmixl_eirr_ack)
	dmtc0	zero, MIPS_COP_0_EIMR	/* mask all interrupts: EIMR = 0 */
	COP0_SYNC
	dmfc0	a3, MIPS_COP_0_EIRR	/* a3 = current EIRR */
	and	a3, a2			/* keep only the 'preserve' bits */
	or	a3, a1			/* raise the bit for the vector we ack */
	dmtc0	a3, MIPS_COP_0_EIRR	/* write the updated EIRR back */
	COP0_SYNC
	dmtc0	a0, MIPS_COP_0_EIMR	/* restore caller's mask: EIMR = eimr */
	JR_HB_RA			/* return via jump with hazard barrier */
END(rmixl_eirr_ack)
93
94#ifdef MULTIPROCESSOR
/*
 * rmixlfw_wakeup_cpu(func, args, mask, callback)
 *
 *	Invoke the RMI firmware 'callback' (a3) to wake secondary CPUs;
 *	func/args/mask are passed through untouched in a0-a2.
 *
 *	The firmware runs with a 32-bit view of the world, so on _LP64 the
 *	stack pointer handed to it is rewritten as a KSEG0 address (upper
 *	32 bits replaced with MIPS_KSEG0_START).  Everything the caller
 *	relies on — ra, s0, gp, t8 (mips_curlwp) and COP0 STATUS — is
 *	saved across the firmware call and restored afterwards.  The frame
 *	reserves 4 extra register slots (3 used; 4th keeps sp aligned).
 */
NESTED(rmixlfw_wakeup_cpu, CALLFRAME_SIZ+4*SZREG, ra)
	PTR_ADDU	sp, sp, -(CALLFRAME_SIZ+4*SZREG)
	REG_S		ra, CALLFRAME_RA(sp)
	REG_S		s0, CALLFRAME_S0(sp)
	REG_S		gp, CALLFRAME_SIZ+0*SZREG(sp)
	REG_S		t8, CALLFRAME_SIZ+1*SZREG(sp)
	mfc0		t0, MIPS_COP_0_STATUS
	REG_S		t0, CALLFRAME_SIZ+2*SZREG(sp)

	move		s0, sp			/* keep the real sp in s0 */
#ifdef _LP64
	dsll32		t0, sp, 0		/* shift out the upper half ... */
	dsrl32		t0, t0, 0		/* ... leaving low 32 bits only */
	li		t1, MIPS_KSEG0_START
	or		sp, t0, t1		/* sp is now a KSEG0 address */
#endif
	jalr		a3			/* call into the firmware */
	 nop
	move		sp, s0			/* back to our own stack */

	REG_L		t0, CALLFRAME_SIZ+2*SZREG(sp)
	mtc0		t0, MIPS_COP_0_STATUS
	REG_L		t8, CALLFRAME_SIZ+1*SZREG(sp)
	REG_L		gp, CALLFRAME_SIZ+0*SZREG(sp)
	REG_L		s0, CALLFRAME_S0(sp)
	REG_L		ra, CALLFRAME_RA(sp)
	jr		ra
	 PTR_ADDU	sp, sp, (CALLFRAME_SIZ+4*SZREG)	/* pop frame (delay slot) */
END(rmixlfw_wakeup_cpu)
127
/*
 * rmixl_cpu_trampoline - entry point for subordinate (non-#0) CPU wakeup
 *
 *	Entered from firmware with a0 = 32-bit KSEG0 address of our
 *	rmixlfw trampoline args.  Establish minimal CP0 state, pick up
 *	sp, the idle lwp (kept in t8, the dedicated 'mips_curlwp'
 *	register) and our cpu_info from the args, then jump to the
 *	MI cpu_trampoline.  Never returns.
 */
NESTED(rmixl_cpu_trampoline, CALLFRAME_SIZ, ra)
#ifdef _LP64
	/*
	 * reconstruct trampoline args addr:
	 * sign-extend 32 bit KSEG0 address in a0
	 * to make proper 64 bit KSEG0 addr
	 */
	sll		s0, a0, 0
	li		t0, MIPS_SR_KX
#else
	/*
	 * BUG FIX: s0 was only assigned in the _LP64 branch, leaving the
	 * args pointer uninitialized here although it is dereferenced
	 * below; the 32-bit KSEG0 address is usable as-is.
	 */
	move		s0, a0
	li		t0, 0
#endif

	mtc0		zero, MIPS_COP_0_EIMR	/* disable all in MIPS_COP_0_EIMR */

	mtc0		t0, MIPS_COP_0_STATUS	/* KX on _LP64, otherwise 0 */

	/* ensure COP_0_EBASE field 'EBASE' is 0 */
	mfc0		t0, MIPS_COP_0_EBASE	/* MIPS_COP_0_EBASE */
	and		t0, t0, 0x3ff		/* keep the CPUNum field only */
	mtc0		t0, MIPS_COP_0_EBASE	/* MIPS_COP_0_EBASE */

	/*
	 * load our stack pointer from trampoline args
	 */
	REG_L		sp, 0*SZREG(s0)		/* XXX ta_sp */

	/*
	 * load our (idle) lwp from trampoline args
	 * save in t8 reg dedicated as 'mips_curlwp'
	 */
	REG_L		t8, 1*SZREG(s0)		/* XXX ta_lwp */

	/*
	 * load our ta_cpuinfo from trampoline args and pass in a1;
	 * stash it in OSSCRATCH too, then jump to the MI cpu_trampoline
	 */
	REG_L		a1, 2*SZREG(s0)		/* XXX ta_cpuinfo */
	dmtc0		a1, MIPS_COP_0_OSSCRATCH
	j		cpu_trampoline
	 nop

	/* NOTREACHED */

END(rmixl_cpu_trampoline)
176
177#endif	/* MULTIPROCESSOR */
178