/*	$NetBSD: rmixl_subr.S,v 1.7 2020/07/26 07:48:07 simonb Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cliff Neighbors.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>

#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: rmixl_subr.S,v 1.7 2020/07/26 07:48:07 simonb Exp $");

#include "assym.h"

	.set	noreorder
	.set	arch=xlr
	.text

/*
 * read XLS Processor Control register
 *
 * uint64_t rmixl_mfcr(u_int cr);
 */
LEAF(rmixl_mfcr)
#if defined(__mips_o32)
#error O32 not supported
#endif
	j	ra
	 mfcr	v0, a0
END(rmixl_mfcr)

/*
 * write XLS Processor Control register
 *
 * void rmixl_mtcr(u_int cr, uint64_t val);
 */
LEAF(rmixl_mtcr)
#if defined(__mips_o32)
#error O32 not supported
#endif
	j	ra
	 mtcr	a1, a0
END(rmixl_mtcr)
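
/*
 * Usage sketch (not code from this file; the register index and bit
 * name below are hypothetical placeholders, not names defined here):
 * C callers typically pair these two to do a read-modify-write of a
 * processor control register:
 *
 *	uint64_t v = rmixl_mfcr(RMIXL_EXAMPLE_CR);
 *	rmixl_mtcr(RMIXL_EXAMPLE_CR, v | RMIXL_EXAMPLE_BIT);
 */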

/*
 * void rmixl_eirr_ack(uint64_t eimr, uint64_t vecbit, uint64_t preserve)
 *
 *	acknowledge in the EIRR the interrupt we are about to handle;
 *	all interrupts are disabled first to prevent a race in which,
 *	e.g., a softint bit set from a higher-priority interrupt would
 *	be clobbered by the EIRR read-modify-write
 */
LEAF(rmixl_eirr_ack)
	dmtc0	zero, MIPS_COP_0_EIMR	/* EIMR = 0 */
	COP0_SYNC
	dmfc0	a3, MIPS_COP_0_EIRR	/* a3 = EIRR */
	and	a3, a2			/* a3 &= preserve */
	or	a3, a1			/* a3 |= vecbit */
	dmtc0	a3, MIPS_COP_0_EIRR	/* EIRR = a3 */
	COP0_SYNC
	dmtc0	a0, MIPS_COP_0_EIMR	/* EIMR = eimr */
	JR_HB_RA			/* return with hazard barrier */
END(rmixl_eirr_ack)
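
/*
 * Usage sketch (hypothetical caller and variable names, not code from
 * this file): an interrupt dispatcher acknowledges the vector it is
 * about to service while preserving pending bits it must not lose:
 *
 *	rmixl_eirr_ack(saved_eimr, 1ULL << vec, pending_preserve_mask);
 */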

#ifdef MULTIPROCESSOR
/*
 * rmixlfw_wakeup_cpu(func, args, mask, callback)
 *
 *	invoke the firmware 'callback' (the original arguments are
 *	passed through untouched in a0..a2) to wake up secondary CPU(s)
 */
NESTED(rmixlfw_wakeup_cpu, CALLFRAME_SIZ+4*SZREG, ra)
	PTR_ADDU	sp, sp, -(CALLFRAME_SIZ+4*SZREG)
	REG_S		ra, CALLFRAME_RA(sp)
	REG_S		s0, CALLFRAME_S0(sp)
	REG_S		gp, CALLFRAME_SIZ+0*SZREG(sp)
	REG_S		t8, CALLFRAME_SIZ+1*SZREG(sp)
	mfc0		t0, MIPS_COP_0_STATUS
	REG_S		t0, CALLFRAME_SIZ+2*SZREG(sp)

	move		s0, sp			/* save sp */
#ifdef _LP64
	dsll32		t0, sp, 0		/* nuke upper half */
	dsrl32		t0, t0, 0		/*  "    "     "   */
	li		t1, MIPS_KSEG0_START
	or		sp, t0, t1		/* rebase sp in KSEG0 */
#endif
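	/*
	 * the #ifdef _LP64 sequence above keeps only the low 32 bits
	 * of sp and rebases them in KSEG0, presumably (an inference,
	 * not documented in this file) because the firmware callback
	 * runs with 32-bit addressing and needs a stack it can reach
	 */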
	jalr		a3			/* callback to firmware */
	 nop
	move		sp, s0			/* restore sp */

	REG_L		t0, CALLFRAME_SIZ+2*SZREG(sp)
	mtc0		t0, MIPS_COP_0_STATUS
	REG_L		t8, CALLFRAME_SIZ+1*SZREG(sp)
	REG_L		gp, CALLFRAME_SIZ+0*SZREG(sp)
	REG_L		s0, CALLFRAME_S0(sp)
	REG_L		ra, CALLFRAME_RA(sp)
	jr		ra
	 PTR_ADDU	sp, sp, (CALLFRAME_SIZ+4*SZREG)
END(rmixlfw_wakeup_cpu)
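
/*
 * Usage sketch (hypothetical caller and argument values, not code from
 * this file): MD startup code hands the firmware an entry point and an
 * argument block for the CPU selected by 'mask', e.g.:
 *
 *	rmixlfw_wakeup_cpu(rmixl_cpu_trampoline, trampoline_args,
 *	    1 << cpuid, firmware_wakeup_callback);
 */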

/*
 * rmixl_cpu_trampoline - entry point for subordinate (non-#0) CPU wakeup
 */
NESTED(rmixl_cpu_trampoline, CALLFRAME_SIZ, ra)
#ifdef _LP64
	/*
	 * reconstruct the trampoline args address:
	 * sign-extend the 32-bit KSEG0 address in a0
	 * to form a proper 64-bit KSEG0 address
	 */
	sll		s0, a0, 0
	li		t0, MIPS_SR_KX
#else
	li		t0, 0
#endif

	mtc0		zero, MIPS_COP_0_EIMR	/* disable all interrupts in EIMR */

	mtc0		t0, MIPS_COP_0_STATUS

	/* ensure the exception base field of COP_0_EBASE is 0 */
	mfc0		t0, MIPS_COP_0_EBASE
	and		t0, t0, 0x3ff
	mtc0		t0, MIPS_COP_0_EBASE
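	/*
	 * (masking with 0x3ff preserves only the low CPU-number bits
	 * of EBASE; on standard MIPS that field is read-only anyway,
	 * so the write mainly clears the exception base back to the
	 * default KSEG0 region.  This is an inference from the MIPS
	 * architecture, not stated in this file.)
	 */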

	/*
	 * load our stack pointer from trampoline args
	 */
	REG_L		sp, 0*SZREG(s0)		/* XXX ta_sp */

	/*
	 * load our (idle) lwp from trampoline args
	 * save in t8, the register dedicated as 'mips_curlwp'
	 */
	REG_L		t8, 1*SZREG(s0)		/* XXX ta_lwp */

	/*
	 * load our ta_cpuinfo from trampoline args, pass it in a1,
	 * stash it in the OSSCRATCH cop0 register as well, and
	 * jump to the common mips cpu_trampoline
	 */
	REG_L		a1, 2*SZREG(s0)		/* XXX ta_cpuinfo */
	dmtc0		a1, MIPS_COP_0_OSSCRATCH
	j		cpu_trampoline
	 nop

	/* NOTREACHED */

END(rmixl_cpu_trampoline)
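
/*
 * Layout of the trampoline args block read above (inferred from the
 * SZREG offsets used; the authoritative definition lives in the MD
 * headers, not in this file):
 *
 *	0*SZREG		ta_sp		initial stack pointer
 *	1*SZREG		ta_lwp		idle lwp, installed as 'mips_curlwp'
 *	2*SZREG		ta_cpuinfo	cpu_info pointer, passed to
 *					cpu_trampoline and kept in OSSCRATCH
 */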

#endif	/* MULTIPROCESSOR */