/*	$NetBSD: spl.S,v 1.20 2023/05/22 06:50:52 skrll Exp $	*/

/*-
 * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"	/* MP kernel? */
#include "opt_cputype.h"	/* which mips CPU levels do we support? */
#include "opt_ddb.h"

#include <sys/cdefs.h>

#include <mips/asm.h>
#include <mips/cpuregs.h>

RCSID("$NetBSD: spl.S,v 1.20 2023/05/22 06:50:52 skrll Exp $")

#include "assym.h"

	.data
	.globl	_C_LABEL(ipl_sr_map)
	.type	_C_LABEL(ipl_sr_map),@object
	.p2align INT_SCALESHIFT
_C_LABEL(ipl_sr_map):
	.word	0			/* IPL_NONE */
	.word	MIPS_SOFT_INT_MASK_0	/* IPL_SOFT{CLOCK,BIO} */
	.word	MIPS_SOFT_INT_MASK	/* IPL_SOFT{NET,SERIAL} */
	.word	MIPS_INT_MASK		/* IPL_VM */
	.word	MIPS_INT_MASK		/* IPL_SCHED */
	.word	MIPS_INT_MASK		/* IPL_DDB */
	.word	MIPS_INT_MASK		/* IPL_HIGH */

	.text
	.set	noreorder
/*
 * MIPS processor interrupt control
 *
 * Used as building blocks for spl(9) kernel interface.
 */
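/*
 * For orientation, the _splraise helper below behaves roughly like the
 * C sketch that follows.  This is illustration only: the curcpu()->ci_cpl
 * and mips_cp0_status_read()/mips_cp0_status_write() spellings are
 * assumptions standing in for the CPU_INFO_CPL offset and the mfc0/mtc0
 * sequences actually used here, and the real code additionally applies
 * DYNAMIC_STATUS_MASK and briefly clears the interrupt-enable bit while
 * it updates the current priority level.
 *
 *	int
 *	splraise(int ipl)
 *	{
 *		int old = curcpu()->ci_cpl;
 *		if (ipl >= old) {
 *			uint32_t sr = mips_cp0_status_read();
 *			sr = (sr | MIPS_INT_MASK) ^ ipl_sr_map[ipl];
 *			curcpu()->ci_cpl = ipl;    -- done with interrupts off
 *			mips_cp0_status_write(sr); -- commit the new mask
 *		}
 *		return old;	-- previous IPL, handed back in v0
 *	}
 */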
_splraise:
	/*
	 * a0 = SR bits to be cleared for this IPL
	 * a1 = this IPL (IPL_*)
	 * Can only use a0-a3 and v0-v1
	 */
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	NOP_L				# load delay
	INT_L	v0, CPU_INFO_CPL(a3)	# get current IPL from cpu_info
	NOP_L				# load delay
	sltu	v1, a1, v0		# newipl < curipl
	bnez	v1, 1f			# yes, don't change.
	nop				# branch delay
	mfc0	v1, MIPS_COP_0_STATUS	# fetch status register
	MFC0_HAZARD			# load delay
	or	v1, MIPS_INT_MASK	# enable all interrupts
	xor	a0, v1			# disable ipl's masked bits
	DYNAMIC_STATUS_MASK(a0,v0)	# machine dependent masking
#if !defined(__mips_o32)
	or	v1, MIPS_SR_INT_IE	#
	xor	v1, MIPS_SR_INT_IE	# clear interrupt enable bit
	mtc0	v1, MIPS_COP_0_STATUS	# disable interrupts
#else
	mtc0	zero, MIPS_COP_0_STATUS	# disable interrupts
#endif
	COP0_SYNC
#ifdef MULTIPROCESSOR
	PTR_L	a3, L_CPU(MIPS_CURLWP)	# make sure curcpu is correct
	NOP_L				# load delay
#endif
	INT_S	a1, CPU_INFO_CPL(a3)	# save IPL in cpu_info
	mtc0	a0, MIPS_COP_0_STATUS	# store back
	COP0_SYNC
#ifdef PARANOIA
	jr	ra
	nop				# branch delay
#endif /* PARANOIA */
1:
#ifdef PARANOIA
	mfc0	v1, MIPS_COP_0_STATUS
	MFC0_HAZARD			# load delay
	and	a0, v1			# a0 contains bits that MBZ
3:	bnez	a0, 3b			# loop forever
	nop				# branch delay
#endif /* PARANOIA */
	jr	ra
	nop				# branch delay

STATIC_LEAF(_splsw_splx)
STATIC_XLEAF(_splsw_splx_noprof)	# does not get mcount hooks
#ifdef PARANOIA
	sltiu	v0, a0, IPL_HIGH+1	# v0 = a0 <= IPL_HIGH
98:	beqz	v0, 98b
	nop
#endif
	PTR_L	a3, L_CPU(MIPS_CURLWP)	# get cpu_info
	NOP_L				# load delay
	INT_L	a2, CPU_INFO_CPL(a3)	# get IPL from cpu_info
	NOP_L				# load delay
	beq	a0, a2, 2f		# if same, nothing to do
	nop				# branch delay
#ifdef PARANOIA
	sltu	v0, a0, a2		# v0 = a0 < a2
99:	beqz	v0, 99b			# loop forever if false
	nop				# branch delay
#endif /* PARANOIA */
	PTR_LA	v1, _C_LABEL(ipl_sr_map) # get address of table
	sll	a2, a0, INT_SCALESHIFT	# convert IPL to array offset
	PTR_ADDU v1, a2			# add to table addr
	INT_L	a1, (v1)		# load SR bits for this IPL
1:
	mfc0	v1, MIPS_COP_0_STATUS	# fetch status register
	xor	a1, MIPS_INT_MASK	# invert SR bits
	or	v1, a1			# set any bits for this IPL
	DYNAMIC_STATUS_MASK(v1,t0)	# machine dependent masking
#if !defined(__mips_o32)
	or	v0, v1, MIPS_SR_INT_IE	#
	xor	v0, MIPS_SR_INT_IE	# clear interrupt enable bit
	mtc0	v0, MIPS_COP_0_STATUS	# disable interrupts
#else
	mtc0	zero, MIPS_COP_0_STATUS	# disable interrupts
#endif
	COP0_SYNC
	INT_S	a0, CPU_INFO_CPL(a3)	# save IPL in cpu_info (KSEG0)
	mtc0	v1, MIPS_COP_0_STATUS	# store back
	COP0_SYNC
#ifdef PARANOIA
	jr	ra
	nop				# branch delay
#endif /* PARANOIA */
2:
#ifdef PARANOIA
	PTR_LA	v1, _C_LABEL(ipl_sr_map) # get address of table
	sll	a2, a0, INT_SCALESHIFT	# convert IPL to array offset
	PTR_ADDU v1, a2			# add to table addr
	INT_L	a1, (v1)		# load SR bits for this IPL
	mfc0	v1, MIPS_COP_0_STATUS
	MFC0_HAZARD			# load delay
	and	v1, MIPS_INT_MASK
	xor	a1, MIPS_INT_MASK
3:	bne	a1, v1, 3b
	nop				# branch delay
#endif /* PARANOIA */
	jr	ra
	nop				# branch delay
END(_splsw_splx)

STATIC_LEAF(_splsw_spl0)
	INT_L	v1, _C_LABEL(ipl_sr_map) + 4*IPL_NONE
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	or	v1, MIPS_SR_INT_IE	# make sure interrupts are on
	xor	v1, MIPS_INT_MASK	# invert
	mfc0	a0, MIPS_COP_0_STATUS
	MFC0_HAZARD			# load delay
	or	v0, a0, v1
	DYNAMIC_STATUS_MASK(v0,t0)	# machine dependent masking
#if !defined(__mips_o32)
	or	v1, v0, MIPS_SR_INT_IE	#
	xor	v1, MIPS_SR_INT_IE	# clear interrupt enable bit
	mtc0	v1, MIPS_COP_0_STATUS	# disable interrupts
#else
	mtc0	zero, MIPS_COP_0_STATUS	# disable interrupts
#endif
	COP0_SYNC
#if IPL_NONE == 0
	INT_S	zero, CPU_INFO_CPL(a3)	# set ipl to 0
#else
#error IPL_NONE != 0
#endif
	mtc0	v0, MIPS_COP_0_STATUS	# enable all sources
	JR_HB_RA			# return (clear hazards)
END(_splsw_spl0)

STATIC_LEAF(_splsw_setsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS	# save status register
#if !defined(__mips_o32)
	MFC0_HAZARD			# load delay
	or	v0, v1, MIPS_SR_INT_IE	#
	xor	v0, MIPS_SR_INT_IE	# clear interrupt enable bit
	mtc0	v0, MIPS_COP_0_STATUS	# disable interrupts
#else
	mtc0	zero, MIPS_COP_0_STATUS	# disable interrupts
#endif
	COP0_SYNC
	mfc0	v0, MIPS_COP_0_CAUSE	# fetch cause register
	MFC0_HAZARD			# load delay
	or	v0, v0, a0		# set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE	# store back
	COP0_SYNC
	mtc0	v1, MIPS_COP_0_STATUS	# enable interrupts
	JR_HB_RA			# return (clear hazards)
END(_splsw_setsoftintr)

STATIC_LEAF(_splsw_clrsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS	# save status register
#if !defined(__mips_o32)
	MFC0_HAZARD			# load delay
	or	v0, v1, MIPS_SR_INT_IE	#
	xor	v0, MIPS_SR_INT_IE	# clear interrupt enable bit
	mtc0	v0, MIPS_COP_0_STATUS	# disable interrupts
#else
	mtc0	zero, MIPS_COP_0_STATUS	# disable interrupts
#endif
	COP0_SYNC
	mfc0	v0, MIPS_COP_0_CAUSE	# fetch cause register
	nor	a0, zero, a0		# bitwise inverse of a0
	and	v0, v0, a0		# clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE	# store back
	COP0_SYNC
	mtc0	v1, MIPS_COP_0_STATUS	# enable interrupts
	JR_HB_RA			# return (clear hazards)
END(_splsw_clrsoftintr)

STATIC_LEAF(_splsw_splraise)
#if defined(DDB) && __mips >= 32
	tgeiu	a0, IPL_HIGH+1
#endif
	move	a1, a0
	PTR_LA	v1, _C_LABEL(ipl_sr_map)
	sll	a2, a0, INT_SCALESHIFT
	PTR_ADDU v1, a2
	b	_splraise
	INT_L	a0, (v1)
END(_splsw_splraise)

STATIC_LEAF(_splsw_splhigh)
STATIC_XLEAF(_splsw_splhigh_noprof)
	PTR_L	a3, L_CPU(MIPS_CURLWP)
	NOP_L				# load delay
	INT_L	v0, CPU_INFO_CPL(a3)	# get current IPL from cpu_info
	li	a1, IPL_HIGH		#
	beq	v0, a1, 1f		# don't do anything if IPL_HIGH
	nop				# branch delay
	mfc0	v1, MIPS_COP_0_STATUS	# fetch status register
	MFC0_HAZARD			# load delay
	and	a0, v1, MIPS_INT_MASK	# select all interrupts
	xor	a0, v1			# clear all interrupts
	DYNAMIC_STATUS_MASK(a0,a2)	# machine dependent masking
	mtc0	a0, MIPS_COP_0_STATUS	# store back
	COP0_SYNC
#ifdef MULTIPROCESSOR
	PTR_L	a3, L_CPU(MIPS_CURLWP)	# make sure curcpu is correct
	NOP_L				# load delay
#endif
	INT_S	a1, CPU_INFO_CPL(a3)	# save IPL in cpu_info
#ifdef PARANOIA
	jr	ra			# return
	nop				# branch delay
#endif /* PARANOIA */
1:
#ifdef PARANOIA
	mfc0	v1, MIPS_COP_0_STATUS	# fetch status register
	MFC0_HAZARD			# load delay
	and	v1, MIPS_INT_MASK	# any int bits set?
2:	bnez	v1, 2b			# loop forever.
	nop				# branch delay
#endif /* PARANOIA */
	jr	ra			# return
	nop				# branch delay
END(_splsw_splhigh)

	.p2align 4
STATIC_LEAF(_splsw_splddb)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_DDB
	b	_splraise
	li	a1, IPL_DDB
	nop
END(_splsw_splddb)

STATIC_LEAF(_splsw_splsched)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_SCHED
	b	_splraise
	li	a1, IPL_SCHED
	nop
END(_splsw_splsched)

STATIC_LEAF(_splsw_splvm)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_VM
	b	_splraise
	li	a1, IPL_VM
	nop
END(_splsw_splvm)

STATIC_LEAF(_splsw_splsoftserial)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_SOFTSERIAL
	b	_splraise
	li	a1, IPL_SOFTSERIAL
	nop
END(_splsw_splsoftserial)

STATIC_LEAF(_splsw_splsoftnet)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_SOFTNET
	b	_splraise
	li	a1, IPL_SOFTNET
	nop
END(_splsw_splsoftnet)

STATIC_LEAF(_splsw_splsoftbio)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_SOFTBIO
	b	_splraise
	li	a1, IPL_SOFTBIO
	nop
END(_splsw_splsoftbio)

STATIC_LEAF(_splsw_splsoftclock)
	INT_L	a0, _C_LABEL(ipl_sr_map) + 4*IPL_SOFTCLOCK
	b	_splraise
	li	a1, IPL_SOFTCLOCK
	nop
END(_splsw_splsoftclock)

STATIC_LEAF(_splsw_splintr)
	mfc0	ta1, MIPS_COP_0_CAUSE	# get active interrupts
	MFC0_HAZARD			# load delay
					# restrict to hard int bits
	and	v1, ta1, MIPS_HARD_INT_MASK # now have pending interrupts
	li	v0, IPL_NONE		# return IPL_NONE
	beq	v1, zero, 2f		# quick exit if nothing pending
	nop				# branch delay

	li	v0, IPL_VM		# start at IPL_VM
	PTR_LA	ta3, _C_LABEL(ipl_sr_map) + 4*IPL_VM
	INT_L	ta2, -4(ta3)		# load mask for IPL_SOFTSERIAL
	NOP_L				# load delay
	xor	ta2, MIPS_INT_MASK	# invert
	and	v1, ta2			# apply to pending bits

1:
	INT_L	ta2, (ta3)		# get SR bits for ipl in ta2
	NOP_L				# load delay
	xor	ta2, MIPS_INT_MASK	# invert
	and	ta2, v1			# any match to pending intrs?
	beq	ta2, zero, 2f		# no, return ipl
	nop				# branch delay

	PTR_ADDU ta3, 1 << INT_SCALESHIFT # point to next entry
	addiu	v0, 1			# increase ipl by 1
	b	1b			# and check it
	move	v1, ta2			# whittle down pending intrs

2:
	jr	ra
	INT_S	v1, (a0)		# return a new pending mask
END(_splsw_splintr)

STATIC_LEAF(_splsw_splcheck)
#ifdef PARANOIA
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	NOP_L				# load delay
	INT_L	t1, CPU_INFO_CPL(t0)	# get current priority level

	mfc0	t0, MIPS_COP_0_STATUS	# get current status
	MFC0_HAZARD			# load delay
	and	t0, MIPS_INT_MASK	# just want INT bits

	PTR_LA	t2, _C_LABEL(ipl_sr_map)
	sll	t1, INT_SCALESHIFT	# shift cpl to array index
	PTR_ADDU t2, t1
	INT_L	t3, (t2)		# load value
	NOP_L				# load delay
	xor	t3, MIPS_INT_MASK	# invert
1:	bne	t0, t3, 1b		# loop forever if not equal
	nop				# branch delay
#endif /* PARANOIA */
	jr	ra
	nop				# branch delay
END(_splsw_splcheck)

	.rdata
	.globl	_C_LABEL(std_splsw)
_C_LABEL(std_splsw):
	PTR_WORD _C_LABEL(_splsw_splhigh)
	PTR_WORD _C_LABEL(_splsw_splsched)
	PTR_WORD _C_LABEL(_splsw_splvm)
	PTR_WORD _C_LABEL(_splsw_splsoftserial)
	PTR_WORD _C_LABEL(_splsw_splsoftnet)
	PTR_WORD _C_LABEL(_splsw_splsoftbio)
	PTR_WORD _C_LABEL(_splsw_splsoftclock)
	PTR_WORD _C_LABEL(_splsw_splraise)
	PTR_WORD _C_LABEL(_splsw_spl0)
	PTR_WORD _C_LABEL(_splsw_splx)
	PTR_WORD _C_LABEL(_splsw_splhigh_noprof)
	PTR_WORD _C_LABEL(_splsw_splx_noprof)
	PTR_WORD _C_LABEL(_splsw_setsoftintr)
	PTR_WORD _C_LABEL(_splsw_clrsoftintr)
	PTR_WORD _C_LABEL(_splsw_splintr)
	PTR_WORD _C_LABEL(_splsw_splcheck)