/*	$NetBSD: psl.h,v 1.63 2023/07/11 11:02:07 martin Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *
 *	31   28 27   24 23   20 19       14 13 12 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	| impl  |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define	PSR_IMPL	0xf0000000	/* implementation */
#define	PSR_VER		0x0f000000	/* version */
#define	PSR_ICC		0x00f00000	/* integer condition codes */
#define	PSR_N		0x00800000	/* negative */
#define	PSR_Z		0x00400000	/* zero */
#define	PSR_O		0x00200000	/* overflow */
#define	PSR_C		0x00100000	/* carry */
#define	PSR_EC		0x00002000	/* coprocessor enable */
#define	PSR_EF		0x00001000	/* FP enable */
#define	PSR_PIL		0x00000f00	/* interrupt level */
#define	PSR_S		0x00000080	/* supervisor (kernel) mode */
#define	PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define	PSR_ET		0x00000020	/* trap enable */
#define	PSR_CWP		0x0000001f	/* current window pointer */

#define	PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"

/* Interesting spl()s */
#define	PIL_SCSI	3
#define	PIL_FDSOFT	4
#define	PIL_AUSOFT	4
#define	PIL_BIO		5
#define	PIL_VIDEO	5
#define	PIL_TTY		6
#define	PIL_LPT		6
#define	PIL_NET		6
#define	PIL_VM		7
#define	PIL_AUD		8
#define	PIL_CLOCK	10
#define	PIL_FD		11
#define	PIL_SER		12
#define	PIL_STATCLOCK	14
#define	PIL_HIGH	15
#define	PIL_SCHED	PIL_CLOCK
#define	PIL_LOCK	PIL_HIGH

/*
 * SPARC V9 CCR register
 */

#define	ICC_C	0x01L
#define	ICC_V	0x02L
#define	ICC_Z	0x04L
#define	ICC_N	0x08L
#define	XCC_SHIFT	4
#define	XCC_C	(ICC_C<<XCC_SHIFT)
#define	XCC_V	(ICC_V<<XCC_SHIFT)
#define	XCC_Z	(ICC_Z<<XCC_SHIFT)
#define	XCC_N	(ICC_N<<XCC_SHIFT)


/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *	11   10    9     8    7  6    5     4    3     2    1    0
 *	+----+----+-----+-----+------+-----+-----+----+------+----+----+
 *	| IG | MG | CLE | TLE |  MM  | RED | PEF | AM | PRIV | IE | AG |
 *	+----+----+-----+-----+------+-----+-----+----+------+----+----+
 */

#define	PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define	PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define	PSTATE_CLE	0x200	/* current little endian */
#define	PSTATE_TLE	0x100	/* traps little endian */
#define	PSTATE_MM	0x0c0	/* memory model */
#define	PSTATE_MM_TSO	0x000	/* total store order */
#define	PSTATE_MM_PSO	0x040	/* partial store order */
#define	PSTATE_MM_RMO	0x080	/* Relaxed memory order */
#define	PSTATE_RED	0x020	/* RED state */
#define	PSTATE_PEF	0x010	/* enable floating point */
#define	PSTATE_AM	0x008	/* 32-bit address masking */
#define	PSTATE_PRIV	0x004	/* privileged mode */
#define	PSTATE_IE	0x002	/* interrupt enable */
#define	PSTATE_AG	0x001	/* enable alternate globals */

#define	PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
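
/*
 * The *_BITS strings above are snprintb(9)-style bit-field format strings.
 * A minimal, illustrative sketch of how a PSTATE value might be rendered
 * with them (not part of this header; "buf" is just a hypothetical local,
 * and getpstate() is defined further down in this file):
 *
 *	char buf[64];
 *	snprintb(buf, sizeof(buf), PSTATE_BITS, (uint64_t)getpstate());
 *	printf("pstate: %s\n", buf);
 */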

/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define	PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define	PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define	PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define	PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define	PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define	PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define	PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define	PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define	PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define	PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define	PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define	PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif


/*
 * SPARC V9 TSTATE register
 *
 *	39  32 31  24 23  20 19      8 7  5 4   0
 *	+-----+-----+------+----------+----+-----+
 *	| CCR | ASI |  -   |  PSTATE  | -  | CWP |
 *	+-----+-----+------+----------+----+-----+
 */

#define	TSTATE_CWP		0x01f
#define	TSTATE_PSTATE		0xfff00
#define	TSTATE_PSTATE_SHIFT	8
#define	TSTATE_ASI		0xff000000LL
#define	TSTATE_ASI_SHIFT	24
#define	TSTATE_CCR		0xff00000000LL
#define	TSTATE_CCR_SHIFT	32

#define	PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define	TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))
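
/*
 * Worked example for the two conversion macros above (illustrative only):
 * PSR_ICC sits in bits 23..20 of the PSR, while the icc half of the CCR
 * field sits in bits 35..32 of TSTATE, so converting between the two is
 * a shift by TSTATE_CCR_SHIFT - 20 = 12 bits, e.g.
 *
 *	PSRCC_TO_TSTATE(PSR_N)  == (int64_t)0x00800000 << 12 == 0x800000000LL
 *	TSTATECCR_TO_PSR(0x800000000LL) == 0x00800000 == PSR_N
 */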

/*
 * These are here to simplify life.
 */
#define	TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define	TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define	TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)

/*
 * SPARC V9 VER version register.
 *
 *	63    48 47   32 31   24 23  16 15      8 7   5 4      0
 *	+-------+-------+-------+------+---------+-----+--------+
 *	| manuf | impl  | mask  |  -   |  maxtl  |  -  | maxwin |
 *	+-------+-------+-------+------+---------+-----+--------+
 *
 */

#define	VER_MANUF	0xffff000000000000LL
#define	VER_MANUF_SHIFT	48
#define	VER_IMPL	0x0000ffff00000000LL
#define	VER_IMPL_SHIFT	32
#define	VER_MASK	0x00000000ff000000LL
#define	VER_MASK_SHIFT	24
#define	VER_MAXTL	0x000000000000ff00LL
#define	VER_MAXTL_SHIFT	8
#define	VER_MAXWIN	0x000000000000001fLL

#define	MANUF_FUJITSU	0x04	/* Fujitsu SPARC64 */
#define	MANUF_SUN	0x17	/* Sun UltraSPARC */

#define	IMPL_SPARC64		0x01	/* SPARC64 */
#define	IMPL_SPARC64_II		0x02	/* SPARC64-II */
#define	IMPL_SPARC64_III	0x03	/* SPARC64-III */
#define	IMPL_SPARC64_IV		0x04	/* SPARC64-IV */
#define	IMPL_ZEUS		0x05	/* SPARC64-V */
#define	IMPL_OLYMPUS_C		0x06	/* SPARC64-VI */
#define	IMPL_JUPITER		0x07	/* SPARC64-VII */

#define	IMPL_SPITFIRE		0x10	/* UltraSPARC-I */
#define	IMPL_BLACKBIRD		0x11	/* UltraSPARC-II */
#define	IMPL_SABRE		0x12	/* UltraSPARC-IIi */
#define	IMPL_HUMMINGBIRD	0x13	/* UltraSPARC-IIe */
#define	IMPL_CHEETAH		0x14	/* UltraSPARC-III */
#define	IMPL_CHEETAH_PLUS	0x15	/* UltraSPARC-III+ */
#define	IMPL_JALAPENO		0x16	/* UltraSPARC-IIIi */
#define	IMPL_JAGUAR		0x18	/* UltraSPARC-IV */
#define	IMPL_PANTHER		0x19	/* UltraSPARC-IV+ */
#define	IMPL_SERRANO		0x22	/* UltraSPARC-IIIi+ */

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define	KERN_MM		PSTATE_MM_TSO
#define	USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define	WSTATE_KERN	026
#define	WSTATE_USER	022

#define	CWP		0x01f

/*
 * UltraSPARC Ancillary State Registers
 */
#define	SET_SOFTINT	%asr20	/* Set Software Interrupt register bits */
#define	CLEAR_SOFTINT	%asr21	/* Clear Software Interrupt register bits */
#define	SOFTINT		%asr22	/* Software Interrupt register */
#define	TICK_CMPR	%asr23	/* TICK Compare register */
#define	STICK		%asr24	/* STICK register */
#define	STICK_CMPR	%asr25	/* STICK Compare register */

/* SOFTINT bit descriptions */
#define	TICK_INT	0x01		/* CPU clock timer interrupt */
#define	STICK_INT	(0x1<<16)	/* system clock timer interrupt */

/* 64-byte alignment -- this seems the best place to put this. */
#define	SPARC64_BLOCK_SIZE	64
#define	SPARC64_BLOCK_ALIGN	0x3f
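
/*
 * Illustrative use of the two block constants above (a sketch, not part of
 * this header; "va" stands for some hypothetical virtual address):
 *
 *	vaddr_t blk = va & ~(vaddr_t)SPARC64_BLOCK_ALIGN;   start of block
 *	vaddr_t off = va &  (vaddr_t)SPARC64_BLOCK_ALIGN;   offset in block
 */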

#if (defined(_KERNEL) || defined(_KMEMUSER)) && !defined(_LOCORE)
typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;
#endif /* _KERNEL|_KMEMUSER&!_LOCORE */

#if defined(_KERNEL) && !defined(_LOCORE)

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

/*
 * Put "memory" to asm inline on sun4v to avoid issuing rdpr %ver
 * before checking cputyp as a result of code moving by compiler
 * optimization.
 */
#ifdef SUN4V
#define constasm_clobbers "memory"
#else
#define constasm_clobbers
#endif

/*
 * Inlines for manipulating privileged and ancillary state registers
 */
#define SPARC64_RDCONST_DEF(rd, name, reg, type)			\
static __inline __constfunc type get##name(void)			\
{									\
	type _val;							\
	__asm(#rd " %" #reg ",%0" : "=r" (_val) : : constasm_clobbers); \
	return _val;							\
}
#define SPARC64_RD_DEF(rd, name, reg, type)				\
static __inline type get##name(void)					\
{									\
	type _val;							\
	__asm volatile(#rd " %" #reg ",%0" : "=r" (_val));		\
	return _val;							\
}
#define SPARC64_WR_DEF(wr, name, reg, type)				\
static __inline void set##name(type _val)				\
{									\
	__asm volatile(#wr " %0,0,%" #reg : : "r" (_val) : "memory");	\
}

#ifdef __arch64__
#define SPARC64_RDCONST64_DEF(rd, name, reg)				\
	SPARC64_RDCONST_DEF(rd, name, reg, uint64_t)
#define SPARC64_RD64_DEF(rd, name, reg)	SPARC64_RD_DEF(rd, name, reg, uint64_t)
#define SPARC64_WR64_DEF(wr, name, reg)	SPARC64_WR_DEF(wr, name, reg, uint64_t)
#else
#define SPARC64_RDCONST64_DEF(rd, name, reg)				\
static __inline __constfunc uint64_t get##name(void)			\
{									\
	uint32_t _hi, _lo;						\
	__asm(#rd " %" #reg ",%0; srl %0,0,%1; srlx %0,32,%0"		\
		: "=r" (_hi), "=r" (_lo) : : constasm_clobbers);	\
	return ((uint64_t)_hi << 32) | _lo;				\
}
#define SPARC64_RD64_DEF(rd, name, reg)					\
static __inline uint64_t get##name(void)				\
{									\
	uint32_t _hi, _lo;						\
	__asm volatile(#rd " %" #reg ",%0; srl %0,0,%1; srlx %0,32,%0"	\
		: "=r" (_hi), "=r" (_lo));				\
	return ((uint64_t)_hi << 32) | _lo;				\
}
#define SPARC64_WR64_DEF(wr, name, reg)					\
static __inline void set##name(uint64_t _val)				\
{									\
	uint32_t _hi = _val >> 32, _lo = _val;				\
	__asm volatile("sllx %1,32,%0; or %0,%2,%0; " #wr " %0,0,%" #reg \
		: "=&r" (_hi) /* scratch register */			\
		: "r" (_hi), "r" (_lo) : "memory");			\
}
#endif

#define SPARC64_RDPR_DEF(name, reg, type)	SPARC64_RD_DEF(rdpr, name, reg, type)
#define SPARC64_WRPR_DEF(name, reg, type)	SPARC64_WR_DEF(wrpr, name, reg, type)
#define SPARC64_RDPR64_DEF(name, reg)		SPARC64_RD64_DEF(rdpr, name, reg)
#define SPARC64_WRPR64_DEF(name, reg)		SPARC64_WR64_DEF(wrpr, name, reg)
#define SPARC64_RDASR64_DEF(name, reg)		SPARC64_RD64_DEF(rd, name, reg)
#define SPARC64_WRASR64_DEF(name, reg)		SPARC64_WR64_DEF(wr, name, reg)

/* Tick Register (PR 4) */
SPARC64_RDPR64_DEF(tick, %tick)			/* gettick() */
SPARC64_WRPR64_DEF(tick, %tick)			/* settick() */

/* Processor State Register (PR 6) */
SPARC64_RDPR_DEF(pstate, %pstate, int)		/* getpstate() */
SPARC64_WRPR_DEF(pstate, %pstate, int)		/* setpstate() */

/* Trap Level Register (PR 7) */
SPARC64_RDPR_DEF(tl, %tl, int)			/* gettl() */

/* Current Window Pointer Register (PR 9) */
SPARC64_RDPR_DEF(cwp, %cwp, int)		/* getcwp() */
SPARC64_WRPR_DEF(cwp, %cwp, int)		/* setcwp() */

/* Version Register (PR 31) */
SPARC64_RDCONST64_DEF(rdpr, ver, %ver)		/* getver() */

/* System Tick Register (ASR 24) */
SPARC64_RDASR64_DEF(stick, STICK)		/* getstick() */
SPARC64_WRASR64_DEF(stick, STICK)		/* setstick() */

/* System Tick Compare Register (ASR 25) */
SPARC64_RDASR64_DEF(stickcmpr, STICK_CMPR)	/* getstickcmpr() */

/* Some simple macros to check the cpu type. */
#define GETVER_CPU_MASK()	((getver() & VER_MASK) >> VER_MASK_SHIFT)
#define GETVER_CPU_IMPL()	((getver() & VER_IMPL) >> VER_IMPL_SHIFT)
#define GETVER_CPU_MANUF()	((getver() & VER_MANUF) >> VER_MANUF_SHIFT)
#define CPU_IS_SPITFIRE()	(GETVER_CPU_IMPL() == IMPL_SPITFIRE)
#define CPU_IS_HUMMINGBIRD()	(GETVER_CPU_IMPL() == IMPL_HUMMINGBIRD)
#define CPU_IS_USIIIi()		((GETVER_CPU_IMPL() == IMPL_JALAPENO) || \
				 (GETVER_CPU_IMPL() == IMPL_SERRANO))
#define CPU_IS_USIII_UP()	(GETVER_CPU_IMPL() >= IMPL_CHEETAH)
#define CPU_IS_SPARC64_V_UP()	(GETVER_CPU_MANUF() == MANUF_FUJITSU && \
				 GETVER_CPU_IMPL() >= IMPL_ZEUS)

static __inline int
intr_disable(void)
{
	int pstate = getpstate();

	setpstate(pstate & ~PSTATE_IE);
	return pstate;
}

static __inline void
intr_restore(int pstate)
{
	setpstate(pstate);
}

/*
 * GCC pseudo-functions for manipulating PIL
 */

#ifdef SPLDEBUG
void prom_printf(const char *fmt, ...);
extern int printspl;
#define SPLPRINT(x)							\
{									\
	if (printspl) {							\
		int i = 10000000;					\
		prom_printf x ;						\
		while (i--)						\
			;						\
	}								\
}
#define	SPL(name, newpil)						\
static __inline int name##X(const char* file, int line)		\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	SPLPRINT(("{%s:%d %d=>%d}", file, line, oldpil, newpil));	\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil)						\
static __inline int name##X(const char* file, int line)		\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	if (newpil <= oldpil)						\
		return oldpil;						\
	SPLPRINT(("{%s:%d %d->%d}", file, line, oldpil, newpil));	\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}

#else
#define SPLPRINT(x)
#define	SPL(name, newpil)						\
static __inline __always_inline int name(void)				\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil)						\
static __inline __always_inline int name(void)				\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	if (newpil <= oldpil)						\
		return oldpil;						\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}
#endif

static __inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}
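
/*
 * Illustrative only (not part of this header): machine-independent code
 * typically builds the cookie once and later uses it to raise the IPL
 * around a critical section, e.g.
 *
 *	ipl_cookie_t ic = makeiplcookie(IPL_VM);
 *	...
 *	int s = splraiseipl(ic);
 *	... critical section at or above IPL_VM ...
 *	splx(s);
 */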

static __inline int __attribute__((__unused__))
splraiseipl(ipl_cookie_t icookie)
{
	int newpil = icookie._ipl;
	int oldpil;

	/*
	 * NetBSD/sparc64's IPL_* constants equate directly to the
	 * corresponding PIL_* names; no need to map them here.
	 */
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));
	if (newpil <= oldpil)
		return (oldpil);
	__asm volatile("wrpr %0,0,%%pil" : : "r" (newpil) : "memory");
	return (oldpil);
}

SPL(spl0, 0)

SPLHOLD(splsoftint, 1)
#define	splsoftclock	splsoftint
#define	splsoftnet	splsoftint

SPLHOLD(splsoftserial, 4)

/* audio software interrupts are at software level 4 */
SPLHOLD(splausoft, PIL_AUSOFT)

/* floppy software interrupts are at software level 4 too */
SPLHOLD(splfdsoft, PIL_FDSOFT)

/*
 * Memory allocation (must be as high as highest network, tty, or disk device)
 */
SPLHOLD(splvm, PIL_VM)

SPLHOLD(splsched, PIL_SCHED)

SPLHOLD(splhigh, PIL_HIGH)

/* splx does not have a return value */
#ifdef SPLDEBUG
#define	spl0()		spl0X(__FILE__, __LINE__)
#define	splsoftint()	splsoftintX(__FILE__, __LINE__)
#define	splsoftserial()	splsoftserialX(__FILE__, __LINE__)
#define	splausoft()	splausoftX(__FILE__, __LINE__)
#define	splfdsoft()	splfdsoftX(__FILE__, __LINE__)
#define	splvm()		splvmX(__FILE__, __LINE__)
#define	splclock()	splclockX(__FILE__, __LINE__)
#define	splfd()		splfdX(__FILE__, __LINE__)
#define	splzs()		splzsX(__FILE__, __LINE__)
#define	splserial()	splserialX(__FILE__, __LINE__)
#define	splaudio()	splaudioX(__FILE__, __LINE__)
#define	splstatclock()	splstatclockX(__FILE__, __LINE__)
#define	splsched()	splschedX(__FILE__, __LINE__)
#define	spllock()	spllockX(__FILE__, __LINE__)
#define	splhigh()	splhighX(__FILE__, __LINE__)
#define	splx(x)		splxX((x),__FILE__, __LINE__)

static __inline void splxX(int newpil, const char *file, int line)
#else
static __inline __always_inline void splx(int newpil)
#endif
{
#ifdef SPLDEBUG
	int pil;

	__asm volatile("rdpr %%pil,%0" : "=r" (pil));
	SPLPRINT(("{%d->%d}", pil, newpil));
#endif
	__asm volatile("wrpr %%g0,%0,%%pil" : : "rn" (newpil) : "memory");
}
#endif /* KERNEL && !_LOCORE */

#endif /* PSR_IMPL */