/*	$NetBSD: psl.h,v 1.49 2011/07/12 07:51:34 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *                                      1 1
 *   31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *  +-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *  |  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *  |       |       |n z v c|           |C|F|       | |S|T|         |
 *  +-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define	PSR_IMPL	0xf0000000	/* implementation */
#define	PSR_VER		0x0f000000	/* version */
#define	PSR_ICC		0x00f00000	/* integer condition codes */
#define	PSR_N		0x00800000	/* negative */
#define	PSR_Z		0x00400000	/* zero */
#define	PSR_O		0x00200000	/* overflow */
#define	PSR_C		0x00100000	/* carry */
#define	PSR_EC		0x00002000	/* coprocessor enable */
#define	PSR_EF		0x00001000	/* FP enable */
#define	PSR_PIL		0x00000f00	/* interrupt level */
#define	PSR_S		0x00000080	/* supervisor (kernel) mode */
#define	PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define	PSR_ET		0x00000020	/* trap enable */
#define	PSR_CWP		0x0000001f	/* current window pointer */

#define	PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"

/* Interesting spl()s */
#define	PIL_SCSI	3
#define	PIL_FDSOFT	4
#define	PIL_AUSOFT	4
#define	PIL_BIO		5
#define	PIL_VIDEO	5
#define	PIL_TTY		6
#define	PIL_LPT		6
#define	PIL_NET		6
#define	PIL_VM		7
#define	PIL_AUD		8
#define	PIL_CLOCK	10
#define	PIL_FD		11
#define	PIL_SER		12
#define	PIL_STATCLOCK	14
#define	PIL_HIGH	15
#define	PIL_SCHED	PIL_CLOCK
#define	PIL_LOCK	PIL_HIGH

/*
 * SPARC V9 CCR register
 */

#define	ICC_C	0x01L
#define	ICC_V	0x02L
#define	ICC_Z	0x04L
#define	ICC_N	0x08L
#define	XCC_SHIFT	4
#define	XCC_C	(ICC_C<<XCC_SHIFT)
#define	XCC_V	(ICC_V<<XCC_SHIFT)
#define	XCC_Z	(ICC_Z<<XCC_SHIFT)
#define	XCC_N	(ICC_N<<XCC_SHIFT)


/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4    3     2     1    0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define	PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define	PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define	PSTATE_CLE	0x200	/* current little endian */
#define	PSTATE_TLE	0x100	/* traps little endian */
#define	PSTATE_MM	0x0c0	/* memory model */
#define	PSTATE_MM_TSO	0x000	/* total store order */
#define	PSTATE_MM_PSO	0x040	/* partial store order */
#define	PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define	PSTATE_RED	0x020	/* RED state */
#define	PSTATE_PEF	0x010	/* enable floating point */
#define	PSTATE_AM	0x008	/* 32-bit address masking */
#define	PSTATE_PRIV	0x004	/* privileged mode */
#define	PSTATE_IE	0x002	/* interrupt enable */
#define	PSTATE_AG	0x001	/* enable alternate globals */

#define	PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"


/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define	PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define	PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define	PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define	PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define	PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define	PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define	PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define	PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define	PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define	PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define	PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define	PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif


/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 18 17     8 7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define	TSTATE_CWP		0x01f
#define	TSTATE_PSTATE		0x6ff00
#define	TSTATE_PSTATE_SHIFT	8
#define	TSTATE_ASI		0xff000000LL
#define	TSTATE_ASI_SHIFT	24
#define	TSTATE_CCR		0xff00000000LL
#define	TSTATE_CCR_SHIFT	32

#define	PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define	TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))

/*
 * These are here to simplify life.
 */
#define	TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define	TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define	TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define	TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)

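/*
 * Usage sketch (illustrative only; buffer name and size are arbitrary):
 * the *_BITS strings above use the old-style bitmask format understood
 * by snprintb(9), so kernel code can decode a PSTATE value for debug
 * output roughly like this:
 *
 *	char bits[64];
 *
 *	snprintb(bits, sizeof(bits), PSTATE_BITS, getpstate());
 *	printf("pstate=%s\n", bits);
 *
 * getpstate() is one of the privileged-register inlines defined below
 * (kernel only).
 */
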
/*
 * SPARC V9 VER version register.
 *
 *   63   48 47  32 31  24 23 16 15    8 7 5 4      0
 *  +-------+------+------+-----+-------+---+--------+
 *  | manuf | impl | mask |  -  | maxtl | - | maxwin |
 *  +-------+------+------+-----+-------+---+--------+
 *
 */

#define	VER_MANUF	0xffff000000000000LL
#define	VER_MANUF_SHIFT	48
#define	VER_IMPL	0x0000ffff00000000LL
#define	VER_IMPL_SHIFT	32
#define	VER_MASK	0x00000000ff000000LL
#define	VER_MASK_SHIFT	24
#define	VER_MAXTL	0x000000000000ff00LL
#define	VER_MAXTL_SHIFT	8
#define	VER_MAXWIN	0x000000000000001fLL

#define	MANUF_FUJITSU	0x04	/* Fujitsu SPARC64 */
#define	MANUF_SUN	0x17	/* Sun UltraSPARC */

#define	IMPL_SPARC64		0x01	/* SPARC64 */
#define	IMPL_SPARC64_II		0x02	/* SPARC64-II */
#define	IMPL_SPARC64_III	0x03	/* SPARC64-III */
#define	IMPL_SPARC64_IV		0x04	/* SPARC64-IV */
#define	IMPL_ZEUS		0x05	/* SPARC64-V */
#define	IMPL_OLYMPUS_C		0x06	/* SPARC64-VI */
#define	IMPL_JUPITER		0x07	/* SPARC64-VII */

#define	IMPL_SPITFIRE		0x10	/* UltraSPARC-I */
#define	IMPL_BLACKBIRD		0x11	/* UltraSPARC-II */
#define	IMPL_SABRE		0x12	/* UltraSPARC-IIi */
#define	IMPL_HUMMINGBIRD	0x13	/* UltraSPARC-IIe */
#define	IMPL_CHEETAH		0x14	/* UltraSPARC-III */
#define	IMPL_CHEETAH_PLUS	0x15	/* UltraSPARC-III+ */
#define	IMPL_JALAPENO		0x16	/* UltraSPARC-IIIi */
#define	IMPL_JAGUAR		0x18	/* UltraSPARC-IV */
#define	IMPL_PANTHER		0x19	/* UltraSPARC-IV+ */
#define	IMPL_SERRANO		0x22	/* UltraSPARC-IIIi+ */

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define	KERN_MM		PSTATE_MM_TSO
#define	USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define	WSTATE_KERN	026
#define	WSTATE_USER	022

#define	CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define	SPARC64_BLOCK_SIZE	64
#define	SPARC64_BLOCK_ALIGN	0x3f

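/*
 * Usage sketch (illustrative only): SPARC64_BLOCK_ALIGN is the mask form
 * of SPARC64_BLOCK_SIZE, so rounding a byte count up to a whole number of
 * 64-byte blocks looks roughly like:
 *
 *	len = (len + SPARC64_BLOCK_ALIGN) & ~(size_t)SPARC64_BLOCK_ALIGN;
 */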

#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * Inlines for manipulating privileged registers
 */
#define	SPARC64_GETPR_DEF(pr, type)					\
static __inline type get##pr(void)					\
{									\
	type pr;							\
	__asm volatile("rdpr %%" #pr ",%0" : "=r" (pr));		\
	return pr;							\
}
#define	SPARC64_SETPR_DEF(pr, type)					\
static __inline void set##pr(type pr)					\
{									\
	__asm volatile("wrpr %0,0,%%" #pr : : "r" (pr) : "memory");	\
}

#ifdef __arch64__
#define	SPARC64_GETPR64_DEF(pr)	SPARC64_GETPR_DEF(pr, uint64_t)
#define	SPARC64_SETPR64_DEF(pr)	SPARC64_SETPR_DEF(pr, uint64_t)
#else
#define	SPARC64_GETPR64_DEF(pr)						\
static __inline uint64_t get##pr(void)					\
{									\
	uint32_t _hi, _lo;						\
	__asm volatile("rdpr %%" #pr ",%0; srl %0,0,%1; srlx %0,32,%0"	\
	    : "=r" (_hi), "=r" (_lo));					\
	return ((uint64_t)_hi << 32) | _lo;				\
}
#define	SPARC64_SETPR64_DEF(pr)						\
static __inline void set##pr(uint64_t pr)				\
{									\
	uint32_t _hi = pr >> 32, _lo = pr;				\
	__asm volatile("sllx %1,32,%0; or %0,%2,%0; wrpr %0,0,%%" #pr	\
	    : "=&r" (_hi) /* scratch register */			\
	    : "r" (_hi), "r" (_lo) : "memory");				\
}
#endif

/* Tick Register (PR 4) */
SPARC64_GETPR64_DEF(tick)	/* gettick() */
SPARC64_SETPR64_DEF(tick)	/* settick() */

/* Processor State Register (PR 6) */
SPARC64_GETPR_DEF(pstate, int)	/* getpstate() */
SPARC64_SETPR_DEF(pstate, int)	/* setpstate() */

/* Trap Level Register (PR 7) */
SPARC64_GETPR_DEF(tl, int)	/* gettl() */

/* Current Window Pointer Register (PR 9) */
SPARC64_GETPR_DEF(cwp, int)	/* getcwp() */
SPARC64_SETPR_DEF(cwp, int)	/* setcwp() */

/* Version Register (PR 31) */
SPARC64_GETPR64_DEF(ver)	/* getver() */

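/*
 * Usage sketch (illustrative only): the get*()/set*() inlines generated
 * above read and write the named privileged register directly, e.g.
 *
 *	uint64_t t = gettick();
 *	setpstate(getpstate() & ~PSTATE_IE);
 *
 * The second line is essentially what intr_disable() below does.
 */
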
/* Some simple macros to check the cpu type. */
#define	GETVER_CPU_IMPL()	((getver() & VER_IMPL) >> VER_IMPL_SHIFT)
#define	GETVER_CPU_MANUF()	((getver() & VER_MANUF) >> VER_MANUF_SHIFT)
#define	CPU_IS_SPITFIRE()	(GETVER_CPU_IMPL() == IMPL_SPITFIRE)
#define	CPU_IS_USIIIi()		((GETVER_CPU_IMPL() == IMPL_JALAPENO) ||	\
				 (GETVER_CPU_IMPL() == IMPL_SERRANO))
#define	CPU_IS_USIII_UP()	(GETVER_CPU_IMPL() >= IMPL_CHEETAH)
#define	CPU_IS_SPARC64_V_UP()	(GETVER_CPU_MANUF() == MANUF_FUJITSU &&		\
				 GETVER_CPU_IMPL() >= IMPL_ZEUS)

static __inline int
intr_disable(void)
{
	int pstate = getpstate();

	setpstate(pstate & ~PSTATE_IE);
	return pstate;
}

static __inline void
intr_restore(int pstate)
{
	setpstate(pstate);
}

/*
 * GCC pseudo-functions for manipulating PIL
 */

#ifdef SPLDEBUG
void prom_printf(const char *fmt, ...);
extern int printspl;
#define	SPLPRINT(x)							\
{									\
	if (printspl) {							\
		int i = 10000000;					\
		prom_printf x ;						\
		while (i--)						\
			;						\
	}								\
}
#define	SPL(name, newpil)						\
static __inline int name##X(const char *file, int line)		\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	SPLPRINT(("{%s:%d %d=>%d}", file, line, oldpil, newpil));	\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil)						\
static __inline int name##X(const char *file, int line)		\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	if (newpil <= oldpil)						\
		return oldpil;						\
	SPLPRINT(("{%s:%d %d->%d}", file, line, oldpil, newpil));	\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}

#else
#define	SPLPRINT(x)
#define	SPL(name, newpil)						\
static __inline int name(void)						\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil)						\
static __inline int name(void)						\
{									\
	int oldpil;							\
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));		\
	if (newpil <= oldpil)						\
		return oldpil;						\
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil);						\
}
#endif

typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static __inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static __inline int __attribute__((__unused__))
splraiseipl(ipl_cookie_t icookie)
{
	int newpil = icookie._ipl;
	int oldpil;

	/*
	 * NetBSD/sparc64's IPL_* constants equate directly to the
	 * corresponding PIL_* names; no need to map them here.
	 */
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));
	if (newpil <= oldpil)
		return (oldpil);
	__asm volatile("wrpr %0,0,%%pil" : : "r" (newpil) : "memory");
	return (oldpil);
}

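/*
 * Usage sketch (illustrative only): the spl*() functions generated below
 * by SPL()/SPLHOLD() follow the usual raise/restore protocol, as does
 * splraiseipl() above:
 *
 *	int s = splvm();
 *	... modify state shared with interrupt handlers ...
 *	splx(s);
 */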
436 */ 437 __asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); 438 if (newpil <= oldpil) 439 return (oldpil); 440 __asm volatile("wrpr %0,0,%%pil" : : "r" (newpil) : "memory"); 441 return (oldpil); 442 } 443 444 SPL(spl0, 0) 445 446 SPLHOLD(splsoftint, 1) 447 #define splsoftclock splsoftint 448 #define splsoftnet splsoftint 449 450 SPLHOLD(splsoftserial, 4) 451 452 /* audio software interrupts are at software level 4 */ 453 SPLHOLD(splausoft, PIL_AUSOFT) 454 455 /* floppy software interrupts are at software level 4 too */ 456 SPLHOLD(splfdsoft, PIL_FDSOFT) 457 458 /* 459 * Memory allocation (must be as high as highest network, tty, or disk device) 460 */ 461 SPLHOLD(splvm, PIL_VM) 462 463 /* fd hardware interrupts are at level 11 */ 464 SPLHOLD(splfd, PIL_FD) 465 466 /* zs hardware interrupts are at level 12 */ 467 SPLHOLD(splzs, PIL_SER) 468 SPLHOLD(splserial, PIL_SER) 469 470 /* audio hardware interrupts are at level 13 */ 471 SPLHOLD(splaudio, PIL_AUD) 472 473 /* second sparc timer interrupts at level 14 */ 474 SPLHOLD(splstatclock, PIL_STATCLOCK) 475 476 SPLHOLD(splsched, PIL_SCHED) 477 SPLHOLD(spllock, PIL_LOCK) 478 479 SPLHOLD(splhigh, PIL_HIGH) 480 481 /* splx does not have a return value */ 482 #ifdef SPLDEBUG 483 #define spl0() spl0X(__FILE__, __LINE__) 484 #define splsoftint() splsoftintX(__FILE__, __LINE__) 485 #define splsoftserial() splsoftserialX(__FILE__, __LINE__) 486 #define splausoft() splausoftX(__FILE__, __LINE__) 487 #define splfdsoft() splfdsoftX(__FILE__, __LINE__) 488 #define splvm() splvmX(__FILE__, __LINE__) 489 #define splclock() splclockX(__FILE__, __LINE__) 490 #define splfd() splfdX(__FILE__, __LINE__) 491 #define splzs() splzsX(__FILE__, __LINE__) 492 #define splserial() splzerialX(__FILE__, __LINE__) 493 #define splaudio() splaudioX(__FILE__, __LINE__) 494 #define splstatclock() splstatclockX(__FILE__, __LINE__) 495 #define splsched() splschedX(__FILE__, __LINE__) 496 #define spllock() spllockX(__FILE__, __LINE__) 497 #define splhigh() splhighX(__FILE__, __LINE__) 498 #define splx(x) splxX((x),__FILE__, __LINE__) 499 500 static __inline void splxX(int newpil, const char *file, int line) 501 #else 502 static __inline void splx(int newpil) 503 #endif 504 { 505 #ifdef SPLDEBUG 506 int pil; 507 508 __asm volatile("rdpr %%pil,%0" : "=r" (pil)); 509 SPLPRINT(("{%d->%d}", pil, newpil)); 510 #endif 511 __asm volatile("wrpr %%g0,%0,%%pil" : : "rn" (newpil) : "memory"); 512 } 513 #endif /* KERNEL && !_LOCORE */ 514 515 #endif /* PSR_IMPL */ 516