/*	$NetBSD: psl.h,v 1.65 2024/04/07 17:08:00 rillig Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                     1 1
 *	 31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	|  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
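
/*
 * Illustrative only (not part of the original header): PSR_BITS is an
 * old-style snprintb(9) bit format string, so something like
 *
 *	char buf[64];
 *	snprintb(buf, sizeof(buf), PSR_BITS, psr);
 *
 * would render a psr value of (PSR_S|PSR_ET) roughly as "0xa0<S,ET>".
 */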

/* Interesting spl()s */
#define PIL_BIO		5
#define PIL_VIDEO	5
#define PIL_TTY		6
#define PIL_LPT		6
#define PIL_NET		6
#define PIL_VM		7
#define	PIL_AUD		8
#define PIL_CLOCK	10
#define PIL_FD		11
#define PIL_SER		12
#define	PIL_STATCLOCK	14
#define PIL_HIGH	15
#define PIL_SCHED	PIL_CLOCK
#define PIL_LOCK	PIL_HIGH

/*
 * SPARC V9 CCR register
 */

#define ICC_C	0x01L
#define ICC_V	0x02L
#define ICC_Z	0x04L
#define ICC_N	0x08L
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
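
/*
 * The CCR is thus an 8-bit register with the 64-bit condition codes (xcc)
 * in bits 7-4 above the 32-bit ones (icc) in bits 3-0:
 *
 *	 7 6 5 4 3 2 1 0
 *	+-+-+-+-+-+-+-+-+
 *	|N|Z|V|C|N|Z|V|C|
 *	|  xcc  |  icc  |
 *	+-+-+-+-+-+-+-+-+
 */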


/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

#define PSTATE_BITS "\177\020"						\
	"b\013IG\0"	"b\012MG\0"	"b\011CLE\0"	"b\010TLE\0"	\
			"F\006\002\0"	":\000MM_TSO\0"	":\001MM_PSO\0"	\
	":\002MM_RMO\0"	"*?\0"		"b\005RED\0"	"b\004PEF\0"	\
	"b\003AM\0"	"b\002PRIV\0"	"b\001IE\0"	"b\000AG\0"
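
/*
 * Illustrative only (not part of the original header): PSTATE_BITS is a
 * new-style snprintb(9) format, so something like
 *
 *	char buf[128];
 *	snprintb(buf, sizeof(buf), PSTATE_BITS, getpstate());
 *
 * would describe a pstate of (PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) roughly
 * as "0x16<MM_TSO,PEF,PRIV,IE>".
 */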


/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif


/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 20  19   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0xfff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))
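
/*
 * PSR_ICC occupies bits 23:20 of the V8 PSR, while the icc half of the
 * CCR occupies bits 35:32 of TSTATE, so the conversion above is simply a
 * shift by TSTATE_CCR_SHIFT - 20 = 12 bits in either direction.
 */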

/*
 * These are here to simplify life.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)

/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000LL
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL

#define MANUF_FUJITSU		0x04 /* Fujitsu SPARC64 */
#define MANUF_SUN		0x17 /* Sun UltraSPARC */

#define IMPL_SPARC64		0x01 /* SPARC64 */
#define IMPL_SPARC64_II		0x02 /* SPARC64-II */
#define IMPL_SPARC64_III	0x03 /* SPARC64-III */
#define IMPL_SPARC64_IV		0x04 /* SPARC64-IV */
#define IMPL_ZEUS		0x05 /* SPARC64-V */
#define IMPL_OLYMPUS_C		0x06 /* SPARC64-VI */
#define IMPL_JUPITER		0x07 /* SPARC64-VII */

#define IMPL_SPITFIRE		0x10 /* UltraSPARC-I */
#define IMPL_BLACKBIRD		0x11 /* UltraSPARC-II */
#define IMPL_SABRE		0x12 /* UltraSPARC-IIi */
#define IMPL_HUMMINGBIRD	0x13 /* UltraSPARC-IIe */
#define IMPL_CHEETAH		0x14 /* UltraSPARC-III */
#define IMPL_CHEETAH_PLUS	0x15 /* UltraSPARC-III+ */
#define IMPL_JALAPENO		0x16 /* UltraSPARC-IIIi */
#define IMPL_JAGUAR		0x18 /* UltraSPARC-IV */
#define IMPL_PANTHER		0x19 /* UltraSPARC-IV+ */
#define IMPL_SERRANO		0x22 /* UltraSPARC-IIIi+ */

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f

/*
 * UltraSPARC Ancillary State Registers
 */
#define SET_SOFTINT	%asr20	/* Set Software Interrupt register bits */
#define CLEAR_SOFTINT	%asr21	/* Clear Software Interrupt register bits */
#define SOFTINT		%asr22	/* Software Interrupt register */
#define TICK_CMPR	%asr23	/* TICK Compare register */
#define STICK		%asr24	/* STICK register */
#define STICK_CMPR	%asr25	/* STICK Compare register */

/* SOFTINT bit descriptions */
#define TICK_INT	0x01		/* CPU clock timer interrupt */
#define STICK_INT	(0x1<<16)	/* system clock timer interrupt */

/* 64-byte alignment -- this seems the best place to put this. */
#define SPARC64_BLOCK_SIZE	64
#define SPARC64_BLOCK_ALIGN	0x3f
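
/*
 * Illustrative only (not part of the original header): these are typically
 * used to round an address up to a block boundary, e.g.
 *
 *	va = (va + SPARC64_BLOCK_ALIGN) & ~(vaddr_t)SPARC64_BLOCK_ALIGN;
 */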


#if (defined(_KERNEL) || defined(_KMEMUSER)) && !defined(_LOCORE)
typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;
#endif /* _KERNEL|_KMEMUSER&!_LOCORE */

#if defined(_KERNEL) && !defined(_LOCORE)

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

/*
 * Add "memory" to the asm clobbers on sun4v so that compiler code motion
 * cannot hoist the rdpr %ver above the cputyp check.
 */
#ifdef SUN4V
#define constasm_clobbers "memory"
#else
#define constasm_clobbers
#endif

/*
 * Inlines for manipulating privileged and ancillary state registers
 */
#define SPARC64_RDCONST_DEF(rd, name, reg, type)			\
static __inline __constfunc type get##name(void)			\
{									\
	type _val;							\
	__asm(#rd " %" #reg ",%0" : "=r" (_val) : : constasm_clobbers);	\
	return _val;							\
}
#define SPARC64_RD_DEF(rd, name, reg, type)				\
static __inline type get##name(void)					\
{									\
	type _val;							\
	__asm volatile(#rd " %" #reg ",%0" : "=r" (_val));		\
	return _val;							\
}
#define SPARC64_WR_DEF(wr, name, reg, type)				\
static __inline void set##name(type _val)				\
{									\
	__asm volatile(#wr " %0,0,%" #reg : : "r" (_val) : "memory");	\
}
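
/*
 * Illustrative only (not part of the original header): for example,
 * SPARC64_WR_DEF(wrpr, pstate, %pstate, int) expands to
 *
 *	static __inline void setpstate(int _val)
 *	{
 *		__asm volatile("wrpr %0,0,%%pstate" : : "r" (_val) : "memory");
 *	}
 */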

#ifdef __arch64__
#define SPARC64_RDCONST64_DEF(rd, name, reg) \
	SPARC64_RDCONST_DEF(rd, name, reg, uint64_t)
#define SPARC64_RD64_DEF(rd, name, reg) SPARC64_RD_DEF(rd, name, reg, uint64_t)
#define SPARC64_WR64_DEF(wr, name, reg) SPARC64_WR_DEF(wr, name, reg, uint64_t)
#else
#define SPARC64_RDCONST64_DEF(rd, name, reg)				\
static __inline __constfunc uint64_t get##name(void)			\
{									\
	uint32_t _hi, _lo;						\
	__asm(#rd " %" #reg ",%0; srl %0,0,%1; srlx %0,32,%0"		\
		: "=r" (_hi), "=r" (_lo) : : constasm_clobbers);	\
	return ((uint64_t)_hi << 32) | _lo;				\
}
#define SPARC64_RD64_DEF(rd, name, reg)					\
static __inline uint64_t get##name(void)				\
{									\
	uint32_t _hi, _lo;						\
	__asm volatile(#rd " %" #reg ",%0; srl %0,0,%1; srlx %0,32,%0"	\
		: "=r" (_hi), "=r" (_lo));				\
	return ((uint64_t)_hi << 32) | _lo;				\
}
#define SPARC64_WR64_DEF(wr, name, reg)					\
static __inline void set##name(uint64_t _val)				\
{									\
	uint32_t _hi = _val >> 32, _lo = _val;				\
	__asm volatile("sllx %1,32,%0; or %0,%2,%0; " #wr " %0,0,%" #reg\
		       : "=&r" (_hi) /* scratch register */		\
		       : "r" (_hi), "r" (_lo) : "memory");		\
}
#endif

#define SPARC64_RDPR_DEF(name, reg, type) SPARC64_RD_DEF(rdpr, name, reg, type)
#define SPARC64_WRPR_DEF(name, reg, type) SPARC64_WR_DEF(wrpr, name, reg, type)
#define SPARC64_RDPR64_DEF(name, reg)	SPARC64_RD64_DEF(rdpr, name, reg)
#define SPARC64_WRPR64_DEF(name, reg)	SPARC64_WR64_DEF(wrpr, name, reg)
#define SPARC64_RDASR64_DEF(name, reg)	SPARC64_RD64_DEF(rd, name, reg)
#define SPARC64_WRASR64_DEF(name, reg)	SPARC64_WR64_DEF(wr, name, reg)

/* Tick Register (PR 4) */
SPARC64_RDPR64_DEF(tick, %tick)			/* gettick() */
SPARC64_WRPR64_DEF(tick, %tick)			/* settick() */

/* Processor State Register (PR 6) */
SPARC64_RDPR_DEF(pstate, %pstate, int)		/* getpstate() */
SPARC64_WRPR_DEF(pstate, %pstate, int)		/* setpstate() */

/* Trap Level Register (PR 7) */
SPARC64_RDPR_DEF(tl, %tl, int)			/* gettl() */

/* Current Window Pointer Register (PR 9) */
SPARC64_RDPR_DEF(cwp, %cwp, int)		/* getcwp() */
SPARC64_WRPR_DEF(cwp, %cwp, int)		/* setcwp() */

/* Version Register (PR 31) */
SPARC64_RDCONST64_DEF(rdpr, ver, %ver)		/* getver() */

/* System Tick Register (ASR 24) */
SPARC64_RDASR64_DEF(stick, STICK)		/* getstick() */
SPARC64_WRASR64_DEF(stick, STICK)		/* setstick() */

/* System Tick Compare Register (ASR 25) */
SPARC64_RDASR64_DEF(stickcmpr, STICK_CMPR)	/* getstickcmpr() */

/* Some simple macros to check the cpu type. */
#define GETVER_CPU_MASK()	((getver() & VER_MASK) >> VER_MASK_SHIFT)
#define GETVER_CPU_IMPL()	((getver() & VER_IMPL) >> VER_IMPL_SHIFT)
#define GETVER_CPU_MANUF()	((getver() & VER_MANUF) >> VER_MANUF_SHIFT)
#define CPU_IS_SPITFIRE()	(GETVER_CPU_IMPL() == IMPL_SPITFIRE)
#define CPU_IS_HUMMINGBIRD()	(GETVER_CPU_IMPL() == IMPL_HUMMINGBIRD)
#define CPU_IS_USIIIi()		((GETVER_CPU_IMPL() == IMPL_JALAPENO) || \
				 (GETVER_CPU_IMPL() == IMPL_SERRANO))
#define CPU_IS_USIII_UP()	(GETVER_CPU_IMPL() >= IMPL_CHEETAH)
#define CPU_IS_SPARC64_V_UP()	(GETVER_CPU_MANUF() == MANUF_FUJITSU && \
				 GETVER_CPU_IMPL() >= IMPL_ZEUS)
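
/*
 * Illustrative only (not part of the original header): these are intended
 * for run-time cpu checks such as
 *
 *	if (CPU_IS_USIII_UP())
 *		cheetah_specific_setup();	(hypothetical function)
 */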

static __inline int
intr_disable(void)
{
	int pstate = getpstate();

	setpstate(pstate & ~PSTATE_IE);
	return pstate;
}

static __inline void
intr_restore(int pstate)
{
	setpstate(pstate);
}
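
/*
 * Illustrative only (not part of the original header): the usual pairing is
 *
 *	int s = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(s);
 */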

/*
 * GCC pseudo-functions for manipulating PIL
 */

#ifdef SPLDEBUG
void prom_printf(const char *fmt, ...);
extern int printspl;
#define SPLPRINT(x) \
{ \
	if (printspl) { \
		int i = 10000000; \
		prom_printf x ; \
		while (i--) \
			; \
	} \
}
#define	SPL(name, newpil) \
static __inline int name##X(const char* file, int line) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	SPLPRINT(("{%s:%d %d=>%d}", file, line, oldpil, newpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil) \
static __inline int name##X(const char* file, int line) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	if (newpil <= oldpil) \
		return oldpil; \
	SPLPRINT(("{%s:%d %d->%d}", file, line, oldpil, newpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}

#else
#define SPLPRINT(x)
#define	SPL(name, newpil) \
static __inline __always_inline int name(void) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil) \
static __inline __always_inline int name(void) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	if (newpil <= oldpil) \
		return oldpil; \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
#endif

static __inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static __inline int __attribute__((__unused__))
splraiseipl(ipl_cookie_t icookie)
{
	int newpil = icookie._ipl;
	int oldpil;

	/*
	 * NetBSD/sparc64's IPL_* constants equate directly to the
	 * corresponding PIL_* names; no need to map them here.
	 */
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));
	if (newpil <= oldpil)
		return (oldpil);
	__asm volatile("wrpr %0,0,%%pil" : : "r" (newpil) : "memory");
	return (oldpil);
}
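
/*
 * Illustrative only (not part of the original header): typical use is
 *
 *	ipl_cookie_t ic = makeiplcookie(IPL_SCHED);
 *	int s = splraiseipl(ic);
 *	...
 *	splx(s);
 */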

SPL(spl0, 0)

SPLHOLD(splsoftint, 1)
#define	splsoftclock	splsoftint
#define	splsoftnet	splsoftint

SPLHOLD(splsoftserial, 4)

/*
 * Memory allocation (must be as high as highest network, tty, or disk device)
 */
SPLHOLD(splvm, PIL_VM)

SPLHOLD(splsched, PIL_SCHED)

SPLHOLD(splhigh, PIL_HIGH)

/* splx does not have a return value */
#ifdef SPLDEBUG
#define	spl0()	spl0X(__FILE__, __LINE__)
#define	splsoftint()	splsoftintX(__FILE__, __LINE__)
#define	splsoftserial()	splsoftserialX(__FILE__, __LINE__)
#define	splausoft()	splausoftX(__FILE__, __LINE__)
#define	splfdsoft()	splfdsoftX(__FILE__, __LINE__)
#define	splvm()		splvmX(__FILE__, __LINE__)
#define	splclock()	splclockX(__FILE__, __LINE__)
#define	splfd()		splfdX(__FILE__, __LINE__)
#define	splzs()		splzsX(__FILE__, __LINE__)
#define	splserial()	splserialX(__FILE__, __LINE__)
#define	splaudio()	splaudioX(__FILE__, __LINE__)
#define	splstatclock()	splstatclockX(__FILE__, __LINE__)
#define	splsched()	splschedX(__FILE__, __LINE__)
#define	spllock()	spllockX(__FILE__, __LINE__)
#define	splhigh()	splhighX(__FILE__, __LINE__)
#define splx(x)		splxX((x),__FILE__, __LINE__)

static __inline void splxX(int newpil, const char *file, int line)
#else
static __inline __always_inline void splx(int newpil)
#endif
{
#ifdef SPLDEBUG
	int pil;

	__asm volatile("rdpr %%pil,%0" : "=r" (pil));
	SPLPRINT(("{%d->%d}", pil, newpil));
#endif
	__asm volatile("wrpr %%g0,%0,%%pil" : : "rn" (newpil) : "memory");
}
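
/*
 * Illustrative only (not part of the original header): the spl functions
 * nest in the usual NetBSD way, e.g.
 *
 *	int s = splvm();
 *	... touch data shared with interrupt handlers up to PIL_VM ...
 *	splx(s);
 */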
#endif /* _KERNEL && !_LOCORE */

#endif /* PSR_IMPL */