xref: /netbsd-src/sys/arch/sparc64/include/psl.h (revision 10ad5ffa714ce1a679dcc9dd8159648df2d67b5a)
1 /*	$NetBSD: psl.h,v 1.41 2009/05/16 19:15:34 nakayama Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)psl.h	8.1 (Berkeley) 6/11/93
41  */
42 
43 #ifndef PSR_IMPL
44 
/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                     1 1
 *	 31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	|  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow (the "v" bit in the picture) */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

/* snprintb(3)-style bit-description string for printing the PSR */
#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
74 
/*
 * Interesting spl()s: the interrupt priority levels used by the
 * spl*() functions generated near the bottom of this file.
 */
#define PIL_SCSI	3
#define PIL_FDSOFT	4	/* floppy soft interrupt */
#define PIL_AUSOFT	4	/* audio soft interrupt */
#define PIL_BIO		5
#define PIL_VIDEO	5
#define PIL_TTY		6
#define PIL_LPT		6
#define PIL_NET		6
#define PIL_VM		7	/* memory allocation (see splvm below) */
#define	PIL_AUD		8	/* audio hardware */
#define PIL_CLOCK	10
#define PIL_FD		11	/* floppy hardware */
#define PIL_SER		12	/* serial (zs) hardware */
#define	PIL_STATCLOCK	14
#define PIL_HIGH	15	/* blocks everything below it */
#define PIL_SCHED	PIL_CLOCK
#define PIL_LOCK	PIL_HIGH
93 
/*
 * SPARC V9 CCR register: the icc flags (condition codes for the 32-bit
 * result) sit in the low four bits; the xcc flags (64-bit result) are
 * the same four bits shifted up by XCC_SHIFT.
 */

#define ICC_C	0x01L	/* carry */
#define ICC_V	0x02L	/* overflow */
#define ICC_Z	0x04L	/* zero */
#define ICC_N	0x08L	/* negative */
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
107 
108 
/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model (2-bit field, bits 6-7) */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* Relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

/*
 * snprintb(3)-style description string for PSTATE.
 * NOTE(review): the "\10\7MM" entry looks odd -- "\10" (bit 7) carries
 * no name and "\7MM" labels bit 6; the two bits form the MM field.
 * Confirm this renders as intended with the kernel's bitmask printer.
 */
#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
136 
137 
/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
/* LP64 kernel: no address masking; 64-bit userland runs RMO. */
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
/* ILP32 kernel: kernel and user run TSO with 32-bit address masking (AM). */
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif
163 
164 
/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 18  17   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f
/*
 * NOTE(review): 0x6ff00 does not cover all 12 PSTATE bits after the
 * shift -- the TLE (0x100<<8) and IG (0x800<<8) positions are omitted.
 * Confirm this is intentional before widening the mask.
 */
#define TSTATE_PSTATE		0x6ff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

/* Move the PSR icc bits (23:20) up into the TSTATE icc bits (35:32). */
#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
/*
 * Inverse of the above.  NOTE(review): the full CCR is masked, so the
 * xcc half lands in bits 27:24 (above PSR_ICC) -- presumably callers
 * mask the result; confirm.
 */
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))

/*
 * These are here to simplify life: each PSTATE_* bit pre-shifted into
 * its position within TSTATE.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

/*
 * NOTE(review): identical to PSTATE_BITS, so it describes the PSTATE
 * field only after shifting TSTATE right by TSTATE_PSTATE_SHIFT.
 */
#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000LL	/* manufacturer */
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL	/* implementation */
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL	/* mask (chip revision) */
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL	/* maximum trap level */
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL	/* max register window index */
226 
/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models (see the PSTATE_KERN/PSTATE_USER choices above) */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 *
 * (Note: the WSTATE values below are octal.)
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f	/* current-window-pointer mask */

/* 64-byte alignment -- this seems the best place to put this. */
#define BLOCK_SIZE	64
#define BLOCK_ALIGN	0x3f
248 
249 #if defined(_KERNEL) && !defined(_LOCORE)
250 
/*
 * Inlines for manipulating privileged registers
 */

/*
 * Read the V9 %tick register as a 64-bit value.
 */
static __inline uint64_t
gettick(void)
{
#ifdef __arch64__
	uint64_t tick;

	__asm volatile("rdpr %%tick, %0" : "=r" (tick));
	return tick;
#else
	/*
	 * ILP32: read %tick into one register, then split it into low
	 * (srl by 0 also zero-extends) and high 32-bit halves before the
	 * value leaves asm, and reassemble in C.
	 */
	uint32_t tick_hi, tick_lo;

	__asm volatile("rdpr %%tick, %0; srl %0,0,%1; srlx %0,32,%0"
		: "=r" (tick_hi), "=r" (tick_lo));
	return ((uint64_t)tick_hi << 32) | tick_lo;
#endif
}
270 
/*
 * Write the V9 %tick register.
 */
static __inline void
settick(uint64_t newtick)
{
#ifdef __arch64__
	__asm volatile("wrpr %0, 0, %%tick" : : "r" (newtick) : "memory");
#else
	/* ILP32: split the 64-bit value, re-glue it inside the asm. */
	uint32_t tick_hi = newtick >> 32, tick_lo = newtick;

	/*
	 * tick_hi doubles as an earlyclobber ("=&r") scratch output, so
	 * the sllx/or sequence cannot overwrite an input before reading it.
	 */
	__asm volatile("sllx %1,32,%0; or %0,%2,%0; wrpr %0, 0, %%tick"
		       : "=&r" (tick_hi) /* scratch register */
		       : "r" (tick_hi), "r" (tick_lo) : "memory");
#endif
}
284 
/*
 * Read the privileged %pstate register.
 */
static __inline int
getpstate(void)
{
	int val;

	__asm volatile("rdpr %%pstate,%0" : "=r" (val));
	return val;
}
293 
/*
 * Write the privileged %pstate register.
 */
static __inline void
setpstate(int newpstate)
{
	__asm volatile("wrpr %0,0,%%pstate" : : "r" (newpstate) : "memory");
}
299 
/*
 * Return the current trap level (%tl).
 */
static __inline int
gettl(void)
{
	int level;

	__asm volatile("rdpr %%tl, %0" : "=r" (level));
	return level;
}
308 
/*
 * Read the current register window pointer (%cwp).
 */
static __inline int
getcwp(void)
{
	int cwp;

	__asm volatile("rdpr %%cwp,%0" : "=r" (cwp));
	return cwp;
}
317 
/*
 * Write the current register window pointer (%cwp).
 */
static __inline void
setcwp(int newcwp)
{
	__asm volatile("wrpr %0,0,%%cwp" : : "r" (newcwp) : "memory");
}
323 
/*
 * Read the %ver register (see the VER_* field definitions above).
 */
static __inline uint64_t
getver(void)
{
#ifdef __arch64__
	uint64_t ver;

	__asm volatile("rdpr %%ver,%0" : "=r" (ver));
	return ver;
#else
	/* ILP32: same split-and-reassemble dance as gettick() above. */
	uint32_t ver_hi, ver_lo;

	__asm volatile("rdpr %%ver,%0; srl %0,0,%1; srlx %0,32,%0"
		       : "=r" (ver_hi), "=r" (ver_lo));
	return (uint64_t)ver_hi << 32 | ver_lo;
#endif
}
340 
341 static __inline int
342 intr_disable(void)
343 {
344 	int pstate = getpstate();
345 
346 	setpstate(pstate & ~PSTATE_IE);
347 	return pstate;
348 }
349 
/*
 * Restore a pstate value previously returned by intr_disable().
 */
static __inline void
intr_restore(int pstate)
{
	setpstate(pstate);
}
355 
/*
 * GCC pseudo-functions for manipulating PIL
 */

#ifdef SPLDEBUG
void prom_printf(const char *fmt, ...);
extern int printspl;
/*
 * SPLPRINT(x): if the printspl debug switch is set, prom_printf the
 * parenthesized argument list, then spin ~10M iterations.
 * NOTE(review): the busy-wait is presumably a crude delay to let console
 * output drain -- confirm.
 */
#define SPLPRINT(x) \
{ \
	if (printspl) { \
		int i = 10000000; \
		prom_printf x ; \
		while (i--) \
			; \
	} \
}
/*
 * Debug flavour of SPL(): generates "int nameX(file, line)" which logs
 * the call site and PIL transition, then sets %pil to newpil and
 * returns the old value.  newpil must be a compile-time constant
 * (asm "n" constraint).
 */
#define	SPL(name, newpil) \
static __inline int name##X(const char* file, int line) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	SPLPRINT(("{%s:%d %d=>%d}", file, line, oldpil, newpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
/* A non-priority-decreasing version of SPL */
/*
 * Debug flavour: logs the call site and the PIL transition.
 * Fix: the SPLPRINT format string previously read "%d->!d", which
 * printed "!d" literally and left the final newpil argument unconsumed.
 */
#define	SPLHOLD(name, newpil) \
static __inline int name##X(const char* file, int line) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	if (newpil <= oldpil) \
		return oldpil; \
	SPLPRINT(("{%s:%d %d->%d}", file, line, oldpil, newpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
393 
394 #else
#define SPLPRINT(x)
/*
 * SPL(name, newpil): generate "static __inline int name(void)" that
 * unconditionally sets %pil to newpil and returns the previous level.
 * newpil must be a compile-time constant (asm "n" constraint).
 */
#define	SPL(name, newpil) \
static __inline int name(void) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil) \
static __inline int name(void) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	if (newpil <= oldpil) \
		return oldpil; \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
415 #endif
416 
typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

/*
 * Wrap an interrupt priority level in the opaque cookie type consumed
 * by splraiseipl().
 */
static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{
	ipl_cookie_t cookie = { ._ipl = ipl };

	return cookie;
}
428 
/*
 * Raise the processor interrupt level to the cookie's level, never
 * lowering it.  Returns the previous %pil so the caller can splx() back.
 */
static __inline int __attribute__((__unused__))
splraiseipl(ipl_cookie_t icookie)
{
	int newpil = icookie._ipl;
	int oldpil;

	/*
	 * NetBSD/sparc64's IPL_* constants equate directly to the
	 * corresponding PIL_* names; no need to map them here.
	 */
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));
	if (newpil <= oldpil)
		return (oldpil);
	/* "r" constraint: unlike the SPL() macros, newpil is a run-time value. */
	__asm volatile("wrpr %0,0,%%pil" : : "r" (newpil) : "memory");
	return (oldpil);
}
445 
SPL(spl0, 0)

/*
 * NOTE(review): raw PIL 1 -- no PIL_SOFTINT symbol is defined in this
 * file; confirm whether one exists elsewhere.
 */
SPLHOLD(splsoftint, 1)
#define	splsoftclock	splsoftint
#define	splsoftnet	splsoftint

/* NOTE(review): raw PIL 4 -- same level as PIL_FDSOFT/PIL_AUSOFT. */
SPLHOLD(splsoftserial, 4)

/* audio software interrupts are at software level 4 */
SPLHOLD(splausoft, PIL_AUSOFT)

/* floppy software interrupts are at software level 4 too */
SPLHOLD(splfdsoft, PIL_FDSOFT)

/*
 * Memory allocation (must be as high as highest network, tty, or disk device)
 */
SPLHOLD(splvm, PIL_VM)

/* fd hardware interrupts are at level 11 */
SPLHOLD(splfd, PIL_FD)

/* zs hardware interrupts are at level 12 */
SPLHOLD(splzs, PIL_SER)
SPLHOLD(splserial, PIL_SER)

/* audio hardware interrupts are at level 8 (PIL_AUD; old comment said 13) */
SPLHOLD(splaudio, PIL_AUD)

/* second sparc timer interrupts at level 14 */
SPLHOLD(splstatclock, PIL_STATCLOCK)

SPLHOLD(splsched, PIL_SCHED)
SPLHOLD(spllock, PIL_LOCK)

SPLHOLD(splipi, PIL_HIGH)

SPLHOLD(splhigh, PIL_HIGH)
484 
/* splx does not have a return value */
#ifdef SPLDEBUG
/*
 * Map each spl*() to its generated debug variant, which records the
 * __FILE__/__LINE__ of the call site.
 * Fixes: splserial() previously expanded to the undefined "splzerialX"
 * (typo), and splipi() expanded to splhighX instead of its own
 * generated splipiX (same PIL, so behavior is unchanged, but the call
 * site now reports the right name).
 * NOTE(review): no SPLHOLD(splclock, ...) appears in this file, so
 * splclockX must be generated elsewhere -- confirm.
 */
#define	spl0()	spl0X(__FILE__, __LINE__)
#define	splsoftint()	splsoftintX(__FILE__, __LINE__)
#define	splsoftserial()	splsoftserialX(__FILE__, __LINE__)
#define	splausoft()	splausoftX(__FILE__, __LINE__)
#define	splfdsoft()	splfdsoftX(__FILE__, __LINE__)
#define	splvm()		splvmX(__FILE__, __LINE__)
#define	splclock()	splclockX(__FILE__, __LINE__)
#define	splfd()		splfdX(__FILE__, __LINE__)
#define	splzs()		splzsX(__FILE__, __LINE__)
#define	splserial()	splserialX(__FILE__, __LINE__)
#define	splaudio()	splaudioX(__FILE__, __LINE__)
#define	splstatclock()	splstatclockX(__FILE__, __LINE__)
#define	splsched()	splschedX(__FILE__, __LINE__)
#define	spllock()	spllockX(__FILE__, __LINE__)
#define	splhigh()	splhighX(__FILE__, __LINE__)
#define splx(x)		splxX((x),__FILE__, __LINE__)
#define splipi()	splipiX(__FILE__, __LINE__)

/*
 * splx(newpil): unconditionally set %pil to newpil (typically restoring
 * a level returned by an spl*() call).  No return value.
 */
static __inline void splxX(int newpil, const char *file, int line)
#else
static __inline void splx(int newpil)
#endif
{
#ifdef SPLDEBUG
	int pil;

	__asm volatile("rdpr %%pil,%0" : "=r" (pil));
	SPLPRINT(("{%d->%d}", pil, newpil));
#endif
	/* "rn" constraint: newpil may be either a register or a constant. */
	__asm volatile("wrpr %%g0,%0,%%pil" : : "rn" (newpil) : "memory");
}
518 #endif /* KERNEL && !_LOCORE */
519 
520 #endif /* PSR_IMPL */
521