xref: /openbsd-src/sys/arch/sparc64/include/psl.h (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: psl.h,v 1.31 2016/06/13 01:08:13 dlg Exp $	*/
2 /*	$NetBSD: psl.h,v 1.20 2001/04/13 23:30:05 thorpej Exp $ */
3 
4 /*
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This software was developed by the Computer Systems Engineering group
9  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
10  * contributed to Berkeley.
11  *
12  * All advertising materials mentioning features or use of this software
13  * must display the following acknowledgement:
14  *	This product includes software developed by the University of
15  *	California, Lawrence Berkeley Laboratory.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *	@(#)psl.h	8.1 (Berkeley) 6/11/93
42  */
43 
#ifndef _SPARC64_PSL_
#define _SPARC64_PSL_

/*
 * Interesting spl()s: interrupt priority levels (PIL) used by the
 * spl*() functions.  A higher PIL blocks more interrupt sources;
 * PIL_HIGH (15) blocks all maskable interrupts.
 */
#define PIL_SCSI	3	/* SCSI controllers */
#define PIL_BIO		5	/* block I/O */
#define PIL_VIDEO	5	/* frame buffers */
#define PIL_TTY		6	/* terminals */
#define PIL_NET		6	/* network interfaces */
#define PIL_VM		7	/* VM operations */
#define	PIL_AUD		8	/* audio */
#define PIL_CLOCK	10	/* system clock */
#define PIL_FD		11	/* floppy */
#define PIL_SER		12	/* serial */
#define PIL_STATCLOCK	14	/* statistics clock */
#define PIL_HIGH	15	/* blocks everything maskable */
#define PIL_SCHED	PIL_STATCLOCK
#define PIL_LOCK	PIL_HIGH
62 
/*
 * SPARC V9 CCR register
 *
 * Two 4-bit condition-code fields: icc (32-bit results) in bits 0-3
 * and xcc (64-bit results) in bits 4-7.
 */

#define ICC_C	0x01L	/* carry */
#define ICC_V	0x02L	/* overflow */
#define ICC_Z	0x04L	/* zero */
#define ICC_N	0x08L	/* negative */
#define XCC_SHIFT	4	/* xcc field sits 4 bits above icc */
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
76 
77 
/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model (2-bit field) */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

/* %b-format bit-name string for printing PSTATE values. */
#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
105 
106 
/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)		/* TSO, privileged */
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)	/* + alternate globals */
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)		/* normal kernel mode */
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)			/* kernel, interrupts on */
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)	/* 32-bit user: TSO + address mask */
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)		/* 64-bit user: RMO */
123 
124 
/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 18  17   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f		/* saved window pointer */
#define TSTATE_PSTATE		0x6ff00		/* saved PSTATE field */
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL	/* saved ASI */
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL	/* saved condition codes */
#define TSTATE_CCR_SHIFT	32

/* Leftover SPARC V8 PSTATE stuff */
/* icc field of the V8 PSR (bits 20-23). */
#define PSR_ICC 0x00f00000
/* Move V8 PSR condition codes into/out of the TSTATE CCR field. */
#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-19))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-19))
146 
/*
 * These are here to simplify life.
 * (Each PSTATE bit as it appears within the TSTATE.PSTATE field.)
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

/* %b-format bit-name string (same positions as PSTATE_BITS). */
#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000ULL	/* manufacturer */
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000ULL	/* implementation (IMPL_* below) */
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000ULL	/* mask (revision) */
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00ULL	/* maximum trap level */
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fULL	/* maximum register window index */
188 
/* Values of the VER.impl field, identifying the CPU implementation. */
#define IMPL_SPARC64		0x01 /* SPARC64 */
#define IMPL_SPARC64_II		0x02 /* SPARC64-II */
#define IMPL_SPARC64_III	0x03 /* SPARC64-III */
#define IMPL_SPARC64_IV		0x04 /* SPARC64-IV */
#define IMPL_ZEUS		0x05 /* SPARC64-V */
#define IMPL_OLYMPUS_C		0x06 /* SPARC64-VI */
#define IMPL_JUPITER		0x07 /* SPARC64-VII */
#define IMPL_SPITFIRE		0x10 /* UltraSPARC */
#define IMPL_BLACKBIRD		0x11 /* UltraSPARC-II */
#define IMPL_SABRE		0x12 /* UltraSPARC-IIi */
#define IMPL_HUMMINGBIRD	0x13 /* UltraSPARC-IIe */
#define IMPL_CHEETAH		0x14 /* UltraSPARC-III */
#define IMPL_CHEETAH_PLUS	0x15 /* UltraSPARC-III+ */
#define IMPL_JALAPENO		0x16 /* UltraSPARC-IIIi */
#define IMPL_JAGUAR		0x18 /* UltraSPARC-IV */
#define IMPL_PANTHER		0x19 /* UltraSPARC-IV+ */
#define IMPL_SERRANO		0x22 /* UltraSPARC-IIIi+ */
206 
/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define KERN_MM		PSTATE_MM_TSO	/* kernel runs TSO (see above) */
#define USER_MM		PSTATE_MM_RMO	/* 64-bit userland runs RMO */

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	027
#define WSTATE_USER	022

/* Mask for the current window pointer (same width as TSTATE_CWP). */
#define CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define BLOCK_SIZE	64
#define BLOCK_ALIGN	0x3f
228 
#if defined(_KERNEL) && !defined(_LOCORE)

extern u_int64_t ver;	/* Copy of v9 version register.  We need to read this only once, in locore.s. */
#ifndef SPLDEBUG
extern __inline void splx(int);
#endif

#ifdef DIAGNOSTIC
/*
 * Although this function is implemented in MI code, it must be in this MD
 * header because we don't want this header to include MI includes.
 */
void splassert_fail(int, int, const char *);
extern int splassert_ctl;
void splassert_check(int, const char *);
/* Runtime check (when splassert_ctl > 0) that we are at ipl __wantipl. */
#define splassert(__wantipl) do {			\
	if (splassert_ctl > 0) {			\
		splassert_check(__wantipl, __func__);	\
	}						\
} while (0)
#define splsoftassert(wantipl) splassert(wantipl)
#else
/* Non-DIAGNOSTIC kernels compile the assertions away. */
#define splassert(wantipl)	do { /* nada */ } while (0)
#define splsoftassert(wantipl)	do { /* nada */ } while (0)
#endif
254 
255 /*
256  * GCC pseudo-functions for manipulating privileged registers
257  */
258 extern __inline u_int64_t getpstate(void);
259 extern __inline
260 u_int64_t getpstate(void)
261 {
262 	return (sparc_rdpr(pstate));
263 }
264 
265 extern __inline void setpstate(u_int64_t);
266 extern __inline void setpstate(u_int64_t newpstate)
267 {
268 	sparc_wrpr(pstate, newpstate, 0);
269 }
270 
271 extern __inline int getcwp(void);
272 extern __inline
273 int getcwp(void)
274 {
275 	return (sparc_rdpr(cwp));
276 }
277 
278 extern __inline void setcwp(u_int64_t);
279 extern __inline void
280 setcwp(u_int64_t newcwp)
281 {
282 	sparc_wrpr(cwp, newcwp, 0);
283 }
284 
285 extern __inline u_int64_t getver(void);
286 extern __inline
287 u_int64_t getver(void)
288 {
289 	return (sparc_rdpr(ver));
290 }
291 
292 extern __inline u_int64_t intr_disable(void);
293 extern __inline u_int64_t
294 intr_disable(void)
295 {
296 	u_int64_t s;
297 
298 	s = sparc_rdpr(pstate);
299 	sparc_wrpr(pstate, s & ~PSTATE_IE, 0);
300 	return (s);
301 }
302 
303 extern __inline void intr_restore(u_int64_t);
304 extern __inline void
305 intr_restore(u_int64_t s)
306 {
307 	sparc_wrpr(pstate, s, 0);
308 }
309 
310 extern __inline void stxa_sync(u_int64_t, u_int64_t, u_int64_t);
311 extern __inline void
312 stxa_sync(u_int64_t va, u_int64_t asi, u_int64_t val)
313 {
314 	u_int64_t s = intr_disable();
315 	stxa_nc(va, asi, val);
316 	membar(Sync);
317 	intr_restore(s);
318 }
319 
/*
 * Set the processor interrupt level (%pil) to newipl and return the
 * previous level.  The "I" asm constraint means newipl must be a
 * compile-time constant (it is emitted as an immediate to wrpr).
 */
static inline int
_spl(int newipl)
{
	int oldpil;

	__asm volatile(	"    rdpr %%pil, %0		\n"
			"    wrpr %%g0, %1, %%pil	\n"
	    : "=&r" (oldpil)
	    : "I" (newipl)
	    : "%g0");
	/* Compiler barrier: keep memory accesses on their side of the spl. */
	__asm volatile("" : : : "memory");

	return (oldpil);
}
334 
335 /* A non-priority-decreasing version of SPL */
336 static inline int
337 _splraise(int newpil)
338 {
339 	int oldpil;
340 
341 	oldpil = sparc_rdpr(pil);
342 	if (newpil > oldpil)
343 		sparc_wrpr(pil, newpil, 0);
344         return (oldpil);
345 }
346 
347 static inline void
348 _splx(int newpil)
349 {
350 	sparc_wrpr(pil, newpil, 0);
351 }
352 
353 #endif /* KERNEL && !_LOCORE */
354 
355 #endif /* _SPARC64_PSL_ */
356