xref: /openbsd-src/sys/arch/powerpc/include/cpu.h (revision db3296cf5c1dd9058ceecc3a29fe4aaa0bd26000)
1 /*	$OpenBSD: cpu.h,v 1.16 2003/07/08 21:46:19 drahn Exp $	*/
2 /*	$NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6  * Copyright (C) 1995, 1996 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #ifndef	_POWERPC_CPU_H_
35 #define	_POWERPC_CPU_H_
36 
37 #include <machine/frame.h>
38 
39 #include <machine/psl.h>
40 
/*
 * Clock-frame accessors used by machine-independent clock/profiling code:
 * CLKF_USERMODE: true if the clock interrupt came from user mode
 *	(MSR problem-state bit set in the saved srr1).
 * CLKF_PC: program counter at the time of the interrupt (saved srr0).
 * CLKF_INTR: true if the clock interrupted another interrupt handler
 *	(frame depth non-zero).
 */
#define	CLKF_USERMODE(frame)	(((frame)->srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->srr0)
#define	CLKF_INTR(frame)	((frame)->depth != 0)

/* No machine-dependent work is needed on swapout or process reaping. */
#define	cpu_swapout(p)
#define cpu_wait(p)

void	delay(unsigned);
#define	DELAY(n)		delay(n)

/* Set below to request a reschedule / AST on return to user mode. */
extern volatile int want_resched;
extern volatile int astpending;

#define	need_resched()		(want_resched = 1, astpending = 1)
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, astpending = 1)
#define	signotify(p)		(astpending = 1)

extern char *bootpath;

#ifndef	CACHELINESIZE
#define	CACHELINESIZE	32			/* For now		XXX */
#endif
63 
/*
 * Make the instruction cache coherent with memory after code has been
 * written into [from, from + len): first push the modified data cache
 * lines out to memory (dcbst), then invalidate the corresponding
 * instruction cache lines (icbi).  `len' is widened by the offset of
 * `from' within its cache line so the whole first line is counted;
 * dcbst/icbi operate on the enclosing line, ignoring the low address
 * bits, so `p' itself need not be aligned.
 */
static __inline void
syncicache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm __volatile ("dcbst 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	/* Wait for all the dcbst stores to reach memory. */
	__asm __volatile ("sync");
	p = from;
	l = len;
	do {
		__asm __volatile ("icbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	/* Discard any instructions prefetched before the invalidation. */
	__asm __volatile ("isync");
}
86 
/*
 * Invalidate (discard without writeback) the data cache lines covering
 * [from, from + len), e.g. before DMA writes into the buffer.  As in
 * syncicache(), `len' is widened by the offset of `from' within its
 * cache line; dcbi acts on the enclosing line regardless of alignment.
 * NOTE: any dirty data in these lines is lost, not flushed.
 */
static __inline void
invdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm __volatile ("dcbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	/* Ensure the invalidations have completed. */
	__asm __volatile ("sync");
}
102 
/*
 * FUNC_SPR(n, name) generates a pair of inline accessors for special
 * purpose register number `n':
 *	ppc_mf<name>()    - read the SPR and return its value
 *	ppc_mt<name>(val) - write `val' into the SPR
 * The SPR number must be a compile-time constant because it is pasted
 * directly into the mfspr/mtspr instruction text.
 */
#define FUNC_SPR(n, name) \
static __inline u_int32_t ppc_mf ## name (void)			\
{								\
	u_int32_t ret;						\
	__asm __volatile ("mfspr %0," # n : "=r" (ret));	\
	return ret;						\
}								\
static __inline void ppc_mt ## name (u_int32_t val)		\
{								\
	__asm __volatile ("mtspr "# n ",%0" :: "r" (val));	\
}
/* User-level and miscellaneous SPRs. */
FUNC_SPR(0, mq)
FUNC_SPR(1, xer)
FUNC_SPR(4, rtcu)
FUNC_SPR(5, rtcl)
FUNC_SPR(8, lr)
FUNC_SPR(9, ctr)
/* Exception-handling SPRs. */
FUNC_SPR(18, dsisr)
FUNC_SPR(19, dar)
FUNC_SPR(22, dec)
FUNC_SPR(25, sdr1)
FUNC_SPR(26, srr0)
FUNC_SPR(27, srr1)
FUNC_SPR(256, vrsave)
FUNC_SPR(272, sprg0)
FUNC_SPR(273, sprg1)
FUNC_SPR(274, sprg2)
FUNC_SPR(275, sprg3)
FUNC_SPR(282, ear)
FUNC_SPR(287, pvr)
/* Instruction and data BAT (block address translation) registers. */
FUNC_SPR(528, ibat0u)
FUNC_SPR(529, ibat0l)
FUNC_SPR(530, ibat1u)
FUNC_SPR(531, ibat1l)
FUNC_SPR(532, ibat2u)
FUNC_SPR(533, ibat2l)
FUNC_SPR(534, ibat3u)
FUNC_SPR(535, ibat3l)
FUNC_SPR(536, dbat0u)
FUNC_SPR(537, dbat0l)
FUNC_SPR(538, dbat1u)
FUNC_SPR(539, dbat1l)
FUNC_SPR(540, dbat2u)
FUNC_SPR(541, dbat2l)
FUNC_SPR(542, dbat3u)
FUNC_SPR(543, dbat3l)
/* Implementation-specific (HID, cache control, debug) SPRs. */
FUNC_SPR(1008, hid0)
FUNC_SPR(1009, hid1)
FUNC_SPR(1010, iabr)
FUNC_SPR(1017, l2cr)
FUNC_SPR(1018, l3cr)
FUNC_SPR(1013, dabr)
FUNC_SPR(1023, pir)
157 
158 static __inline u_int32_t
159 ppc_mftbl (void)
160 {
161 	int ret;
162         __asm __volatile ("mftb %0" : "=r" (ret));
163 	return ret;
164 }
165 
/*
 * Read the full 64-bit time base.  On 32-bit PowerPC the time base must
 * be read as two halves (mftbu/mftb); the loop re-reads the upper half
 * and retries if it changed, so the two halves are a consistent pair.
 * "%0+1" names the register holding the low word of the 64-bit `tb'
 * operand pair (GCC multi-register operand addressing).
 */
static __inline u_int64_t
ppc_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm __volatile ("1: mftbu %0; mftb %0+1; mftbu %1; cmpw 0,%0,%1; bne 1b"
	     : "=r"(tb), "=r"(scratch));
	return tb;
}
176 
177 static __inline u_int32_t
178 ppc_mfmsr (void)
179 {
180 	int ret;
181         __asm __volatile ("mfmsr %0" : "=r" (ret));
182 	return ret;
183 }
184 
/* Write `val' into the machine state register (MSR). */
static __inline void
ppc_mtmsr (u_int32_t val)
{
        __asm __volatile ("mtmsr %0" :: "r" (val));
}
190 
/*
 * Load segment register: store `val' into the segment register selected
 * by bits 0-3 of `sn_shifted' (the segment number already shifted into
 * the top four bits, i.e. sn << ADDR_SR_SHIFT).
 */
static __inline void
ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
{
	__asm __volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted) );

}
197 
198 /*
199  * General functions to enable and disable interrupts
200  * without having inlined assembly code in many functions.
201  */
202 static __inline void
203 ppc_intr_enable(int enable)
204 {
205 	u_int32_t msr;
206 	if (enable != 0)  {
207 		msr = ppc_mfmsr();
208 		msr |= PSL_EE;
209 		ppc_mtmsr(msr);
210 	}
211 }
212 
213 static __inline int
214 ppc_intr_disable(void)
215 {
216 	u_int32_t emsr, dmsr;
217 	emsr = ppc_mfmsr();
218 	dmsr = emsr & ~PSL_EE;
219 	ppc_mtmsr(dmsr);
220 	return (emsr & PSL_EE);
221 }
222 #endif	/* _POWERPC_CPU_H_ */
223