/*	$OpenBSD: cpu.h,v 1.63 2016/05/07 22:46:54 kettenis Exp $	*/
/*	$NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $	*/

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_POWERPC_CPU_H_
#define	_POWERPC_CPU_H_

#include <machine/frame.h>

#include <sys/device.h>
#include <sys/sched.h>

struct cpu_info {
	struct device *ci_dev;		/* our device */
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	struct proc *ci_curproc;

	struct pcb *ci_curpcb;
	struct pmap *ci_curpm;
	struct proc *ci_fpuproc;
	struct proc *ci_vecproc;
	int ci_cpuid;

	volatile int ci_want_resched;
	volatile int ci_cpl;
	volatile int ci_ipending;

	volatile int	ci_flags;
#define	CI_FLAGS_SLEEPING		2

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	int ci_intrdepth;
	char *ci_intstk;
#define CPUSAVE_LEN	8
	register_t ci_tempsave[CPUSAVE_LEN];
	register_t ci_ddbsave[CPUSAVE_LEN];
#define DISISAVE_LEN	4
	register_t ci_disisave[DISISAVE_LEN];

	volatile u_int64_t ci_nexttimerevent;
	volatile u_int64_t ci_prevtb;
	volatile u_int64_t ci_lasttb;
	volatile u_int64_t ci_nextstatevent;
	int ci_statspending;

	volatile int	ci_ddb_paused;
#define	CI_DDB_RUNNING	0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

	u_int32_t ci_randseed;

#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

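/*
 * curcpu() returns the per-CPU cpu_info pointer, which the kernel
 * keeps in SPRG0 so it can be fetched with a single mfsprg.
 */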
static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile ("mfsprg %0,0" : "=r"(ci));
	return ci;
}

#define	curpcb			(curcpu()->ci_curpcb)
#define	curpm			(curcpu()->ci_curpm)

#define CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

#ifdef MULTIPROCESSOR

#define PPC_MAXPROCS		4

static __inline int
cpu_number(void)
{
	int pir;

	pir = curcpu()->ci_cpuid;
	return pir;
}

void	cpu_boot_secondary_processors(void);

#define CPU_IS_PRIMARY(ci)	((ci)->ci_cpuid == 0)
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = &cpu_info[0]; cii < ncpusfound; cii++, ci++)

void cpu_unidle(struct cpu_info *);

#else

#define PPC_MAXPROCS		1

#define cpu_number()		0

#define CPU_IS_PRIMARY(ci)	1
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)

#define cpu_unidle(ci)

#endif
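
/*
 * Illustrative use of the iterator macros above (a sketch, not code
 * from this file); the same caller works in both the MP and UP case:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%d\n", CPU_INFO_UNIT(ci));
 */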

#define CPU_BUSY_CYCLE()	do {} while (0)

#define MAXCPUS	PPC_MAXPROCS

extern struct cpu_info cpu_info[PPC_MAXPROCS];

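/*
 * Clock frame examination: PSL_PR in the saved MSR (srr1) indicates
 * the clock interrupt came from user mode, and a non-zero interrupt
 * depth means another interrupt handler was interrupted.
 */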
#define	CLKF_USERMODE(frame)	(((frame)->srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->srr0)
#define	CLKF_INTR(frame)	((frame)->depth != 0)

extern int ppc_cpuidle;
extern int ppc_proc_is_64b;
extern int ppc_nobat;

void	cpu_bootstrap(void);

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)		(trapframe(p)->srr0)
#define	PROC_STACK(p)		(trapframe(p)->fixreg[1])

void	delay(unsigned);
#define	DELAY(n)		delay(n)

#define	aston(p)		((p)->p_md.md_astpending = 1)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci) \
do {									\
	ci->ci_want_resched = 1;					\
	if (ci->ci_curproc != NULL)					\
		aston(ci->ci_curproc);					\
} while (0)
#define clear_resched(ci) (ci)->ci_want_resched = 0

#define	need_proftick(p)	aston(p)

void	signotify(struct proc *);

extern char *bootpath;

#ifndef	CACHELINESIZE
#define	CACHELINESIZE	32			/* For now		XXX */
#endif

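/*
 * Make freshly written instructions visible to instruction fetch:
 * push the modified data cache lines to memory (dcbst), wait for the
 * stores to complete (sync), invalidate the now-stale instruction
 * cache lines (icbi), and discard prefetched instructions (isync).
 */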
static __inline void
syncicache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbst 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
	p = from;
	l = len;
	do {
		__asm volatile ("icbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("isync");
}
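/*
 * Invalidate the data cache lines covering the range without writing
 * them back (dcbi); any modified data in the range is discarded.
 */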
static __inline void
invdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
}
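/*
 * Write back and then invalidate the data cache lines covering the
 * range (dcbf).
 */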
static __inline void
flushdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbf 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
}
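/*
 * FUNC_SPR(n, name) expands to a pair of accessors for special
 * purpose register n: ppc_mf<name>() reads it and ppc_mt<name>()
 * writes it.  For example, FUNC_SPR(8, lr) below yields ppc_mflr()
 * and ppc_mtlr().
 */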
#define FUNC_SPR(n, name) \
static __inline u_int32_t ppc_mf ## name (void)			\
{								\
	u_int32_t ret;						\
	__asm volatile ("mfspr %0," # n : "=r" (ret));		\
	return ret;						\
}								\
static __inline void ppc_mt ## name (u_int32_t val)		\
{								\
	__asm volatile ("mtspr "# n ",%0" :: "r" (val));	\
}

FUNC_SPR(0, mq)
FUNC_SPR(1, xer)
FUNC_SPR(4, rtcu)
FUNC_SPR(5, rtcl)
FUNC_SPR(8, lr)
FUNC_SPR(9, ctr)
FUNC_SPR(18, dsisr)
FUNC_SPR(19, dar)
FUNC_SPR(22, dec)
FUNC_SPR(25, sdr1)
FUNC_SPR(26, srr0)
FUNC_SPR(27, srr1)
FUNC_SPR(256, vrsave)
FUNC_SPR(272, sprg0)
FUNC_SPR(273, sprg1)
FUNC_SPR(274, sprg2)
FUNC_SPR(275, sprg3)
FUNC_SPR(280, asr)
FUNC_SPR(282, ear)
FUNC_SPR(287, pvr)
FUNC_SPR(311, hior)
FUNC_SPR(528, ibat0u)
FUNC_SPR(529, ibat0l)
FUNC_SPR(530, ibat1u)
FUNC_SPR(531, ibat1l)
FUNC_SPR(532, ibat2u)
FUNC_SPR(533, ibat2l)
FUNC_SPR(534, ibat3u)
FUNC_SPR(535, ibat3l)
FUNC_SPR(560, ibat4u)
FUNC_SPR(561, ibat4l)
FUNC_SPR(562, ibat5u)
FUNC_SPR(563, ibat5l)
FUNC_SPR(564, ibat6u)
FUNC_SPR(565, ibat6l)
FUNC_SPR(566, ibat7u)
FUNC_SPR(567, ibat7l)
FUNC_SPR(536, dbat0u)
FUNC_SPR(537, dbat0l)
FUNC_SPR(538, dbat1u)
FUNC_SPR(539, dbat1l)
FUNC_SPR(540, dbat2u)
FUNC_SPR(541, dbat2l)
FUNC_SPR(542, dbat3u)
FUNC_SPR(543, dbat3l)
FUNC_SPR(568, dbat4u)
FUNC_SPR(569, dbat4l)
FUNC_SPR(570, dbat5u)
FUNC_SPR(571, dbat5l)
FUNC_SPR(572, dbat6u)
FUNC_SPR(573, dbat6l)
FUNC_SPR(574, dbat7u)
FUNC_SPR(575, dbat7l)
FUNC_SPR(1009, hid1)
FUNC_SPR(1010, iabr)
FUNC_SPR(1017, l2cr)
FUNC_SPR(1018, l3cr)
FUNC_SPR(1013, dabr)
FUNC_SPR(1023, pir)

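/* Read the low 32 bits of the time base. */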
static __inline u_int32_t
ppc_mftbl(void)
{
	u_int32_t ret;
	__asm volatile ("mftb %0" : "=r" (ret));
	return ret;
}
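/*
 * Read the full 64-bit time base.  TBU is read before and after TBL;
 * if the two TBU reads differ, a carry out of TBL occurred in between
 * and the sequence is retried.
 */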
static __inline u_int64_t
ppc_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm volatile ("1: mftbu %0; mftb %0+1; mftbu %1;"
	    " cmpw 0,%0,%1; bne 1b" : "=r"(tb), "=r"(scratch));
	return tb;
}

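/*
 * Write the 64-bit time base.  TBL is cleared first so that a carry
 * cannot propagate into TBU between the two halves of the write.
 */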
static __inline void
ppc_mttb(u_int64_t tb)
{
	__asm volatile ("mttbl %0" :: "r"(0));
	__asm volatile ("mttbu %0" :: "r"((u_int32_t)(tb >> 32)));
	__asm volatile ("mttbl %0" :: "r"((u_int32_t)(tb & 0xffffffff)));
}

static __inline u_int32_t
ppc_mfmsr(void)
{
	u_int32_t ret;
	__asm volatile ("mfmsr %0" : "=r" (ret));
	return ret;
}

static __inline void
ppc_mtmsr(u_int32_t val)
{
	__asm volatile ("mtmsr %0" :: "r" (val));
}

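/*
 * Write a segment register indirectly; mtsrin takes the segment
 * number from the high-order bits of sn_shifted.
 */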
static __inline void
ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
{
	__asm volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted));
}

u_int64_t ppc64_mfscomc(void);
void ppc_mtscomc(u_int32_t);
void ppc64_mtscomc(u_int64_t);
u_int64_t ppc64_mfscomd(void);
void ppc_mtscomd(u_int32_t);
u_int32_t ppc_mfhid0(void);
void ppc_mthid0(u_int32_t);
u_int64_t ppc64_mfhid1(void);
void ppc64_mthid1(u_int64_t);
u_int64_t ppc64_mfhid4(void);
void ppc64_mthid4(u_int64_t);
u_int64_t ppc64_mfhid5(void);
void ppc64_mthid5(u_int64_t);

#include <machine/psl.h>

/*
 * General functions to enable and disable interrupts, so that
 * inline assembly does not have to be repeated in many functions.
 */
static __inline void
ppc_intr_enable(int enable)
{
	u_int32_t msr;
	if (enable != 0) {
		msr = ppc_mfmsr();
		msr |= PSL_EE;
		ppc_mtmsr(msr);
	}
}

static __inline int
ppc_intr_disable(void)
{
	u_int32_t emsr, dmsr;
	emsr = ppc_mfmsr();
	dmsr = emsr & ~PSL_EE;
	ppc_mtmsr(dmsr);
	return (emsr & PSL_EE);
}
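
/*
 * Typical usage (illustrative): ppc_intr_disable() returns the
 * previous PSL_EE state, which is handed back to ppc_intr_enable()
 * so interrupts are only re-enabled if they were enabled before.
 *
 *	int s;
 *
 *	s = ppc_intr_disable();
 *	...critical section...
 *	ppc_intr_enable(s);
 */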

int ppc_cpuspeed(int *);

/*
 * PowerPC CPU types
 */
#define	PPC_CPU_MPC601		1
#define	PPC_CPU_MPC603		3
#define	PPC_CPU_MPC604		4
#define	PPC_CPU_MPC603e		6
#define	PPC_CPU_MPC603ev	7
#define	PPC_CPU_MPC750		8
#define	PPC_CPU_MPC604ev	9
#define	PPC_CPU_MPC7400		12
#define	PPC_CPU_IBM970		0x0039
#define	PPC_CPU_IBM970FX	0x003c
#define	PPC_CPU_IBM970MP	0x0044
#define	PPC_CPU_IBM750FX	0x7000
#define	PPC_CPU_MPC7410		0x800c
#define	PPC_CPU_MPC7447A	0x8003
#define	PPC_CPU_MPC7448		0x8004
#define	PPC_CPU_MPC7450		0x8000
#define	PPC_CPU_MPC7455		0x8001
#define	PPC_CPU_MPC7457		0x8002
#define	PPC_CPU_MPC83xx		0x8083

/*
 * This needs to be included late since it relies on definitions higher
 * up in this file.
 */
#if defined(MULTIPROCESSOR) && defined(_KERNEL)
#include <sys/mplock.h>
#endif

#endif	/* _POWERPC_CPU_H_ */