/*	$OpenBSD: cpu.h,v 1.70 2021/07/06 09:34:06 kettenis Exp $	*/
/*	$NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $	*/

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_POWERPC_CPU_H_
#define	_POWERPC_CPU_H_

#include <machine/frame.h>

#include <sys/device.h>
#include <sys/sched.h>
#include <sys/srp.h>

struct cpu_info {
	struct device *ci_dev;		/* our device */
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	struct proc *ci_curproc;

	struct pcb *ci_curpcb;
	struct pmap *ci_curpm;
	struct proc *ci_fpuproc;
	struct proc *ci_vecproc;
	int ci_cpuid;

	volatile int ci_want_resched;
	volatile int ci_cpl;
	volatile int ci_ipending;

	volatile int ci_flags;
#define	CI_FLAGS_SLEEPING	2

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	int ci_intrdepth;
	char *ci_intstk;
#define	CPUSAVE_LEN	8
	register_t ci_tempsave[CPUSAVE_LEN];
	register_t ci_ddbsave[CPUSAVE_LEN];
#define	DISISAVE_LEN	4
	register_t ci_disisave[DISISAVE_LEN];

	volatile u_int64_t ci_nexttimerevent;
	volatile u_int64_t ci_prevtb;
	volatile u_int64_t ci_lasttb;
	volatile u_int64_t ci_nextstatevent;
	int ci_statspending;

	volatile int ci_ddb_paused;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

	u_int32_t ci_randseed;

#ifdef DIAGNOSTIC
	int ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
	char ci_panicbuf[512];
};

/* The cpu_info pointer for the current CPU is kept in SPRG0. */
static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile ("mfsprg %0,0" : "=r"(ci));
	return ci;
}

#define	curpcb	(curcpu()->ci_curpcb)
#define	curpm	(curcpu()->ci_curpm)

#define	CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

#ifdef MULTIPROCESSOR

#define	PPC_MAXPROCS	4

static __inline int
cpu_number(void)
{
	return curcpu()->ci_cpuid;
}

void	cpu_boot_secondary_processors(void);

#define	CPU_IS_PRIMARY(ci)	((ci)->ci_cpuid == 0)
#define	CPU_IS_RUNNING(ci)	1
#define	CPU_INFO_ITERATOR	int
#define	CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = &cpu_info[0]; cii < ncpusfound; cii++, ci++)

void	cpu_unidle(struct cpu_info *);

#else

#define	PPC_MAXPROCS	1

#define	cpu_number()	0

#define	CPU_IS_PRIMARY(ci)	1
#define	CPU_IS_RUNNING(ci)	1
#define	CPU_INFO_ITERATOR	int
#define	CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)

#define	cpu_unidle(ci)

#endif
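/*
 * Usage sketch (illustrative only, not part of this header): MI code
 * walks the CPUs with the iterator macros above; on uniprocessor
 * kernels the loop body runs exactly once, on curcpu().
 *
 *	struct cpu_info *ci;
 *	CPU_INFO_ITERATOR cii;
 *
 *	CPU_INFO_FOREACH(cii, ci) {
 *		printf("cpu%d: cpl %d\n", CPU_INFO_UNIT(ci), ci->ci_cpl);
 *	}
 */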
#define	CPU_BUSY_CYCLE()	do {} while (0)

#define	MAXCPUS	PPC_MAXPROCS

extern struct cpu_info cpu_info[PPC_MAXPROCS];

#define	CLKF_USERMODE(frame)	(((frame)->srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->srr0)
#define	CLKF_INTR(frame)	((frame)->depth != 0)

extern int ppc_cpuidle;
extern int ppc_proc_is_64b;
extern int ppc_nobat;

void	cpu_bootstrap(void);

/* XOR the two halves of the time base for a cheap source of entropy. */
static inline unsigned int
cpu_rnd_messybits(void)
{
	unsigned int hi, lo;

	__asm volatile("mftbu %0; mftb %1" : "=r" (hi), "=r" (lo));

	return (hi ^ lo);
}

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	(trapframe(p)->srr0)
#define	PROC_STACK(p)	(trapframe(p)->fixreg[1])

void	delay(unsigned);
#define	DELAY(n)	delay(n)

#define	aston(p)	((p)->p_md.md_astpending = 1)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci)						\
do {									\
	ci->ci_want_resched = 1;					\
	if (ci->ci_curproc != NULL)					\
		aston(ci->ci_curproc);					\
} while (0)
#define	clear_resched(ci)	(ci)->ci_want_resched = 0

#define	need_proftick(p)	aston(p)

void	signotify(struct proc *);

extern char *bootpath;

#ifndef	CACHELINESIZE
#define	CACHELINESIZE	32		/* For now	XXX */
#endif

/* Write the data cache back to memory, then invalidate the icache. */
static __inline void
syncicache(void *from, int len)
{
	int l;
	char *p = from;

	/* Extend the range so it covers whole cache lines. */
	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbst 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
	p = from;
	l = len;
	do {
		__asm volatile ("icbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("isync");
}

/* Invalidate the data cache over the range without writing it back. */
static __inline void
invdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
}

/* Write the data cache back to memory and invalidate it. */
static __inline void
flushdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbf 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
}

/* Generate ppc_mf<name>() and ppc_mt<name>() accessors for SPR n. */
#define	FUNC_SPR(n, name)						\
static __inline u_int32_t ppc_mf ## name(void)				\
{									\
	u_int32_t ret;							\
	__asm volatile ("mfspr %0," # n : "=r" (ret));			\
	return ret;							\
}									\
static __inline void ppc_mt ## name(u_int32_t val)			\
{									\
	__asm volatile ("mtspr " # n ",%0" :: "r" (val));		\
}
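/*
 * For illustration, FUNC_SPR(287, pvr) below expands to a matched pair
 * of accessors for SPR 287, the processor version register:
 *
 *	static __inline u_int32_t ppc_mfpvr(void)
 *	{
 *		u_int32_t ret;
 *		__asm volatile ("mfspr %0,287" : "=r" (ret));
 *		return ret;
 *	}
 *	static __inline void ppc_mtpvr(u_int32_t val)
 *	{
 *		__asm volatile ("mtspr 287,%0" :: "r" (val));
 *	}
 */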
FUNC_SPR(0, mq)
FUNC_SPR(1, xer)
FUNC_SPR(4, rtcu)
FUNC_SPR(5, rtcl)
FUNC_SPR(8, lr)
FUNC_SPR(9, ctr)
FUNC_SPR(18, dsisr)
FUNC_SPR(19, dar)
FUNC_SPR(22, dec)
FUNC_SPR(25, sdr1)
FUNC_SPR(26, srr0)
FUNC_SPR(27, srr1)
FUNC_SPR(256, vrsave)
FUNC_SPR(272, sprg0)
FUNC_SPR(273, sprg1)
FUNC_SPR(274, sprg2)
FUNC_SPR(275, sprg3)
FUNC_SPR(280, asr)
FUNC_SPR(282, ear)
FUNC_SPR(287, pvr)
FUNC_SPR(311, hior)
FUNC_SPR(528, ibat0u)
FUNC_SPR(529, ibat0l)
FUNC_SPR(530, ibat1u)
FUNC_SPR(531, ibat1l)
FUNC_SPR(532, ibat2u)
FUNC_SPR(533, ibat2l)
FUNC_SPR(534, ibat3u)
FUNC_SPR(535, ibat3l)
FUNC_SPR(560, ibat4u)
FUNC_SPR(561, ibat4l)
FUNC_SPR(562, ibat5u)
FUNC_SPR(563, ibat5l)
FUNC_SPR(564, ibat6u)
FUNC_SPR(565, ibat6l)
FUNC_SPR(566, ibat7u)
FUNC_SPR(567, ibat7l)
FUNC_SPR(536, dbat0u)
FUNC_SPR(537, dbat0l)
FUNC_SPR(538, dbat1u)
FUNC_SPR(539, dbat1l)
FUNC_SPR(540, dbat2u)
FUNC_SPR(541, dbat2l)
FUNC_SPR(542, dbat3u)
FUNC_SPR(543, dbat3l)
FUNC_SPR(568, dbat4u)
FUNC_SPR(569, dbat4l)
FUNC_SPR(570, dbat5u)
FUNC_SPR(571, dbat5l)
FUNC_SPR(572, dbat6u)
FUNC_SPR(573, dbat6l)
FUNC_SPR(574, dbat7u)
FUNC_SPR(575, dbat7l)
FUNC_SPR(1009, hid1)
FUNC_SPR(1010, iabr)
FUNC_SPR(1017, l2cr)
FUNC_SPR(1018, l3cr)
FUNC_SPR(1013, dabr)
FUNC_SPR(1023, pir)

static __inline u_int32_t
ppc_mftbl(void)
{
	u_int32_t ret;
	__asm volatile ("mftb %0" : "=r" (ret));
	return ret;
}

/*
 * Read the full 64-bit time base; reread the upper half until it is
 * stable so that a carry out of TBL cannot tear the value.
 */
static __inline u_int64_t
ppc_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm volatile ("1: mftbu %0; mftb %L0; mftbu %1;"
	    " cmpw 0,%0,%1; bne 1b" : "=r"(tb), "=r"(scratch));
	return tb;
}

/*
 * Write the time base; zero TBL first so no carry into TBU can occur
 * between the two halves of the update.
 */
static __inline void
ppc_mttb(u_int64_t tb)
{
	__asm volatile ("mttbl %0" :: "r"(0));
	__asm volatile ("mttbu %0" :: "r"((u_int32_t)(tb >> 32)));
	__asm volatile ("mttbl %0" :: "r"((u_int32_t)(tb & 0xffffffff)));
}

static __inline u_int32_t
ppc_mfmsr(void)
{
	u_int32_t ret;
	__asm volatile ("mfmsr %0" : "=r" (ret));
	return ret;
}

static __inline void
ppc_mtmsr(u_int32_t val)
{
	__asm volatile ("mtmsr %0" :: "r" (val));
}

/* The segment register number must already be in the top four bits. */
static __inline void
ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
{
	__asm volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted));
}

u_int64_t ppc64_mfscomc(void);
void	ppc_mtscomc(u_int32_t);
void	ppc64_mtscomc(u_int64_t);
u_int64_t ppc64_mfscomd(void);
void	ppc_mtscomd(u_int32_t);
u_int32_t ppc_mfhid0(void);
void	ppc_mthid0(u_int32_t);
u_int64_t ppc64_mfhid1(void);
void	ppc64_mthid1(u_int64_t);
u_int64_t ppc64_mfhid4(void);
void	ppc64_mthid4(u_int64_t);
u_int64_t ppc64_mfhid5(void);
void	ppc64_mthid5(u_int64_t);

#include <machine/psl.h>

/*
 * General functions to enable and disable interrupts
 * without having inlined assembly code in many functions.
 */
static __inline void
ppc_intr_enable(int enable)
{
	u_int32_t msr;

	if (enable != 0) {
		msr = ppc_mfmsr();
		msr |= PSL_EE;
		ppc_mtmsr(msr);
	}
}

static __inline int
ppc_intr_disable(void)
{
	u_int32_t emsr, dmsr;

	emsr = ppc_mfmsr();
	dmsr = emsr & ~PSL_EE;
	ppc_mtmsr(dmsr);
	return (emsr & PSL_EE);
}

static __inline u_long
intr_disable(void)
{
	return ppc_intr_disable();
}

static __inline void
intr_restore(u_long s)
{
	ppc_intr_enable(s);
}

int	ppc_cpuspeed(int *);

/*
 * PowerPC CPU types
 */
#define	PPC_CPU_MPC601		1
#define	PPC_CPU_MPC603		3
#define	PPC_CPU_MPC604		4
#define	PPC_CPU_MPC603e		6
#define	PPC_CPU_MPC603ev	7
#define	PPC_CPU_MPC750		8
#define	PPC_CPU_MPC604ev	9
#define	PPC_CPU_MPC7400		12
#define	PPC_CPU_IBM970		0x0039
#define	PPC_CPU_IBM970FX	0x003c
#define	PPC_CPU_IBM970MP	0x0044
#define	PPC_CPU_IBM750FX	0x7000
#define	PPC_CPU_MPC7410		0x800c
#define	PPC_CPU_MPC7447A	0x8003
#define	PPC_CPU_MPC7448		0x8004
#define	PPC_CPU_MPC7450		0x8000
#define	PPC_CPU_MPC7455		0x8001
#define	PPC_CPU_MPC7457		0x8002
#define	PPC_CPU_MPC83xx		0x8083

/*
 * This needs to be included late since it relies on definitions higher
 * up in this file.
 */
#if defined(MULTIPROCESSOR) && defined(_KERNEL)
#include <sys/mplock.h>
#endif

#endif	/* _POWERPC_CPU_H_ */
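/*
 * Usage sketch (illustrative only): the intr_disable()/intr_restore()
 * pair defined earlier in this file nests safely, because the saved
 * value is the caller's own PSL_EE state and intr_restore() re-enables
 * external interrupts only if they were enabled on entry:
 *
 *	u_long s;
 *
 *	s = intr_disable();
 *	... update per-CPU state ...
 *	intr_restore(s);
 */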