/*	$NetBSD: cpu_subr.c,v 1.108 2021/03/21 23:41:52 rin Exp $	*/

/*-
 * Copyright (c) 2001 Matt Thomas.
 * Copyright (c) 2001 Tsubai Masanari.
 * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by
 *	Internet Research Institute, Inc.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.108 2021/03/21 23:41:52 rin Exp $");

#include "sysmon_envsys.h"

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"
#include "opt_ppccache.h"
#include "opt_ppcparam.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/types.h>
#include <sys/lwp.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>

#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/oea/hid.h>
#include <powerpc/oea/hid_601.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/cpufeat.h>

#include <dev/sysmon/sysmonvar.h>

static void cpu_enable_l2cr(register_t);
static void cpu_enable_l3cr(register_t);
static void cpu_config_l2cr(int);
static void cpu_config_l3cr(int);
static void cpu_probe_speed(struct cpu_info *);
static void cpu_idlespin(void);
static void cpu_set_dfs_xcall(void *, void *);
#if NSYSMON_ENVSYS > 0
static void cpu_tau_setup(struct cpu_info *);
static void cpu_tau_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif

extern void init_scom_speedctl(void);

int cpu = -1;
int ncpus;

struct fmttab {
	register_t fmt_mask;
	register_t fmt_value;
	const char *fmt_string;
};

/*
 * This should be one per CPU, but since we only support it on 750
 * variants it doesn't really matter: none of them support SMP anyway.
 */
envsys_data_t sensor;

static const struct fmttab cpu_7450_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 256KB L2 cache" },
	{ L2CR_L2PE, 0, " no parity" },
	{ L2CR_L2PE, L2CR_L2PE, " parity enabled" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7448_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 1MB L2 cache" },
	{ L2CR_L2PE, 0, " no parity" },
	{ L2CR_L2PE, L2CR_L2PE, " parity enabled" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7457_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 512KB L2 cache" },
	{ L2CR_L2PE, 0, " no parity" },
	{ L2CR_L2PE, L2CR_L2PE, " parity enabled" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7450_l3cr_formats[] = {
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO, " data-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3IO, " instruction-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO|L3CR_L3IO, " locked" },
	{ L3CR_L3SIZ, L3SIZ_2M, " 2MB" },
	{ L3CR_L3SIZ, L3SIZ_1M, " 1MB" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE|L3CR_L3APE, " parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE, " data-parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3APE, " address-parity" },
	{ L3CR_L3PE|L3CR_L3APE, 0, " no-parity" },
	{ L3CR_L3SIZ, ~0, " L3 cache" },
	{ L3CR_L3RT, L3RT_MSUG2_DDR, " (DDR SRAM)" },
	{ L3CR_L3RT, L3RT_PIPELINE_LATE, " (LW SRAM)" },
	{ L3CR_L3RT, L3RT_PB2_SRAM, " (PB2 SRAM)" },
	{ L3CR_L3CLK, ~0, " at" },
	{ L3CR_L3CLK, L3CLK_20, " 2:1" },
	{ L3CR_L3CLK, L3CLK_25, " 2.5:1" },
	{ L3CR_L3CLK, L3CLK_30, " 3:1" },
	{ L3CR_L3CLK, L3CLK_35, " 3.5:1" },
	{ L3CR_L3CLK, L3CLK_40, " 4:1" },
	{ L3CR_L3CLK, L3CLK_50, " 5:1" },
	{ L3CR_L3CLK, L3CLK_60, " 6:1" },
	{ L3CR_L3CLK, ~0, " ratio" },
	{ 0, 0, NULL },
};

static const struct fmttab cpu_ibm750_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ 0, ~0, " 512KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2PE, L2CR_L2PE, " with ECC" },
	{ 0, ~0, " L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2PE, L2CR_L2PE, " parity" },
	{ L2CR_L2PE, 0, " no-parity" },
	{ L2CR_L2SIZ, L2SIZ_2M, " 2MB" },
	{ L2CR_L2SIZ, L2SIZ_1M, " 1MB" },
	{ L2CR_L2SIZ, L2SIZ_512K, " 512KB" },
	{ L2CR_L2SIZ, L2SIZ_256K, " 256KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2E, ~0, " L2 cache" },
	{ L2CR_L2RAM, L2RAM_FLOWTHRU_BURST, " (FB SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_LATE, " (LW SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_BURST, " (PB SRAM)" },
	{ L2CR_L2CLK, ~0, " at" },
	{ L2CR_L2CLK, L2CLK_10, " 1:1" },
	{ L2CR_L2CLK, L2CLK_15, " 1.5:1" },
	{ L2CR_L2CLK, L2CLK_20, " 2:1" },
	{ L2CR_L2CLK, L2CLK_25, " 2.5:1" },
	{ L2CR_L2CLK, L2CLK_30, " 3:1" },
	{ L2CR_L2CLK, L2CLK_35, " 3.5:1" },
	{ L2CR_L2CLK, L2CLK_40, " 4:1" },
	{ L2CR_L2CLK, ~0, " ratio" },
	{ 0, 0, NULL }
};

static void cpu_fmttab_print(const struct fmttab *, register_t);

struct cputab {
	const char name[8];
	uint16_t version;
	uint16_t revfmt;
};
#define	REVFMT_MAJMIN	1	/* %u.%u */
#define	REVFMT_HEX	2	/* 0x%04x */
#define	REVFMT_DEC	3	/* %u */
static const struct cputab models[] = {
	{ "601",	MPC601,		REVFMT_DEC },
	{ "602",	MPC602,		REVFMT_DEC },
	{ "603",	MPC603,		REVFMT_MAJMIN },
	{ "603e",	MPC603e,	REVFMT_MAJMIN },
	{ "603ev",	MPC603ev,	REVFMT_MAJMIN },
	{ "G2",		MPCG2,		REVFMT_MAJMIN },
	{ "604",	MPC604,		REVFMT_MAJMIN },
	{ "604e",	MPC604e,	REVFMT_MAJMIN },
	{ "604ev",	MPC604ev,	REVFMT_MAJMIN },
	{ "620",	MPC620,		REVFMT_HEX },
	{ "750",	MPC750,		REVFMT_MAJMIN },
	{ "750FX",	IBM750FX,	REVFMT_MAJMIN },
	{ "750GX",	IBM750GX,	REVFMT_MAJMIN },
	{ "7400",	MPC7400,	REVFMT_MAJMIN },
	{ "7410",	MPC7410,	REVFMT_MAJMIN },
	{ "7450",	MPC7450,	REVFMT_MAJMIN },
	{ "7455",	MPC7455,	REVFMT_MAJMIN },
	{ "7457",	MPC7457,	REVFMT_MAJMIN },
	{ "7447A",	MPC7447A,	REVFMT_MAJMIN },
	{ "7448",	MPC7448,	REVFMT_MAJMIN },
	{ "8240",	MPC8240,	REVFMT_MAJMIN },
	{ "8245",	MPC8245,	REVFMT_MAJMIN },
	{ "970",	IBM970,		REVFMT_MAJMIN },
	{ "970FX",	IBM970FX,	REVFMT_MAJMIN },
	{ "970MP",	IBM970MP,	REVFMT_MAJMIN },
	{ "POWER3II",	IBMPOWER3II,	REVFMT_MAJMIN },
	{ "",		0,		REVFMT_HEX }
};

#include <powerpc/oea/bat.h>
extern struct bat battable[];
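
/*
 * Per-CPU state.  The boot processor's entry is statically initialized
 * with lwp0 and the kernel BAT table for use during early boot; under
 * MULTIPROCESSOR the remaining entries are filled in when the
 * secondaries are spun up in cpu_spinup()/cpu_hatch().
 */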
#ifdef MULTIPROCESSOR
struct cpu_info cpu_info[CPU_MAXNUM] = {
	[0] = {
		.ci_curlwp = &lwp0,
		.ci_battable = battable,
	},
};
volatile struct cpu_hatch_data *cpu_hatch_data;
volatile int cpu_hatch_stack;
#define	HATCH_STACK_SIZE 0x1000
extern int ticks_per_intr;
#include <powerpc/pic/picvar.h>
#include <powerpc/pic/ipivar.h>
#else
struct cpu_info cpu_info[1] = {
	[0] = {
		.ci_curlwp = &lwp0,
		.ci_battable = battable,
	},
};
#endif /*MULTIPROCESSOR*/

int cpu_altivec;
register_t cpu_psluserset;
register_t cpu_pslusermod;
register_t cpu_pslusermask = 0xffff;

unsigned long oeacpufeat;

void
cpu_features_probe(void)
{
	static bool feature_probe_done;

	u_int pvr, vers;

	if (feature_probe_done) {
		return;
	}

	pvr = mfpvr();
	vers = pvr >> 16;

	if ((vers >= IBMRS64II && vers <= IBM970GX) || vers == MPC620 ||
	    vers == IBMCELL || vers == IBMPOWER6P5) {
		oeacpufeat |= OEACPU_64;
		oeacpufeat |= OEACPU_64_BRIDGE;
		oeacpufeat |= OEACPU_NOBAT;

	} else if (vers == MPC601) {
		oeacpufeat |= OEACPU_601;

	} else if (MPC745X_P(vers)) {
		if (vers != MPC7450) {
			/* Enable more SPRG registers */
			oeacpufeat |= OEACPU_HIGHSPRG;

			/* Enable more BAT registers */
			oeacpufeat |= OEACPU_HIGHBAT;

			/* Enable larger BAT registers */
			oeacpufeat |= OEACPU_XBSEN;
		}

	} else if (vers == IBM750FX || vers == IBM750GX) {
		oeacpufeat |= OEACPU_HIGHBAT;
	}

	feature_probe_done = true;
}

void
cpu_features_enable(void)
{
	static bool feature_enable_done;

	if (feature_enable_done) {
		return;
	}

	u_int pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;

	if (MPC745X_P(vers)) {
		register_t hid0 = mfspr(SPR_HID0);
		register_t hid1 = mfspr(SPR_HID1);

		const register_t ohid0 = hid0;

		if (oeacpufeat & OEACPU_HIGHBAT) {
			hid0 |= HID0_HIGH_BAT_EN;
		}

		if (oeacpufeat & OEACPU_XBSEN) {
			hid0 |= HID0_XBSEN;
		}

		if (hid0 != ohid0) {
			mtspr(SPR_HID0, hid0);
			__asm volatile("sync;isync");
		}

		/* Enable address broadcasting for MP systems */
		hid1 |= HID1_SYNCBE | HID1_ABE;

		mtspr(SPR_HID1, hid1);
		__asm volatile("sync;isync");
	}

	feature_enable_done = true;
}

/* This is to be called from locore.S, and nowhere else. */

void
cpu_model_init(void)
{
	/*
	 * This is just a wrapper for backwards-compatibility, and will
	 * probably be garbage-collected in the near future.
	 */
	cpu_features_probe();
	cpu_features_enable();
}

void
cpu_fmttab_print(const struct fmttab *fmt, register_t data)
{
	for (; fmt->fmt_mask != 0 || fmt->fmt_value != 0; fmt++) {
		if ((~fmt->fmt_mask & fmt->fmt_value) != 0 ||
		    (data & fmt->fmt_mask) == fmt->fmt_value)
			aprint_normal("%s", fmt->fmt_string);
	}
}

void
cpu_idlespin(void)
{
	register_t msr;

	if (powersave <= 0)
		return;

#if defined(_ARCH_PPC64) || defined (PPC_OEA64_BRIDGE)
	if (cpu_altivec)
		__asm volatile("dssall");
#endif

	__asm volatile(
	    "sync;"
	    "mfmsr %0;"
	    "oris %0,%0,%1@h;"	/* enter power saving mode */
	    "mtmsr %0;"
	    "isync;"
	    : "=r"(msr)
	    : "J"(PSL_POW));
}
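
/*
 * Determine the I/D cache geometry of the running CPU from its PVR and
 * record it in curcpu()->ci_ci, then recolor UVM's free pages to match
 * the data cache way size.
 */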
void
cpu_probe_cache(void)
{
	u_int assoc, pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;

	/* Presently common across almost all implementations. */
	curcpu()->ci_ci.dcache_line_size = 32;
	curcpu()->ci_ci.icache_line_size = 32;

	switch (vers) {
#define	K	*1024
	case IBM750FX:
	case IBM750GX:
	case MPC601:
	case MPC750:
	case MPC7400:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 32 K;
		assoc = 8;
		break;
	case MPC603:
		curcpu()->ci_ci.dcache_size = 8 K;
		curcpu()->ci_ci.icache_size = 8 K;
		assoc = 2;
		break;
	case MPC603e:
	case MPC603ev:
	case MPC604:
	case MPC8240:
	case MPC8245:
	case MPCG2:
		curcpu()->ci_ci.dcache_size = 16 K;
		curcpu()->ci_ci.icache_size = 16 K;
		assoc = 4;
		break;
	case MPC604e:
	case MPC604ev:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 32 K;
		assoc = 4;
		break;
	case IBMPOWER3II:
		curcpu()->ci_ci.dcache_size = 64 K;
		curcpu()->ci_ci.icache_size = 32 K;
		curcpu()->ci_ci.dcache_line_size = 128;
		curcpu()->ci_ci.icache_line_size = 128;
		assoc = 128; /* not a typo */
		break;
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 64 K;
		curcpu()->ci_ci.dcache_line_size = 128;
		curcpu()->ci_ci.icache_line_size = 128;
		assoc = 2;
		break;

	default:
		curcpu()->ci_ci.dcache_size = PAGE_SIZE;
		curcpu()->ci_ci.icache_size = PAGE_SIZE;
		assoc = 1;
#undef	K
	}

	/*
	 * Possibly recolor.
	 */
	uvm_page_recolor(atop(curcpu()->ci_ci.dcache_size / assoc));
}
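
/*
 * Attach-time initialization common to every OEA CPU: record the device
 * and cpu id, set PIR and run cpu_setup() on the boot processor, and
 * hand secondaries to mi_cpu_attach() (or reject them when the kernel
 * lacks MULTIPROCESSOR support).
 */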
struct cpu_info *
cpu_attach_common(device_t self, int id)
{
	struct cpu_info *ci;
	u_int pvr, vers;

	ci = &cpu_info[id];
#ifndef MULTIPROCESSOR
	/*
	 * If this isn't the primary CPU, print an error message
	 * and just bail out.
	 */
	if (id != 0) {
		aprint_naive("\n");
		aprint_normal(": ID %d\n", id);
		aprint_normal_dev(self,
		    "processor off-line; "
		    "multiprocessor support not present in kernel\n");
		return (NULL);
	}
#endif

	ci->ci_cpuid = id;
	ci->ci_idepth = -1;
	ci->ci_dev = self;
	ci->ci_idlespin = cpu_idlespin;

#ifdef MULTIPROCESSOR
	/* Register IPI Interrupt */
	if ((ipiops.ppc_establish_ipi) && (id == 0))
		ipiops.ppc_establish_ipi(IST_LEVEL, IPL_HIGH, NULL);
#endif

	pvr = mfpvr();
	vers = (pvr >> 16) & 0xffff;

	switch (id) {
	case 0:
		/* load my cpu_number to PIR */
		switch (vers) {
		case MPC601:
		case MPC604:
		case MPC604e:
		case MPC604ev:
		case MPC7400:
		case MPC7410:
		case MPC7447A:
		case MPC7448:
		case MPC7450:
		case MPC7455:
		case MPC7457:
			mtspr(SPR_PIR, id);
		}
		cpu_setup(self, ci);
		break;
	default:
		aprint_naive("\n");
		if (id >= CPU_MAXNUM) {
			aprint_normal(": more than %d cpus?\n", CPU_MAXNUM);
			panic("cpuattach");
		}
#ifndef MULTIPROCESSOR
		aprint_normal(" not configured\n");
		return NULL;
#else
		mi_cpu_attach(ci);
		break;
#endif
	}
	return (ci);
}
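
/*
 * Per-CPU setup, run on the boot processor from cpu_attach_common() and
 * on secondaries from cpu_hatch(): select a power-saving mode and
 * feature bits in HID0, print the HID0 state, report the core speed and
 * L2/L3 cache configuration, and attach the TAU temperature sensor and
 * the per-CPU event counters.
 */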
void
cpu_setup(device_t self, struct cpu_info *ci)
{
	u_int pvr, vers;
	const char * const xname = device_xname(self);
	const char *bitmask;
	char hidbuf[128];
	char model[80];
#if defined(PPC_OEA64_BRIDGE) || defined(_ARCH_PPC64)
	char hidbuf_u[128];
	const char *bitmasku = NULL;
	volatile uint64_t hid64_0, hid64_0_save;
#endif
#if !defined(_ARCH_PPC64)
	register_t hid0 = 0, hid0_save = 0;
#endif

	pvr = mfpvr();
	vers = (pvr >> 16) & 0xffff;

	cpu_identify(model, sizeof(model));
	aprint_naive("\n");
	aprint_normal(": %s, ID %d%s\n", model, cpu_number(),
	    cpu_number() == 0 ? " (primary)" : "");

	/* set the cpu number */
	ci->ci_cpuid = cpu_number();
#if defined(_ARCH_PPC64)
	__asm volatile("mfspr %0,%1" : "=r"(hid64_0) : "K"(SPR_HID0));
	hid64_0_save = hid64_0;
#else
#if defined(PPC_OEA64_BRIDGE)
	if ((oeacpufeat & OEACPU_64_BRIDGE) != 0)
		hid64_0_save = hid64_0 = mfspr(SPR_HID0);
	else
#endif
		hid0_save = hid0 = mfspr(SPR_HID0);
#endif

	cpu_probe_cache();

	/*
	 * Configure power-saving mode.
	 */
	switch (vers) {
#if !defined(_ARCH_PPC64)
	case MPC604:
	case MPC604e:
	case MPC604ev:
		/*
		 * Do not have HID0 support settings, but can support
		 * MSR[POW] off
		 */
		powersave = 1;
		break;

	case MPC603:
	case MPC603e:
	case MPC603ev:
	case MPC7400:
	case MPC7410:
	case MPC8240:
	case MPC8245:
	case MPCG2:
		/* Select DOZE mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_DOZE | HID0_DPM;
		powersave = 1;
		break;

	case MPC750:
	case IBM750FX:
	case IBM750GX:
		/* Select NAP mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_NAP | HID0_DPM;
		powersave = 1;
		break;

	case MPC7447A:
	case MPC7448:
	case MPC7457:
	case MPC7455:
	case MPC7450:
		/* Enable the 7450 branch caches */
		hid0 |= HID0_SGE | HID0_BTIC;
		hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT;
		/* Disable BTIC on 7450 Rev 2.0 or earlier */
		if (vers == MPC7450 && (pvr & 0xFFFF) <= 0x0200)
			hid0 &= ~HID0_BTIC;
		/* Select NAP mode. */
		hid0 &= ~HID0_SLEEP;
		/* XXX my quicksilver hangs if nap is enabled */
		if (vers != MPC7450) {
			hid0 |= HID0_NAP | HID0_DPM;
			powersave = 1;
		}
		break;
#endif

	case IBM970:
	case IBM970FX:
	case IBM970MP:
#if defined(_ARCH_PPC64) || defined (PPC_OEA64_BRIDGE)
#if !defined(_ARCH_PPC64)
		KASSERT((oeacpufeat & OEACPU_64_BRIDGE) != 0);
#endif
		hid64_0 &= ~(HID0_64_DOZE | HID0_64_NAP | HID0_64_DEEPNAP);
		hid64_0 |= HID0_64_NAP | HID0_64_DPM | HID0_64_EX_TBEN |
		    HID0_64_TB_CTRL | HID0_64_EN_MCHK;
		powersave = 1;
		break;
#endif
	case IBMPOWER3II:
	default:
		/* No power-saving mode is available. */ ;
	}

#ifdef NAPMODE
	switch (vers) {
	case IBM750FX:
	case IBM750GX:
	case MPC750:
	case MPC7400:
		/* Select NAP mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_NAP;
		break;
	}
#endif

	switch (vers) {
	case IBM750FX:
	case IBM750GX:
	case MPC750:
		hid0 &= ~HID0_DBP;		/* XXX correct? */
		hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT;
		break;

	case MPC7400:
	case MPC7410:
		hid0 &= ~HID0_SPD;
		hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT;
		hid0 |= HID0_EIEC;
		break;
	}

	/*
	 * according to the 603e manual this is necessary for an external L2
	 * cache to work properly
	 */
	switch (vers) {
	case MPC603e:
		hid0 |= HID0_ABE;
	}

#if defined(_ARCH_PPC64) || defined(PPC_OEA64_BRIDGE)
#if defined(PPC_OEA64_BRIDGE)
	if ((oeacpufeat & OEACPU_64_BRIDGE) != 0) {
#endif
		if (hid64_0 != hid64_0_save) {
			mtspr64(SPR_HID0, hid64_0);
		}
#if defined(PPC_OEA64_BRIDGE)
	} else {
#endif
#endif

#if !defined(_ARCH_PPC64)
		if (hid0 != hid0_save) {
			mtspr(SPR_HID0, hid0);
			__asm volatile("sync;isync");
		}
#endif
#if defined(PPC_OEA64_BRIDGE)
	}
#endif

	switch (vers) {
	case MPC601:
		bitmask = HID0_601_BITMASK;
		break;
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		bitmask = HID0_7450_BITMASK;
		break;
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		bitmask = HID0_970_BITMASK;
#if defined(PPC_OEA64_BRIDGE) || defined(_ARCH_PPC64)
		bitmasku = HID0_970_BITMASK_U;
#endif
		break;
	default:
		bitmask = HID0_BITMASK;
		break;
	}

#if defined(PPC_OEA64_BRIDGE) || defined(_ARCH_PPC64)
	if (bitmasku != NULL) {
		snprintb(hidbuf, sizeof hidbuf, bitmask, hid64_0 & 0xffffffff);
		snprintb(hidbuf_u, sizeof hidbuf_u, bitmasku, hid64_0 >> 32);
		aprint_normal_dev(self, "HID0 %s %s, powersave: %d\n",
		    hidbuf_u, hidbuf, powersave);
	} else
#endif
	{
		snprintb(hidbuf, sizeof hidbuf, bitmask, hid0);
		aprint_normal_dev(self, "HID0 %s, powersave: %d\n",
		    hidbuf, powersave);
	}

	ci->ci_khz = 0;

	/*
	 * Display speed and cache configuration.
	 */
	switch (vers) {
	case MPC604:
	case MPC604e:
	case MPC604ev:
	case MPC750:
	case IBM750FX:
	case IBM750GX:
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		aprint_normal_dev(self, "");
		cpu_probe_speed(ci);
		aprint_normal("%u.%02u MHz",
		    ci->ci_khz / 1000, (ci->ci_khz / 10) % 100);
		switch (vers) {
		case MPC7450: /* 7441 does not have L3! */
		case MPC7455: /* 7445 does not have L3! */
		case MPC7457: /* 7447 does not have L3! */
			cpu_config_l3cr(vers);
			break;
		case IBM750FX:
		case IBM750GX:
		case MPC750:
		case MPC7400:
		case MPC7410:
		case MPC7447A:
		case MPC7448:
			cpu_config_l2cr(pvr);
			break;
		default:
			break;
		}
		aprint_normal("\n");
		break;
	}

#if NSYSMON_ENVSYS > 0
	/*
	 * Attach MPC750 temperature sensor to the envsys subsystem.
	 * XXX the 74xx series also has this sensor, but it is not
	 * XXX supported by Motorola and may return values that are off by
	 * XXX 35-55 degrees C.
	 */
	if (vers == MPC750 || vers == IBM750FX || vers == IBM750GX)
		cpu_tau_setup(ci);
#endif

#if defined(PPC_OEA64) || defined(PPC_OEA64_BRIDGE)
	if (vers == IBM970MP)
		init_scom_speedctl();
#endif

	evcnt_attach_dynamic(&ci->ci_ev_clock, EVCNT_TYPE_INTR,
	    NULL, xname, "clock");
	evcnt_attach_dynamic(&ci->ci_ev_traps, EVCNT_TYPE_TRAP,
	    NULL, xname, "traps");
	evcnt_attach_dynamic(&ci->ci_ev_kdsi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "kernel DSI traps");
	evcnt_attach_dynamic(&ci->ci_ev_udsi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "user DSI traps");
	evcnt_attach_dynamic(&ci->ci_ev_udsi_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_udsi, xname, "user DSI failures");
	evcnt_attach_dynamic(&ci->ci_ev_kisi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "kernel ISI traps");
	evcnt_attach_dynamic(&ci->ci_ev_isi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "user ISI traps");
	evcnt_attach_dynamic(&ci->ci_ev_isi_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_isi, xname, "user ISI failures");
	evcnt_attach_dynamic(&ci->ci_ev_scalls, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "system call traps");
	evcnt_attach_dynamic(&ci->ci_ev_pgm, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "PGM traps");
	evcnt_attach_dynamic(&ci->ci_ev_fpu, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "FPU unavailable traps");
	evcnt_attach_dynamic(&ci->ci_ev_fpusw, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_fpu, xname, "FPU context switches");
	evcnt_attach_dynamic(&ci->ci_ev_ali, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "user alignment traps");
	evcnt_attach_dynamic(&ci->ci_ev_ali_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_ali, xname, "user alignment failures");
	evcnt_attach_dynamic(&ci->ci_ev_umchk, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_umchk, xname, "user MCHK failures");
	evcnt_attach_dynamic(&ci->ci_ev_vec, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "AltiVec unavailable");
#ifdef ALTIVEC
	if (cpu_altivec) {
		evcnt_attach_dynamic(&ci->ci_ev_vecsw, EVCNT_TYPE_TRAP,
		    &ci->ci_ev_vec, xname, "AltiVec context switches");
	}
#endif
	evcnt_attach_dynamic(&ci->ci_ev_ipi, EVCNT_TYPE_INTR,
	    NULL, xname, "IPIs");
}

/*
 * According to a document labeled "PVR Register Settings":
 ** For integrated microprocessors the PVR register inside the device
 ** will identify the version of the microprocessor core. You must also
 ** read the Device ID, PCI register 02, to identify the part and the
 ** Revision ID, PCI register 08, to identify the revision of the
 ** integrated microprocessor.
 * This apparently applies to 8240/8245/8241, PVR 00810101 and 80811014
 */
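
/*
 * Format a human-readable model string, e.g. "750 (Revision 2.2)", into
 * the caller-supplied buffer, based on the PVR and the models[] table
 * above.
 */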
void
cpu_identify(char *str, size_t len)
{
	u_int pvr, major, minor;
	uint16_t vers, rev, revfmt;
	const struct cputab *cp;
	size_t n;

	pvr = mfpvr();
	vers = pvr >> 16;
	rev = pvr;

	switch (vers) {
	case MPC7410:
		minor = (pvr >> 0) & 0xff;
		major = minor <= 4 ? 1 : 2;
		break;
	case MPCG2: /*XXX see note above */
		major = (pvr >> 4) & 0xf;
		minor = (pvr >> 0) & 0xf;
		break;
	default:
		major = (pvr >> 8) & 0xf;
		minor = (pvr >> 0) & 0xf;
	}

	for (cp = models; cp->name[0] != '\0'; cp++) {
		if (cp->version == vers)
			break;
	}

	if (cpu == -1)
		cpu = vers;

	revfmt = cp->revfmt;
	if (rev == MPC750 && pvr == 15) {
		revfmt = REVFMT_HEX;
	}

	if (cp->name[0] != '\0') {
		n = snprintf(str, len, "%s (Revision ", cp->name);
	} else {
		n = snprintf(str, len, "Version %#x (Revision ", vers);
	}
	if (len > n) {
		switch (revfmt) {
		case REVFMT_MAJMIN:
			snprintf(str + n, len - n, "%u.%u)", major, minor);
			break;
		case REVFMT_HEX:
			snprintf(str + n, len - n, "0x%04x)", rev);
			break;
		case REVFMT_DEC:
			snprintf(str + n, len - n, "%u)", rev);
			break;
		}
	}
}

#ifdef L2CR_CONFIG
u_int l2cr_config = L2CR_CONFIG;
#else
u_int l2cr_config = 0;
#endif

#ifdef L3CR_CONFIG
u_int l3cr_config = L3CR_CONFIG;
#else
u_int l3cr_config = 0;
#endif
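
/*
 * Enable the external L2 cache with interrupts disabled: clear L2E,
 * wait for the L2 clock to stabilize, perform a global invalidate
 * (polling L2I on 745x parts, L2IP on the others), then set L2E.
 */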
void
cpu_enable_l2cr(register_t l2cr)
{
	register_t msr, x;
	uint16_t vers;

	vers = mfpvr() >> 16;

	/* Disable interrupts and set the cache config bits. */
	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
#ifdef ALTIVEC
	if (cpu_altivec)
		__asm volatile("dssall");
#endif
	__asm volatile("sync");
	mtspr(SPR_L2CR, l2cr & ~L2CR_L2E);
	__asm volatile("sync");

	/* Wait for L2 clock to be stable (640 L2 clocks). */
	delay(100);

	/* Invalidate all L2 contents. */
	if (MPC745X_P(vers)) {
		mtspr(SPR_L2CR, l2cr | L2CR_L2I);
		do {
			x = mfspr(SPR_L2CR);
		} while (x & L2CR_L2I);
	} else {
		mtspr(SPR_L2CR, l2cr | L2CR_L2I);
		do {
			x = mfspr(SPR_L2CR);
		} while (x & L2CR_L2IP);
	}
	/* Enable L2 cache. */
	l2cr |= L2CR_L2E;
	mtspr(SPR_L2CR, l2cr);
	mtmsr(msr);
}

void
cpu_enable_l3cr(register_t l3cr)
{
	register_t x;

	/* By The Book (numbered steps from section 3.7.1.3 of MPC7450UM) */

	/*
	 * 1: Set all L3CR bits for final config except L3E, L3I, L3PE, and
	 *    L3CLKEN.  (also mask off reserved bits in case they were included
	 *    in L3CR_CONFIG)
	 */
	l3cr &= ~(L3CR_L3E|L3CR_L3I|L3CR_L3PE|L3CR_L3CLKEN|L3CR_RESERVED);
	mtspr(SPR_L3CR, l3cr);

	/* 2: Set L3CR[5] (otherwise reserved bit) to 1 */
	l3cr |= 0x04000000;
	mtspr(SPR_L3CR, l3cr);

	/* 3: Set L3CLKEN to 1 */
	l3cr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 4/5: Perform a global cache invalidate (ref section 3.7.3.6) */
	__asm volatile("dssall;sync");
	/* L3 cache is already disabled, no need to clear L3E */
	mtspr(SPR_L3CR, l3cr|L3CR_L3I);
	do {
		x = mfspr(SPR_L3CR);
	} while (x & L3CR_L3I);

	/* 6: Clear L3CLKEN to 0 */
	l3cr &= ~L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 7: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);

	/* 8: Set L3E and L3CLKEN */
	l3cr |= (L3CR_L3E|L3CR_L3CLKEN);
	mtspr(SPR_L3CR, l3cr);

	/* 9: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);
}

void
cpu_config_l2cr(int pvr)
{
	register_t l2cr;
	u_int vers = (pvr >> 16) & 0xffff;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	if ((l2cr & L2CR_L2E) == 0) {
		aprint_normal(" L2 cache present but not enabled ");
		return;
	}
	aprint_normal(",");

	switch (vers) {
	case IBM750FX:
	case IBM750GX:
		cpu_fmttab_print(cpu_ibm750_l2cr_formats, l2cr);
		break;
	case MPC750:
		if ((pvr & 0xffffff00) == 0x00082200 /* IBM750CX */ ||
		    (pvr & 0xffffef00) == 0x00082300 /* IBM750CXe */)
			cpu_fmttab_print(cpu_ibm750_l2cr_formats, l2cr);
		else
			cpu_fmttab_print(cpu_l2cr_formats, l2cr);
		break;
	case MPC7447A:
	case MPC7457:
		cpu_fmttab_print(cpu_7457_l2cr_formats, l2cr);
		return;
	case MPC7448:
		cpu_fmttab_print(cpu_7448_l2cr_formats, l2cr);
		return;
	case MPC7450:
	case MPC7455:
		cpu_fmttab_print(cpu_7450_l2cr_formats, l2cr);
		break;
	default:
		cpu_fmttab_print(cpu_l2cr_formats, l2cr);
		break;
	}
}
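
/*
 * Report the L2 (and, where present, L3) cache state on 745x-class
 * CPUs, first enabling the caches from l2cr_config/l3cr_config if the
 * firmware left them disabled.  For the 7447A/7457 and the 7448 only
 * the L2 state is printed.
 */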
void
cpu_config_l3cr(int vers)
{
	register_t l2cr;
	register_t l3cr;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	aprint_normal(",");
	switch (vers) {
	case MPC7447A:
	case MPC7457:
		cpu_fmttab_print(cpu_7457_l2cr_formats, l2cr);
		return;
	case MPC7448:
		cpu_fmttab_print(cpu_7448_l2cr_formats, l2cr);
		return;
	default:
		cpu_fmttab_print(cpu_7450_l2cr_formats, l2cr);
		break;
	}

	l3cr = mfspr(SPR_L3CR);

	/*
	 * For MP systems, the firmware may only configure the L3 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L3CR.
	 */
	if ((l3cr & L3CR_L3E) != 0 && l3cr_config == 0) {
		l3cr_config = l3cr;
	}

	/*
	 * Configure L3 cache if not enabled.
	 */
	if ((l3cr & L3CR_L3E) == 0 && l3cr_config != 0) {
		cpu_enable_l3cr(l3cr_config);
		l3cr = mfspr(SPR_L3CR);
	}

	if (l3cr & L3CR_L3E) {
		aprint_normal(",");
		cpu_fmttab_print(cpu_7450_l3cr_formats, l3cr);
	}
}
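
/*
 * Measure the core clock with the performance monitor: PMC1 counts CPU
 * cycles over a 100ms delay, which is then scaled to cycles per second
 * and multiplied by the current DFS divisor so that ci_khz reflects the
 * full-speed clock.
 */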
void
cpu_probe_speed(struct cpu_info *ci)
{
	uint64_t cps;

	mtspr(SPR_MMCR0, MMCR0_FC);
	mtspr(SPR_PMC1, 0);
	mtspr(SPR_MMCR0, MMCR0_PMC1SEL(PMCN_CYCLES));
	delay(100000);
	cps = (mfspr(SPR_PMC1) * 10) + 4999;

	mtspr(SPR_MMCR0, MMCR0_FC);

	ci->ci_khz = (cps * cpu_get_dfs()) / 1000;
}

/*
 * Read the Dynamic Frequency Switching state and return a divisor for
 * the maximum frequency.
 */
int
cpu_get_dfs(void)
{
	u_int pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;

	switch (vers) {
	case MPC7448:
		if (mfspr(SPR_HID1) & HID1_DFS4)
			return 4;
		/* FALLTHROUGH */
	case MPC7447A:
		if (mfspr(SPR_HID1) & HID1_DFS2)
			return 2;
	}
	return 1;
}

/*
 * Set the Dynamic Frequency Switching divisor the same for all cpus.
 */
void
cpu_set_dfs(int div)
{
	u_int dfs_mask, pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;
	dfs_mask = 0;

	switch (vers) {
	case MPC7448:
		dfs_mask |= HID1_DFS4;
		/* FALLTHROUGH */
	case MPC7447A:
		dfs_mask |= HID1_DFS2;
		break;
	default:
		printf("cpu_set_dfs: DFS not supported\n");
		return;
	}
#ifdef MULTIPROCESSOR
	uint64_t where;
	where = xc_broadcast(0, (xcfunc_t)cpu_set_dfs_xcall, &div, &dfs_mask);
	xc_wait(where);
#else
	cpu_set_dfs_xcall(&div, &dfs_mask);
#endif
}

static void
cpu_set_dfs_xcall(void *arg1, void *arg2)
{
	u_int dfs_mask, hid1, old_hid1;
	int *divisor, s;

	divisor = arg1;
	dfs_mask = *(u_int *)arg2;

	s = splhigh();
	hid1 = old_hid1 = mfspr(SPR_HID1);

	switch (*divisor) {
	case 1:
		hid1 &= ~dfs_mask;
		break;
	case 2:
		hid1 &= ~(dfs_mask & HID1_DFS4);
		hid1 |= dfs_mask & HID1_DFS2;
		break;
	case 4:
		hid1 &= ~(dfs_mask & HID1_DFS2);
		hid1 |= dfs_mask & HID1_DFS4;
		break;
	}

	if (hid1 != old_hid1) {
		__asm volatile("sync");
		mtspr(SPR_HID1, hid1);
		__asm volatile("sync;isync");
	}

	splx(s);
}

#if NSYSMON_ENVSYS > 0
void
cpu_tau_setup(struct cpu_info *ci)
{
	struct sysmon_envsys *sme;
	int error, therm_delay;

	mtspr(SPR_THRM1, SPR_THRM_VALID);
	mtspr(SPR_THRM2, 0);

	/*
	 * We need to figure out how long 20+ us is in units of CPU clock
	 * cycles.
	 */
	therm_delay = ci->ci_khz / 40;		/* 25us just to be safe */

	mtspr(SPR_THRM3, SPR_THRM_TIMER(therm_delay) | SPR_THRM_ENABLE);

	sme = sysmon_envsys_create();

	sensor.units = ENVSYS_STEMP;
	sensor.state = ENVSYS_SINVALID;
	(void)strlcpy(sensor.desc, "CPU Temp", sizeof(sensor.desc));
	if (sysmon_envsys_sensor_attach(sme, &sensor)) {
		sysmon_envsys_destroy(sme);
		return;
	}

	sme->sme_name = device_xname(ci->ci_dev);
	sme->sme_cookie = ci;
	sme->sme_refresh = cpu_tau_refresh;

	if ((error = sysmon_envsys_register(sme)) != 0) {
		aprint_error_dev(ci->ci_dev,
		    " unable to register with sysmon (%d)\n", error);
		sysmon_envsys_destroy(sme);
	}
}

/* Find the temperature of the CPU. */
void
cpu_tau_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	int i, threshold, count;

	threshold = 64; /* Half of the 7-bit sensor range */

	/*
	 * Successive-approximation code adapted from Motorola
	 * application note AN1800/D, "Programming the Thermal Assist
	 * Unit in the MPC750 Microprocessor".
	 */
	for (i = 5; i >= 0 ; i--) {
		mtspr(SPR_THRM1,
		    SPR_THRM_THRESHOLD(threshold) | SPR_THRM_VALID);
		count = 0;
		while ((count < 100000) &&
		    ((mfspr(SPR_THRM1) & SPR_THRM_TIV) == 0)) {
			count++;
			delay(1);
		}
		if (mfspr(SPR_THRM1) & SPR_THRM_TIN) {
			/*
			 * The interrupt bit was set, meaning the
			 * temperature was above the threshold
			 */
			threshold += 1 << i;
		} else {
			/* Temperature was below the threshold */
			threshold -= 1 << i;
		}
	}
	threshold += 2;

	/* Convert the temperature in degrees C to microkelvin */
	edata->value_cur = (threshold * 1000000) + 273150000;
	edata->state = ENVSYS_SVALID;
}
#endif /* NSYSMON_ENVSYS > 0 */

#ifdef MULTIPROCESSOR
volatile u_int cpu_spinstart_ack, cpu_spinstart_cpunum;

int
cpu_spinup(device_t self, struct cpu_info *ci)
{
	volatile struct cpu_hatch_data hatch_data, *h = &hatch_data;
	struct pglist mlist;
	int i, error;
	char *hp;

	KASSERT(ci != curcpu());

	/* Now allocate a hatch stack */
	error = uvm_pglistalloc(HATCH_STACK_SIZE, 0x10000, 0x10000000, 16, 0,
	    &mlist, 1, 1);
	if (error) {
		aprint_error(": unable to allocate hatch stack\n");
		return -1;
	}

	hp = (void *)VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
	memset(hp, 0, HATCH_STACK_SIZE);

	/* Initialize secondary cpu's initial lwp to its idlelwp. */
	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
	ci->ci_curpcb = lwp_getpcb(ci->ci_curlwp);
	ci->ci_curpm = ci->ci_curpcb->pcb_pm;
	ci->ci_battable = battable;

	cpu_hatch_data = h;
	h->hatch_running = 0;
	h->hatch_self = self;
	h->hatch_ci = ci;
	h->hatch_pir = ci->ci_cpuid;

	cpu_hatch_stack = (uint32_t)hp + HATCH_STACK_SIZE - CALLFRAMELEN;
	ci->ci_lasttb = cpu_info[0].ci_lasttb;

	/* copy special registers */

	h->hatch_hid0 = mfspr(SPR_HID0);
#if defined(PPC_OEA64_BRIDGE) || defined (_ARCH_PPC64)
	h->hatch_hid1 = mfspr(SPR_HID1);
	h->hatch_hid4 = mfspr(SPR_HID4);
	h->hatch_hid5 = mfspr(SPR_HID5);
#endif

	__asm volatile ("mfsdr1 %0" : "=r"(h->hatch_sdr1));
	for (i = 0; i < 16; i++) {
		__asm ("mfsrin %0,%1" : "=r"(h->hatch_sr[i]) :
		    "r"(i << ADDR_SR_SHFT));
	}
	if (oeacpufeat & OEACPU_64)
		h->hatch_asr = mfspr(SPR_ASR);
	else
		h->hatch_asr = 0;

	if ((oeacpufeat & OEACPU_NOBAT) == 0) {
		/* copy the bat regs */
		__asm volatile ("mfibatu %0,0" : "=r"(h->hatch_ibatu[0]));
		__asm volatile ("mfibatl %0,0" : "=r"(h->hatch_ibatl[0]));
		__asm volatile ("mfibatu %0,1" : "=r"(h->hatch_ibatu[1]));
		__asm volatile ("mfibatl %0,1" : "=r"(h->hatch_ibatl[1]));
		__asm volatile ("mfibatu %0,2" : "=r"(h->hatch_ibatu[2]));
		__asm volatile ("mfibatl %0,2" : "=r"(h->hatch_ibatl[2]));
		__asm volatile ("mfibatu %0,3" : "=r"(h->hatch_ibatu[3]));
		__asm volatile ("mfibatl %0,3" : "=r"(h->hatch_ibatl[3]));
		__asm volatile ("mfdbatu %0,0" : "=r"(h->hatch_dbatu[0]));
		__asm volatile ("mfdbatl %0,0" : "=r"(h->hatch_dbatl[0]));
		__asm volatile ("mfdbatu %0,1" : "=r"(h->hatch_dbatu[1]));
		__asm volatile ("mfdbatl %0,1" : "=r"(h->hatch_dbatl[1]));
		__asm volatile ("mfdbatu %0,2" : "=r"(h->hatch_dbatu[2]));
		__asm volatile ("mfdbatl %0,2" : "=r"(h->hatch_dbatl[2]));
		__asm volatile ("mfdbatu %0,3" : "=r"(h->hatch_dbatu[3]));
		__asm volatile ("mfdbatl %0,3" :
		    "=r"(h->hatch_dbatl[3]));
		__asm volatile ("sync; isync");
	}
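
	/*
	 * Hand the hatch data to machine-dependent code: install the
	 * spin-up trampoline and synchronize and start the timebase,
	 * then poll hatch_running until the secondary reports in.
	 */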
"=r"(h->hatch_dbatl[3])); 1430 __asm volatile ("sync; isync"); 1431 } 1432 1433 if (md_setup_trampoline(h, ci) == -1) 1434 return -1; 1435 md_presync_timebase(h); 1436 md_start_timebase(h); 1437 1438 /* wait for secondary printf */ 1439 1440 delay(200000); 1441 1442 #ifdef CACHE_PROTO_MEI 1443 __asm volatile ("dcbi 0,%0"::"r"(&h->hatch_running):"memory"); 1444 __asm volatile ("sync; isync"); 1445 __asm volatile ("dcbst 0,%0"::"r"(&h->hatch_running):"memory"); 1446 __asm volatile ("sync; isync"); 1447 #endif 1448 int hatch_bail = 0; 1449 while ((h->hatch_running < 1) && (hatch_bail < 100000)) { 1450 delay(1); 1451 hatch_bail++; 1452 #ifdef CACHE_PROTO_MEI 1453 __asm volatile ("dcbi 0,%0"::"r"(&h->hatch_running):"memory"); 1454 __asm volatile ("sync; isync"); 1455 __asm volatile ("dcbst 0,%0"::"r"(&h->hatch_running):"memory"); 1456 __asm volatile ("sync; isync"); 1457 #endif 1458 } 1459 if (h->hatch_running < 1) { 1460 #ifdef CACHE_PROTO_MEI 1461 __asm volatile ("dcbi 0,%0"::"r"(&cpu_spinstart_ack):"memory"); 1462 __asm volatile ("sync; isync"); 1463 __asm volatile ("dcbst 0,%0"::"r"(&cpu_spinstart_ack):"memory"); 1464 __asm volatile ("sync; isync"); 1465 #endif 1466 aprint_error("%d:CPU %d didn't start %d\n", cpu_spinstart_ack, 1467 ci->ci_cpuid, cpu_spinstart_ack); 1468 Debugger(); 1469 return -1; 1470 } 1471 1472 return 0; 1473 } 1474 1475 static volatile int start_secondary_cpu; 1476 1477 register_t 1478 cpu_hatch(void) 1479 { 1480 volatile struct cpu_hatch_data *h = cpu_hatch_data; 1481 struct cpu_info * const ci = h->hatch_ci; 1482 struct pcb *pcb; 1483 u_int msr; 1484 int i; 1485 1486 /* Initialize timebase. */ 1487 __asm ("mttbl %0; mttbu %0; mttbl %0" :: "r"(0)); 1488 1489 /* 1490 * Set PIR (Processor Identification Register). i.e. whoami 1491 * Note that PIR is read-only on some CPU versions, so we write to it 1492 * only if it has a different value than we need. 1493 */ 1494 1495 msr = mfspr(SPR_PIR); 1496 if (msr != h->hatch_pir) 1497 mtspr(SPR_PIR, h->hatch_pir); 1498 1499 __asm volatile ("mtsprg0 %0" :: "r"(ci)); 1500 curlwp = ci->ci_curlwp; 1501 cpu_spinstart_ack = 0; 1502 1503 if ((oeacpufeat & OEACPU_NOBAT) == 0) { 1504 /* Initialize MMU. 
	if ((oeacpufeat & OEACPU_NOBAT) == 0) {
		/* Initialize MMU. */
		__asm ("mtibatu 0,%0" :: "r"(h->hatch_ibatu[0]));
		__asm ("mtibatl 0,%0" :: "r"(h->hatch_ibatl[0]));
		__asm ("mtibatu 1,%0" :: "r"(h->hatch_ibatu[1]));
		__asm ("mtibatl 1,%0" :: "r"(h->hatch_ibatl[1]));
		__asm ("mtibatu 2,%0" :: "r"(h->hatch_ibatu[2]));
		__asm ("mtibatl 2,%0" :: "r"(h->hatch_ibatl[2]));
		__asm ("mtibatu 3,%0" :: "r"(h->hatch_ibatu[3]));
		__asm ("mtibatl 3,%0" :: "r"(h->hatch_ibatl[3]));
		__asm ("mtdbatu 0,%0" :: "r"(h->hatch_dbatu[0]));
		__asm ("mtdbatl 0,%0" :: "r"(h->hatch_dbatl[0]));
		__asm ("mtdbatu 1,%0" :: "r"(h->hatch_dbatu[1]));
		__asm ("mtdbatl 1,%0" :: "r"(h->hatch_dbatl[1]));
		__asm ("mtdbatu 2,%0" :: "r"(h->hatch_dbatu[2]));
		__asm ("mtdbatl 2,%0" :: "r"(h->hatch_dbatl[2]));
		__asm ("mtdbatu 3,%0" :: "r"(h->hatch_dbatu[3]));
		__asm ("mtdbatl 3,%0" :: "r"(h->hatch_dbatl[3]));
	}

#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) != 0) {

		mtspr64(SPR_HID0, h->hatch_hid0);
		mtspr64(SPR_HID1, h->hatch_hid1);
		mtspr64(SPR_HID4, h->hatch_hid4);
		mtspr64(SPR_HID5, h->hatch_hid5);
		mtspr64(SPR_HIOR, 0);
	} else
#endif
		mtspr(SPR_HID0, h->hatch_hid0);

	if ((oeacpufeat & OEACPU_NOBAT) == 0) {
		__asm ("mtibatl 0,%0; mtibatu 0,%1; mtdbatl 0,%0; mtdbatu 0,%1;"
		    :: "r"(battable[0].batl), "r"(battable[0].batu));
	}

	__asm volatile ("sync");
	for (i = 0; i < 16; i++)
		__asm ("mtsrin %0,%1" :: "r"(h->hatch_sr[i]), "r"(i << ADDR_SR_SHFT));
	__asm volatile ("sync; isync");

	if (oeacpufeat & OEACPU_64)
		mtspr(SPR_ASR, h->hatch_asr);

	cpu_spinstart_ack = 1;
	__asm ("ptesync");
	__asm ("mtsdr1 %0" :: "r"(h->hatch_sdr1));
	__asm volatile ("sync; isync");

	cpu_spinstart_ack = 5;
	for (i = 0; i < 16; i++)
		__asm ("mfsrin %0,%1" : "=r"(h->hatch_sr[i]) :
		    "r"(i << ADDR_SR_SHFT));

	/* Enable I/D address translations. */
	msr = mfmsr();
	msr |= PSL_IR|PSL_DR|PSL_ME|PSL_RI;
	mtmsr(msr);
	__asm volatile ("sync; isync");
	cpu_spinstart_ack = 2;

	md_sync_timebase(h);

	cpu_setup(h->hatch_self, ci);

	h->hatch_running = 1;
	__asm volatile ("sync; isync");

	while (start_secondary_cpu == 0)
		;

	__asm volatile ("sync; isync");

	aprint_normal("cpu%d started\n", curcpu()->ci_index);
	__asm volatile ("mtdec %0" :: "r"(ticks_per_intr));

	md_setup_interrupts();

	ci->ci_ipending = 0;
	ci->ci_cpl = 0;

	mtmsr(mfmsr() | PSL_EE);
	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	return pcb->pcb_sp;
}

void
cpu_boot_secondary_processors(void)
{
	start_secondary_cpu = 1;
	__asm volatile ("sync");
}

#endif /*MULTIPROCESSOR*/