1 /* $NetBSD: cpu_subr.c,v 1.97 2018/06/15 23:11:39 uwe Exp $ */ 2 3 /*- 4 * Copyright (c) 2001 Matt Thomas. 5 * Copyright (c) 2001 Tsubai Masanari. 6 * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by 20 * Internet Research Institute, Inc. 21 * 4. The name of the author may not be used to endorse or promote products 22 * derived from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 26 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 27 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 29 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 33 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.97 2018/06/15 23:11:39 uwe Exp $");

#include "opt_ppcparam.h"
#include "opt_ppccache.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"
#include "sysmon_envsys.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/types.h>
#include <sys/lwp.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>

#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/oea/hid.h>
#include <powerpc/oea/hid_601.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/cpufeat.h>

#include <dev/sysmon/sysmonvar.h>

static void cpu_enable_l2cr(register_t);
static void cpu_enable_l3cr(register_t);
static void cpu_config_l2cr(int);
static void cpu_config_l3cr(int);
static void cpu_probe_speed(struct cpu_info *);
static void cpu_idlespin(void);
static void cpu_set_dfs_xcall(void *, void *);
#if NSYSMON_ENVSYS > 0
static void cpu_tau_setup(struct cpu_info *);
static void cpu_tau_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif

extern void init_scom_speedctl(void);

/* PVR "version" (upper 16 bits) of the boot CPU; -1 until cpu_identify(). */
int cpu = -1;
int ncpus;

/*
 * One entry of a table-driven formatter for cache-control registers
 * (L2CR/L3CR).  cpu_fmttab_print() walks the entries in order and prints
 * fmt_string whenever (data & fmt_mask) == fmt_value; an entry whose
 * fmt_value has bits outside fmt_mask is printed unconditionally.
 * Tables end with an all-zero sentinel entry.
 */
struct fmttab {
	register_t fmt_mask;
	register_t fmt_value;
	const char *fmt_string;
};

/*
 * This should be one per CPU but since we only support it on 750 variants it
 * doesn't really matter since none of them support SMP
 */
envsys_data_t sensor;

/* L2CR decoding for the MPC7450/7455 (256KB on-chip L2). */
static const struct fmttab cpu_7450_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 256KB L2 cache" },
	{ L2CR_L2PE, 0, " no parity" },
	{ L2CR_L2PE, L2CR_L2PE, " parity enabled" },
	{ 0, 0, NULL }
};

/* L2CR decoding for the MPC7448 (1MB on-chip L2). */
static const struct fmttab cpu_7448_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 1MB L2 cache" },
	{ L2CR_L2PE, 0, " no parity" },
	{ L2CR_L2PE, L2CR_L2PE, " parity enabled" },
	{ 0, 0, NULL }
};

/* L2CR decoding for the MPC7447A/7457 (512KB on-chip L2). */
static const struct fmttab cpu_7457_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 512KB L2 cache" },
	{ L2CR_L2PE, 0, " no parity" },
	{ L2CR_L2PE, L2CR_L2PE, " parity enabled" },
	{ 0, 0, NULL }
};

/* L3CR decoding for the MPC745x external L3 (size, SRAM type, clock ratio). */
static const struct fmttab cpu_7450_l3cr_formats[] = {
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO, " data-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3IO, " instruction-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO|L3CR_L3IO, " locked" },
	{ L3CR_L3SIZ, L3SIZ_2M, " 2MB" },
	{ L3CR_L3SIZ, L3SIZ_1M, " 1MB" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE|L3CR_L3APE, " parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE, " data-parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3APE, " address-parity" },
	{ L3CR_L3PE|L3CR_L3APE, 0, " no-parity" },
	{ L3CR_L3SIZ, ~0, " L3 cache" },
	{ L3CR_L3RT, L3RT_MSUG2_DDR, " (DDR SRAM)" },
	{ L3CR_L3RT, L3RT_PIPELINE_LATE, " (LW SRAM)" },
	{ L3CR_L3RT, L3RT_PB2_SRAM, " (PB2 SRAM)" },
	{ L3CR_L3CLK, ~0, " at" },
	{ L3CR_L3CLK, L3CLK_20, " 2:1" },
	{ L3CR_L3CLK, L3CLK_25, " 2.5:1" },
	{ L3CR_L3CLK, L3CLK_30, " 3:1" },
	{ L3CR_L3CLK, L3CLK_35, " 3.5:1" },
	{ L3CR_L3CLK, L3CLK_40, " 4:1" },
	{ L3CR_L3CLK, L3CLK_50, " 5:1" },
	{ L3CR_L3CLK, L3CLK_60, " 6:1" },
	{ L3CR_L3CLK, ~0, " ratio" },
	{ 0, 0, NULL },
};

/* L2CR decoding for IBM 750FX/750GX (and 750CX/CXe, see cpu_config_l2cr). */
static const struct fmttab cpu_ibm750_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ 0, ~0, " 512KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2PE, L2CR_L2PE, " with ECC" },
	{ 0, ~0, " L2 cache" },
	{ 0, 0, NULL }
};

/* Generic L2CR decoding (750/7400-class external backside L2). */
static const struct fmttab cpu_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2PE, L2CR_L2PE, " parity" },
	{ L2CR_L2PE, 0, " no-parity" },
	{ L2CR_L2SIZ, L2SIZ_2M, " 2MB" },
	{ L2CR_L2SIZ, L2SIZ_1M, " 1MB" },
	{ L2CR_L2SIZ, L2SIZ_512K, " 512KB" },
	{ L2CR_L2SIZ, L2SIZ_256K, " 256KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2E, ~0, " L2 cache" },
	{ L2CR_L2RAM, L2RAM_FLOWTHRU_BURST, " (FB SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_LATE, " (LW SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_BURST, " (PB SRAM)" },
	{ L2CR_L2CLK, ~0, " at" },
	{ L2CR_L2CLK, L2CLK_10, " 1:1" },
	{ L2CR_L2CLK, L2CLK_15, " 1.5:1" },
	{ L2CR_L2CLK, L2CLK_20, " 2:1" },
	{ L2CR_L2CLK, L2CLK_25, " 2.5:1" },
	{ L2CR_L2CLK, L2CLK_30, " 3:1" },
	{ L2CR_L2CLK, L2CLK_35, " 3.5:1" },
	{ L2CR_L2CLK, L2CLK_40, " 4:1" },
	{ L2CR_L2CLK, ~0, " ratio" },
	{ 0, 0, NULL }
};

static void cpu_fmttab_print(const struct fmttab *, register_t);

/* Maps a PVR version field to a model name and revision print format. */
struct cputab {
	const char name[8];
	uint16_t version;
	uint16_t revfmt;
};
#define	REVFMT_MAJMIN	1		/* %u.%u */
#define	REVFMT_HEX	2		/* 0x%04x */
#define	REVFMT_DEC	3		/* %u */
static const struct cputab models[] = {
	{ "601",	MPC601,		REVFMT_DEC },
	{ "602",	MPC602,		REVFMT_DEC },
	{ "603",	MPC603,		REVFMT_MAJMIN },
	{ "603e",	MPC603e,	REVFMT_MAJMIN },
	{ "603ev",	MPC603ev,	REVFMT_MAJMIN },
	{ "G2",		MPCG2,		REVFMT_MAJMIN },
	{ "604",	MPC604,		REVFMT_MAJMIN },
	{ "604e",	MPC604e,	REVFMT_MAJMIN },
	{ "604ev",	MPC604ev,	REVFMT_MAJMIN },
	{ "620",	MPC620,		REVFMT_HEX },
	{ "750",	MPC750,		REVFMT_MAJMIN },
	{ "750FX",	IBM750FX,	REVFMT_MAJMIN },
	{ "750GX",	IBM750GX,	REVFMT_MAJMIN },
	{ "7400",	MPC7400,	REVFMT_MAJMIN },
	{ "7410",	MPC7410,	REVFMT_MAJMIN },
	{ "7450",	MPC7450,	REVFMT_MAJMIN },
	{ "7455",	MPC7455,	REVFMT_MAJMIN },
	{ "7457",	MPC7457,	REVFMT_MAJMIN },
	{ "7447A",	MPC7447A,	REVFMT_MAJMIN },
	{ "7448",	MPC7448,	REVFMT_MAJMIN },
	{ "8240",	MPC8240,	REVFMT_MAJMIN },
	{ "8245",	MPC8245,	REVFMT_MAJMIN },
	{ "970",	IBM970,		REVFMT_MAJMIN },
	{ "970FX",	IBM970FX,	REVFMT_MAJMIN },
	{ "970MP",	IBM970MP,	REVFMT_MAJMIN },
	/*
	 * NOTE(review): "POWER3II" is exactly 8 characters, so it fills
	 * name[8] with no NUL terminator; cpu_identify()'s "%s" of cp->name
	 * then depends on the adjacent struct bytes being zero -- verify.
	 */
	{ "POWER3II",	IBMPOWER3II,	REVFMT_MAJMIN },
	{ "",		0,		REVFMT_HEX }	/* catch-all sentinel */
};

#ifdef MULTIPROCESSOR
struct cpu_info cpu_info[CPU_MAXNUM] = {
    [0] = {
	.ci_curlwp = &lwp0,
    },
};
volatile struct cpu_hatch_data *cpu_hatch_data;
volatile int cpu_hatch_stack;
#define HATCH_STACK_SIZE 0x1000
extern int ticks_per_intr;
#include <powerpc/oea/bat.h>
#include <powerpc/pic/picvar.h>
#include <powerpc/pic/ipivar.h>
extern struct bat battable[];
#else
struct cpu_info cpu_info[1] = {
    [0] = {
	.ci_curlwp = &lwp0,
    },
};
#endif /*MULTIPROCESSOR*/

int cpu_altivec;
register_t cpu_psluserset;
register_t cpu_pslusermod;
register_t cpu_pslusermask = 0xffff;

/* This is to be called from locore.S, and nowhere else.
 */

/*
 * Probe the PVR and set the global oeacpufeat feature bits; for MPC745x
 * and 750FX/GX parts also enable the extra BAT/SPRG facilities in HID0/HID1.
 * Called once from locore.S, before normal kernel services exist.
 */
void
cpu_model_init(void)
{
	u_int pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;

	oeacpufeat = 0;

	if ((vers >= IBMRS64II && vers <= IBM970GX) || vers == MPC620 ||
		vers == IBMCELL || vers == IBMPOWER6P5) {
		oeacpufeat |= OEACPU_64;
		oeacpufeat |= OEACPU_64_BRIDGE;
		oeacpufeat |= OEACPU_NOBAT;

	} else if (vers == MPC601) {
		oeacpufeat |= OEACPU_601;

	} else if (MPC745X_P(vers)) {
		register_t hid1 = mfspr(SPR_HID1);

		/* 7450 itself lacks the extra BAT/SPRG facilities */
		if (vers != MPC7450) {
			register_t hid0 = mfspr(SPR_HID0);

			/* Enable more SPRG registers */
			oeacpufeat |= OEACPU_HIGHSPRG;

			/* Enable more BAT registers */
			oeacpufeat |= OEACPU_HIGHBAT;
			hid0 |= HID0_HIGH_BAT_EN;

			/* Enable larger BAT registers */
			oeacpufeat |= OEACPU_XBSEN;
			hid0 |= HID0_XBSEN;

			mtspr(SPR_HID0, hid0);
			__asm volatile("sync;isync");
		}

		/* Enable address broadcasting for MP systems */
		hid1 |= HID1_SYNCBE | HID1_ABE;

		mtspr(SPR_HID1, hid1);
		__asm volatile("sync;isync");

	} else if (vers == IBM750FX || vers == IBM750GX) {
		oeacpufeat |= OEACPU_HIGHBAT;
	}
}

/*
 * Print the strings of a fmttab table that match `data': an entry fires
 * when (data & fmt_mask) == fmt_value, or unconditionally when fmt_value
 * has bits outside fmt_mask.  The table ends at the all-zero sentinel.
 */
void
cpu_fmttab_print(const struct fmttab *fmt, register_t data)
{
	for (; fmt->fmt_mask != 0 || fmt->fmt_value != 0; fmt++) {
		if ((~fmt->fmt_mask & fmt->fmt_value) != 0 ||
		    (data & fmt->fmt_mask) == fmt->fmt_value)
			aprint_normal("%s", fmt->fmt_string);
	}
}

/*
 * Idle-loop hook: if powersave is enabled, set MSR[POW] to enter the
 * power-saving mode previously selected in HID0 by cpu_setup().
 */
void
cpu_idlespin(void)
{
	register_t msr;

	if (powersave <= 0)
		return;

	__asm volatile(
#if defined(_ARCH_PPC64) || defined (PPC_OEA64_BRIDGE)
		"dssall;"
#endif
		"sync;"
		"mfmsr %0;"
		"oris %0,%0,%1@h;"	/* enter power saving mode */
		"mtmsr %0;"
		"isync;"
		: "=r"(msr)
		: "J"(PSL_POW));
}

/*
 * Fill in curcpu()'s cache geometry (sizes, line sizes, associativity)
 * from the PVR version, and recolor UVM pages to match the per-way size.
 */
void
cpu_probe_cache(void)
{
	u_int assoc, pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;


	/* Presently common across almost all implementations. */
	curcpu()->ci_ci.dcache_line_size = 32;
	curcpu()->ci_ci.icache_line_size = 32;


	switch (vers) {
#define	K	*1024
	case IBM750FX:
	case IBM750GX:
	case MPC601:
	case MPC750:
	case MPC7400:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 32 K;
		assoc = 8;
		break;
	case MPC603:
		curcpu()->ci_ci.dcache_size = 8 K;
		curcpu()->ci_ci.icache_size = 8 K;
		assoc = 2;
		break;
	case MPC603e:
	case MPC603ev:
	case MPC604:
	case MPC8240:
	case MPC8245:
	case MPCG2:
		curcpu()->ci_ci.dcache_size = 16 K;
		curcpu()->ci_ci.icache_size = 16 K;
		assoc = 4;
		break;
	case MPC604e:
	case MPC604ev:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 32 K;
		assoc = 4;
		break;
	case IBMPOWER3II:
		curcpu()->ci_ci.dcache_size = 64 K;
		curcpu()->ci_ci.icache_size = 32 K;
		curcpu()->ci_ci.dcache_line_size = 128;
		curcpu()->ci_ci.icache_line_size = 128;
		assoc = 128; /* not a typo */
		break;
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 64 K;
		curcpu()->ci_ci.dcache_line_size = 128;
		curcpu()->ci_ci.icache_line_size = 128;
		assoc = 2;
		break;

	default:
		/* Unknown model: be conservative. */
		curcpu()->ci_ci.dcache_size = PAGE_SIZE;
		curcpu()->ci_ci.icache_size = PAGE_SIZE;
		assoc = 1;
#undef K
	}

	/*
	 * Possibly recolor.
	 */
	uvm_page_recolor(atop(curcpu()->ci_ci.dcache_size / assoc));
}

/*
 * Common CPU attach: initialize cpu_info[id], write the CPU number to the
 * PIR on models that have one (primary CPU only), and either run the full
 * cpu_setup() (primary) or hand the secondary to mi_cpu_attach().
 * Returns the cpu_info, or NULL for a secondary on a non-MP kernel.
 */
struct cpu_info *
cpu_attach_common(device_t self, int id)
{
	struct cpu_info *ci;
	u_int pvr, vers;

	ci = &cpu_info[id];
#ifndef MULTIPROCESSOR
	/*
	 * If this isn't the primary CPU, print an error message
	 * and just bail out.
	 */
	if (id != 0) {
		aprint_naive("\n");
		aprint_normal(": ID %d\n", id);
		aprint_normal_dev(self,
		    "processor off-line; "
		    "multiprocessor support not present in kernel\n");
		return (NULL);
	}
#endif

	ci->ci_cpuid = id;
	ci->ci_idepth = -1;
	ci->ci_dev = self;
	ci->ci_idlespin = cpu_idlespin;

	pvr = mfpvr();
	vers = (pvr >> 16) & 0xffff;

	switch (id) {
	case 0:
		/* load my cpu_number to PIR */
		switch (vers) {
		case MPC601:
		case MPC604:
		case MPC604e:
		case MPC604ev:
		case MPC7400:
		case MPC7410:
		case MPC7447A:
		case MPC7448:
		case MPC7450:
		case MPC7455:
		case MPC7457:
			mtspr(SPR_PIR, id);
		}
		cpu_setup(self, ci);
		break;
	default:
		aprint_naive("\n");
		if (id >= CPU_MAXNUM) {
			aprint_normal(": more than %d cpus?\n", CPU_MAXNUM);
			panic("cpuattach");
		}
#ifndef MULTIPROCESSOR
		aprint_normal(" not configured\n");
		return NULL;
#else
		mi_cpu_attach(ci);
		break;
#endif
	}
	return (ci);
}

/*
 * Per-CPU setup: identify the model, program HID0 (power-saving mode,
 * branch/cache tweaks), report HID0 and speed/cache configuration on the
 * console, attach the TAU sensor where supported, and register event
 * counters.  Note: the #if'd braces around the HID0 write-back are matched
 * across preprocessor branches -- edit with care.
 */
void
cpu_setup(device_t self, struct cpu_info *ci)
{
	u_int pvr, vers;
	const char * const xname = device_xname(self);
	const char *bitmask;
	char hidbuf[128];
	char model[80];
#if defined(PPC_OEA64_BRIDGE) || defined(_ARCH_PPC64)
	char hidbuf_u[128];
	const char *bitmasku = NULL;
	volatile uint64_t hid64_0, hid64_0_save;
#endif
#if !defined(_ARCH_PPC64)
	register_t hid0 = 0, hid0_save = 0;
#endif

	pvr = mfpvr();
	vers = (pvr >> 16) & 0xffff;

	cpu_identify(model, sizeof(model));
	aprint_naive("\n");
	aprint_normal(": %s, ID %d%s\n", model, cpu_number(),
	    cpu_number() == 0 ? " (primary)" : "");

	/* set the cpu number */
	ci->ci_cpuid = cpu_number();
#if defined(_ARCH_PPC64)
	__asm volatile("mfspr %0,%1" : "=r"(hid64_0) : "K"(SPR_HID0));
	hid64_0_save = hid64_0;
#else
#if defined(PPC_OEA64_BRIDGE)
	if ((oeacpufeat & OEACPU_64_BRIDGE) != 0)
		hid64_0_save = hid64_0 = mfspr(SPR_HID0);
	else
#endif
		hid0_save = hid0 = mfspr(SPR_HID0);
#endif


	cpu_probe_cache();

	/*
	 * Configure power-saving mode.
	 */
	switch (vers) {
#if !defined(_ARCH_PPC64)
	case MPC604:
	case MPC604e:
	case MPC604ev:
		/*
		 * Do not have HID0 support settings, but can support
		 * MSR[POW] off
		 */
		powersave = 1;
		break;

	case MPC603:
	case MPC603e:
	case MPC603ev:
	case MPC7400:
	case MPC7410:
	case MPC8240:
	case MPC8245:
	case MPCG2:
		/* Select DOZE mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_DOZE | HID0_DPM;
		powersave = 1;
		break;

	case MPC750:
	case IBM750FX:
	case IBM750GX:
		/* Select NAP mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_NAP | HID0_DPM;
		powersave = 1;
		break;

	case MPC7447A:
	case MPC7448:
	case MPC7457:
	case MPC7455:
	case MPC7450:
		/* Enable the 7450 branch caches */
		hid0 |= HID0_SGE | HID0_BTIC;
		hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT;
		/* Disable BTIC on 7450 Rev 2.0 or earlier */
		if (vers == MPC7450 && (pvr & 0xFFFF) <= 0x0200)
			hid0 &= ~HID0_BTIC;
		/* Select NAP mode. */
		hid0 &= ~HID0_SLEEP;
		hid0 |= HID0_NAP | HID0_DPM;
		powersave = 1;
		break;
#endif

	case IBM970:
	case IBM970FX:
	case IBM970MP:
#if defined(_ARCH_PPC64) || defined (PPC_OEA64_BRIDGE)
#if !defined(_ARCH_PPC64)
		KASSERT((oeacpufeat & OEACPU_64_BRIDGE) != 0);
#endif
		hid64_0 &= ~(HID0_64_DOZE | HID0_64_NAP | HID0_64_DEEPNAP);
		hid64_0 |= HID0_64_NAP | HID0_64_DPM | HID0_64_EX_TBEN |
			   HID0_64_TB_CTRL | HID0_64_EN_MCHK;
		powersave = 1;
		break;
#endif
	case IBMPOWER3II:
	default:
		/* No power-saving mode is available. */ ;
	}

#ifdef NAPMODE
	switch (vers) {
	case IBM750FX:
	case IBM750GX:
	case MPC750:
	case MPC7400:
		/* Select NAP mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_NAP;
		break;
	}
#endif

	switch (vers) {
	case IBM750FX:
	case IBM750GX:
	case MPC750:
		hid0 &= ~HID0_DBP;		/* XXX correct? */
		hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT;
		break;

	case MPC7400:
	case MPC7410:
		hid0 &= ~HID0_SPD;
		hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT;
		hid0 |= HID0_EIEC;
		break;
	}

	/*
	 * according to the 603e manual this is necessary for an external L2
	 * cache to work properly
	 */
	switch (vers) {
	case MPC603e:
		hid0 |= HID0_ABE;
	}

	/* Write HID0 back only if we actually changed it. */
#if defined(_ARCH_PPC64) || defined(PPC_OEA64_BRIDGE)
#if defined(PPC_OEA64_BRIDGE)
	if ((oeacpufeat & OEACPU_64_BRIDGE) != 0) {
#endif
		if (hid64_0 != hid64_0_save) {
			mtspr64(SPR_HID0, hid64_0);
		}
#if defined(PPC_OEA64_BRIDGE)
	} else {
#endif
#endif

#if !defined(_ARCH_PPC64)
		if (hid0 != hid0_save) {
			mtspr(SPR_HID0, hid0);
			__asm volatile("sync;isync");
		}
#endif
#if defined(PPC_OEA64_BRIDGE)
	}
#endif

	/* Pick the snprintb(3) bitmask string matching this HID0 layout. */
	switch (vers) {
	case MPC601:
		bitmask = HID0_601_BITMASK;
		break;
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		bitmask = HID0_7450_BITMASK;
		break;
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		bitmask = HID0_970_BITMASK;
#if defined(PPC_OEA64_BRIDGE) || defined(_ARCH_PPC64)
		bitmasku = HID0_970_BITMASK_U;
#endif
		break;
	default:
		bitmask = HID0_BITMASK;
		break;
	}

#if defined(PPC_OEA64_BRIDGE) || defined(_ARCH_PPC64)
	if (bitmasku != NULL) {
		/* 64-bit HID0: decode upper and lower halves separately. */
		snprintb(hidbuf, sizeof hidbuf, bitmask, hid64_0 & 0xffffffff);
		snprintb(hidbuf_u, sizeof hidbuf_u, bitmasku, hid64_0 >> 32);
		aprint_normal_dev(self, "HID0 %s %s, powersave: %d\n",
		    hidbuf_u, hidbuf, powersave);
	} else
#endif
	{
		snprintb(hidbuf, sizeof hidbuf, bitmask, hid0);
		aprint_normal_dev(self, "HID0 %s, powersave: %d\n",
		    hidbuf, powersave);
	}

	ci->ci_khz = 0;

	/*
	 * Display speed and cache configuration.
	 */
	switch (vers) {
	case MPC604:
	case MPC604e:
	case MPC604ev:
	case MPC750:
	case IBM750FX:
	case IBM750GX:
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		aprint_normal_dev(self, "");
		cpu_probe_speed(ci);
		aprint_normal("%u.%02u MHz",
		    ci->ci_khz / 1000, (ci->ci_khz / 10) % 100);
		switch (vers) {
		case MPC7450: /* 7441 does not have L3! */
		case MPC7455: /* 7445 does not have L3! */
		case MPC7457: /* 7447 does not have L3! */
			cpu_config_l3cr(vers);
			break;
		case IBM750FX:
		case IBM750GX:
		case MPC750:
		case MPC7400:
		case MPC7410:
		case MPC7447A:
		case MPC7448:
			cpu_config_l2cr(pvr);
			break;
		default:
			break;
		}
		aprint_normal("\n");
		break;
	}

#if NSYSMON_ENVSYS > 0
	/*
	 * Attach MPC750 temperature sensor to the envsys subsystem.
	 * XXX the 74xx series also has this sensor, but it is not
	 * XXX supported by Motorola and may return values that are off by
	 * XXX 35-55 degrees C.
	 */
	if (vers == MPC750 || vers == IBM750FX || vers == IBM750GX)
		cpu_tau_setup(ci);
#endif

#if defined(PPC_OEA64) || defined(PPC_OEA64_BRIDGE)
	if (vers == IBM970MP)
		init_scom_speedctl();
#endif

	evcnt_attach_dynamic(&ci->ci_ev_clock, EVCNT_TYPE_INTR,
	    NULL, xname, "clock");
	evcnt_attach_dynamic(&ci->ci_ev_traps, EVCNT_TYPE_TRAP,
	    NULL, xname, "traps");
	evcnt_attach_dynamic(&ci->ci_ev_kdsi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "kernel DSI traps");
	evcnt_attach_dynamic(&ci->ci_ev_udsi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "user DSI traps");
	evcnt_attach_dynamic(&ci->ci_ev_udsi_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_udsi, xname, "user DSI failures");
	evcnt_attach_dynamic(&ci->ci_ev_kisi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "kernel ISI traps");
	evcnt_attach_dynamic(&ci->ci_ev_isi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "user ISI traps");
	evcnt_attach_dynamic(&ci->ci_ev_isi_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_isi, xname, "user ISI failures");
	evcnt_attach_dynamic(&ci->ci_ev_scalls, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "system call traps");
	evcnt_attach_dynamic(&ci->ci_ev_pgm, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "PGM traps");
	evcnt_attach_dynamic(&ci->ci_ev_fpu, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "FPU unavailable traps");
	evcnt_attach_dynamic(&ci->ci_ev_fpusw, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_fpu, xname, "FPU context switches");
	evcnt_attach_dynamic(&ci->ci_ev_ali, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "user alignment traps");
	/*
	 * NOTE(review): description duplicates ci_ev_ali's above; other
	 * "_fatal" counters say "failures" -- looks like a copy/paste slip.
	 */
	evcnt_attach_dynamic(&ci->ci_ev_ali_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_ali, xname, "user alignment traps");
	/*
	 * NOTE(review): the parent pointer here is the counter itself
	 * (&ci->ci_ev_umchk), unlike every other trap counter which is
	 * parented under &ci->ci_ev_traps -- verify intent.
	 */
	evcnt_attach_dynamic(&ci->ci_ev_umchk, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_umchk, xname, "user MCHK failures");
	evcnt_attach_dynamic(&ci->ci_ev_vec, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, xname, "AltiVec unavailable");
#ifdef ALTIVEC
	if (cpu_altivec) {
		evcnt_attach_dynamic(&ci->ci_ev_vecsw, EVCNT_TYPE_TRAP,
		    &ci->ci_ev_vec, xname, "AltiVec context switches");
	}
#endif
	evcnt_attach_dynamic(&ci->ci_ev_ipi, EVCNT_TYPE_INTR,
	    NULL, xname, "IPIs");
}

/*
 * According to a document labeled "PVR Register Settings":
 ** For integrated microprocessors the PVR register inside the device
 ** will identify the version of the microprocessor core. You must also
 ** read the Device ID, PCI register 02, to identify the part and the
 ** Revision ID, PCI register 08, to identify the revision of the
 ** integrated microprocessor.
 * This apparently applies to 8240/8245/8241, PVR 00810101 and 80811014
 */

/*
 * Format a human-readable "<model> (Revision <rev>)" string for the
 * running CPU into str (at most len bytes, NUL-terminated by snprintf),
 * and latch the PVR version into the global `cpu' on first call.
 */
void
cpu_identify(char *str, size_t len)
{
	u_int pvr, major, minor;
	uint16_t vers, rev, revfmt;
	const struct cputab *cp;
	size_t n;

	pvr = mfpvr();
	vers = pvr >> 16;
	rev = pvr;

	switch (vers) {
	case MPC7410:
		minor = (pvr >> 0) & 0xff;
		major = minor <= 4 ? 1 : 2;
		break;
	case MPCG2: /*XXX see note above */
		major = (pvr >> 4) & 0xf;
		minor = (pvr >> 0) & 0xf;
		break;
	default:
		major = (pvr >> 8) & 0xf;
		minor = (pvr >> 0) & 0xf;
	}

	for (cp = models; cp->name[0] != '\0'; cp++) {
		if (cp->version == vers)
			break;
	}

	if (cpu == -1)
		cpu = vers;

	revfmt = cp->revfmt;
	/*
	 * NOTE(review): this condition can never be true -- `rev' is the
	 * low 16 bits of pvr, so pvr == 15 forces rev == 15, never MPC750.
	 * Presumably `vers == MPC750 && rev == ...' was meant; verify
	 * against the intended 750 derivative before changing.
	 */
	if (rev == MPC750 && pvr == 15) {
		revfmt = REVFMT_HEX;
	}

	if (cp->name[0] != '\0') {
		n = snprintf(str, len, "%s (Revision ", cp->name);
	} else {
		n = snprintf(str, len, "Version %#x (Revision ", vers);
	}
	if (len > n) {
		switch (revfmt) {
		case REVFMT_MAJMIN:
			snprintf(str + n, len - n, "%u.%u)", major, minor);
			break;
		case REVFMT_HEX:
			snprintf(str + n, len - n, "0x%04x)", rev);
			break;
		case REVFMT_DEC:
			snprintf(str + n, len - n, "%u)", rev);
			break;
		}
	}
}

/* Board-supplied L2CR/L3CR values; 0 means "learn from the first CPU". */
#ifdef L2CR_CONFIG
u_int l2cr_config = L2CR_CONFIG;
#else
u_int l2cr_config = 0;
#endif

#ifdef L3CR_CONFIG
u_int l3cr_config = L3CR_CONFIG;
#else
u_int l3cr_config = 0;
#endif

/*
 * Enable the L2 cache with the given L2CR settings: disable the cache,
 * globally invalidate it, then re-enable.  Runs with external interrupts
 * masked; the exact ordering of SPR writes and syncs follows the 750/745x
 * manuals.
 */
void
cpu_enable_l2cr(register_t l2cr)
{
	register_t msr, x;
	uint16_t vers;

	vers = mfpvr() >> 16;

	/* Disable interrupts and set the cache config bits. */
	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
#ifdef ALTIVEC
	if (cpu_altivec)
		__asm volatile("dssall");
#endif
	__asm volatile("sync");
	mtspr(SPR_L2CR, l2cr & ~L2CR_L2E);
	__asm volatile("sync");

	/* Wait for L2 clock to be stable (640 L2 clocks). */
	delay(100);

	/* Invalidate all L2 contents. */
	if (MPC745X_P(vers)) {
		/* 745x: L2I itself indicates invalidate-in-progress */
		mtspr(SPR_L2CR, l2cr | L2CR_L2I);
		do {
			x = mfspr(SPR_L2CR);
		} while (x & L2CR_L2I);
	} else {
		/* 750/7400: poll the separate L2IP progress bit */
		mtspr(SPR_L2CR, l2cr | L2CR_L2I);
		do {
			x = mfspr(SPR_L2CR);
		} while (x & L2CR_L2IP);
	}
	/* Enable L2 cache. */
	l2cr |= L2CR_L2E;
	mtspr(SPR_L2CR, l2cr);
	mtmsr(msr);
}

/*
 * Enable the MPC745x L3 cache with the given L3CR settings, following
 * the init sequence of MPC7450UM section 3.7.1.3 step by step.
 */
void
cpu_enable_l3cr(register_t l3cr)
{
	register_t x;

	/* By The Book (numbered steps from section 3.7.1.3 of MPC7450UM) */

	/*
	 * 1: Set all L3CR bits for final config except L3E, L3I, L3PE, and
	 *    L3CLKEN.  (also mask off reserved bits in case they were included
	 *    in L3CR_CONFIG)
	 */
	l3cr &= ~(L3CR_L3E|L3CR_L3I|L3CR_L3PE|L3CR_L3CLKEN|L3CR_RESERVED);
	mtspr(SPR_L3CR, l3cr);

	/* 2: Set L3CR[5] (otherwise reserved bit) to 1 */
	l3cr |= 0x04000000;
	mtspr(SPR_L3CR, l3cr);

	/* 3: Set L3CLKEN to 1*/
	l3cr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 4/5: Perform a global cache invalidate (ref section 3.7.3.6) */
	__asm volatile("dssall;sync");
	/* L3 cache is already disabled, no need to clear L3E */
	mtspr(SPR_L3CR, l3cr|L3CR_L3I);
	do {
		x = mfspr(SPR_L3CR);
	} while (x & L3CR_L3I);

	/* 6: Clear L3CLKEN to 0 */
	l3cr &= ~L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 7: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);

	/* 8: Set L3E and L3CLKEN */
	l3cr |= (L3CR_L3E|L3CR_L3CLKEN);
	mtspr(SPR_L3CR, l3cr);

	/* 9: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);
}

/*
 * Learn (from the boot CPU) or apply (on secondaries) the L2CR value,
 * enabling the L2 if firmware left it off, then print its configuration.
 * Takes the full PVR so 750CX/CXe can be told apart from plain 750.
 */
void
cpu_config_l2cr(int pvr)
{
	register_t l2cr;
	u_int vers = (pvr >> 16) & 0xffff;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	if ((l2cr & L2CR_L2E) == 0) {
		aprint_normal(" L2 cache present but not enabled ");
		return;
	}
	aprint_normal(",");

	switch (vers) {
	case IBM750FX:
	case IBM750GX:
		cpu_fmttab_print(cpu_ibm750_l2cr_formats, l2cr);
		break;
	case MPC750:
		if ((pvr & 0xffffff00) == 0x00082200 /* IBM750CX */ ||
		    (pvr & 0xffffef00) == 0x00082300 /* IBM750CXe */)
			cpu_fmttab_print(cpu_ibm750_l2cr_formats, l2cr);
		else
			cpu_fmttab_print(cpu_l2cr_formats, l2cr);
		break;
	case MPC7447A:
	case MPC7457:
		cpu_fmttab_print(cpu_7457_l2cr_formats, l2cr);
		return;
	case MPC7448:
		cpu_fmttab_print(cpu_7448_l2cr_formats, l2cr);
		return;
	case MPC7450:
	case MPC7455:
		cpu_fmttab_print(cpu_7450_l2cr_formats, l2cr);
		break;
	default:
		cpu_fmttab_print(cpu_l2cr_formats, l2cr);
		break;
	}
}

/*
 * MPC745x: report the (internal) L2, then learn/apply/enable and report
 * the external L3 the same way cpu_config_l2cr() handles the L2.
 * 7447A/7448/7457 have no L3 interface and return after the L2 report.
 */
void
cpu_config_l3cr(int vers)
{
	register_t l2cr;
	register_t l3cr;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	aprint_normal(",");
	switch (vers) {
	case MPC7447A:
	case MPC7457:
		cpu_fmttab_print(cpu_7457_l2cr_formats, l2cr);
		return;
	case MPC7448:
		cpu_fmttab_print(cpu_7448_l2cr_formats, l2cr);
		return;
	default:
		cpu_fmttab_print(cpu_7450_l2cr_formats, l2cr);
		break;
	}

	l3cr = mfspr(SPR_L3CR);

	/*
	 * For MP systems, the firmware may only configure the L3 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L3CR.
	 */
	if ((l3cr & L3CR_L3E) != 0 && l3cr_config == 0) {
		l3cr_config = l3cr;
	}

	/*
	 * Configure L3 cache if not enabled.
	 */
	if ((l3cr & L3CR_L3E) == 0 && l3cr_config != 0) {
		cpu_enable_l3cr(l3cr_config);
		l3cr = mfspr(SPR_L3CR);
	}

	if (l3cr & L3CR_L3E) {
		aprint_normal(",");
		cpu_fmttab_print(cpu_7450_l3cr_formats, l3cr);
	}
}

/*
 * Measure the core clock with performance counter 1: count cycles for
 * 100000us, scale to cycles/second (rounded up), correct for any DFS
 * divisor currently in effect, and store the result in ci->ci_khz.
 */
void
cpu_probe_speed(struct cpu_info *ci)
{
	uint64_t cps;

	mtspr(SPR_MMCR0, MMCR0_FC);		/* freeze counters */
	mtspr(SPR_PMC1, 0);
	mtspr(SPR_MMCR0, MMCR0_PMC1SEL(PMCN_CYCLES));
	delay(100000);
	cps = (mfspr(SPR_PMC1) * 10) + 4999;

	mtspr(SPR_MMCR0, MMCR0_FC);		/* freeze counters again */

	ci->ci_khz = (cps * cpu_get_dfs()) / 1000;
}

/*
 * Read the Dynamic Frequency Switching state and return a divisor for
 * the maximum frequency.
 */
int
cpu_get_dfs(void)
{
	u_int pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;

	switch (vers) {
	case MPC7448:
		if (mfspr(SPR_HID1) & HID1_DFS4)
			return 4;
		/* FALLTHROUGH */	/* 7448 also supports divide-by-2 */
	case MPC7447A:
		if (mfspr(SPR_HID1) & HID1_DFS2)
			return 2;
	}
	return 1;
}

/*
 * Set the Dynamic Frequency Switching divisor the same for all cpus.
 */
void
cpu_set_dfs(int div)
{
	u_int dfs_mask, pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;
	dfs_mask = 0;

	switch (vers) {
	case MPC7448:
		dfs_mask |= HID1_DFS4;
		/* FALLTHROUGH */	/* 7448 has both DFS2 and DFS4 */
	case MPC7447A:
		dfs_mask |= HID1_DFS2;
		break;
	default:
		printf("cpu_set_dfs: DFS not supported\n");
		return;

	}
#ifdef MULTIPROCESSOR
	/* Stack addresses are safe here: xc_wait() blocks until all done. */
	uint64_t where;
	where = xc_broadcast(0, (xcfunc_t)cpu_set_dfs_xcall, &div, &dfs_mask);
	xc_wait(where);
#else
	cpu_set_dfs_xcall(&div, &dfs_mask);
#endif
}

/*
 * Cross-call handler for cpu_set_dfs(): apply the requested divisor
 * (arg1, int *) using the supported-DFS-bit mask (arg2, u_int *) to the
 * local CPU's HID1, at splhigh, writing HID1 only if it actually changed.
 */
static void
cpu_set_dfs_xcall(void *arg1, void *arg2)
{
	u_int dfs_mask, hid1, old_hid1;
	int *divisor, s;

	divisor = arg1;
	dfs_mask = *(u_int *)arg2;

	s = splhigh();
	hid1 = old_hid1 = mfspr(SPR_HID1);

	switch (*divisor) {
	case 1:
		hid1 &= ~dfs_mask;
		break;
	case 2:
		hid1 &= ~(dfs_mask & HID1_DFS4);
		hid1 |= dfs_mask & HID1_DFS2;
		break;
	case 4:
		hid1 &= ~(dfs_mask & HID1_DFS2);
		hid1 |= dfs_mask & HID1_DFS4;
		break;
	}

	if (hid1 != old_hid1) {
		__asm volatile("sync");
		mtspr(SPR_HID1, hid1);
		__asm volatile("sync;isync");
	}

	splx(s);
}

#if NSYSMON_ENVSYS > 0
/*
 * Initialize the 750-family Thermal Assist Unit and register the global
 * `sensor' (one per system; see comment at its definition) with sysmon.
 */
void
cpu_tau_setup(struct cpu_info *ci)
{
	struct sysmon_envsys *sme;
	int error, therm_delay;

	mtspr(SPR_THRM1, SPR_THRM_VALID);
	mtspr(SPR_THRM2, 0);

	/*
	 * we need to figure out how much 20+us in units of CPU clock cycles
	 * are
	 */

	therm_delay = ci->ci_khz / 40;		/* 25us just to be safe */

	mtspr(SPR_THRM3, SPR_THRM_TIMER(therm_delay) | SPR_THRM_ENABLE);

	sme = sysmon_envsys_create();

	sensor.units = ENVSYS_STEMP;
	sensor.state = ENVSYS_SINVALID;
	(void)strlcpy(sensor.desc, "CPU Temp", sizeof(sensor.desc));
	if (sysmon_envsys_sensor_attach(sme, &sensor)) {
		sysmon_envsys_destroy(sme);
		return;
	}

	sme->sme_name = device_xname(ci->ci_dev);
	sme->sme_cookie = ci;
	sme->sme_refresh = cpu_tau_refresh;

	if ((error = sysmon_envsys_register(sme)) != 0) {
		aprint_error_dev(ci->ci_dev,
		    " unable to register with sysmon (%d)\n", error);
		sysmon_envsys_destroy(sme);
	}
}

/* Find the temperature of the CPU. */
void
cpu_tau_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	int i, threshold, count;

	threshold = 64; /* Half of the 7-bit sensor range */

	/*
	 * Successive-approximation code adapted from Motorola
	 * application note AN1800/D, "Programming the Thermal Assist
	 * Unit in the MPC750 Microprocessor".
	 */
	for (i = 5; i >= 0; i--) {
		mtspr(SPR_THRM1,
		    SPR_THRM_THRESHOLD(threshold) | SPR_THRM_VALID);
		count = 0;
		/* Wait (bounded) for the comparison to become valid. */
		while ((count < 100000) &&
		    ((mfspr(SPR_THRM1) & SPR_THRM_TIV) == 0)) {
			count++;
			delay(1);
		}
		if (mfspr(SPR_THRM1) & SPR_THRM_TIN) {
			/*
			 * The interrupt bit was set, meaning the
			 * temperature was above the threshold
			 */
			threshold += 1 << i;
		} else {
			/* Temperature was below the threshold */
			threshold -= 1 << i;
		}
	}
	threshold += 2;

	/* Convert the temperature in degrees C to microkelvin */
	edata->value_cur = (threshold * 1000000) + 273150000;
	edata->state = ENVSYS_SVALID;
}
#endif /* NSYSMON_ENVSYS > 0 */

#ifdef MULTIPROCESSOR
volatile u_int cpu_spinstart_ack, cpu_spinstart_cpunum;

/*
 * Spin up a secondary CPU: allocate a hatch stack, seed the hatch data
 * with this CPU's special registers, and kick the target processor.
 * (Continues below.)
 */
int
cpu_spinup(device_t self, struct cpu_info *ci)
{
	volatile struct cpu_hatch_data hatch_data, *h = &hatch_data;
	struct pglist mlist;
	int i, error;
	char *hp;

	KASSERT(ci != curcpu());

	/* Now allocate a hatch stack */
	error = uvm_pglistalloc(HATCH_STACK_SIZE, 0x10000, 0x10000000, 16, 0,
	    &mlist, 1, 1);
	if (error) {
1304 aprint_error(": unable to allocate hatch stack\n"); 1305 return -1; 1306 } 1307 1308 hp = (void *)VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist)); 1309 memset(hp, 0, HATCH_STACK_SIZE); 1310 1311 /* Initialize secondary cpu's initial lwp to its idlelwp. */ 1312 ci->ci_curlwp = ci->ci_data.cpu_idlelwp; 1313 ci->ci_curpcb = lwp_getpcb(ci->ci_curlwp); 1314 ci->ci_curpm = ci->ci_curpcb->pcb_pm; 1315 1316 cpu_hatch_data = h; 1317 h->hatch_running = 0; 1318 h->hatch_self = self; 1319 h->hatch_ci = ci; 1320 h->hatch_pir = ci->ci_cpuid; 1321 1322 cpu_hatch_stack = (uint32_t)hp + HATCH_STACK_SIZE - CALLFRAMELEN; 1323 ci->ci_lasttb = cpu_info[0].ci_lasttb; 1324 1325 /* copy special registers */ 1326 1327 h->hatch_hid0 = mfspr(SPR_HID0); 1328 #if defined(PPC_OEA64_BRIDGE) || defined (_ARCH_PPC64) 1329 h->hatch_hid1 = mfspr(SPR_HID1); 1330 h->hatch_hid4 = mfspr(SPR_HID4); 1331 h->hatch_hid5 = mfspr(SPR_HID5); 1332 #endif 1333 1334 __asm volatile ("mfsdr1 %0" : "=r"(h->hatch_sdr1)); 1335 for (i = 0; i < 16; i++) { 1336 __asm ("mfsrin %0,%1" : "=r"(h->hatch_sr[i]) : 1337 "r"(i << ADDR_SR_SHFT)); 1338 } 1339 if (oeacpufeat & OEACPU_64) 1340 h->hatch_asr = mfspr(SPR_ASR); 1341 else 1342 h->hatch_asr = 0; 1343 1344 if ((oeacpufeat & OEACPU_NOBAT) == 0) { 1345 /* copy the bat regs */ 1346 __asm volatile ("mfibatu %0,0" : "=r"(h->hatch_ibatu[0])); 1347 __asm volatile ("mfibatl %0,0" : "=r"(h->hatch_ibatl[0])); 1348 __asm volatile ("mfibatu %0,1" : "=r"(h->hatch_ibatu[1])); 1349 __asm volatile ("mfibatl %0,1" : "=r"(h->hatch_ibatl[1])); 1350 __asm volatile ("mfibatu %0,2" : "=r"(h->hatch_ibatu[2])); 1351 __asm volatile ("mfibatl %0,2" : "=r"(h->hatch_ibatl[2])); 1352 __asm volatile ("mfibatu %0,3" : "=r"(h->hatch_ibatu[3])); 1353 __asm volatile ("mfibatl %0,3" : "=r"(h->hatch_ibatl[3])); 1354 __asm volatile ("mfdbatu %0,0" : "=r"(h->hatch_dbatu[0])); 1355 __asm volatile ("mfdbatl %0,0" : "=r"(h->hatch_dbatl[0])); 1356 __asm volatile ("mfdbatu %0,1" : "=r"(h->hatch_dbatu[1])); 1357 __asm 
volatile ("mfdbatl %0,1" : "=r"(h->hatch_dbatl[1])); 1358 __asm volatile ("mfdbatu %0,2" : "=r"(h->hatch_dbatu[2])); 1359 __asm volatile ("mfdbatl %0,2" : "=r"(h->hatch_dbatl[2])); 1360 __asm volatile ("mfdbatu %0,3" : "=r"(h->hatch_dbatu[3])); 1361 __asm volatile ("mfdbatl %0,3" : "=r"(h->hatch_dbatl[3])); 1362 __asm volatile ("sync; isync"); 1363 } 1364 1365 if (md_setup_trampoline(h, ci) == -1) 1366 return -1; 1367 md_presync_timebase(h); 1368 md_start_timebase(h); 1369 1370 /* wait for secondary printf */ 1371 1372 delay(200000); 1373 1374 #ifdef CACHE_PROTO_MEI 1375 __asm volatile ("dcbi 0,%0"::"r"(&h->hatch_running):"memory"); 1376 __asm volatile ("sync; isync"); 1377 __asm volatile ("dcbst 0,%0"::"r"(&h->hatch_running):"memory"); 1378 __asm volatile ("sync; isync"); 1379 #endif 1380 if (h->hatch_running < 1) { 1381 #ifdef CACHE_PROTO_MEI 1382 __asm volatile ("dcbi 0,%0"::"r"(&cpu_spinstart_ack):"memory"); 1383 __asm volatile ("sync; isync"); 1384 __asm volatile ("dcbst 0,%0"::"r"(&cpu_spinstart_ack):"memory"); 1385 __asm volatile ("sync; isync"); 1386 #endif 1387 aprint_error("%d:CPU %d didn't start %d\n", cpu_spinstart_ack, 1388 ci->ci_cpuid, cpu_spinstart_ack); 1389 Debugger(); 1390 return -1; 1391 } 1392 1393 /* Register IPI Interrupt */ 1394 if (ipiops.ppc_establish_ipi) 1395 ipiops.ppc_establish_ipi(IST_LEVEL, IPL_HIGH, NULL); 1396 1397 return 0; 1398 } 1399 1400 static volatile int start_secondary_cpu; 1401 1402 register_t 1403 cpu_hatch(void) 1404 { 1405 volatile struct cpu_hatch_data *h = cpu_hatch_data; 1406 struct cpu_info * const ci = h->hatch_ci; 1407 struct pcb *pcb; 1408 u_int msr; 1409 int i; 1410 1411 /* Initialize timebase. */ 1412 __asm ("mttbl %0; mttbu %0; mttbl %0" :: "r"(0)); 1413 1414 /* 1415 * Set PIR (Processor Identification Register). i.e. whoami 1416 * Note that PIR is read-only on some CPU versions, so we write to it 1417 * only if it has a different value than we need. 
1418 */ 1419 1420 msr = mfspr(SPR_PIR); 1421 if (msr != h->hatch_pir) 1422 mtspr(SPR_PIR, h->hatch_pir); 1423 1424 __asm volatile ("mtsprg0 %0" :: "r"(ci)); 1425 curlwp = ci->ci_curlwp; 1426 cpu_spinstart_ack = 0; 1427 1428 if ((oeacpufeat & OEACPU_NOBAT) == 0) { 1429 /* Initialize MMU. */ 1430 __asm ("mtibatu 0,%0" :: "r"(h->hatch_ibatu[0])); 1431 __asm ("mtibatl 0,%0" :: "r"(h->hatch_ibatl[0])); 1432 __asm ("mtibatu 1,%0" :: "r"(h->hatch_ibatu[1])); 1433 __asm ("mtibatl 1,%0" :: "r"(h->hatch_ibatl[1])); 1434 __asm ("mtibatu 2,%0" :: "r"(h->hatch_ibatu[2])); 1435 __asm ("mtibatl 2,%0" :: "r"(h->hatch_ibatl[2])); 1436 __asm ("mtibatu 3,%0" :: "r"(h->hatch_ibatu[3])); 1437 __asm ("mtibatl 3,%0" :: "r"(h->hatch_ibatl[3])); 1438 __asm ("mtdbatu 0,%0" :: "r"(h->hatch_dbatu[0])); 1439 __asm ("mtdbatl 0,%0" :: "r"(h->hatch_dbatl[0])); 1440 __asm ("mtdbatu 1,%0" :: "r"(h->hatch_dbatu[1])); 1441 __asm ("mtdbatl 1,%0" :: "r"(h->hatch_dbatl[1])); 1442 __asm ("mtdbatu 2,%0" :: "r"(h->hatch_dbatu[2])); 1443 __asm ("mtdbatl 2,%0" :: "r"(h->hatch_dbatl[2])); 1444 __asm ("mtdbatu 3,%0" :: "r"(h->hatch_dbatu[3])); 1445 __asm ("mtdbatl 3,%0" :: "r"(h->hatch_dbatl[3])); 1446 } 1447 1448 #ifdef PPC_OEA64_BRIDGE 1449 if ((oeacpufeat & OEACPU_64_BRIDGE) != 0) { 1450 1451 mtspr64(SPR_HID0, h->hatch_hid0); 1452 mtspr64(SPR_HID1, h->hatch_hid1); 1453 mtspr64(SPR_HID4, h->hatch_hid4); 1454 mtspr64(SPR_HID5, h->hatch_hid5); 1455 mtspr64(SPR_HIOR, 0); 1456 } else 1457 #endif 1458 mtspr(SPR_HID0, h->hatch_hid0); 1459 1460 if ((oeacpufeat & OEACPU_NOBAT) == 0) { 1461 __asm ("mtibatl 0,%0; mtibatu 0,%1; mtdbatl 0,%0; mtdbatu 0,%1;" 1462 :: "r"(battable[0].batl), "r"(battable[0].batu)); 1463 } 1464 1465 __asm volatile ("sync"); 1466 for (i = 0; i < 16; i++) 1467 __asm ("mtsrin %0,%1" :: "r"(h->hatch_sr[i]), "r"(i << ADDR_SR_SHFT)); 1468 __asm volatile ("sync; isync"); 1469 1470 if (oeacpufeat & OEACPU_64) 1471 mtspr(SPR_ASR, h->hatch_asr); 1472 1473 cpu_spinstart_ack = 1; 1474 __asm ("ptesync"); 
1475 __asm ("mtsdr1 %0" :: "r"(h->hatch_sdr1)); 1476 __asm volatile ("sync; isync"); 1477 1478 cpu_spinstart_ack = 5; 1479 for (i = 0; i < 16; i++) 1480 __asm ("mfsrin %0,%1" : "=r"(h->hatch_sr[i]) : 1481 "r"(i << ADDR_SR_SHFT)); 1482 1483 /* Enable I/D address translations. */ 1484 msr = mfmsr(); 1485 msr |= PSL_IR|PSL_DR|PSL_ME|PSL_RI; 1486 mtmsr(msr); 1487 __asm volatile ("sync; isync"); 1488 cpu_spinstart_ack = 2; 1489 1490 md_sync_timebase(h); 1491 1492 cpu_setup(h->hatch_self, ci); 1493 1494 h->hatch_running = 1; 1495 __asm volatile ("sync; isync"); 1496 1497 while (start_secondary_cpu == 0) 1498 ; 1499 1500 __asm volatile ("sync; isync"); 1501 1502 aprint_normal("cpu%d started\n", curcpu()->ci_index); 1503 __asm volatile ("mtdec %0" :: "r"(ticks_per_intr)); 1504 1505 md_setup_interrupts(); 1506 1507 ci->ci_ipending = 0; 1508 ci->ci_cpl = 0; 1509 1510 mtmsr(mfmsr() | PSL_EE); 1511 pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp); 1512 return pcb->pcb_sp; 1513 } 1514 1515 void 1516 cpu_boot_secondary_processors(void) 1517 { 1518 start_secondary_cpu = 1; 1519 __asm volatile ("sync"); 1520 } 1521 1522 #endif /*MULTIPROCESSOR*/ 1523