/*	$NetBSD: cpu_subr.c,v 1.35 2007/11/17 08:30:35 kefren Exp $	*/

/*-
 * Copyright (c) 2001 Matt Thomas.
 * Copyright (c) 2001 Tsubai Masanari.
 * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by
 *	Internet Research Institute, Inc.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.35 2007/11/17 08:30:35 kefren Exp $");

#include "opt_ppcparam.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"
#include "sysmon_envsys.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/types.h>
#include <sys/lwp.h>
#include <sys/user.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <powerpc/oea/hid.h>
#include <powerpc/oea/hid_601.h>
#include <powerpc/spr.h>

#include <dev/sysmon/sysmonvar.h>

static void cpu_enable_l2cr(register_t);
static void cpu_enable_l3cr(register_t);
static void cpu_config_l2cr(int);
static void cpu_config_l3cr(int);
static void cpu_probe_speed(struct cpu_info *);
static void cpu_idlespin(void);
#if NSYSMON_ENVSYS > 0
static void cpu_tau_setup(struct cpu_info *);
static void cpu_tau_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif

int cpu;
int ncpus;

struct fmttab {
	register_t fmt_mask;
	register_t fmt_value;
	const char *fmt_string;
};

static const struct fmttab cpu_7450_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 256KB L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7448_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 1MB L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7457_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 512KB L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7450_l3cr_formats[] = {
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO, " data-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3IO, " instruction-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO|L3CR_L3IO, " locked" },
	{ L3CR_L3SIZ, L3SIZ_2M, " 2MB" },
	{ L3CR_L3SIZ, L3SIZ_1M, " 1MB" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE|L3CR_L3APE, " parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE, " data-parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3APE, " address-parity" },
	{ L3CR_L3PE|L3CR_L3APE, 0, " no-parity" },
	{ L3CR_L3SIZ, ~0, " L3 cache" },
	{ L3CR_L3RT, L3RT_MSUG2_DDR, " (DDR SRAM)" },
	{ L3CR_L3RT, L3RT_PIPELINE_LATE, " (LW SRAM)" },
	{ L3CR_L3RT, L3RT_PB2_SRAM, " (PB2 SRAM)" },
	{ L3CR_L3CLK, ~0, " at" },
	{ L3CR_L3CLK, L3CLK_20, " 2:1" },
	{ L3CR_L3CLK, L3CLK_25, " 2.5:1" },
	{ L3CR_L3CLK, L3CLK_30, " 3:1" },
	{ L3CR_L3CLK, L3CLK_35, " 3.5:1" },
	{ L3CR_L3CLK, L3CLK_40, " 4:1" },
	{ L3CR_L3CLK, L3CLK_50, " 5:1" },
	{ L3CR_L3CLK, L3CLK_60, " 6:1" },
	{ L3CR_L3CLK, ~0, " ratio" },
	{ 0, 0, NULL },
};

static const struct fmttab cpu_ibm750_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ 0, ~0, " 512KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2PE, L2CR_L2PE, " with ECC" },
	{ 0, ~0, " L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2PE, L2CR_L2PE, " parity" },
	{ L2CR_L2PE, 0, " no-parity" },
	{ L2CR_L2SIZ, L2SIZ_2M, " 2MB" },
	{ L2CR_L2SIZ, L2SIZ_1M, " 1MB" },
	{ L2CR_L2SIZ, L2SIZ_512K, " 512KB" },
	{ L2CR_L2SIZ, L2SIZ_256K, " 256KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2E, ~0, " L2 cache" },
	{ L2CR_L2RAM, L2RAM_FLOWTHRU_BURST, " (FB SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_LATE, " (LW SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_BURST, " (PB SRAM)" },
	{ L2CR_L2CLK, ~0, " at" },
	{ L2CR_L2CLK, L2CLK_10, " 1:1" },
	{ L2CR_L2CLK, L2CLK_15, " 1.5:1" },
	{ L2CR_L2CLK, L2CLK_20, " 2:1" },
	{ L2CR_L2CLK, L2CLK_25, " 2.5:1" },
	{ L2CR_L2CLK, L2CLK_30, " 3:1" },
	{ L2CR_L2CLK, L2CLK_35, " 3.5:1" },
	{ L2CR_L2CLK, L2CLK_40, " 4:1" },
	{ L2CR_L2CLK, ~0, " ratio" },
	{ 0, 0, NULL }
};

static void cpu_fmttab_print(const struct fmttab *, register_t);

struct cputab {
	const char name[8];
	uint16_t version;
	uint16_t revfmt;
};
#define	REVFMT_MAJMIN	1		/* %u.%u */
#define	REVFMT_HEX	2		/* 0x%04x */
#define	REVFMT_DEC	3		/* %u */
static const struct cputab models[] = {
	{ "601",	MPC601,		REVFMT_DEC },
	{ "602",	MPC602,		REVFMT_DEC },
	{ "603",	MPC603,		REVFMT_MAJMIN },
	{ "603e",	MPC603e,	REVFMT_MAJMIN },
	{ "603ev",	MPC603ev,	REVFMT_MAJMIN },
	{ "G2",		MPCG2,		REVFMT_MAJMIN },
	{ "604",	MPC604,		REVFMT_MAJMIN },
	{ "604e",	MPC604e,	REVFMT_MAJMIN },
	{ "604ev",	MPC604ev,	REVFMT_MAJMIN },
	{ "620",	MPC620,		REVFMT_HEX },
	{ "750",	MPC750,		REVFMT_MAJMIN },
	{ "750FX",	IBM750FX,	REVFMT_MAJMIN },
	{ "7400",	MPC7400,	REVFMT_MAJMIN },
	{ "7410",	MPC7410,	REVFMT_MAJMIN },
	{ "7450",	MPC7450,	REVFMT_MAJMIN },
	{ "7455",	MPC7455,	REVFMT_MAJMIN },
	{ "7457",	MPC7457,	REVFMT_MAJMIN },
	{ "7447A",	MPC7447A,	REVFMT_MAJMIN },
	{ "7448",	MPC7448,	REVFMT_MAJMIN },
	{ "8240",	MPC8240,	REVFMT_MAJMIN },
	{ "8245",	MPC8245,	REVFMT_MAJMIN },
	{ "970",	IBM970,		REVFMT_MAJMIN },
	{ "970FX",	IBM970FX,	REVFMT_MAJMIN },
	{ "",		0,		REVFMT_HEX }
};

#ifdef MULTIPROCESSOR
struct cpu_info cpu_info[CPU_MAXNUM] = { { .ci_curlwp = &lwp0, }, };
volatile struct cpu_hatch_data *cpu_hatch_data;
volatile int cpu_hatch_stack;
extern int ticks_per_intr;
#include <powerpc/oea/bat.h>
#include <arch/powerpc/pic/picvar.h>
#include <arch/powerpc/pic/ipivar.h>
extern struct bat battable[];
#else
struct cpu_info cpu_info[1] = { { .ci_curlwp = &lwp0, }, };
#endif /*MULTIPROCESSOR*/

int cpu_altivec;
int cpu_psluserset, cpu_pslusermod;
char cpu_model[80];

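/*
 * A note on the format tables above: cpu_fmttab_print() walks a table
 * until it hits the all-zero terminator entry, printing fmt_string for
 * every entry whose fmt_value matches the register bits selected by
 * fmt_mask.  As a special case, an entry whose fmt_value has bits set
 * outside its fmt_mask (for instance a mask of 0 with a value of ~0)
 * always matches; the tables use that for unconditional strings such
 * as " L2 cache".
 */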
%0;" 250 "oris %0,%0,%1@h;" /* enter power saving mode */ 251 "mtmsr %0;" 252 "isync;" 253 : "=r"(msr) 254 : "J"(PSL_POW)); 255 } 256 257 void 258 cpu_probe_cache(void) 259 { 260 u_int assoc, pvr, vers; 261 262 pvr = mfpvr(); 263 vers = pvr >> 16; 264 265 266 /* Presently common across almost all implementations. */ 267 curcpu()->ci_ci.dcache_line_size = CACHELINESIZE; 268 curcpu()->ci_ci.icache_line_size = CACHELINESIZE; 269 270 271 switch (vers) { 272 #define K *1024 273 case IBM750FX: 274 case MPC601: 275 case MPC750: 276 case MPC7447A: 277 case MPC7448: 278 case MPC7450: 279 case MPC7455: 280 case MPC7457: 281 curcpu()->ci_ci.dcache_size = 32 K; 282 curcpu()->ci_ci.icache_size = 32 K; 283 assoc = 8; 284 break; 285 case MPC603: 286 curcpu()->ci_ci.dcache_size = 8 K; 287 curcpu()->ci_ci.icache_size = 8 K; 288 assoc = 2; 289 break; 290 case MPC603e: 291 case MPC603ev: 292 case MPC604: 293 case MPC8240: 294 case MPC8245: 295 case MPCG2: 296 curcpu()->ci_ci.dcache_size = 16 K; 297 curcpu()->ci_ci.icache_size = 16 K; 298 assoc = 4; 299 break; 300 case MPC604e: 301 case MPC604ev: 302 curcpu()->ci_ci.dcache_size = 32 K; 303 curcpu()->ci_ci.icache_size = 32 K; 304 assoc = 4; 305 break; 306 case IBM970: 307 case IBM970FX: 308 curcpu()->ci_ci.dcache_size = 32 K; 309 curcpu()->ci_ci.icache_size = 64 K; 310 curcpu()->ci_ci.dcache_line_size = 128; 311 curcpu()->ci_ci.icache_line_size = 128; 312 assoc = 2; 313 break; 314 315 default: 316 curcpu()->ci_ci.dcache_size = PAGE_SIZE; 317 curcpu()->ci_ci.icache_size = PAGE_SIZE; 318 assoc = 1; 319 #undef K 320 } 321 322 /* 323 * Possibly recolor. 324 */ 325 uvm_page_recolor(atop(curcpu()->ci_ci.dcache_size / assoc)); 326 } 327 328 struct cpu_info * 329 cpu_attach_common(struct device *self, int id) 330 { 331 struct cpu_info *ci; 332 u_int pvr, vers; 333 334 ci = &cpu_info[id]; 335 #ifndef MULTIPROCESSOR 336 /* 337 * If this isn't the primary CPU, print an error message 338 * and just bail out. 339 */ 340 if (id != 0) { 341 aprint_normal(": ID %d\n", id); 342 aprint_normal("%s: processor off-line; multiprocessor support " 343 "not present in kernel\n", self->dv_xname); 344 return (NULL); 345 } 346 #endif 347 348 ci->ci_cpuid = id; 349 ci->ci_intrdepth = -1; 350 ci->ci_dev = self; 351 ci->ci_idlespin = cpu_idlespin; 352 353 pvr = mfpvr(); 354 vers = (pvr >> 16) & 0xffff; 355 356 switch (id) { 357 case 0: 358 /* load my cpu_number to PIR */ 359 switch (vers) { 360 case MPC601: 361 case MPC604: 362 case MPC604e: 363 case MPC604ev: 364 case MPC7400: 365 case MPC7410: 366 case MPC7447A: 367 case MPC7448: 368 case MPC7450: 369 case MPC7455: 370 case MPC7457: 371 mtspr(SPR_PIR, id); 372 } 373 cpu_setup(self, ci); 374 break; 375 default: 376 if (id >= CPU_MAXNUM) { 377 aprint_normal(": more than %d cpus?\n", CPU_MAXNUM); 378 panic("cpuattach"); 379 } 380 #ifndef MULTIPROCESSOR 381 aprint_normal(" not configured\n"); 382 return NULL; 383 #else 384 mi_cpu_attach(ci); 385 break; 386 #endif 387 } 388 return (ci); 389 } 390 391 void 392 cpu_setup(self, ci) 393 struct device *self; 394 struct cpu_info *ci; 395 { 396 u_int hid0, pvr, vers; 397 const char *bitmask; 398 char hidbuf[128]; 399 char model[80]; 400 401 pvr = mfpvr(); 402 vers = (pvr >> 16) & 0xffff; 403 404 cpu_identify(model, sizeof(model)); 405 aprint_normal(": %s, ID %d%s\n", model, cpu_number(), 406 cpu_number() == 0 ? 
" (primary)" : ""); 407 408 #if defined (PPC_OEA) || defined (PPC_OEA64) 409 hid0 = mfspr(SPR_HID0); 410 #elif defined (PPC_OEA64_BRIDGE) 411 hid0 = mfspr(SPR_HID0); 412 #endif 413 414 cpu_probe_cache(); 415 416 /* 417 * Configure power-saving mode. 418 */ 419 switch (vers) { 420 case MPC604: 421 case MPC604e: 422 case MPC604ev: 423 /* 424 * Do not have HID0 support settings, but can support 425 * MSR[POW] off 426 */ 427 powersave = 1; 428 break; 429 430 case MPC603: 431 case MPC603e: 432 case MPC603ev: 433 case MPC750: 434 case IBM750FX: 435 case MPC7400: 436 case MPC7410: 437 case MPC8240: 438 case MPC8245: 439 case MPCG2: 440 /* Select DOZE mode. */ 441 hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); 442 hid0 |= HID0_DOZE | HID0_DPM; 443 powersave = 1; 444 break; 445 446 case MPC7447A: 447 case MPC7448: 448 case MPC7457: 449 case MPC7455: 450 case MPC7450: 451 /* Enable the 7450 branch caches */ 452 hid0 |= HID0_SGE | HID0_BTIC; 453 hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT; 454 /* Disable BTIC on 7450 Rev 2.0 or earlier */ 455 if (vers == MPC7450 && (pvr & 0xFFFF) <= 0x0200) 456 hid0 &= ~HID0_BTIC; 457 /* Select NAP mode. */ 458 hid0 &= ~(HID0_HIGH_BAT_EN | HID0_SLEEP); 459 hid0 |= HID0_NAP | HID0_DPM /* | HID0_XBSEN */; 460 powersave = 1; 461 break; 462 463 case IBM970: 464 case IBM970FX: 465 default: 466 /* No power-saving mode is available. */ ; 467 } 468 469 #ifdef NAPMODE 470 switch (vers) { 471 case IBM750FX: 472 case MPC750: 473 case MPC7400: 474 /* Select NAP mode. */ 475 hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); 476 hid0 |= HID0_NAP; 477 break; 478 } 479 #endif 480 481 switch (vers) { 482 case IBM750FX: 483 case MPC750: 484 hid0 &= ~HID0_DBP; /* XXX correct? */ 485 hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT; 486 break; 487 488 case MPC7400: 489 case MPC7410: 490 hid0 &= ~HID0_SPD; 491 hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT; 492 hid0 |= HID0_EIEC; 493 break; 494 } 495 496 #if defined (PPC_OEA) 497 mtspr(SPR_HID0, hid0); 498 __asm volatile("sync;isync"); 499 #endif 500 501 switch (vers) { 502 case MPC601: 503 bitmask = HID0_601_BITMASK; 504 break; 505 case MPC7450: 506 case MPC7455: 507 case MPC7457: 508 bitmask = HID0_7450_BITMASK; 509 break; 510 case IBM970: 511 case IBM970FX: 512 bitmask = 0; 513 break; 514 default: 515 bitmask = HID0_BITMASK; 516 break; 517 } 518 bitmask_snprintf(hid0, bitmask, hidbuf, sizeof hidbuf); 519 aprint_normal("%s: HID0 %s, powersave: %d\n", self->dv_xname, hidbuf, powersave); 520 521 ci->ci_khz = 0; 522 523 /* 524 * Display speed and cache configuration. 525 */ 526 switch (vers) { 527 case MPC604: 528 case MPC604e: 529 case MPC604ev: 530 case MPC750: 531 case IBM750FX: 532 case MPC7400: 533 case MPC7410: 534 case MPC7447A: 535 case MPC7448: 536 case MPC7450: 537 case MPC7455: 538 case MPC7457: 539 aprint_normal("%s: ", self->dv_xname); 540 cpu_probe_speed(ci); 541 aprint_normal("%u.%02u MHz", 542 ci->ci_khz / 1000, (ci->ci_khz / 10) % 100); 543 544 if (vers == IBM750FX || vers == MPC750 || 545 vers == MPC7400 || vers == MPC7410 || MPC745X_P(vers)) { 546 if (MPC745X_P(vers)) { 547 cpu_config_l3cr(vers); 548 } else { 549 cpu_config_l2cr(pvr); 550 } 551 } 552 aprint_normal("\n"); 553 break; 554 } 555 556 #if NSYSMON_ENVSYS > 0 557 /* 558 * Attach MPC750 temperature sensor to the envsys subsystem. 559 * XXX the 74xx series also has this sensor, but it is not 560 * XXX supported by Motorola and may return values that are off by 561 * XXX 35-55 degrees C. 
562 */ 563 if (vers == MPC750 || vers == IBM750FX) 564 cpu_tau_setup(ci); 565 #endif 566 567 evcnt_attach_dynamic(&ci->ci_ev_clock, EVCNT_TYPE_INTR, 568 NULL, self->dv_xname, "clock"); 569 evcnt_attach_dynamic(&ci->ci_ev_softclock, EVCNT_TYPE_INTR, 570 NULL, self->dv_xname, "soft clock"); 571 evcnt_attach_dynamic(&ci->ci_ev_softnet, EVCNT_TYPE_INTR, 572 NULL, self->dv_xname, "soft net"); 573 evcnt_attach_dynamic(&ci->ci_ev_softserial, EVCNT_TYPE_INTR, 574 NULL, self->dv_xname, "soft serial"); 575 evcnt_attach_dynamic(&ci->ci_ev_traps, EVCNT_TYPE_TRAP, 576 NULL, self->dv_xname, "traps"); 577 evcnt_attach_dynamic(&ci->ci_ev_kdsi, EVCNT_TYPE_TRAP, 578 &ci->ci_ev_traps, self->dv_xname, "kernel DSI traps"); 579 evcnt_attach_dynamic(&ci->ci_ev_udsi, EVCNT_TYPE_TRAP, 580 &ci->ci_ev_traps, self->dv_xname, "user DSI traps"); 581 evcnt_attach_dynamic(&ci->ci_ev_udsi_fatal, EVCNT_TYPE_TRAP, 582 &ci->ci_ev_udsi, self->dv_xname, "user DSI failures"); 583 evcnt_attach_dynamic(&ci->ci_ev_kisi, EVCNT_TYPE_TRAP, 584 &ci->ci_ev_traps, self->dv_xname, "kernel ISI traps"); 585 evcnt_attach_dynamic(&ci->ci_ev_isi, EVCNT_TYPE_TRAP, 586 &ci->ci_ev_traps, self->dv_xname, "user ISI traps"); 587 evcnt_attach_dynamic(&ci->ci_ev_isi_fatal, EVCNT_TYPE_TRAP, 588 &ci->ci_ev_isi, self->dv_xname, "user ISI failures"); 589 evcnt_attach_dynamic(&ci->ci_ev_scalls, EVCNT_TYPE_TRAP, 590 &ci->ci_ev_traps, self->dv_xname, "system call traps"); 591 evcnt_attach_dynamic(&ci->ci_ev_pgm, EVCNT_TYPE_TRAP, 592 &ci->ci_ev_traps, self->dv_xname, "PGM traps"); 593 evcnt_attach_dynamic(&ci->ci_ev_fpu, EVCNT_TYPE_TRAP, 594 &ci->ci_ev_traps, self->dv_xname, "FPU unavailable traps"); 595 evcnt_attach_dynamic(&ci->ci_ev_fpusw, EVCNT_TYPE_TRAP, 596 &ci->ci_ev_fpu, self->dv_xname, "FPU context switches"); 597 evcnt_attach_dynamic(&ci->ci_ev_ali, EVCNT_TYPE_TRAP, 598 &ci->ci_ev_traps, self->dv_xname, "user alignment traps"); 599 evcnt_attach_dynamic(&ci->ci_ev_ali_fatal, EVCNT_TYPE_TRAP, 600 &ci->ci_ev_ali, self->dv_xname, "user alignment traps"); 601 evcnt_attach_dynamic(&ci->ci_ev_umchk, EVCNT_TYPE_TRAP, 602 &ci->ci_ev_umchk, self->dv_xname, "user MCHK failures"); 603 evcnt_attach_dynamic(&ci->ci_ev_vec, EVCNT_TYPE_TRAP, 604 &ci->ci_ev_traps, self->dv_xname, "AltiVec unavailable"); 605 #ifdef ALTIVEC 606 if (cpu_altivec) { 607 evcnt_attach_dynamic(&ci->ci_ev_vecsw, EVCNT_TYPE_TRAP, 608 &ci->ci_ev_vec, self->dv_xname, "AltiVec context switches"); 609 } 610 #endif 611 evcnt_attach_dynamic(&ci->ci_ev_ipi, EVCNT_TYPE_INTR, 612 NULL, self->dv_xname, "IPIs"); 613 } 614 615 void 616 cpu_identify(char *str, size_t len) 617 { 618 u_int pvr, major, minor; 619 uint16_t vers, rev, revfmt; 620 const struct cputab *cp; 621 const char *name; 622 size_t n; 623 624 pvr = mfpvr(); 625 vers = pvr >> 16; 626 rev = pvr; 627 628 switch (vers) { 629 case MPC7410: 630 minor = (pvr >> 0) & 0xff; 631 major = minor <= 4 ? 
		    1 : 2;
		break;
	default:
		major = (pvr >> 4) & 0xf;
		minor = (pvr >> 0) & 0xf;
	}

	for (cp = models; cp->name[0] != '\0'; cp++) {
		if (cp->version == vers)
			break;
	}

	if (str == NULL) {
		str = cpu_model;
		len = sizeof(cpu_model);
		cpu = vers;
	}

	revfmt = cp->revfmt;
	name = cp->name;
	if (vers == MPC750 && pvr == 15) {
		name = "755";
		revfmt = REVFMT_HEX;
	}

	if (cp->name[0] != '\0') {
		n = snprintf(str, len, "%s (Revision ", name);
	} else {
		n = snprintf(str, len, "Version %#x (Revision ", vers);
	}
	if (len > n) {
		switch (revfmt) {
		case REVFMT_MAJMIN:
			snprintf(str + n, len - n, "%u.%u)", major, minor);
			break;
		case REVFMT_HEX:
			snprintf(str + n, len - n, "0x%04x)", rev);
			break;
		case REVFMT_DEC:
			snprintf(str + n, len - n, "%u)", rev);
			break;
		}
	}
}

#ifdef L2CR_CONFIG
u_int l2cr_config = L2CR_CONFIG;
#else
u_int l2cr_config = 0;
#endif

#ifdef L3CR_CONFIG
u_int l3cr_config = L3CR_CONFIG;
#else
u_int l3cr_config = 0;
#endif

void
cpu_enable_l2cr(register_t l2cr)
{
	register_t msr, x;

	/* Disable interrupts and set the cache config bits. */
	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
#ifdef ALTIVEC
	if (cpu_altivec)
		__asm volatile("dssall");
#endif
	__asm volatile("sync");
	mtspr(SPR_L2CR, l2cr & ~L2CR_L2E);
	__asm volatile("sync");

	/* Wait for L2 clock to be stable (640 L2 clocks). */
	delay(100);

	/* Invalidate all L2 contents. */
	mtspr(SPR_L2CR, l2cr | L2CR_L2I);
	do {
		x = mfspr(SPR_L2CR);
	} while (x & L2CR_L2IP);

	/* Enable L2 cache. */
	l2cr |= L2CR_L2E;
	mtspr(SPR_L2CR, l2cr);
	mtmsr(msr);
}

void
cpu_enable_l3cr(register_t l3cr)
{
	register_t x;

	/* By The Book (numbered steps from section 3.7.1.3 of MPC7450UM) */

	/*
	 * 1: Set all L3CR bits for final config except L3E, L3I, L3PE, and
	 *    L3CLKEN.  (also mask off reserved bits in case they were
	 *    included in L3CR_CONFIG)
	 */
	l3cr &= ~(L3CR_L3E|L3CR_L3I|L3CR_L3PE|L3CR_L3CLKEN|L3CR_RESERVED);
	mtspr(SPR_L3CR, l3cr);

	/* 2: Set L3CR[5] (otherwise reserved bit) to 1 */
	l3cr |= 0x04000000;
	mtspr(SPR_L3CR, l3cr);

	/* 3: Set L3CLKEN to 1 */
	l3cr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 4/5: Perform a global cache invalidate (ref section 3.7.3.6) */
	__asm volatile("dssall;sync");
	/* L3 cache is already disabled, no need to clear L3E */
	mtspr(SPR_L3CR, l3cr|L3CR_L3I);
	do {
		x = mfspr(SPR_L3CR);
	} while (x & L3CR_L3I);

	/* 6: Clear L3CLKEN to 0 */
	l3cr &= ~L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 7: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);

	/* 8: Set L3E and L3CLKEN */
	l3cr |= (L3CR_L3E|L3CR_L3CLKEN);
	mtspr(SPR_L3CR, l3cr);

	/* 9: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);
}

void
cpu_config_l2cr(int pvr)
{
	register_t l2cr;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	if ((l2cr & L2CR_L2E) == 0) {
		aprint_normal(" L2 cache present but not enabled ");
		return;
	}

	aprint_normal(",");
	if ((pvr >> 16) == IBM750FX ||
	    (pvr & 0xffffff00) == 0x00082200 /* IBM750CX */ ||
	    (pvr & 0xffffef00) == 0x00082300 /* IBM750CXe */) {
		cpu_fmttab_print(cpu_ibm750_l2cr_formats, l2cr);
	} else {
		cpu_fmttab_print(cpu_l2cr_formats, l2cr);
	}
}

void
cpu_config_l3cr(int vers)
{
	register_t l2cr;
	register_t l3cr;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	aprint_normal(",");
	switch (vers) {
	case MPC7447A:
	case MPC7457:
		cpu_fmttab_print(cpu_7457_l2cr_formats, l2cr);
		return;
	case MPC7448:
		cpu_fmttab_print(cpu_7448_l2cr_formats, l2cr);
		return;
	default:
		cpu_fmttab_print(cpu_7450_l2cr_formats, l2cr);
		break;
	}

	l3cr = mfspr(SPR_L3CR);

	/*
	 * For MP systems, the firmware may only configure the L3 cache
	 * on the first CPU.  In this case, assume that the other CPUs
	 * should use the same value for L3CR.
	 */
	if ((l3cr & L3CR_L3E) != 0 && l3cr_config == 0) {
		l3cr_config = l3cr;
	}

	/*
	 * Configure L3 cache if not enabled.
	 */
	if ((l3cr & L3CR_L3E) == 0 && l3cr_config != 0) {
		cpu_enable_l3cr(l3cr_config);
		l3cr = mfspr(SPR_L3CR);
	}

	if (l3cr & L3CR_L3E) {
		aprint_normal(",");
		cpu_fmttab_print(cpu_7450_l3cr_formats, l3cr);
	}
}

void
cpu_probe_speed(struct cpu_info *ci)
{
	uint64_t cps;

	mtspr(SPR_MMCR0, MMCR0_FC);
	mtspr(SPR_PMC1, 0);
	mtspr(SPR_MMCR0, MMCR0_PMC1SEL(PMCN_CYCLES));
	delay(100000);
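	/*
	 * PMC1 has been counting CPU cycles for the 100000us delay above,
	 * so multiplying the count by 10 gives cycles per second.  The
	 * extra 4999 appears intended to bias the division below so that
	 * a clock measuring slightly under its nominal rate still reports
	 * the nominal speed.
	 */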
	cps = (mfspr(SPR_PMC1) * 10) + 4999;

	mtspr(SPR_MMCR0, MMCR0_FC);

	ci->ci_khz = cps / 1000;
}

#if NSYSMON_ENVSYS > 0
void
cpu_tau_setup(struct cpu_info *ci)
{
	struct sysmon_envsys *sme;
	envsys_data_t sensor;
	int error;

	sme = sysmon_envsys_create();

	sensor.state = ENVSYS_SVALID;
	sensor.units = ENVSYS_STEMP;
	(void)strlcpy(sensor.desc, "CPU Temp", sizeof(sensor.desc));
	if (sysmon_envsys_sensor_attach(sme, &sensor)) {
		sysmon_envsys_destroy(sme);
		return;
	}

	sme->sme_name = ci->ci_dev->dv_xname;
	sme->sme_cookie = ci;
	sme->sme_refresh = cpu_tau_refresh;

	if ((error = sysmon_envsys_register(sme)) != 0) {
		aprint_error("%s: unable to register with sysmon (%d)\n",
		    ci->ci_dev->dv_xname, error);
		sysmon_envsys_destroy(sme);
	}
}

/* Find the temperature of the CPU. */
void
cpu_tau_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	int i, threshold, count;

	threshold = 64; /* Half of the 7-bit sensor range */
	mtspr(SPR_THRM1, 0);
	mtspr(SPR_THRM2, 0);
	/* XXX This counter is supposed to be "at least 20 microseconds, in
	 * XXX units of clock cycles".  Since we don't have convenient
	 * XXX access to the CPU speed, set it to a conservative value,
	 * XXX that is, assuming a fast (1GHz) G3 CPU (as of February 2002,
	 * XXX the fastest G3 processor is 700MHz).  The cost is that
	 * XXX measuring the temperature takes a bit longer.
	 */
	mtspr(SPR_THRM3, SPR_THRM_TIMER(20000) | SPR_THRM_ENABLE);

	/* Successive-approximation code adapted from Motorola
	 * application note AN1800/D, "Programming the Thermal Assist
	 * Unit in the MPC750 Microprocessor".
	 */
	for (i = 4; i >= 0; i--) {
		mtspr(SPR_THRM1,
		    SPR_THRM_THRESHOLD(threshold) | SPR_THRM_VALID);
		count = 0;
		while ((count < 100) &&
		    ((mfspr(SPR_THRM1) & SPR_THRM_TIV) == 0)) {
			count++;
			delay(1);
		}
		if (mfspr(SPR_THRM1) & SPR_THRM_TIN) {
			/* The interrupt bit was set, meaning the
			 * temperature was above the threshold
			 */
			threshold += 2 << i;
		} else {
			/* Temperature was below the threshold */
			threshold -= 2 << i;
		}
	}
	threshold += 2;

	/* Convert the temperature in degrees C to microkelvin */
	edata->value_cur = (threshold * 1000000) + 273150000;
}
#endif /* NSYSMON_ENVSYS > 0 */

#ifdef MULTIPROCESSOR
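/*
 * Start a secondary CPU.  This runs on the primary: it fills in a
 * cpu_hatch_data block with copies of the primary's HID0, SDR1, segment
 * registers and instruction BATs, points cpu_hatch_stack at freshly
 * allocated memory, and has the machine-dependent code aim the secondary
 * at the hatch trampoline.  It then waits for the secondary to announce
 * itself by setting h->running.
 */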
int
cpu_spinup(struct device *self, struct cpu_info *ci)
{
	volatile struct cpu_hatch_data hatch_data, *h = &hatch_data;
	struct pglist mlist;
	int i, error, pvr, vers;
	char *cp;

	pvr = mfpvr();
	vers = pvr >> 16;
	KASSERT(ci != curcpu());

	/*
	 * Allocate some contiguous pages for the idle PCB and stack
	 * from the lowest 256MB (because bat0 always maps it va == pa).
	 */
	error = uvm_pglistalloc(INTSTK, 0x0, 0x10000000, 0, 0, &mlist, 1, 1);
	if (error) {
		aprint_error(": unable to allocate idle stack\n");
		return -1;
	}

	KASSERT(ci != &cpu_info[0]);

	cp = (void *)VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
	memset(cp, 0, INTSTK);

	ci->ci_intstk = cp;

	/* Initialize secondary cpu's initial lwp to its idlelwp. */
	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
	ci->ci_curpcb = &ci->ci_curlwp->l_addr->u_pcb;
	ci->ci_curpm = ci->ci_curpcb->pcb_pm;

	cpu_hatch_data = h;
	h->running = 0;
	h->self = self;
	h->ci = ci;
	h->pir = ci->ci_cpuid;
	cpu_hatch_stack = (uint32_t)cp + INTSTK - sizeof(struct trapframe);
	ci->ci_lasttb = cpu_info[0].ci_lasttb;

	/* copy special registers */
	h->hid0 = mfspr(SPR_HID0);
	__asm volatile ("mfsdr1 %0" : "=r"(h->sdr1));
	for (i = 0; i < 16; i++)
		__asm ("mfsrin %0,%1" : "=r"(h->sr[i]) :
		    "r"(i << ADDR_SR_SHFT));
	/* copy the bat regs */
	__asm volatile ("mfibatu %0,0" : "=r"(h->batu[0]));
	__asm volatile ("mfibatl %0,0" : "=r"(h->batl[0]));
	__asm volatile ("mfibatu %0,1" : "=r"(h->batu[1]));
	__asm volatile ("mfibatl %0,1" : "=r"(h->batl[1]));
	__asm volatile ("mfibatu %0,2" : "=r"(h->batu[2]));
	__asm volatile ("mfibatl %0,2" : "=r"(h->batl[2]));
	__asm volatile ("mfibatu %0,3" : "=r"(h->batu[3]));
	__asm volatile ("mfibatl %0,3" : "=r"(h->batl[3]));
	__asm volatile ("sync; isync");

	if (md_setup_trampoline(h, ci) == -1)
		return -1;
	md_presync_timebase(h);
	md_start_timebase(h);

	/* wait for secondary printf */
	delay(200000);

	if (h->running == 0) {
		aprint_error(":CPU %d didn't start\n", ci->ci_cpuid);
		return -1;
	}

	/* Register IPI Interrupt */
	ipiops.ppc_establish_ipi(IST_LEVEL, IPL_HIGH, NULL);

	return 0;
}

static volatile int start_secondary_cpu;

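/*
 * Entry point for a freshly hatched secondary CPU: it loads the register
 * state the primary saved in cpu_hatch_data (BATs, segment registers,
 * SDR1, HID0, PIR), turns on address translation, syncs its timebase and
 * runs cpu_setup(), then sets h->running and spins until
 * cpu_boot_secondary_processors() lets it continue into normal operation.
 */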
void
cpu_hatch(void)
{
	volatile struct cpu_hatch_data *h = cpu_hatch_data;
	struct cpu_info * const ci = h->ci;
	u_int msr;
	int i;

	/* Initialize timebase. */
	__asm ("mttbl %0; mttbu %0; mttbl %0" :: "r"(0));

	/* Set PIR (Processor Identification Register).  i.e. whoami */
	mtspr(SPR_PIR, h->pir);
	__asm volatile ("mtsprg 0,%0" :: "r"(ci));

	/* Initialize MMU. */
	__asm ("mtibatu 0,%0" :: "r"(h->batu[0]));
	__asm ("mtibatl 0,%0" :: "r"(h->batl[0]));
	__asm ("mtibatu 1,%0" :: "r"(h->batu[1]));
	__asm ("mtibatl 1,%0" :: "r"(h->batl[1]));
	__asm ("mtibatu 2,%0" :: "r"(h->batu[2]));
	__asm ("mtibatl 2,%0" :: "r"(h->batl[2]));
	__asm ("mtibatu 3,%0" :: "r"(h->batu[3]));
	__asm ("mtibatl 3,%0" :: "r"(h->batl[3]));

	mtspr(SPR_HID0, h->hid0);

	__asm ("mtibatl 0,%0; mtibatu 0,%1; mtdbatl 0,%0; mtdbatu 0,%1;"
	    :: "r"(battable[0].batl), "r"(battable[0].batu));

	for (i = 0; i < 16; i++)
		__asm ("mtsrin %0,%1" :: "r"(h->sr[i]), "r"(i << ADDR_SR_SHFT));

	__asm ("mtsdr1 %0" :: "r"(h->sdr1));
	__asm volatile ("isync");

	/* Enable I/D address translations. */
	__asm volatile ("mfmsr %0" : "=r"(msr));
	msr |= PSL_IR|PSL_DR|PSL_ME|PSL_RI;
	__asm volatile ("mtmsr %0" :: "r"(msr));
	__asm volatile ("sync; isync");

	md_sync_timebase(h);

	cpu_setup(h->self, ci);

	h->running = 1;
	__asm volatile ("sync; isync");

	while (start_secondary_cpu == 0)
		;

	__asm volatile ("sync; isync");

	aprint_normal("cpu%d: started\n", cpu_number());
	__asm volatile ("mtdec %0" :: "r"(ticks_per_intr));

	md_setup_interrupts();

	ci->ci_ipending = 0;
	ci->ci_cpl = 0;

	mtmsr(mfmsr() | PSL_EE);
}

void
cpu_boot_secondary_processors(void)
{
	start_secondary_cpu = 1;
	__asm volatile ("sync");
}

#endif /*MULTIPROCESSOR*/