1 /* $NetBSD: cpu.c,v 1.117 2014/09/01 19:01:55 palle Exp $ */ 2 3 /* 4 * Copyright (c) 1996 5 * The President and Fellows of Harvard College. All rights reserved. 6 * Copyright (c) 1992, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * This software was developed by the Computer Systems Engineering group 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 11 * contributed to Berkeley. 12 * 13 * All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Harvard University. 16 * This product includes software developed by the University of 17 * California, Lawrence Berkeley Laboratory. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 23 * 1. Redistributions of source code must retain the above copyright 24 * notice, this list of conditions and the following disclaimer. 25 * 2. Redistributions in binary form must reproduce the above copyright 26 * notice, this list of conditions and the following disclaimer in the 27 * documentation and/or other materials provided with the distribution. 28 * 3. All advertising materials mentioning features or use of this software 29 * must display the following acknowledgement: 30 * This product includes software developed by Aaron Brown and 31 * Harvard University. 32 * This product includes software developed by the University of 33 * California, Berkeley and its contributors. 34 * 4. Neither the name of the University nor the names of its contributors 35 * may be used to endorse or promote products derived from this software 36 * without specific prior written permission. 
37 * 38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 48 * SUCH DAMAGE. 49 * 50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93 51 * 52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.117 2014/09/01 19:01:55 palle Exp $"); 56 57 #include "opt_multiprocessor.h" 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/device.h> 62 #include <sys/kernel.h> 63 #include <sys/reboot.h> 64 #include <sys/cpu.h> 65 66 #include <uvm/uvm.h> 67 68 #include <machine/autoconf.h> 69 #include <machine/cpu.h> 70 #include <machine/reg.h> 71 #include <machine/trap.h> 72 #include <machine/pmap.h> 73 #include <machine/sparc64.h> 74 #include <machine/openfirm.h> 75 76 #include <sparc64/sparc64/cache.h> 77 #ifdef SUN4V 78 #include <sparc64/hypervisor.h> 79 #endif 80 81 #ifdef SUN4V 82 #define SUN4V_MONDO_QUEUE_SIZE 32 83 #define SUN4V_QUEUE_ENTRY_SIZE 64 84 #endif 85 86 int ecache_min_line_size; 87 88 /* Linked list of all CPUs in system. */ 89 #if defined(MULTIPROCESSOR) 90 int sparc_ncpus = 0; 91 #endif 92 struct cpu_info *cpus = NULL; 93 94 volatile sparc64_cpuset_t cpus_active;/* set of active cpus */ 95 struct cpu_bootargs *cpu_args; /* allocated very early in pmap_bootstrap. 
*/
struct pool_cache *fpstate_cache;

/* Forward declaration; cpu_info areas are carved out of cpu0paddr pages. */
static struct cpu_info *alloc_cpuinfo(u_int);

/* The following are used externally (sysctl_hw). */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/* These are used in locore.s, and are maximums */
int	dcache_line_size;
int	dcache_size;
int	icache_line_size;
int	icache_size;

#ifdef MULTIPROCESSOR
static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES;
#endif

static void cpu_reset_fpustate(void);

/* Set non-zero by the boot CPU to release hatched CPUs for %tick sync. */
volatile int sync_tick = 0;

/* The CPU configuration driver. */
void cpu_attach(device_t, device_t, void *);
int cpu_match(device_t, cfdata_t, void *);

CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL);

/*
 * Map a PROM cpu node to its numeric cpu id.
 *
 * Determine the cpuid by examining the node's properties
 * in the following order:
 *	upa-portid
 *	portid
 *	cpuid
 *	reg (sun4v only)
 *
 * On sun4v the "reg"-derived id unconditionally overrides any id found
 * by the earlier lookups.  Panics if no id can be determined.
 */
static int
cpuid_from_node(u_int cpu_node)
{
	int id;

	id = prom_getpropint(cpu_node, "upa-portid", -1);
	if (id == -1)
		id = prom_getpropint(cpu_node, "portid", -1);
	if (id == -1)
		id = prom_getpropint(cpu_node, "cpuid", -1);
#ifdef SUN4V
	if (CPU_ISSUN4V) {
		int reg[4];
		int *regp = reg;
		int len = 4;
		int rc = prom_getprop(cpu_node, "reg", sizeof(int),
		    &len, &regp);
		if (rc != 0)
			panic("No reg property found\n");
		/*
		 * The cpuid lives in the low bits of reg[0] per the
		 * sun4v hypervisor architecture.  NOTE(review): the
		 * mask keeps the low 28 bits although older comments
		 * said 24 — harmless either way, but worth confirming
		 * against the sun4v spec.
		 */
		id = reg[0] & 0x0fffffff;
	}
#endif
	if (id == -1)
		panic("failed to determine cpuid");

	return id;
}

/*
 * Find or allocate the cpu_info area for the CPU whose PROM node is
 * "cpu_node".
 *
 * If a cpu_info with a matching cpuid is already on the "cpus" list it
 * is returned unchanged.  Otherwise an 8-page, 8-page-aligned region is
 * mapped at the physical pages taken from cpu0paddr and laid out the
 * same way pmap_bootstrap arranges the boot CPU's area (pcb, idle stack
 * and interrupt stack), with the cpu_info at offset CPUINFO_VA-INTSTACK.
 */
struct cpu_info *
alloc_cpuinfo(u_int cpu_node)
{
	paddr_t pa0, pa;
	vaddr_t va, va0;
	vsize_t sz = 8 * PAGE_SIZE;
	int cpuid;
	struct cpu_info *cpi, *ci;
	extern paddr_t cpu0paddr;

	/*
	 * Check for matching cpuid in the cpus list.
	 */
	cpuid = cpuid_from_node(cpu_node);

	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
		if (cpi->ci_cpuid == cpuid)
			return cpi;

	/* Allocate the aligned VA and determine the size. */
	va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY);
	if (!va)
		panic("alloc_cpuinfo: no virtual space");
	va0 = va;

	/* Claim the next sz bytes of boot-time physical memory. */
	pa0 = cpu0paddr;
	cpu0paddr += sz;

	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

	pmap_update(pmap_kernel());

	/* cpu_info sits at the same offset within the area as for CPU 0. */
	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);

	memset((void *)va0, 0, sz);

	/*
	 * Initialize cpuinfo structure.
	 *
	 * Arrange pcb, idle stack and interrupt stack in the same
	 * way as is done for the boot CPU in pmap.c.
	 */
	cpi->ci_next = NULL;
	cpi->ci_curlwp = NULL;
	cpi->ci_cpuid = cpuid;
	cpi->ci_fplwp = NULL;
	cpi->ci_eintstack = NULL;
	cpi->ci_spinup = NULL;
	cpi->ci_paddr = pa0;
	cpi->ci_self = cpi;
#ifdef SUN4V
	if (CPU_ISSUN4V)
		cpi->ci_mmfsa = pa0;
#endif
	cpi->ci_node = cpu_node;
	cpi->ci_idepth = -1;
	memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending));

	/*
	 * Finally, add itself to the list of active cpus.
	 * NOTE: the new cpu_info is only linked onto the list in
	 * MULTIPROCESSOR kernels; the list walk itself always runs.
	 */
	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
		;
#ifdef MULTIPROCESSOR
	ci->ci_next = cpi;
#endif
	return (cpi);
}

/*
 * Autoconfiguration match routine for "cpu" nodes.
 *
 * In an MP kernel booted with RB_MD1 (attach only one cpu), reject
 * every node except the one for the CPU we are executing on.
 */
int
cpu_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (strcmp(cf->cf_name, ma->ma_name) != 0)
		return 0;

	/*
	 * If we are going to only attach a single cpu, make sure
	 * to pick the one we are running on right now.
243 */ 244 if (cpuid_from_node(ma->ma_node) != cpu_myid()) { 245 #ifdef MULTIPROCESSOR 246 if (boothowto & RB_MD1) 247 #endif 248 return 0; 249 } 250 251 return 1; 252 } 253 254 static void 255 cpu_reset_fpustate(void) 256 { 257 struct fpstate64 *fpstate; 258 struct fpstate64 fps[2]; 259 260 /* This needs to be 64-byte aligned */ 261 fpstate = ALIGNFPSTATE(&fps[1]); 262 263 /* 264 * Get the FSR and clear any exceptions. If we do not unload 265 * the queue here and it is left over from a previous crash, we 266 * will panic in the first loadfpstate(), due to a sequence error, 267 * so we need to dump the whole state anyway. 268 */ 269 fpstate->fs_fsr = 7 << FSR_VER_SHIFT; /* 7 is reserved for "none" */ 270 savefpstate(fpstate); 271 } 272 273 /* 274 * Attach the CPU. 275 * Discover interesting goop about the virtual address cache 276 * (slightly funny place to do it, but this is where it is to be found). 277 */ 278 void 279 cpu_attach(device_t parent, device_t dev, void *aux) 280 { 281 int node; 282 long clk, sclk = 0; 283 struct mainbus_attach_args *ma = aux; 284 struct cpu_info *ci; 285 const char *sep; 286 register int i, l; 287 uint64_t ver; 288 int bigcache, cachesize; 289 char buf[100]; 290 int totalsize = 0; 291 int linesize, dcachesize, icachesize; 292 293 /* tell them what we have */ 294 node = ma->ma_node; 295 296 /* 297 * Allocate cpu_info structure if needed. 298 */ 299 ci = alloc_cpuinfo((u_int)node); 300 301 /* 302 * Only do this on the boot cpu. Other cpu's call 303 * cpu_reset_fpustate() from cpu_hatch() before they 304 * call into the idle loop. 305 * For other cpus, we need to call mi_cpu_attach() 306 * and complete setting up cpcb. 
307 */ 308 if (ci->ci_flags & CPUF_PRIMARY) { 309 fpstate_cache = pool_cache_init(sizeof(struct fpstate64), 310 SPARC64_BLOCK_SIZE, 0, 0, "fpstate", 311 NULL, IPL_NONE, NULL, NULL, NULL); 312 cpu_reset_fpustate(); 313 } 314 #ifdef MULTIPROCESSOR 315 else { 316 mi_cpu_attach(ci); 317 ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp); 318 } 319 for (i = 0; i < IPI_EVCNT_NUM; ++i) 320 evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR, 321 NULL, device_xname(dev), ipi_evcnt_names[i]); 322 #endif 323 evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL, 324 device_xname(dev), "timer"); 325 mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM); 326 327 clk = prom_getpropint(node, "clock-frequency", 0); 328 if (clk == 0) { 329 /* 330 * Try to find it in the OpenPROM root... 331 */ 332 clk = prom_getpropint(findroot(), "clock-frequency", 0); 333 } 334 if (clk) { 335 /* Tell OS what frequency we run on */ 336 ci->ci_cpu_clockrate[0] = clk; 337 ci->ci_cpu_clockrate[1] = clk / 1000000; 338 } 339 340 sclk = prom_getpropint(findroot(), "stick-frequency", 0); 341 342 ci->ci_system_clockrate[0] = sclk; 343 ci->ci_system_clockrate[1] = sclk / 1000000; 344 345 snprintf(buf, sizeof buf, "%s @ %s MHz", 346 prom_getpropstring(node, "name"), clockfreq(clk)); 347 cpu_setmodel("%s (%s)", machine_model, buf); 348 349 aprint_normal(": %s, CPU id %d\n", buf, ci->ci_cpuid); 350 aprint_naive("\n"); 351 if (CPU_ISSUN4U || CPU_ISSUN4US) { 352 ver = getver(); 353 aprint_normal_dev(dev, "manuf %x, impl %x, mask %x\n", 354 (u_int)((ver & VER_MANUF) >> VER_MANUF_SHIFT), 355 (u_int)((ver & VER_IMPL) >> VER_IMPL_SHIFT), 356 (u_int)((ver & VER_MASK) >> VER_MASK_SHIFT)); 357 } 358 359 if (ci->ci_system_clockrate[0] != 0) { 360 aprint_normal_dev(dev, "system tick frequency %s MHz\n", 361 clockfreq(ci->ci_system_clockrate[0])); 362 } 363 aprint_normal_dev(dev, ""); 364 365 /* XXX sun4v mising cache info printout */ 366 bigcache = 0; 367 368 icachesize = prom_getpropint(node, "icache-size", 
0); 369 if (icachesize > icache_size) 370 icache_size = icachesize; 371 linesize = l = prom_getpropint(node, "icache-line-size", 0); 372 if (linesize > icache_line_size) 373 icache_line_size = linesize; 374 375 for (i = 0; (1 << i) < l && l; i++) 376 /* void */; 377 if ((1 << i) != l && l) 378 panic("bad icache line size %d", l); 379 totalsize = icachesize; 380 if (totalsize == 0) 381 totalsize = l * 382 prom_getpropint(node, "icache-nlines", 64) * 383 prom_getpropint(node, "icache-associativity", 1); 384 385 cachesize = totalsize / 386 prom_getpropint(node, "icache-associativity", 1); 387 bigcache = cachesize; 388 389 sep = ""; 390 if (totalsize > 0) { 391 aprint_normal("%s%ldK instruction (%ld b/l)", sep, 392 (long)totalsize/1024, 393 (long)linesize); 394 sep = ", "; 395 } 396 397 dcachesize = prom_getpropint(node, "dcache-size", 0); 398 if (dcachesize > dcache_size) 399 dcache_size = dcachesize; 400 linesize = l = prom_getpropint(node, "dcache-line-size", 0); 401 if (linesize > dcache_line_size) 402 dcache_line_size = linesize; 403 404 for (i = 0; (1 << i) < l && l; i++) 405 /* void */; 406 if ((1 << i) != l && l) 407 panic("bad dcache line size %d", l); 408 totalsize = dcachesize; 409 if (totalsize == 0) 410 totalsize = l * 411 prom_getpropint(node, "dcache-nlines", 128) * 412 prom_getpropint(node, "dcache-associativity", 1); 413 414 cachesize = totalsize / 415 prom_getpropint(node, "dcache-associativity", 1); 416 if (cachesize > bigcache) 417 bigcache = cachesize; 418 419 if (totalsize > 0) { 420 aprint_normal("%s%ldK data (%ld b/l)", sep, 421 (long)totalsize/1024, 422 (long)linesize); 423 sep = ", "; 424 } 425 426 linesize = l = 427 prom_getpropint(node, "ecache-line-size", 0); 428 for (i = 0; (1 << i) < l && l; i++) 429 /* void */; 430 if ((1 << i) != l && l) 431 panic("bad ecache line size %d", l); 432 totalsize = prom_getpropint(node, "ecache-size", 0); 433 if (totalsize == 0) 434 totalsize = l * 435 prom_getpropint(node, "ecache-nlines", 32768) * 436 
prom_getpropint(node, "ecache-associativity", 1); 437 438 cachesize = totalsize / 439 prom_getpropint(node, "ecache-associativity", 1); 440 if (cachesize > bigcache) 441 bigcache = cachesize; 442 443 if (totalsize > 0) { 444 aprint_normal("%s%ldK external (%ld b/l)", sep, 445 (long)totalsize/1024, 446 (long)linesize); 447 } 448 aprint_normal("\n"); 449 450 if (ecache_min_line_size == 0 || 451 linesize < ecache_min_line_size) 452 ecache_min_line_size = linesize; 453 454 /* 455 * Now that we know the size of the largest cache on this CPU, 456 * re-color our pages. 457 */ 458 uvm_page_recolor(atop(bigcache)); /* XXX */ 459 460 /* 461 * CPU specific ipi setup 462 * Currently only necessary for SUN4V 463 */ 464 #ifdef SUN4V 465 if (CPU_ISSUN4V) { 466 paddr_t pa = ci->ci_paddr; 467 int err; 468 469 pa += CPUINFO_VA - INTSTACK; 470 pa += PAGE_SIZE; 471 472 ci->ci_cpumq = pa; 473 err = hv_cpu_qconf(CPU_MONDO_QUEUE, ci->ci_cpumq, SUN4V_MONDO_QUEUE_SIZE); 474 if (err != H_EOK) 475 panic("Unable to set cpu mondo queue: %d", err); 476 pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE; 477 478 ci->ci_devmq = pa; 479 err = hv_cpu_qconf(DEVICE_MONDO_QUEUE, ci->ci_devmq, SUN4V_MONDO_QUEUE_SIZE); 480 if (err != H_EOK) 481 panic("Unable to set device mondo queue: %d", err); 482 pa += SUN4V_MONDO_QUEUE_SIZE * SUN4V_QUEUE_ENTRY_SIZE; 483 484 ci->ci_mondo = pa; 485 pa += 64; /* mondo message is 64 bytes */ 486 487 ci->ci_cpuset = pa; 488 pa += 64; 489 } 490 #endif 491 492 } 493 494 int 495 cpu_myid(void) 496 { 497 char buf[32]; 498 int impl; 499 500 #ifdef SUN4V 501 if (CPU_ISSUN4V) { 502 uint64_t myid; 503 hv_cpu_myid(&myid); 504 return myid; 505 } 506 #endif 507 if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 && 508 strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0) 509 return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED); 510 impl = (getver() & VER_IMPL) >> VER_IMPL_SHIFT; 511 switch (impl) { 512 case IMPL_OLYMPUS_C: 513 case IMPL_JUPITER: 514 return CPU_JUPITERID; 515 case 
IMPL_CHEETAH:
	case IMPL_CHEETAH_PLUS:
	case IMPL_JAGUAR:
	case IMPL_PANTHER:
		return CPU_FIREPLANEID;
	default:
		return CPU_UPAID;
	}
}

#if defined(MULTIPROCESSOR)
vaddr_t cpu_spinup_trampoline;

/*
 * Start secondary processors in motion.
 *
 * For each cpu_info other than our own: prepare its pmap state, pass
 * its node and cpu_info physical address via cpu_args, and have the
 * PROM start it at cpu_spinup_trampoline.  We then poll cpus_active
 * (up to 2000 * 10ms) for the CPU to hatch, and flip sync_tick so the
 * hatched CPU and the boot CPU zero %tick/%stick at (roughly) the
 * same moment.
 */
void
cpu_boot_secondary_processors(void)
{
	int i, pstate;
	struct cpu_info *ci;

	sync_tick = 0;

	sparc64_ipi_init();

	/* Single-CPU boot requested: truncate the list to the boot CPU. */
	if (boothowto & RB_MD1) {
		cpus[0].ci_next = NULL;
		sparc_ncpus = ncpu = ncpuonline = 1;
		return;
	}

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (ci->ci_cpuid == cpu_myid())
			continue;

		cpu_pmap_prepare(ci, false);
		cpu_args->cb_node = ci->ci_node;
		cpu_args->cb_cpuinfo = ci->ci_paddr;
		membar_Sync();

		/* Disable interrupts and start another CPU. */
		pstate = getpstate();
		setpstate(PSTATE_KERN);

		prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);

		/* Wait for the CPU to mark itself active. */
		for (i = 0; i < 2000; i++) {
			membar_Sync();
			if (CPUSET_HAS(cpus_active, ci->ci_index))
				break;
			delay(10000);
		}

		/* synchronize %tick ( to some degree at least ) */
		delay(1000);
		sync_tick = 1;
		membar_Sync();
		if (CPU_ISSUN4U || CPU_ISSUN4US)
			settick(0);
		if (ci->ci_system_clockrate[0] != 0)
			if (CPU_ISSUN4U || CPU_ISSUN4US)
				setstick(0);

		setpstate(pstate);

		if (!CPUSET_HAS(cpus_active, ci->ci_index))
			printf("cpu%d: startup failed\n", ci->ci_cpuid);
	}
}

/*
 * Finish bringing up a hatched CPU; runs on the new CPU itself.
 * Flushes the freshly-mapped cpu_info pages, initializes per-CPU pmap
 * state, marks itself in cpus_active, then spins on sync_tick until the
 * boot CPU releases it so both zero their tick counters together.
 */
void
cpu_hatch(void)
{
	char *v = (char*)CPUINFO_VA;
	int i;

	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_Sync();

	/* wait for the boot CPU to flip the switch */
	while (sync_tick == 0) {
		/* we do nothing here */
	}
	if (CPU_ISSUN4U || CPU_ISSUN4US)
		settick(0);
	/* Prefer the system tick ("stick") clock when one exists. */
	if (curcpu()->ci_system_clockrate[0] != 0) {
		if (CPU_ISSUN4U || CPU_ISSUN4US)
			setstick(0);
		stickintr_establish(PIL_CLOCK, stickintr);
	} else {
		tickintr_establish(PIL_CLOCK, tickintr);
	}
	spl0();
}
#endif /* MULTIPROCESSOR */