1 /* $NetBSD: cpu.c,v 1.106 2013/12/16 20:17:35 palle Exp $ */ 2 3 /* 4 * Copyright (c) 1996 5 * The President and Fellows of Harvard College. All rights reserved. 6 * Copyright (c) 1992, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * This software was developed by the Computer Systems Engineering group 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 11 * contributed to Berkeley. 12 * 13 * All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Harvard University. 16 * This product includes software developed by the University of 17 * California, Lawrence Berkeley Laboratory. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 23 * 1. Redistributions of source code must retain the above copyright 24 * notice, this list of conditions and the following disclaimer. 25 * 2. Redistributions in binary form must reproduce the above copyright 26 * notice, this list of conditions and the following disclaimer in the 27 * documentation and/or other materials provided with the distribution. 28 * 3. All advertising materials mentioning features or use of this software 29 * must display the following acknowledgement: 30 * This product includes software developed by Aaron Brown and 31 * Harvard University. 32 * This product includes software developed by the University of 33 * California, Berkeley and its contributors. 34 * 4. Neither the name of the University nor the names of its contributors 35 * may be used to endorse or promote products derived from this software 36 * without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.106 2013/12/16 20:17:35 palle Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>

#include <uvm/uvm.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/pmap.h>
#include <machine/sparc64.h>
#include <machine/openfirm.h>

#include <sparc64/sparc64/cache.h>
#ifdef SUN4V
#include <sparc64/hypervisor.h>
#endif

/* Smallest external-cache line size seen on any attached CPU. */
int ecache_min_line_size;

/* Linked list of all CPUs in system. */
#if defined(MULTIPROCESSOR)
int sparc_ncpus = 0;
#endif
struct cpu_info *cpus = NULL;

volatile sparc64_cpuset_t cpus_active;	/* set of active cpus */
struct cpu_bootargs *cpu_args;	/* allocated very early in pmap_bootstrap. */
struct pool_cache *fpstate_cache;	/* pool for FPU save areas; created on the boot CPU */

static struct cpu_info *alloc_cpuinfo(u_int);

/* The following are used externally (sysctl_hw). */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
char	cpu_model[100];			/* machine model (primary CPU) */

/* These are used in locore.s, and are maximums */
int	dcache_line_size;
int	dcache_size;
int	icache_line_size;
int	icache_size;

#ifdef MULTIPROCESSOR
static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES;
#endif

static void cpu_reset_fpustate(void);

/*
 * Handshake flag for %tick/%stick synchronization: the boot CPU sets it
 * after starting a secondary, which spins on it in cpu_hatch().
 */
volatile int sync_tick = 0;

/* The CPU configuration driver. */
void cpu_attach(device_t, device_t, void *);
int cpu_match(device_t, cfdata_t, void *);

CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL);

/*
 * Return the UPA port id of the given OFW cpu node, taken from its
 * "upa-portid" property or, failing that, its "portid" property.
 * Panics if the node carries neither.
 */
static int
upaid_from_node(u_int cpu_node)
{
	int portid;

	if (OF_getprop(cpu_node, "upa-portid", &portid, sizeof(portid)) <= 0 &&
	    OF_getprop(cpu_node, "portid", &portid, sizeof(portid)) <= 0)
		panic("cpu node w/o upa-portid");

	return portid;
}

/*
 * Find or create the cpu_info for the CPU identified by cpu_node.
 * If a cpu_info with the node's UPA id is already on the cpus list
 * (e.g. the boot CPU, set up in pmap_bootstrap), it is returned
 * unchanged; otherwise a fresh 8-page, page-aligned area is mapped
 * at physical cpu0paddr and initialized.
 *
 * (Declared static above; this definition keeps internal linkage.)
 */
struct cpu_info *
alloc_cpuinfo(u_int cpu_node)
{
	paddr_t pa0, pa;
	vaddr_t va, va0;
	vsize_t sz = 8 * PAGE_SIZE;
	int portid;
	struct cpu_info *cpi, *ci;
	extern paddr_t cpu0paddr;

	/*
	 * Check for UPAID in the cpus list.
	 */
	portid = upaid_from_node(cpu_node);

	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
		if (cpi->ci_cpuid == portid)
			return cpi;

	/* Allocate the aligned VA and determine the size. */
	va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY);
	if (!va)
		panic("alloc_cpuinfo: no virtual space");
	va0 = va;

	/* Carve the backing physical pages out of the cpu0paddr region. */
	pa0 = cpu0paddr;
	cpu0paddr += sz;

	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

	pmap_update(pmap_kernel());

	/*
	 * The cpu_info lives at the same offset within this area as
	 * CPUINFO_VA is from INTSTACK in the boot CPU's layout.
	 */
	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);

	memset((void *)va0, 0, sz);

	/*
	 * Initialize cpuinfo structure.
	 *
	 * Arrange pcb, idle stack and interrupt stack in the same
	 * way as is done for the boot CPU in pmap.c.
	 */
	cpi->ci_next = NULL;
	cpi->ci_curlwp = NULL;
	cpi->ci_cpuid = portid;
	cpi->ci_fplwp = NULL;
	cpi->ci_eintstack = NULL;
	cpi->ci_spinup = NULL;
	cpi->ci_paddr = pa0;
	cpi->ci_self = cpi;
	cpi->ci_node = cpu_node;
	cpi->ci_idepth = -1;
	memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending));

	/*
	 * Finally, add itself to the list of active cpus.
	 * (Only linked in on MULTIPROCESSOR kernels; on uniprocessor
	 * kernels the boot CPU is found by the portid match above.)
	 */
	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
		;
#ifdef MULTIPROCESSOR
	ci->ci_next = cpi;
#endif
	return (cpi);
}

/*
 * Autoconfiguration match: accept a "cpu" mainbus node.  On kernels
 * without MULTIPROCESSOR (or when booting with RB_MD1), only the CPU
 * we are currently running on is accepted.
 */
int
cpu_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (strcmp(cf->cf_name, ma->ma_name) != 0)
		return 0;

	/*
	 * If we are going to only attach a single cpu, make sure
	 * to pick the one we are running on right now.
	 */
	if (upaid_from_node(ma->ma_node) != CPU_UPAID) {
#ifdef MULTIPROCESSOR
		if (boothowto & RB_MD1)
#endif
			return 0;
	}

	return 1;
}

/*
 * Drain the FPU state of the calling CPU into a scratch save area,
 * clearing any pending FP exception queue left over from before.
 */
static void
cpu_reset_fpustate(void)
{
	struct fpstate64 *fpstate;
	struct fpstate64 fps[2];

	/* This needs to be 64-byte aligned */
	fpstate = ALIGNFPSTATE(&fps[1]);

	/*
	 * Get the FSR and clear any exceptions.  If we do not unload
	 * the queue here and it is left over from a previous crash, we
	 * will panic in the first loadfpstate(), due to a sequence error,
	 * so we need to dump the whole state anyway.
	 */
	fpstate->fs_fsr = 7 << FSR_VER_SHIFT;	/* 7 is reserved for "none" */
	savefpstate(fpstate);
}

/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
void
cpu_attach(device_t parent, device_t dev, void *aux)
{
	int node;
	long clk, sclk = 0;
	struct mainbus_attach_args *ma = aux;
	struct cpu_info *ci;
	const char *sep;
	register int i, l;
	int bigcache, cachesize;
	char buf[100];
	int totalsize = 0;
	int linesize, dcachesize, icachesize;

	/* tell them what we have */
	node = ma->ma_node;

	/*
	 * Allocate cpu_info structure if needed.
	 */
	ci = alloc_cpuinfo((u_int)node);

	/*
	 * Only do this on the boot cpu.  Other cpu's call
	 * cpu_reset_fpustate() from cpu_hatch() before they
	 * call into the idle loop.
	 * For other cpus, we need to call mi_cpu_attach()
	 * and complete setting up cpcb.
	 */
	if (ci->ci_flags & CPUF_PRIMARY) {
		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
		    SPARC64_BLOCK_SIZE, 0, 0, "fpstate",
		    NULL, IPL_NONE, NULL, NULL, NULL);
		cpu_reset_fpustate();
	}
#ifdef MULTIPROCESSOR
	else {
		mi_cpu_attach(ci);
		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	}
	/* Per-CPU IPI event counters, one per IPI type. */
	for (i = 0; i < IPI_EVCNT_NUM; ++i)
		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
		    NULL, device_xname(dev), ipi_evcnt_names[i]);
#endif
	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(dev), "timer");
	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);

	clk = prom_getpropint(node, "clock-frequency", 0);
	if (clk == 0) {
		/*
		 * Try to find it in the OpenPROM root...
		 */
		clk = prom_getpropint(findroot(), "clock-frequency", 0);
	}
	if (clk) {
		/* Tell OS what frequency we run on */
		ci->ci_cpu_clockrate[0] = clk;	/* Hz */
		ci->ci_cpu_clockrate[1] = clk / 1000000;	/* MHz */
	}

	/* System (%stick) frequency comes from the PROM root node. */
	sclk = prom_getpropint(findroot(), "stick-frequency", 0);

	ci->ci_system_clockrate[0] = sclk;
	ci->ci_system_clockrate[1] = sclk / 1000000;

	snprintf(buf, sizeof buf, "%s @ %s MHz",
	    prom_getpropstring(node, "name"), clockfreq(clk));
	snprintf(cpu_model, sizeof cpu_model, "%s (%s)", machine_model, buf);

	aprint_normal(": %s, UPA id %d\n", buf, ci->ci_cpuid);
	aprint_naive("\n");

	if (ci->ci_system_clockrate[0] != 0) {
		aprint_normal_dev(dev, "system tick frequency %d MHz\n",
		    (int)ci->ci_system_clockrate[1]);
	}
	aprint_normal_dev(dev, "");

	/* Track the largest per-way cache size for page recoloring below. */
	bigcache = 0;

	/*
	 * Instruction cache: record maximums for locore.s, verify the
	 * line size is a power of two, and derive the total size from
	 * nlines * associativity when no "icache-size" property exists.
	 */
	icachesize = prom_getpropint(node, "icache-size", 0);
	if (icachesize > icache_size)
		icache_size = icachesize;
	linesize = l = prom_getpropint(node, "icache-line-size", 0);
	if (linesize > icache_line_size)
		icache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad icache line size %d", l);
	totalsize = icachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "icache-nlines", 64) *
		    prom_getpropint(node, "icache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "icache-associativity", 1);
	bigcache = cachesize;

	sep = "";
	if (totalsize > 0) {
		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
		    (long)totalsize/1024,
		    (long)linesize);
		sep = ", ";
	}

	/* Data cache: same procedure as the instruction cache above. */
	dcachesize = prom_getpropint(node, "dcache-size", 0);
	if (dcachesize > dcache_size)
		dcache_size = dcachesize;
	linesize = l = prom_getpropint(node, "dcache-line-size", 0);
	if (linesize > dcache_line_size)
		dcache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad dcache line size %d", l);
	totalsize = dcachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "dcache-nlines", 128) *
		    prom_getpropint(node, "dcache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "dcache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK data (%ld b/l)", sep,
		    (long)totalsize/1024,
		    (long)linesize);
		sep = ", ";
	}

	/* External (L2/E$) cache: same procedure again. */
	linesize = l =
	    prom_getpropint(node, "ecache-line-size", 0);
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad ecache line size %d", l);
	totalsize = prom_getpropint(node, "ecache-size", 0);
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "ecache-nlines", 32768) *
		    prom_getpropint(node, "ecache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "ecache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK external (%ld b/l)", sep,
		    (long)totalsize/1024,
		    (long)linesize);
	}
	aprint_normal("\n");

	/* Remember the smallest E$ line size across all CPUs. */
	if (ecache_min_line_size == 0 ||
	    linesize < ecache_min_line_size)
		ecache_min_line_size = linesize;

	/*
	 * Now that we know the size of the largest cache on this CPU,
	 * re-color our pages.
	 */
	uvm_page_recolor(atop(bigcache)); /* XXX */

}

/*
 * Return this CPU's id using whatever mechanism the hardware provides:
 * the hypervisor on sun4v, a fixed physical address on Starfire
 * (Ultra-Enterprise-10000), the Jupiter/Fireplane id registers on the
 * CPU implementations that have them, and the UPA id otherwise.
 */
int
cpu_myid(void)
{
	char buf[32];
	int impl;

#ifdef SUN4V
	if (CPU_ISSUN4V) {
		uint64_t myid;
		hv_cpu_myid(&myid);
		return myid;
	}
#endif
	if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0)
		return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED);
	impl = (getver() & VER_IMPL) >> VER_IMPL_SHIFT;
	switch (impl) {
		case IMPL_OLYMPUS_C:
		case IMPL_JUPITER:
			return CPU_JUPITERID;
		case IMPL_CHEETAH:
		case IMPL_CHEETAH_PLUS:
		case IMPL_JAGUAR:
		case IMPL_PANTHER:
			return CPU_FIREPLANEID;
		default:
			return CPU_UPAID;
	}
}

#if defined(MULTIPROCESSOR)
vaddr_t cpu_spinup_trampoline;

/*
 * Start secondary processors in motion.
 */
void
cpu_boot_secondary_processors(void)
{
	int i, pstate;
	struct cpu_info *ci;

	sync_tick = 0;

	sparc64_ipi_init();

	/* RB_MD1: run uniprocessor; truncate the cpus list to the boot CPU. */
	if (boothowto & RB_MD1) {
		cpus[0].ci_next = NULL;
		sparc_ncpus = ncpu = ncpuonline = 1;
		return;
	}

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		/* Skip the CPU we are already running on. */
		if (ci->ci_cpuid == CPU_UPAID)
			continue;

		/*
		 * Publish this CPU's bootargs (node and cpu_info physical
		 * address) before the PROM starts it on the trampoline.
		 */
		cpu_pmap_prepare(ci, false);
		cpu_args->cb_node = ci->ci_node;
		cpu_args->cb_cpuinfo = ci->ci_paddr;
		membar_Sync();

		/* Disable interrupts and start another CPU. */
		pstate = getpstate();
		setpstate(PSTATE_KERN);

		prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);

		/* Wait up to ~20s for the new CPU to mark itself active. */
		for (i = 0; i < 2000; i++) {
			membar_Sync();
			if (CPUSET_HAS(cpus_active, ci->ci_index))
				break;
			delay(10000);
		}

		/* synchronize %tick ( to some degree at least ) */
		delay(1000);
		sync_tick = 1;
		membar_Sync();
		settick(0);
		if (ci->ci_system_clockrate[0] != 0)
			setstick(0);

		setpstate(pstate);

		if (!CPUSET_HAS(cpus_active, ci->ci_index))
			printf("cpu%d: startup failed\n", ci->ci_cpuid);
	}
}

/*
 * Secondary CPU bring-up, called from the spinup trampoline: flush the
 * newly mapped cpu_info area, initialize per-CPU pmap state, join the
 * active set, then wait for the boot CPU's signal to zero %tick/%stick
 * and establish the clock interrupt before enabling interrupts.
 */
void
cpu_hatch(void)
{
	char *v = (char*)CPUINFO_VA;
	int i;

	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_Sync();

	/* wait for the boot CPU to flip the switch */
	while (sync_tick == 0) {
		/* we do nothing here */
	}
	settick(0);
	if (curcpu()->ci_system_clockrate[0] != 0) {
		setstick(0);
		/* %stick-based clock on CPUs that have a system tick. */
		stickintr_establish(PIL_CLOCK, stickintr);
	} else {
		tickintr_establish(PIL_CLOCK, tickintr);
	}
	spl0();
}
#endif /* MULTIPROCESSOR */