1 /* $NetBSD: cpu.c,v 1.8 2008/01/16 02:08:49 dogcow Exp $ */ 2 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */ 3 4 /*- 5 * Copyright (c) 2000 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by RedBack Networks Inc. 10 * 11 * Author: Bill Sommerfeld 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the NetBSD 24 * Foundation, Inc. and its contributors. 25 * 4. Neither the name of The NetBSD Foundation nor the names of its 26 * contributors may be used to endorse or promote products derived 27 * from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 31 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 32 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 33 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 36 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 37 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 39 * POSSIBILITY OF SUCH DAMAGE. 40 */ 41 42 /* 43 * Copyright (c) 1999 Stefan Grefen 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. All advertising materials mentioning features or use of this software 54 * must display the following acknowledgement: 55 * This product includes software developed by the NetBSD 56 * Foundation, Inc. and its contributors. 57 * 4. Neither the name of The NetBSD Foundation nor the names of its 58 * contributors may be used to endorse or promote products derived 59 * from this software without specific prior written permission. 60 * 61 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY 62 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 64 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 71 * SUCH DAMAGE. 72 */ 73 74 #include <sys/cdefs.h> 75 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.8 2008/01/16 02:08:49 dogcow Exp $"); 76 77 #include "opt_ddb.h" 78 #include "opt_multiprocessor.h" 79 #include "opt_mpbios.h" /* for MPDEBUG */ 80 #include "opt_mtrr.h" 81 #include "opt_xen.h" 82 83 #include "lapic.h" 84 #include "ioapic.h" 85 86 #include <sys/param.h> 87 #include <sys/proc.h> 88 #include <sys/user.h> 89 #include <sys/systm.h> 90 #include <sys/device.h> 91 #include <sys/malloc.h> 92 93 #include <uvm/uvm_extern.h> 94 95 #include <machine/cpu.h> 96 #include <machine/cpufunc.h> 97 #include <machine/cpuvar.h> 98 #include <machine/pmap.h> 99 #include <machine/vmparam.h> 100 #include <machine/mpbiosvar.h> 101 #include <machine/pcb.h> 102 #include <machine/specialreg.h> 103 #include <machine/segments.h> 104 #include <machine/gdt.h> 105 #include <machine/mtrr.h> 106 #include <machine/pio.h> 107 108 #ifdef XEN3 109 #include <xen/vcpuvar.h> 110 #endif 111 112 #if NLAPIC > 0 113 #include <machine/apicvar.h> 114 #include <machine/i82489reg.h> 115 #include <machine/i82489var.h> 116 #endif 117 118 #if NIOAPIC > 0 119 #include <machine/i82093var.h> 120 #endif 121 122 #include <dev/ic/mc146818reg.h> 123 #include <dev/isa/isareg.h> 124 125 int cpu_match(struct device *, struct cfdata *, void *); 126 void cpu_attach(struct device *, struct device *, void *); 127 #ifdef XEN3 128 int vcpu_match(struct device *, struct cfdata *, void *); 129 void 
vcpu_attach(struct device *, struct device *, void *);
#endif
void	cpu_attach_common(struct device *, struct device *, void *);
void	cpu_offline_md(void);

/* Per-device softc: glue between the autoconf device and its cpu_info. */
struct cpu_softc {
	struct device sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
};

int mp_cpu_start(struct cpu_info *, paddr_t);
void mp_cpu_start_cleanup(struct cpu_info *);
/* MD start/cleanup hooks handed to MI code via cpu_info (mid hook unused). */
const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
				      mp_cpu_start_cleanup };

CFATTACH_DECL(cpu, sizeof(struct cpu_softc),
    cpu_match, cpu_attach, NULL, NULL);
#ifdef XEN3
CFATTACH_DECL(vcpu, sizeof(struct cpu_softc),
    vcpu_match, vcpu_attach, NULL, NULL);
#endif

/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
#ifdef TRAPLOG
#include <machine/tlog.h>
struct tlog tlog_primary;
#endif
struct cpu_info cpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,		/* not inside any interrupt */
	.ci_curlwp = &lwp0,		/* boot lwp runs on the primary */
#ifdef TRAPLOG
	.ci_tlog = &tlog_primary,
#endif

};
/* Separate cpu_info for the *physical* primary CPU (XEN3 "cpu" device). */
struct cpu_info phycpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &phycpu_info_primary,
};

struct cpu_info *cpu_info_list = &cpu_info_primary;

static void	cpu_set_tss_gates(struct cpu_info *ci);

u_int32_t cpus_attached = 0;		/* bitmask of attached CPU ids */

/*
 * Table of physical-CPU info, indexed by APIC id.
 * NOTE(review): slot 0 is seeded with &cpu_info_primary, but cpu_attach()
 * uses &phycpu_info_primary for the non-AP case — looks inconsistent;
 * confirm which one is intended.
 */
struct cpu_info *phycpu_info[X86_MAXPROCS] = { &cpu_info_primary };

#ifdef MULTIPROCESSOR
/*
 * Array of CPU info structures.  Must be statically-allocated because
 * curproc, etc. are used early.
 */
struct cpu_info *cpu_info[X86_MAXPROCS] = { &cpu_info_primary };

u_int32_t cpus_running = 0;		/* bitmask of CPUs that reached CPUF_RUNNING */

void    	cpu_hatch(void *);
static void    	cpu_boot_secondary(struct cpu_info *ci);
static void    	cpu_start_secondary(struct cpu_info *ci);
static void	cpu_copy_trampoline(void);

/*
 * Runs once per boot once multiprocessor goo has been detected and
 * the local APIC on the boot processor has been mapped.
 *
 * Called from lapic_boot_init() (from mpbios_scan()).
 *
 * If the boot processor is not at APIC id 0, move the primary
 * cpu_info into its real slot, then stage the AP spin-up trampoline.
 */
void
cpu_init_first()
{
	int cpunum = lapic_cpu_number();

	if (cpunum != 0) {
		cpu_info[0] = NULL;
		cpu_info[cpunum] = &cpu_info_primary;
	}

	cpu_copy_trampoline();
}
#endif

/*
 * Autoconf match for the "cpu" device: always matches.
 */
int
cpu_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{

	return 1;
}

/*
 * Attach a physical CPU.  Under XEN3 this only records identity/role
 * bookkeeping for the physical CPU (the hypervisor owns the hardware);
 * otherwise it falls through to the full cpu_attach_common() path.
 */
void
cpu_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef XEN3
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	int cpunum = caa->cpu_number;

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
		if (phycpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		phycpu_info[cpunum] = ci;
	} else {
		ci = &phycpu_info_primary;
		/* Primary not at id 0: relocate it to its real slot. */
		if (cpunum != 0) {
			phycpu_info[0] = NULL;
			phycpu_info[cpunum] = ci;
		}
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
	ci->ci_cpuid = ci->ci_apicid;

	printf(": ");
	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		break;

	case CPU_ROLE_BP:
		printf("(boot processor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP; it is not started here.
		 */
		printf("(application processor)\n");
		break;

	default:
		panic("unknown processor type??\n");
	}
	return;
#else
	cpu_attach_common(parent, self, aux);
#endif
}

#ifdef XEN3
/*
 * Autoconf match for the "vcpu" (Xen virtual CPU) device:
 * match on the attach-args name.
 */
int
vcpu_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct vcpu_attach_args *vcaa = aux;

	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0)
		return 1;
	return 0;
}

/*
 * Attach a Xen virtual CPU: delegate to the common attach path with
 * the embedded cpu_attach_args.
 */
void
vcpu_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct vcpu_attach_args *vcaa = aux;

	cpu_attach_common(parent, self, &vcaa->vcaa_caa);
}
#endif

/*
 * Derive the number of page colors from the largest cache visible in
 * ci_cinfo and re-color UVM's free pages if it exceeds the current
 * setting.  Colors = pages-per-cache / associativity.
 */
static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		tcolors = atop(cai->cai_totalsize);
		switch(cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			/* unknown or direct-mapped: use page count as-is */
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
	}

	/*
	 * Knowing the size of the largest cache on this CPU, re-color
	 * our pages.
	 */
	if (ncolors <= uvmexp.ncolors)
		return;
	printf("%s: %d page colors\n", ci->ci_dev->dv_xname, ncolors);
	uvm_page_recolor(ncolors);
}

/*
 * Common CPU attach: set up the cpu_info (allocated for APs, static for
 * the primary), hook it to MI code, and initialize or spin up the CPU
 * according to its role (SP/BP/AP).
 */
void
cpu_attach_common(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = caa->cpu_number;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		/* Sanity: the attach must run on the CPU being attached. */
		if (cpunum != lapic_cpu_number()) {
			panic("%s: running CPU is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(), cpunum);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = ci->ci_apicid;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_cpumask = (1 << ci->ci_cpuid);
	ci->ci_func = caa->cpu_func;

	if (caa->cpu_role == CPU_ROLE_AP) {
#if defined(MULTIPROCESSOR)
		int error;

		/* Let MI code allocate the idle lwp etc. for this CPU. */
		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_normal("\n");
			aprint_error("%s: mi_cpu_attach failed with %d\n",
			    sc->sc_dev.dv_xname, error);
			return;
		}
#endif
	} else {
		/* Primary's idle lwp was set up before autoconfiguration. */
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	/* Every CPU starts out running on the kernel pmap. */
	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		cpu_set_tss_gates(ci);
		cpu_start_secondary(ci);
		/* Only link the AP into cpu_info_list if it came up. */
		if (ci->ci_flags & CPUF_PRESENT) {
			identifycpu(ci);
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??\n");
	}
	cpu_vm_init(ci);

	cpus_attached |= (1 << ci->ci_cpuid);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;

		aprint_verbose("%s: idle lwp at %p, idle sp at 0x%x\n",
		    sc->sc_dev.dv_xname, l, l->l_addr->u_pcb.pcb_esp);
	}
#endif
}

/*
 * Initialize the processor appropriately.
 * Runs on the CPU being initialized: applies the vendor setup hook and
 * enables CR4 features (PGE, FXSR, XMM exceptions) the CPU advertises,
 * then marks the CPU running.
 */

void
cpu_init(ci)
	struct cpu_info *ci;
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	/*
	 * On a P6 or above, enable global TLB caching if the
	 * hardware supports it.
	 */
	if (cpu_feature & CPUID_PGE)
		lcr4(rcr4() | CR4_PGE);	/* enable global TLB caching */

#ifdef XXXMTRR
	/*
	 * On a P6 or above, initialize MTRR's if the hardware supports them.
	 * (Disabled: XXXMTRR is never defined here.)
	 */
	if (cpu_feature & CPUID_MTRR) {
		if ((ci->ci_flags & CPUF_AP) == 0)
			i686_mtrr_init_first();
		mtrr_init_cpu(ci);
	}
#endif
	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2))
			lcr4(rcr4() | CR4_OSXMMEXCPT);
	}

#ifdef MULTIPROCESSOR
	/* NOTE(review): non-atomic updates of shared state; the boot
	 * protocol presumably serializes these — confirm. */
	ci->ci_flags |= CPUF_RUNNING;
	cpus_running |= 1 << ci->ci_cpuid;
#endif
}


#ifdef MULTIPROCESSOR
/*
 * Kick every attached, present secondary (non-primary) CPU into the
 * running state.  Called once from MI boot code.
 */
void
cpu_boot_secondary_processors()
{
	struct cpu_info *ci;
	u_long i;

	for (i=0; i < X86_MAXPROCS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		cpu_boot_secondary(ci);
	}
}

/* Record the current CR0 into the CPU's idle-lwp PCB. */
static void
cpu_init_idle_lwp(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	struct pcb *pcb = &l->l_addr->u_pcb;

	pcb->pcb_cr0 = rcr0();
}

/*
 * Initialize the idle-lwp PCB of every present CPU that has one.
 */
void
cpu_init_idle_lwps()
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < X86_MAXPROCS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		cpu_init_idle_lwp(ci);
	}
}

/*
 * Start an AP: publish the kernel page directory for the trampoline,
 * fire the MD startup hook (CPU_STARTUP), and spin-wait up to ~1s
 * (100000 x 10us) for the AP to raise CPUF_PRESENT in cpu_hatch().
 */
void
cpu_start_secondary (ci)
	struct cpu_info *ci;
{
	int i;
	struct pmap *kpm = pmap_kernel();
	extern u_int32_t mp_pdirpa;

	mp_pdirpa = kpm->pm_pdirpa; /* XXX move elsewhere, not per CPU. */

	ci->ci_flags |= CPUF_AP;

	printf("%s: starting\n", ci->ci_dev->dv_xname);

	ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
	CPU_STARTUP(ci);

	/*
	 * wait for it to become ready
	 */
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i>0;i--) {
		delay(10);
	}
	if (! (ci->ci_flags & CPUF_PRESENT)) {
		printf("%s: failed to become ready\n", ci->ci_dev->dv_xname);
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}

	CPU_START_CLEANUP(ci);
}

/*
 * Release a hatched AP into its idle loop by setting CPUF_GO, then
 * spin-wait up to ~1s for it to acknowledge with CPUF_RUNNING.
 */
void
cpu_boot_secondary(ci)
	struct cpu_info *ci;
{
	int i;

	ci->ci_flags |= CPUF_GO; /* XXX atomic */

	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i>0;i--) {
		delay(10);
	}
	if (! (ci->ci_flags & CPUF_RUNNING)) {
		printf("CPU failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}
}

/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * Sequence: probe features, announce CPUF_PRESENT, wait for CPUF_GO
 * from cpu_boot_secondary(), then finish per-CPU setup (IDT, GDT, FPU,
 * LDT, cpu_init) and enable interrupts.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;
#ifdef __x86_64__
	/* NOTE(review): called with one argument, but the definition
	 * below takes (ci, full) — confirm against the headers which
	 * prototype is in effect for this port. */
	cpu_init_msrs(ci);
#endif

	cpu_probe_features(ci);
	/* Global feature mask is the intersection across all CPUs... */
	cpu_feature &= ci->ci_feature_flags;
	/* ...minus features unusable under Xen. */
	cpu_feature &= ~(CPUID_PGE|CPUID_PSE|CPUID_MTRR|CPUID_FXSR|CPUID_NOX);

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	/* Tell cpu_start_secondary() we made it into C code. */
	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_initclocks();

	/* Park until cpu_boot_secondary() releases us. */
	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	lcr0(ci->ci_data.cpu_idlelwp->l_addr->u_pcb.pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	npxinit(ci);

	lldt(GSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);

	s = splhigh();
	lapic_tpr = 0;
	enable_intr();

	printf("%s: CPU %ld running\n",ci->ci_dev->dv_xname, ci->ci_cpuid);
	if (ci->ci_feature_flags & CPUID_TSC)
		cc_microset(ci);
	splx(s);
}

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		dev	id	flags	ipis	curproc		fpcurproc\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%s	%ld	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : ci->ci_dev->dv_xname,
		    ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curlwp,
		    ci->ci_fpcurlwp);
	}
}
#endif

/*
 * Copy the AP spin-up trampoline (from mptramp.s) to its fixed
 * low-memory location MP_TRAMPOLINE, mapping the page first.
 */
static void
cpu_copy_trampoline()
{
	/*
	 * Copy boot code.
	 */
	extern u_char cpu_spinup_trampoline[];
	extern u_char cpu_spinup_trampoline_end[];
	pmap_kenter_pa((vaddr_t)MP_TRAMPOLINE,	/* virtual */
	    (paddr_t)MP_TRAMPOLINE,	/* physical */
	    VM_PROT_ALL);		/* protection */
	memcpy((void *)MP_TRAMPOLINE,
	    cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end-cpu_spinup_trampoline);
}

#endif


/* XXX */
#define IDTVEC(name) __CONCAT(X, name)
typedef void (vector)(void);
extern vector IDTVEC(tss_trap08);
#ifdef DDB
extern vector Xintrddbipi;
extern int ddb_vec;
#endif

/*
 * Install per-CPU task gates.  Only the DDB-IPI task gate is set up
 * here (and only with DDB && MULTIPROCESSOR), giving the DDB IPI its
 * own TSS/stack so it survives a corrupted kernel stack.
 * NOTE(review): `sd` and `idt` are not declared in this file; they
 * must come from machine headers — confirm.
 */
static void
cpu_set_tss_gates(struct cpu_info *ci)
{
#if defined(DDB) && defined(MULTIPROCESSOR)
	/*
	 * Set up separate handler for the DDB IPI, so that it doesn't
	 * stomp on a possibly corrupted stack.
	 *
	 * XXX overwriting the gate set in db_machine_init.
	 * Should rearrange the code so that it's set only once.
	 */
	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
	    UVM_KMF_WIRED);
	tss_init(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack,
	    Xintrddbipi);

	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GIPITSS_SEL].sd = sd;

	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}

/*
 * MD AP start hook.  The native INIT/SIPI/SIPI sequence is compiled
 * out (#if 0) because the hypervisor starts vCPUs; always returns 0.
 */
int
mp_cpu_start(struct cpu_info *ci, paddr_t target)
{
#if 0
#if NLAPIC > 0
	int error;
#endif
	unsigned short dwordptr[2];

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = target >> 4;

	pmap_kenter_pa (0, 0, VM_PROT_READ|VM_PROT_WRITE);
	memcpy ((u_int8_t *) 0x467, dwordptr, 4);
	pmap_kremove (0, PAGE_SIZE);

#if NLAPIC > 0
	/*
	 * ... prior to executing the following sequence:"
	 */

	if (ci->ci_flags & CPUF_AP) {
		if ((error = x86_ipi_init(ci->ci_apicid)) != 0)
			return error;

		delay(10000);

		if (cpu_feature & CPUID_APIC) {

			if ((error = x86_ipi(target/PAGE_SIZE,
					     ci->ci_apicid,
					     LAPIC_DLMODE_STARTUP)) != 0)
				return error;
			delay(200);

			if ((error = x86_ipi(target/PAGE_SIZE,
					     ci->ci_apicid,
					     LAPIC_DLMODE_STARTUP)) != 0)
				return error;
			delay(200);
		}
	}
#endif
#endif /* 0 */
	return 0;
}

/*
 * MD AP start-cleanup hook.  NVRAM warm-reset restore is compiled out
 * (#if 0) for the same reason as mp_cpu_start(); no-op here.
 */
void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
#if 0
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
#endif
}

#ifdef __x86_64__

/*
 * Set up the segment-base MSR equivalents via Xen hypercalls: clear
 * FS/user-GS bases and point the kernel GS base at this CPU's cpu_info.
 * When `full` is false this does nothing.
 */
void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
	if (full) {
		HYPERVISOR_set_segment_base (SEGBASE_FS, 0);
		HYPERVISOR_set_segment_base (SEGBASE_GS_KERNEL, (u_int64_t) ci);
		HYPERVISOR_set_segment_base (SEGBASE_GS_USER, 0);
	}
}
#endif	/* __x86_64__ */

/*
 * Determine the TSC frequency.  Under XEN3, invert the hypervisor's
 * tsc_to_system_mul/tsc_shift scaling from vcpu 0's time info
 * (freq = 2^32 * 10^9 / mul, adjusted by the shift); otherwise read
 * cpu_freq from the shared info page.
 * NOTE(review): the delay(1000000) busy-waits a full second before
 * reading the (constant) scaling fields — purpose unclear; confirm
 * whether it is needed at all.
 * XXX should read the shared_info of the CPU being probed, not vcpu 0.
 */
void
cpu_get_tsc_freq(struct cpu_info *ci)
{
#ifdef XEN3
	const volatile vcpu_time_info_t *tinfo =
	    &HYPERVISOR_shared_info->vcpu_info[0].time;
	delay(1000000);
	uint64_t freq = 1000000000ULL << 32;
	freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
	if ( tinfo->tsc_shift < 0 )
		freq = freq << -tinfo->tsc_shift;
	else
		freq = freq >> tinfo->tsc_shift;
	ci->ci_tsc_freq = freq;
#else
	/* XXX this needs to read the shared_info of the CPU being probed.. */
	ci->ci_tsc_freq = HYPERVISOR_shared_info->cpu_freq;
#endif /* XEN3 */
}

/*
 * MD part of taking a CPU offline: save its FPU state with interrupts
 * blocked (npxsave_cpu on i386, fpusave_cpu on amd64).
 */
void
cpu_offline_md(void)
{
	int s;

	s = splhigh();
#ifdef __i386__
	npxsave_cpu(true);
#else
	fpusave_cpu(true);
#endif
	splx(s);
}