1 /* $NetBSD: cpu.c,v 1.4 2007/12/12 19:25:38 bouyer Exp $ */ 2 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */ 3 4 /*- 5 * Copyright (c) 2000 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by RedBack Networks Inc. 10 * 11 * Author: Bill Sommerfeld 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the NetBSD 24 * Foundation, Inc. and its contributors. 25 * 4. Neither the name of The NetBSD Foundation nor the names of its 26 * contributors may be used to endorse or promote products derived 27 * from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 31 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 32 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 33 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 36 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 37 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 39 * POSSIBILITY OF SUCH DAMAGE. 40 */ 41 42 /* 43 * Copyright (c) 1999 Stefan Grefen 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. All advertising materials mentioning features or use of this software 54 * must display the following acknowledgement: 55 * This product includes software developed by the NetBSD 56 * Foundation, Inc. and its contributors. 57 * 4. Neither the name of The NetBSD Foundation nor the names of its 58 * contributors may be used to endorse or promote products derived 59 * from this software without specific prior written permission. 60 * 61 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY 62 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 64 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 71 * SUCH DAMAGE. 72 */ 73 74 #include <sys/cdefs.h> 75 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.4 2007/12/12 19:25:38 bouyer Exp $"); 76 77 #include "opt_ddb.h" 78 #include "opt_multiprocessor.h" 79 #include "opt_mpbios.h" /* for MPDEBUG */ 80 #include "opt_mtrr.h" 81 #include "opt_xen.h" 82 83 #include "lapic.h" 84 #include "ioapic.h" 85 86 #include <sys/param.h> 87 #include <sys/proc.h> 88 #include <sys/user.h> 89 #include <sys/systm.h> 90 #include <sys/device.h> 91 #include <sys/malloc.h> 92 93 #include <uvm/uvm_extern.h> 94 95 #include <machine/cpu.h> 96 #include <machine/cpufunc.h> 97 #include <machine/cpuvar.h> 98 #include <machine/pmap.h> 99 #include <machine/vmparam.h> 100 #include <machine/mpbiosvar.h> 101 #include <machine/pcb.h> 102 #include <machine/specialreg.h> 103 #include <machine/segments.h> 104 #include <machine/gdt.h> 105 #include <machine/mtrr.h> 106 #include <machine/pio.h> 107 108 #ifdef XEN3 109 #include <xen/vcpuvar.h> 110 #endif 111 112 #if NLAPIC > 0 113 #include <machine/apicvar.h> 114 #include <machine/i82489reg.h> 115 #include <machine/i82489var.h> 116 #endif 117 118 #if NIOAPIC > 0 119 #include <machine/i82093var.h> 120 #endif 121 122 #include <dev/ic/mc146818reg.h> 123 #include <dev/isa/isareg.h> 124 125 int cpu_match(struct device *, struct cfdata *, void *); 126 void cpu_attach(struct device *, struct device *, void *); 127 #ifdef XEN3 128 int vcpu_match(struct device *, struct cfdata *, void *); 129 void 
vcpu_attach(struct device *, struct device *, void *); 130 #endif 131 void cpu_attach_common(struct device *, struct device *, void *); 132 133 struct cpu_softc { 134 struct device sc_dev; /* device tree glue */ 135 struct cpu_info *sc_info; /* pointer to CPU info */ 136 }; 137 138 int mp_cpu_start(struct cpu_info *); 139 void mp_cpu_start_cleanup(struct cpu_info *); 140 const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL, 141 mp_cpu_start_cleanup }; 142 143 CFATTACH_DECL(cpu, sizeof(struct cpu_softc), 144 cpu_match, cpu_attach, NULL, NULL); 145 #ifdef XEN3 146 CFATTACH_DECL(vcpu, sizeof(struct cpu_softc), 147 vcpu_match, vcpu_attach, NULL, NULL); 148 #endif 149 150 /* 151 * Statically-allocated CPU info for the primary CPU (or the only 152 * CPU, on uniprocessors). The CPU info list is initialized to 153 * point at it. 154 */ 155 #ifdef TRAPLOG 156 #include <machine/tlog.h> 157 struct tlog tlog_primary; 158 #endif 159 struct cpu_info cpu_info_primary = { 160 .ci_self = &cpu_info_primary, 161 #ifndef __x86_64__ 162 .ci_self150 = (uint8_t *)&cpu_info_primary + 0x150, 163 #endif 164 .ci_idepth = -1, 165 .ci_curlwp = &lwp0, 166 #ifdef TRAPLOG 167 .ci_tlog = &tlog_primary, 168 #endif 169 170 }; 171 struct cpu_info phycpu_info_primary = { 172 .ci_self = &phycpu_info_primary, 173 #ifndef __x86_64__ 174 .ci_self150 = (uint8_t *)&phycpu_info_primary + 0x150, 175 #endif 176 }; 177 178 struct cpu_info *cpu_info_list = &cpu_info_primary; 179 180 static void cpu_set_tss_gates(struct cpu_info *ci); 181 182 u_int32_t cpus_attached = 0; 183 184 struct cpu_info *phycpu_info[X86_MAXPROCS] = { &cpu_info_primary }; 185 186 #ifdef MULTIPROCESSOR 187 /* 188 * Array of CPU info structures. Must be statically-allocated because 189 * curproc, etc. are used early. 
 */
struct cpu_info *cpu_info[X86_MAXPROCS] = { &cpu_info_primary };

u_int32_t cpus_running = 0;

void	cpu_hatch(void *);
static void	cpu_boot_secondary(struct cpu_info *ci);
static void	cpu_start_secondary(struct cpu_info *ci);
static void	cpu_copy_trampoline(void);

/*
 * Runs once per boot once multiprocessor goo has been detected and
 * the local APIC on the boot processor has been mapped.
 *
 * Called from lapic_boot_init() (from mpbios_scan()).
 */
void
cpu_init_first()
{
	int cpunum = lapic_cpu_number();

	/*
	 * If the boot processor is not in slot 0, move its statically
	 * allocated info to the slot matching its local APIC id.
	 */
	if (cpunum != 0) {
		cpu_info[0] = NULL;
		cpu_info[cpunum] = &cpu_info_primary;
	}

	cpu_copy_trampoline();
}
#endif

/*
 * Autoconf match for "cpu": always matches.
 */
int
cpu_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{

	return 1;
}

/*
 * Attach a physical CPU.  Under XEN3 this only records the physical
 * CPU's identity and role (the hypervisor owns the real CPUs); without
 * XEN3 it falls through to the common attach path.
 */
void
cpu_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef XEN3
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
	int cpunum = caa->cpu_number;

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
		if (phycpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		phycpu_info[cpunum] = ci;
	} else {
		ci = &phycpu_info_primary;
		/* Primary not in slot 0: relocate it (cf. cpu_init_first). */
		if (cpunum != 0) {
			phycpu_info[0] = NULL;
			phycpu_info[cpunum] = ci;
		}
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
	ci->ci_cpuid = ci->ci_apicid;

	printf(": ");
	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		break;

	case CPU_ROLE_BP:
		printf("(boot processor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("(application processor)\n");
		break;

	default:
		panic("unknown processor type??\n");
	}
	return;
#else
	cpu_attach_common(parent, self, aux);
#endif
}

#ifdef XEN3
/*
 * Autoconf match for "vcpu" (Xen virtual CPU): match on device name.
 */
int
vcpu_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct vcpu_attach_args *vcaa = aux;

	if (strcmp(vcaa->vcaa_name, match->cf_name) == 0)
		return 1;
	return 0;
}

/*
 * Attach a Xen virtual CPU via the common attach path.
 */
void
vcpu_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct vcpu_attach_args *vcaa = aux;

	cpu_attach_common(parent, self, &vcaa->vcaa_caa);
}
#endif

/*
 * Derive the number of page colors from the largest cache on this CPU
 * and re-color UVM's free pages if it exceeds the current color count.
 */
static void
cpu_vm_init(struct cpu_info *ci)
{
	int ncolors = 2, i;

	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
		struct x86_cache_info *cai;
		int tcolors;

		cai = &ci->ci_cinfo[i];

		/* colors = cache pages per way */
		tcolors = atop(cai->cai_totalsize);
		switch(cai->cai_associativity) {
		case 0xff:
			tcolors = 1; /* fully associative */
			break;
		case 0:
		case 1:
			/* unknown or direct-mapped: keep tcolors as-is */
			break;
		default:
			tcolors /= cai->cai_associativity;
		}
		ncolors = max(ncolors, tcolors);
	}

	/*
	 * Knowing the size of the largest cache on this CPU, re-color
	 * our pages.
	 */
	if (ncolors <= uvmexp.ncolors)
		return;
	printf("%s: %d page colors\n", ci->ci_dev->dv_xname, ncolors);
	uvm_page_recolor(ncolors);
}

/*
 * Common CPU attach: set up the cpu_info for a (virtual) CPU, hook it
 * into the kernel data structures, and initialize or start it depending
 * on its role (single/boot/application processor).
 */
void
cpu_attach_common(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = caa->cpu_number;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		/* The BP/SP attach runs on the CPU being attached. */
		if (cpunum != lapic_cpu_number()) {
			panic("%s: running CPU is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(), cpunum);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = ci->ci_apicid;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_cpumask = (1 << ci->ci_cpuid);
	ci->ci_func = caa->cpu_func;

#ifndef __x86_64__
	simple_lock_init(&ci->ci_slock);
#endif

	if (caa->cpu_role == CPU_ROLE_AP) {
#if defined(MULTIPROCESSOR)
		int error;

		/* MI attach allocates the idle lwp, among other things. */
		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_normal("\n");
			aprint_error("%s: mi_cpu_attach failed with %d\n",
			    sc->sc_dev.dv_xname, error);
			return;
		}
#endif
	} else {
		KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	}

	pmap_reference(pmap_kernel());
	ci->ci_pmap = pmap_kernel();
	ci->ci_tlbstate = TLBSTATE_STALE;

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		cpu_set_tss_gates(ci);
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		cpu_set_tss_gates(ci);
		cpu_start_secondary(ci);
		/* Insert after the list head (identifycpu needs the AP up). */
		if (ci->ci_flags & CPUF_PRESENT) {
			identifycpu(ci);
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??\n");
	}
	cpu_vm_init(ci);

	cpus_attached |= (1 << ci->ci_cpuid);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		struct lwp *l = ci->ci_data.cpu_idlelwp;

		aprint_verbose("%s: idle lwp at %p, idle sp at 0x%x\n",
		    sc->sc_dev.dv_xname, l, l->l_addr->u_pcb.pcb_esp);
	}
#endif
}

/*
 * Initialize the processor appropriately.
494 */ 495 496 void 497 cpu_init(ci) 498 struct cpu_info *ci; 499 { 500 /* configure the CPU if needed */ 501 if (ci->cpu_setup != NULL) 502 (*ci->cpu_setup)(ci); 503 504 /* 505 * On a P6 or above, enable global TLB caching if the 506 * hardware supports it. 507 */ 508 if (cpu_feature & CPUID_PGE) 509 lcr4(rcr4() | CR4_PGE); /* enable global TLB caching */ 510 511 #ifdef XXXMTRR 512 /* 513 * On a P6 or above, initialize MTRR's if the hardware supports them. 514 */ 515 if (cpu_feature & CPUID_MTRR) { 516 if ((ci->ci_flags & CPUF_AP) == 0) 517 i686_mtrr_init_first(); 518 mtrr_init_cpu(ci); 519 } 520 #endif 521 /* 522 * If we have FXSAVE/FXRESTOR, use them. 523 */ 524 if (cpu_feature & CPUID_FXSR) { 525 lcr4(rcr4() | CR4_OSFXSR); 526 527 /* 528 * If we have SSE/SSE2, enable XMM exceptions. 529 */ 530 if (cpu_feature & (CPUID_SSE|CPUID_SSE2)) 531 lcr4(rcr4() | CR4_OSXMMEXCPT); 532 } 533 534 #ifdef MULTIPROCESSOR 535 ci->ci_flags |= CPUF_RUNNING; 536 cpus_running |= 1 << ci->ci_cpuid; 537 #endif 538 } 539 540 541 #ifdef MULTIPROCESSOR 542 void 543 cpu_boot_secondary_processors() 544 { 545 struct cpu_info *ci; 546 u_long i; 547 548 for (i=0; i < X86_MAXPROCS; i++) { 549 ci = cpu_info[i]; 550 if (ci == NULL) 551 continue; 552 if (ci->ci_data.cpu_idlelwp == NULL) 553 continue; 554 if ((ci->ci_flags & CPUF_PRESENT) == 0) 555 continue; 556 if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) 557 continue; 558 cpu_boot_secondary(ci); 559 } 560 } 561 562 static void 563 cpu_init_idle_lwp(struct cpu_info *ci) 564 { 565 struct lwp *l = ci->ci_data.cpu_idlelwp; 566 struct pcb *pcb = &l->l_addr->u_pcb; 567 568 pcb->pcb_cr0 = rcr0(); 569 } 570 571 void 572 cpu_init_idle_lwps() 573 { 574 struct cpu_info *ci; 575 u_long i; 576 577 for (i = 0; i < X86_MAXPROCS; i++) { 578 ci = cpu_info[i]; 579 if (ci == NULL) 580 continue; 581 if (ci->ci_data.cpu_idlelwp == NULL) 582 continue; 583 if ((ci->ci_flags & CPUF_PRESENT) == 0) 584 continue; 585 cpu_init_idle_lwp(ci); 586 } 587 } 588 589 void 
590 cpu_start_secondary (ci) 591 struct cpu_info *ci; 592 { 593 int i; 594 struct pmap *kpm = pmap_kernel(); 595 extern u_int32_t mp_pdirpa; 596 597 mp_pdirpa = kpm->pm_pdirpa; /* XXX move elsewhere, not per CPU. */ 598 599 ci->ci_flags |= CPUF_AP; 600 601 printf("%s: starting\n", ci->ci_dev->dv_xname); 602 603 ci->ci_curlwp = ci->ci_data.cpu_idlelwp; 604 CPU_STARTUP(ci); 605 606 /* 607 * wait for it to become ready 608 */ 609 for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i>0;i--) { 610 delay(10); 611 } 612 if (! (ci->ci_flags & CPUF_PRESENT)) { 613 printf("%s: failed to become ready\n", ci->ci_dev->dv_xname); 614 #if defined(MPDEBUG) && defined(DDB) 615 printf("dropping into debugger; continue from here to resume boot\n"); 616 Debugger(); 617 #endif 618 } 619 620 CPU_START_CLEANUP(ci); 621 } 622 623 void 624 cpu_boot_secondary(ci) 625 struct cpu_info *ci; 626 { 627 int i; 628 629 ci->ci_flags |= CPUF_GO; /* XXX atomic */ 630 631 for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i>0;i--) { 632 delay(10); 633 } 634 if (! (ci->ci_flags & CPUF_RUNNING)) { 635 printf("CPU failed to start\n"); 636 #if defined(MPDEBUG) && defined(DDB) 637 printf("dropping into debugger; continue from here to resume boot\n"); 638 Debugger(); 639 #endif 640 } 641 } 642 643 /* 644 * The CPU ends up here when its ready to run 645 * This is called from code in mptramp.s; at this point, we are running 646 * in the idle pcb/idle stack of the new CPU. When this function returns, 647 * this processor will enter the idle loop and start looking for work. 648 * 649 * XXX should share some of this with init386 in machdep.c 650 */ 651 void 652 cpu_hatch(void *v) 653 { 654 struct cpu_info *ci = (struct cpu_info *)v; 655 int s; 656 #ifdef __x86_64__ 657 cpu_init_msrs(ci); 658 #endif 659 660 cpu_probe_features(ci); 661 cpu_feature &= ci->ci_feature_flags; 662 /* not on Xen... 
*/ 663 cpu_feature &= ~(CPUID_PGE|CPUID_PSE|CPUID_MTRR|CPUID_FXSR|CPUID_NOX); 664 665 #ifdef DEBUG 666 if (ci->ci_flags & CPUF_PRESENT) 667 panic("%s: already running!?", ci->ci_dev->dv_xname); 668 #endif 669 670 ci->ci_flags |= CPUF_PRESENT; 671 672 lapic_enable(); 673 lapic_initclocks(); 674 675 while ((ci->ci_flags & CPUF_GO) == 0) 676 delay(10); 677 #ifdef DEBUG 678 if (ci->ci_flags & CPUF_RUNNING) 679 panic("%s: already running!?", ci->ci_dev->dv_xname); 680 #endif 681 682 lcr0(ci->ci_data.cpu_idlelwp->l_addr->u_pcb.pcb_cr0); 683 cpu_init_idt(); 684 lapic_set_lvt(); 685 gdt_init_cpu(ci); 686 npxinit(ci); 687 688 lldt(GSEL(GLDT_SEL, SEL_KPL)); 689 690 cpu_init(ci); 691 692 s = splhigh(); 693 lapic_tpr = 0; 694 enable_intr(); 695 696 printf("%s: CPU %ld running\n",ci->ci_dev->dv_xname, ci->ci_cpuid); 697 if (ci->ci_feature_flags & CPUID_TSC) 698 cc_microset(ci); 699 splx(s); 700 } 701 702 #if defined(DDB) 703 704 #include <ddb/db_output.h> 705 #include <machine/db_machdep.h> 706 707 /* 708 * Dump CPU information from ddb. 709 */ 710 void 711 cpu_debug_dump(void) 712 { 713 struct cpu_info *ci; 714 CPU_INFO_ITERATOR cii; 715 716 db_printf("addr dev id flags ipis curproc fpcurproc\n"); 717 for (CPU_INFO_FOREACH(cii, ci)) { 718 db_printf("%p %s %ld %x %x %10p %10p\n", 719 ci, 720 ci->ci_dev == NULL ? "BOOT" : ci->ci_dev->dv_xname, 721 ci->ci_cpuid, 722 ci->ci_flags, ci->ci_ipis, 723 ci->ci_curlwp, 724 ci->ci_fpcurlwp); 725 } 726 } 727 #endif 728 729 static void 730 cpu_copy_trampoline() 731 { 732 /* 733 * Copy boot code. 
734 */ 735 extern u_char cpu_spinup_trampoline[]; 736 extern u_char cpu_spinup_trampoline_end[]; 737 pmap_kenter_pa((vaddr_t)MP_TRAMPOLINE, /* virtual */ 738 (paddr_t)MP_TRAMPOLINE, /* physical */ 739 VM_PROT_ALL); /* protection */ 740 memcpy((void *)MP_TRAMPOLINE, 741 cpu_spinup_trampoline, 742 cpu_spinup_trampoline_end-cpu_spinup_trampoline); 743 } 744 745 #endif 746 747 748 /* XXX */ 749 #define IDTVEC(name) __CONCAT(X, name) 750 typedef void (vector)(void); 751 extern vector IDTVEC(tss_trap08); 752 #ifdef DDB 753 extern vector Xintrddbipi; 754 extern int ddb_vec; 755 #endif 756 757 static void 758 cpu_set_tss_gates(struct cpu_info *ci) 759 { 760 #if defined(DDB) && defined(MULTIPROCESSOR) 761 /* 762 * Set up separate handler for the DDB IPI, so that it doesn't 763 * stomp on a possibly corrupted stack. 764 * 765 * XXX overwriting the gate set in db_machine_init. 766 * Should rearrange the code so that it's set only once. 767 */ 768 ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0, 769 UVM_KMF_WIRED); 770 cpu_init_tss(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack, 771 Xintrddbipi); 772 773 setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1, 774 SDT_SYS386TSS, SEL_KPL, 0, 0); 775 ci->ci_gdt[GIPITSS_SEL].sd = sd; 776 777 setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL, 778 GSEL(GIPITSS_SEL, SEL_KPL)); 779 #endif 780 } 781 782 int 783 mp_cpu_start(struct cpu_info *ci) 784 { 785 #if 0 786 #if NLAPIC > 0 787 int error; 788 #endif 789 unsigned short dwordptr[2]; 790 791 /* 792 * "The BSP must initialize CMOS shutdown code to 0Ah ..." 793 */ 794 795 outb(IO_RTC, NVRAM_RESET); 796 outb(IO_RTC+1, NVRAM_RESET_JUMP); 797 798 /* 799 * "and the warm reset vector (DWORD based at 40:67) to point 800 * to the AP startup code ..." 
801 */ 802 803 dwordptr[0] = 0; 804 dwordptr[1] = MP_TRAMPOLINE >> 4; 805 806 pmap_kenter_pa (0, 0, VM_PROT_READ|VM_PROT_WRITE); 807 memcpy ((u_int8_t *) 0x467, dwordptr, 4); 808 pmap_kremove (0, PAGE_SIZE); 809 810 #if NLAPIC > 0 811 /* 812 * ... prior to executing the following sequence:" 813 */ 814 815 if (ci->ci_flags & CPUF_AP) { 816 if ((error = x86_ipi_init(ci->ci_apicid)) != 0) 817 return error; 818 819 delay(10000); 820 821 if (cpu_feature & CPUID_APIC) { 822 823 if ((error = x86_ipi(MP_TRAMPOLINE/PAGE_SIZE, 824 ci->ci_apicid, 825 LAPIC_DLMODE_STARTUP)) != 0) 826 return error; 827 delay(200); 828 829 if ((error = x86_ipi(MP_TRAMPOLINE/PAGE_SIZE, 830 ci->ci_apicid, 831 LAPIC_DLMODE_STARTUP)) != 0) 832 return error; 833 delay(200); 834 } 835 } 836 #endif 837 #endif /* 0 */ 838 return 0; 839 } 840 841 void 842 mp_cpu_start_cleanup(struct cpu_info *ci) 843 { 844 #if 0 845 /* 846 * Ensure the NVRAM reset byte contains something vaguely sane. 847 */ 848 849 outb(IO_RTC, NVRAM_RESET); 850 outb(IO_RTC+1, NVRAM_RESET_RST); 851 #endif 852 } 853 854 #ifdef __x86_64__ 855 856 void 857 cpu_init_msrs(struct cpu_info *ci, bool full) 858 { 859 if (full) { 860 HYPERVISOR_set_segment_base (SEGBASE_FS, 0); 861 HYPERVISOR_set_segment_base (SEGBASE_GS_KERNEL, (u_int64_t) ci); 862 HYPERVISOR_set_segment_base (SEGBASE_GS_USER, 0); 863 } 864 } 865 #endif /* __x86_64__ */ 866 867 void 868 cpu_get_tsc_freq(struct cpu_info *ci) 869 { 870 #ifdef XEN3 871 const volatile vcpu_time_info_t *tinfo = 872 &HYPERVISOR_shared_info->vcpu_info[0].time; 873 delay(1000000); 874 uint64_t freq = 1000000000ULL << 32; 875 freq = freq / (uint64_t)tinfo->tsc_to_system_mul; 876 if ( tinfo->tsc_shift < 0 ) 877 freq = freq << -tinfo->tsc_shift; 878 else 879 freq = freq >> tinfo->tsc_shift; 880 ci->ci_tsc_freq = freq; 881 #else 882 /* XXX this needs to read the shared_info of the CPU being probed.. */ 883 ci->ci_tsc_freq = HYPERVISOR_shared_info->cpu_freq; 884 #endif /* XEN3 */ 885 } 886