/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/machintr.h>
#include <machine/globaldata.h>
#include <machine/smp.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine_base/apic/apicvar.h>
#include <machine_base/icu/icu_var.h>
#include <machine/segments.h>
#include <sys/thread2.h>

#include <machine/cputypes.h>
#include <machine/intr_machdep.h>

extern int naps;

volatile lapic_t *lapic;

static void	lapic_timer_calibrate(void);
static void	lapic_timer_set_divisor(int);
static void	lapic_timer_fixup_handler(void *);
static void	lapic_timer_restart_handler(void *);

static int	lapic_timer_enable = 1;
TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);

static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
static void	lapic_timer_intr_enable(struct cputimer_intr *);
static void	lapic_timer_intr_restart(struct cputimer_intr *);
static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);

static struct cputimer_intr lapic_cputimer_intr = {
	.freq = 0,
	.reload = lapic_timer_intr_reload,
	.enable = lapic_timer_intr_enable,
	.config = cputimer_intr_default_config,
	.restart = lapic_timer_intr_restart,
	.pmfixup = lapic_timer_intr_pmfixup,
	.initclock = cputimer_intr_default_initclock,
	.pcpuhand = NULL,
	.next = SLIST_ENTRY_INITIALIZER,
	.name = "lapic",
	.type = CPUTIMER_INTR_LAPIC,
	.prio = CPUTIMER_INTR_PRIO_LAPIC,
	.caps = CPUTIMER_INTR_CAP_NONE,
	.priv = NULL
};

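/*
 * Divide configurations for the LAPIC timer, listed in the order the
 * calibration loop tries them: smallest divisor first, divide-by-1
 * last.  lapic_timer_divisor_idx records the entry that calibration
 * finally settled on.
 */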
static int lapic_timer_divisor_idx = -1;
static const uint32_t lapic_timer_divisors[] = {
	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
};
#define APIC_TIMER_NDIVISORS	(int)(NELEM(lapic_timer_divisors))

/*
 * APIC ID <-> CPU ID mapping structures.
 */
int	cpu_id_to_apic_id[NAPICID];
int	apic_id_to_cpu_id[NAPICID];
int	lapic_enable = 1;

/*
 * Enable LAPIC, configure interrupts.
 */
void
lapic_init(boolean_t bsp)
{
	uint32_t timer;
	u_int temp;

	/*
	 * Install vectors
	 *
	 * Since IDT is shared between BSP and APs, these vectors
	 * only need to be installed once; we do it on BSP.
	 */
	if (bsp) {
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) >= 0xf) {
			uint32_t tcr;

			/*
			 * Set the LINTEN bit in the HyperTransport
			 * Transaction Control Register.
			 *
			 * This will cause EXTINT and NMI interrupts
			 * routed over the hypertransport bus to be
			 * fed into the LAPIC LINT0/LINT1.  If the bit
			 * isn't set, the interrupts will go to the
			 * general cpu INTR/NMI pins.  On a dual-core
			 * cpu the interrupt winds up going to BOTH cpus.
			 * The first cpu that does the interrupt ack
			 * cycle will get the correct interrupt.  The
			 * second cpu that does it will get a spurious
			 * interrupt vector (typically IRQ 7).
			 */
			outl(0x0cf8,
			    (1 << 31) |		/* enable */
			    (0 << 16) |		/* bus */
			    (0x18 << 11) |	/* dev (cpu + 0x18) */
			    (0 << 8) |		/* func */
			    0x68		/* reg */
			    );
			tcr = inl(0xcfc);
			if ((tcr & 0x00010000) == 0) {
				kprintf("LAPIC: AMD LINTEN on\n");
				outl(0xcfc, tcr|0x00010000);
			}
			outl(0x0cf8, 0);
		}

		/* Install a 'Spurious INTerrupt' vector */
		setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install a timer vector */
		setidt_global(XTIMER_OFFSET, Xtimer,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for TLB invalidation */
		setidt_global(XINVLTLB_OFFSET, Xinvltlb,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for IPIQ messaging */
		setidt_global(XIPIQ_OFFSET, Xipiq,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for CPU stop/restart */
		setidt_global(XCPUSTOP_OFFSET, Xcpustop,
		    SDT_SYSIGT, SEL_KPL, 0);
	}

	/*
	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
	 * aggregate interrupt input from the 8259.  The INTA cycle
	 * will be routed to the external controller (the 8259) which
	 * is expected to supply the vector.
	 *
	 * Must be setup edge triggered, active high.
	 *
	 * Disable LINT0 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT0 on the APs.  It doesn't matter what delivery
	 * mode we use because we leave it masked.
	 */
	temp = lapic->lvt_lint0;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	if (bsp) {
		temp |= APIC_LVT_DM_EXTINT;
		if (ioapic_enable)
			temp |= APIC_LVT_MASKED;
	} else {
		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
	}
	lapic->lvt_lint0 = temp;

	/*
	 * Setup LINT1 as NMI.
	 *
	 * Must be setup edge triggered, active high.
	 *
	 * Enable LINT1 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT1 on the APs.
	 */
	temp = lapic->lvt_lint1;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
	if (bsp && ioapic_enable)
		temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;

	/*
	 * Mask the LAPIC error interrupt and the LAPIC performance
	 * counter interrupt.
	 */
	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;

	/*
	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
	 */
	timer = lapic->lvt_timer;
	timer &= ~APIC_LVTT_VECTOR;
	timer |= XTIMER_OFFSET;
	timer |= APIC_LVTT_MASKED;
	lapic->lvt_timer = timer;

	/*
	 * Set the Task Priority Register as needed.  At the moment allow
	 * interrupts on all cpus (the APs will remain CLId until they are
	 * ready to deal).
	 */
	temp = lapic->tpr;
	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
	lapic->tpr = temp;

	/*
	 * Enable the LAPIC
	 */
	temp = lapic->svr;
	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */

	/*
	 * Set the spurious interrupt vector.  The low 4 bits of the vector
	 * must be 1111.
	 */
	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
	temp &= ~APIC_SVR_VECTOR;
	temp |= XSPURIOUSINT_OFFSET;

	lapic->svr = temp;

	/*
	 * Pump out a few EOIs to clean out interrupts that got through
	 * before we were able to set the TPR.
	 */
	lapic->eoi = 0;
	lapic->eoi = 0;
	lapic->eoi = 0;

	if (bsp) {
		lapic_timer_calibrate();
		if (lapic_timer_enable) {
			if (cpu_thermal_feature & CPUID_THERMAL_ARAT) {
				/*
				 * Local APIC timer will not stop
				 * in deep C-state.
				 */
				lapic_cputimer_intr.caps |=
				    CPUTIMER_INTR_CAP_PS;
			}
			cputimer_intr_register(&lapic_cputimer_intr);
			cputimer_intr_select(&lapic_cputimer_intr, 0);
		}
	} else {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
	}

	if (bootverbose)
		apic_dump("lapic_init()");
}

static void
lapic_timer_set_divisor(int divisor_idx)
{
	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
}

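/*
 * Arm a one-shot countdown.  lapic_timer_oneshot() clears the periodic
 * bit in the LVT timer entry before writing the initial count;
 * lapic_timer_oneshot_quick() assumes the LVT entry is already set up
 * for one-shot operation and only reloads the count.
 */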
static void
lapic_timer_oneshot(u_int count)
{
	uint32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVTT_PERIODIC;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}

static void
lapic_timer_oneshot_quick(u_int count)
{
	lapic->icr_timer = count;
}

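/*
 * Calibrate the LAPIC timer: for each divisor, program a one-shot with
 * the maximum initial count, busy-wait two seconds and read back the
 * current-count register.  If the counter reached zero the divisor is
 * too small and the next one is tried; otherwise the count consumed
 * over those two seconds gives the timer frequency (elapsed / 2 ticks
 * per second).
 */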
static void
lapic_timer_calibrate(void)
{
	sysclock_t value;

	/* Try to calibrate the local APIC timer. */
	for (lapic_timer_divisor_idx = 0;
	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
	     lapic_timer_divisor_idx++) {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
		DELAY(2000000);
		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
		if (value != APIC_TIMER_MAX_COUNT)
			break;
	}
	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
		panic("lapic: no proper timer divisor?!");
	lapic_cputimer_intr.freq = value / 2;

	kprintf("lapic: divisor index %d, frequency %u Hz\n",
		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
}

static void
lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
{
	struct globaldata *gd = mycpu;

	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
	if (reload < 2)
		reload = 2;

	if (gd->gd_timer_running) {
		if (reload < lapic->ccr_timer)
			lapic_timer_oneshot_quick(reload);
	} else {
		gd->gd_timer_running = 1;
		lapic_timer_oneshot_quick(reload);
	}
}

static void
lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
{
	uint32_t timer;

	timer = lapic->lvt_timer;
	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
	lapic->lvt_timer = timer;

	lapic_timer_fixup_handler(NULL);
}

static void
lapic_timer_fixup_handler(void *arg)
{
	int *started = arg;

	if (started != NULL)
		*started = 0;

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		/*
		 * Detect the presence of C1E capability mostly on latest
		 * dual-core (or future) k8 family.  This feature renders
		 * the local APIC timer dead, so we disable it by reading
		 * the Interrupt Pending Message register and clearing both
		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
		 *
		 * Reference:
		 *   "BIOS and Kernel Developer's Guide for AMD NPT
		 *    Family 0Fh Processors"
		 *   #32559 revision 3.00
		 */
		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
		    (cpu_id & 0x0fff0000) >= 0x00040000) {
			uint64_t msr;

			msr = rdmsr(0xc0010055);
			if (msr & 0x18000000) {
				struct globaldata *gd = mycpu;

				kprintf("cpu%d: AMD C1E detected\n",
					gd->gd_cpuid);
				wrmsr(0xc0010055, msr & ~0x18000000ULL);

				/*
				 * We are kinda stalled;
				 * kick start again.
				 */
				gd->gd_timer_running = 1;
				lapic_timer_oneshot_quick(2);

				if (started != NULL)
					*started = 1;
			}
		}
	}
}

static void
lapic_timer_restart_handler(void *dummy __unused)
{
	int started;

	lapic_timer_fixup_handler(&started);
	if (!started) {
		struct globaldata *gd = mycpu;

		gd->gd_timer_running = 1;
		lapic_timer_oneshot_quick(2);
	}
}

/*
 * This function is currently called only by ACPICA code:
 * - AMD C1E fixup.  AMD C1E only seems to kick in after the ACPI
 *   module takes control of power management, so once ACPICA is
 *   attached we try to apply the fixup to keep the LAPIC timer
 *   from hanging.
 */
static void
lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
{
	lwkt_send_ipiq_mask(smp_active_mask,
			    lapic_timer_fixup_handler, NULL);
}

static void
lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
{
	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
}

/*
 * dump contents of local APIC registers
 */
void
apic_dump(char* str)
{
	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
	kprintf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
}

/*
 * Inter Processor Interrupt functions.
 */

/*
 * Send APIC IPI 'vector' to 'dest_type' via 'delivery_mode'.
 *
 *  dest_type is one of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
 *  vector is any valid SYSTEM INT vector
 *  delivery_mode is one of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
 *
 * WARNINGS!
 *
 * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
 * one IPI from being sent to any given cpu at a time.  Thus we no longer
 * have to process incoming IPIs while waiting for the status to clear.
 * No deadlock should be possible.
 *
 * We now physically disable interrupts for the lapic ICR operation.  If
 * we do not do this then it looks like an EOI sent to the lapic (which
 * occurs even with a critical section) can interfere with the command
 * register ready status and cause an IPI to be lost.
 *
 * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
 * register to go busy just before we write to icr_lo, resulting in a lost
 * issuance.  This only appears to occur on Intel cpus and is not
 * documented.  It could simply be that cpus are so fast these days that
 * it was always an issue, but is only now rearing its ugly head.  This
 * is conjecture.
 */
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
	unsigned long rflags;
	u_long icr_lo;
	int loops = 1;

	rflags = read_rflags();
	cpu_disable_intr();
	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		cpu_pause();
		if (++loops == 10000000)
			kprintf("apic_ipi stall cpu %d\n", mycpuid);
	}
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
		 delivery_mode | vector;
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);

	return 0;
}

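/*
 * Send an IPI to a single target cpu.  The target's APIC ID is written
 * into the destination field of ICR_HI (bits 24-31); writing ICR_LO
 * with APIC_DEST_DESTFLD (no shorthand), the delivery mode and the
 * vector then triggers the send.
 */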
void
single_apic_ipi(int cpu, int vector, int delivery_mode)
{
	unsigned long rflags;
	u_long icr_lo;
	u_long icr_hi;
	int loops = 1;

	rflags = read_rflags();
	cpu_disable_intr();
	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		cpu_pause();
		if (++loops == 10000000)
			kprintf("apic_ipi stall cpu %d (sing)\n", mycpuid);
	}
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
		 APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);
}

#if 0

/*
 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
 *
 * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
 * to the target, and the scheduler does not 'poll' for IPI messages.
 */
int
single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
{
	u_long icr_lo;
	u_long icr_hi;
	unsigned long rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		write_rflags(rflags);
		return(0);
	}
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK) |
		 APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);

	return(1);
}

#endif

/*
 * Send APIC IPI 'vector' to the cpus specified by 'target' via
 * 'delivery_mode'.
 *
 * target is a bitmask of destination cpus.  Vector is any
 * valid system INT vector.  Delivery mode may be either
 * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
 */
void
selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_apic_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

/*
 * Timer code, in development...
 *  - suggested by rgrimes@gndrsh.aac.dev.com
 */
int
get_apic_timer_frequency(void)
{
	return(lapic_cputimer_intr.freq);
}

/*
 * Load a 'downcount time' in uSeconds.
 */
void
set_apic_timer(int us)
{
	u_int count;

	/*
	 * When we reach here, lapic timer's frequency
	 * must have been calculated as well as the
	 * divisor (lapic->dcr_timer is setup during the
	 * divisor calculation).
	 */
	KKASSERT(lapic_cputimer_intr.freq != 0 &&
		 lapic_timer_divisor_idx >= 0);

	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
	lapic_timer_oneshot(count);
}

/*
 * Read remaining time in timer, in timer counts.
 *
 * XXX FIXME: we should return the actual remaining time; for now
 * we just return the remaining count.
 */
int
read_apic_timer(void)
{
	return lapic->ccr_timer;
}

/*
 * Spin-style delay, set delay time in uS, spin till it drains.
 */
void
u_sleep(int count)
{
	set_apic_timer(count);
	while (read_apic_timer())
		/* spin */ ;
}

int
lapic_unused_apic_id(int start)
{
	int i;

	for (i = start; i < APICID_MAX; ++i) {
		if (APICID_TO_CPUID(i) == -1)
			return i;
	}
	return NAPICID;
}

void
lapic_map(vm_paddr_t lapic_addr)
{
	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
}

static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
	TAILQ_HEAD_INITIALIZER(lapic_enumerators);

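/*
 * Probe the registered LAPIC enumerators in priority order (the list
 * is kept sorted, highest priority first) and let the first one whose
 * probe succeeds enumerate the cpus.  The number of usable APs is then
 * clamped by the hw.ap_max tunable and MAXCPU.
 */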
int
lapic_config(void)
{
	struct lapic_enumerator *e;
	int error, i, ap_max;

	KKASSERT(lapic_enable);

	for (i = 0; i < NAPICID; ++i)
		APICID_TO_CPUID(i) = -1;

	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
		error = e->lapic_probe(e);
		if (!error)
			break;
	}
	if (e == NULL) {
		kprintf("LAPIC: Can't find LAPIC\n");
		return ENXIO;
	}

	error = e->lapic_enumerate(e);
	if (error) {
		kprintf("LAPIC: enumeration failed\n");
		return ENXIO;
	}

	ap_max = MAXCPU - 1;
	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
	if (ap_max > MAXCPU - 1)
		ap_max = MAXCPU - 1;

	if (naps > ap_max) {
		kprintf("LAPIC: Warning, using only %d of %d available APs\n",
			ap_max, naps);
		naps = ap_max;
	}

	return 0;
}

void
lapic_enumerator_register(struct lapic_enumerator *ne)
{
	struct lapic_enumerator *e;

	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
		if (e->lapic_prio < ne->lapic_prio) {
			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
			return;
		}
	}
	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
}

void
lapic_set_cpuid(int cpu_id, int apic_id)
{
	CPUID_TO_APICID(cpu_id) = apic_id;
	APICID_TO_CPUID(apic_id) = cpu_id;
}

void
lapic_fixup_noioapic(void)
{
	u_int temp;

	/* Only allowed on BSP */
	KKASSERT(mycpuid == 0);
	KKASSERT(!ioapic_enable);

	temp = lapic->lvt_lint0;
	temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint0 = temp;

	temp = lapic->lvt_lint1;
	temp |= APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;
}

static void
lapic_sysinit(void *dummy __unused)
{
	if (lapic_enable) {
		int error;

		error = lapic_config();
		if (error)
			lapic_enable = 0;
	}

	if (lapic_enable) {
		/* Initialize BSP's local APIC */
		lapic_init(TRUE);
	} else if (ioapic_enable) {
		ioapic_enable = 0;
		icu_reinit_noioapic();
	}
}
SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL);