/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_pmap.h"
#include "opt_sched.h"
#include "opt_smp.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <x86/ucode.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(PMAP_MAP_LOW + 0x0467)
#define WARMBOOT_SEG		(PMAP_MAP_LOW + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * This code MUST be enabled here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS RAM.
 * It NORMALLY will never be needed, hence the primitive method of enabling it.
 *
#define CHECK_POINTS
 */
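
/*
 * Illustrative only (hypothetical checkpoint values): with CHECK_POINTS
 * enabled, start_all_aps() seeds CMOS bytes 0x34-0x39 with 99 via
 * CHECK_INIT(99) before each AP is kicked, and the AP overwrites them as
 * it advances; init_secondary() below stores 5 at 0x38 and 6 at 0x39,
 * while any earlier marks come from mpboot.s.  A CHECK_PRINT("trace")
 * for an AP that never reached C code would therefore still show 99 in
 * the trailing slots, e.g.:
 *
 *	trace: 16, 32, 48, 99, 99, 99
 *
 * bracketing the point where the wedged AP stopped making progress.
 */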
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S)				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif				/* CHECK_POINTS */

/*
 * Local data and functions.
 */

static void	install_ap_tramp(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);

static char *ap_copyout_buf;
static char *ap_tramp_stack_base;

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, IDTVEC(ipi_swi),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct i386tss *common_tssp;
	struct region_descriptor r_gdt, r_idt;
	int gsel_tss, myid, x;
	u_int cr0;

	/* bootAP is set in start_all_aps() to our ID. */
	myid = bootAP;
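
	/*
	 * Relying on a plain global here is safe: start_all_aps()
	 * brings the APs up strictly one at a time, and start_ap()
	 * waits for each AP to advance mp_naps before bootAP (and
	 * bootSTK) are reused for the next CPU.
	 */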

	/* Update microcode before doing anything else. */
	ucode_load_ap(myid);

	/* Get per-cpu data */
	pc = &__pcpu[myid];

	/* prime data page for it to use */
	pcpu_init(pc, myid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, myid);
	pc->pc_apic_id = cpu_apic_ids[myid];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_common_tssp = common_tssp = &(__pcpu[0].pc_common_tssp)[myid];

	fix_cpuid();

	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)common_tssp;
	gdt_segs[GLDT_SEL].ssd_base = (int)ldt;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int)&gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	PCPU_SET(trampstk, (uintptr_t)ap_tramp_stack_base + TRAMP_STACK_SZ -
	    VM86_STACK_SPACE);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	common_tssp->tss_esp0 = PCPU_GET(trampstk);
	common_tssp->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tssp->tss_ioopt = sizeof(struct i386tss) << 16;
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);
	PCPU_SET(copyout_buf, ap_copyout_buf);

	/*
	 * Set to a known state:
	 * Set by mpboot.s:	CR0_PG, CR0_PE
	 * Set by cpu_setregs:	CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);
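
	/*
	 * mp_naps doubles as the BSP's progress indicator: start_ap()
	 * polls it to learn that this AP reached C code.  The aps_ready
	 * flag below is the reverse handshake, set by the BSP (in
	 * release_aps()) once the rest of the kernel is ready for the
	 * APs to start scheduling.
	 */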

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	init_secondary_tail();
}

/*
 * start each AP in our list
 */
#define TMPMAP_START 1
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	pmap_remap_lower(true);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* take advantage of the P==V mapping for PTD[0] for AP boot */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up a boot stack data page */
		bootstacks[cpu] = (char *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		bootSTK = (char *)bootstacks[cpu] + kstack_pages *
		    PAGE_SIZE - 4;
		bootAP = cpu;

		ap_tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
		ap_copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);

		/* attempt to start the Application Processor */
		CHECK_INIT(99);		/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	pmap_remap_lower(false);

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int x;
	int size = *(int *) ((u_long) &bootMP_size);
	vm_offset_t va = boot_address;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	KASSERT(size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * Modify addresses in the code we just moved to basemem.
	 * Unfortunately we need fairly detailed info about mpboot.s for
	 * this to work.  Changes to mpboot.s might require changes here.
	 */
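
	/*
	 * The fixups below patch, in order: the 32-bit operand of the
	 * trampoline's lgdt (mp_gdtbase), the 32-bit offset of its far
	 * jump into MPentry(), and the 16+8 bit base fields of the boot
	 * code and data segment descriptors.  Each target is found by
	 * its offset from where mpboot.s was linked (boot_base) and
	 * rebased to where the code now lives (boot_address).
	 */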

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) &mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int) &MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = (u_int)MPentry;

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'apic_id'.  It does quite a "song and dance" to accomplish
 * this.  This is necessary because of the nuances of the different
 * hardware we might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs.
 */

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
pmap_t smp_tlb_pmap;
volatile uint32_t smp_tlb_generation;

/*
 * Used by pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs which are
 * to be signalled with the invalidation IPI.  Vector specifies which
 * invalidation IPI is used.  As an optimization, the curcpu_cb
 * callback is invoked on the calling CPU while waiting for remote
 * CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 */
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
    vm_offset_t addr1, vm_offset_t addr2, smp_invl_cb_t curcpu_cb)
{
	cpuset_t other_cpus;
	volatile uint32_t *p_cpudone;
	uint32_t generation;
	int cpu;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (kdb_active || KERNEL_PANICKED() || !smp_started) {
		curcpu_cb(pmap, addr1, addr2);
		return;
	}

	sched_pin();

	/*
	 * Check for other CPUs.  Return if none.
	 */
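	/*
	 * Two shapes of mask are handled below: a full set, the common
	 * "all CPUs" case, is signalled with the ipi_all_but_self()
	 * shorthand, while any narrower set is first trimmed of the
	 * current CPU (the curcpu_cb invocation covers it) and then
	 * signalled with ipi_selected().
	 */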
	if (CPU_ISFULLSET(&mask)) {
		if (mp_ncpus <= 1)
			goto nospinexit;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			goto nospinexit;
	}

	KASSERT((read_eflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	generation = ++smp_tlb_generation;
	if (CPU_ISFULLSET(&mask)) {
		ipi_all_but_self(vector);
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	} else {
		other_cpus = mask;
		ipi_selected(mask, vector);
	}
	curcpu_cb(pmap, addr1, addr2);
	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &other_cpus);
		p_cpudone = &cpuid_to_pcpu[cpu]->pc_smp_tlb_done;
		while (*p_cpudone != generation)
			ia32_pause();
	}
	mtx_unlock_spin(&smp_ipi_mtx);
	sched_unpin();
	return;

nospinexit:
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{

	smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0, curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{

	smp_targeted_tlb_shootdown(mask, IPI_INVLPG, pmap, addr, 0, curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
    pmap_t pmap, smp_invl_cb_t curcpu_cb)
{

	smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, pmap, addr1, addr2,
	    curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{

	smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL, 0, 0,
	    curcpu_cb);
}
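
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * pmap-level invalidation routine would be built on the wrappers above
 * roughly as follows, with the callback handling the initiating CPU's
 * share of the work:
 *
 *	static void
 *	example_invlpg_curcpu_cb(pmap_t pmap, vm_offset_t va,
 *	    vm_offset_t addr2_unused)
 *	{
 *		invlpg(va);
 *	}
 *
 *	void
 *	example_invalidate_page(pmap_t pmap, vm_offset_t va)
 *	{
 *		smp_masked_invlpg(all_cpus, va, pmap,
 *		    example_invlpg_curcpu_cb);
 *	}
 */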

/*
 * Handlers for TLB related IPIs
 */
void
invltlb_handler(void)
{
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	/*
	 * Reading the generation here allows greater parallelism
	 * since invalidating the TLB is a serializing operation.
	 */
	generation = smp_tlb_generation;
	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	PCPU_SET(smp_tlb_done, generation);
}

void
invlpg_handler(void)
{
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap)
		invlpg(smp_tlb_addr1);
	PCPU_SET(smp_tlb_done, generation);
}

void
invlrng_handler(void)
{
	vm_offset_t addr, addr2;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap) {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < addr2);
	}

	PCPU_SET(smp_tlb_done, generation);
}

void
invlcache_handler(void)
{
	uint32_t generation;

#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	/*
	 * Reading the generation here allows greater parallelism
	 * since wbinvd is a serializing instruction.  Without the
	 * temporary, we'd wait for wbinvd to complete, then the read
	 * would execute, then the dependent write, which must then
	 * complete before return from interrupt.
	 */
	generation = smp_tlb_generation;
	wbinvd();
	PCPU_SET(smp_tlb_done, generation);
}