/*
 * Copyright (c) 2003-2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>
#include <machine/atomic.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct ipiq_stats {
	int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
	int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
	int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
	int64_t ipiq_passive;	/* passive IPI messages */
	int64_t ipiq_cscount;	/* number of cpu synchronizations */
} __cachealign;

static struct ipiq_stats ipiq_stats_percpu[MAXCPU];
#define ipiq_stat(gd)	ipiq_stats_percpu[(gd)->gd_cpuid]

static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif

SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARGS	void *func, void *arg1, int arg2, int scpu, int dcpu

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_quick, 9, "cpumask=%08lx", unsigned long mask);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)

static void lwkt_process_ipiq_nested(void);
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
			struct intrframe *frame, int limit);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

#define IPIQ_SYSCTL(name)				\
static int						\
sysctl_##name(SYSCTL_HANDLER_ARGS)			\
{							\
	int64_t val = 0;				\
	int cpu, error;					\
							\
	for (cpu = 0; cpu < ncpus; ++cpu)		\
		val += ipiq_stats_percpu[cpu].name;	\
							\
	error = sysctl_handle_quad(oidp, &val, 0, req);	\
	if (error || req->newptr == NULL)		\
		return error;				\
							\
	for (cpu = 0; cpu < ncpus; ++cpu)		\
		ipiq_stats_percpu[cpu].name = val;	\
							\
	return 0;					\
}

IPIQ_SYSCTL(ipiq_count);
IPIQ_SYSCTL(ipiq_fifofull);
IPIQ_SYSCTL(ipiq_avoided);
IPIQ_SYSCTL(ipiq_passive);
IPIQ_SYSCTL(ipiq_cscount);

SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_count, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_count, "Q", "Number of IPI's sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_fifofull, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_fifofull, "Q",
    "Number of fifo full conditions detected");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_avoided, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_avoided, "Q",
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_passive, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_passive, "Q",
    "Number of passive IPI messages sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_cscount, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_cscount, "Q",
    "Number of cpu synchronizations");
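
/*
 * Illustrative note (not part of the original source): the IPIQ_SYSCTL()
 * handlers above sum the per-cpu counters on read and overwrite every
 * cpu's counter on write, so the statistics can be inspected or reset
 * from userland, e.g.:
 *
 *	sysctl lwkt.ipiq_count		# total lwkt_send_ipiq*() calls
 *	sysctl lwkt.ipiq_fifofull=0	# writing 0 clears every cpu's counter
 */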

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  Only the owning (source) cpu writes new messages
 * into a given FIFO; the target cpu drains it.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
	lwkt_ipiq_t ip;
	int windex;
	int level1;
	int level2;
	long rflags;
	struct globaldata *gd = mycpu;

	logipiq(send_norm, func, arg1, arg2, gd, target);

	if (target == gd) {
		func(arg1, arg2, NULL);
		logipiq(send_end, func, arg1, arg2, gd, target);
		return(0);
	}
	crit_enter();
	++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
	if (gd->gd_intr_nesting_level > 20)
		panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
	KKASSERT(curthread->td_critcount);
	++ipiq_stat(gd).ipiq_count;
	ip = &gd->gd_ipiq[target->gd_cpuid];

	/*
	 * Do not allow the FIFO to become full.  Interrupts must be physically
	 * enabled while we liveloop to avoid deadlocking the APIC.
	 *
	 * When we are not nested inside a processing loop we allow the FIFO
	 * to get 1/2 full.  Once it exceeds 1/2 full we must wait for it to
	 * drain, executing any incoming IPIs while we wait.
	 *
	 * When we are nested we allow the FIFO to get almost completely full.
	 * This allows us to queue IPIs sent from IPI callbacks.  The processing
	 * code will only process incoming FIFOs that are trying to drain while
	 * we wait, and only to the only-slightly-less-full point, to avoid a
	 * deadlock.
	 *
	 * We are guaranteed
	 */
	if (gd->gd_processing_ipiq == 0) {
		level1 = MAXCPUFIFO / 2;
		level2 = MAXCPUFIFO / 4;
	} else {
		level1 = MAXCPUFIFO - 3;
		level2 = MAXCPUFIFO - 5;
	}

	if (ip->ip_windex - ip->ip_rindex > level1) {
#ifndef _KERNEL_VIRTUAL
		uint64_t tsc_base = rdtsc();
#endif
		int repeating = 0;
		int olimit;

		rflags = read_rflags();
		cpu_enable_intr();
		++ipiq_stat(gd).ipiq_fifofull;
		DEBUG_PUSH_INFO("send_ipiq3");
		olimit = atomic_swap_int(&ip->ip_drain, level2);
		while (ip->ip_windex - ip->ip_rindex > level2) {
			KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
			lwkt_process_ipiq_nested();
			cpu_pause();

			/*
			 * Check for target not draining issue.  This should be fixed
			 * but leave the code in-place anyway as it can recover an
			 * otherwise dead system.
			 */
#ifdef _KERNEL_VIRTUAL
			if (repeating++ > 10)
				pthread_yield();
#else
			if (rdtsc() - tsc_base > tsc_frequency) {
				++repeating;
				if (repeating > 10) {
					kprintf("send_ipiq %d->%d tgt not draining (%d) sniff=%p,%p\n",
						gd->gd_cpuid, target->gd_cpuid, repeating,
						target->gd_sample_pc, target->gd_sample_sp);
					smp_sniff();
					cpu_disable_intr();
					ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
					cpu_send_ipiq(target->gd_cpuid);
					cpu_enable_intr();
				} else {
					kprintf("send_ipiq %d->%d tgt not draining (%d)\n",
						gd->gd_cpuid, target->gd_cpuid, repeating);
					smp_sniff();
				}
				tsc_base = rdtsc();
			}
#endif
		}
		atomic_swap_int(&ip->ip_drain, olimit);
		DEBUG_POP_INFO();
#if defined(__x86_64__)
		write_rflags(rflags);
#else
#error "no write_*flags"
#endif
	}

	/*
	 * Queue the new message and signal the target cpu.  For now we need to
	 * physically disable interrupts because the target will not get
	 * signalled by other cpus once we set target->gd_npoll and we don't
	 * want to get interrupted.
	 *
	 * XXX not sure why this is a problem, the critical section should
	 *     prevent any stalls (incoming interrupts except Xinvltlb and
	 *     Xsnoop will just be made pending).
	 */
	rflags = read_rflags();
#ifndef _KERNEL_VIRTUAL
	cpu_disable_intr();
#endif

	windex = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_info[windex].func = func;
	ip->ip_info[windex].arg1 = arg1;
	ip->ip_info[windex].arg2 = arg2;
	cpu_sfence();
	++ip->ip_windex;
	ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

	/*
	 * signal the target cpu that there is work pending.
	 */
	if (atomic_swap_int(&target->gd_npoll, 1) == 0) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	} else {
		++ipiq_stat(gd).ipiq_avoided;
	}
	write_rflags(rflags);

	--gd->gd_intr_nesting_level;
	crit_exit();
	logipiq(send_end, func, arg1, arg2, gd, target);

	return(ip->ip_windex);
}
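
/*
 * Usage sketch (illustrative, not part of the original source): queue a
 * remote function call on another cpu.  "example_do_work" and
 * "example_kick_cpu" are hypothetical names; any callback matching
 * ipifunc3_t, i.e. void (*)(void *, int, struct intrframe *), will do.
 *
 *	static void
 *	example_do_work(void *arg1, int arg2, struct intrframe *frame __unused)
 *	{
 *		// runs on the target cpu, inside a critical section
 *	}
 *
 *	static void
 *	example_kick_cpu(globaldata_t target)
 *	{
 *		lwkt_send_ipiq3(target, example_do_work, NULL, 0);
 *	}
 *
 * The send returns as soon as the message is queued; see the sketch after
 * lwkt_wait_ipiq() below for how the returned sequence number can be used
 * to wait for completion.
 */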

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO is greater than 1/4 full.
 * This function is usually very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
	lwkt_ipiq_t ip;
	int windex;
	struct globaldata *gd = mycpu;

	KKASSERT(target != gd);
	crit_enter_gd(gd);
	++gd->gd_intr_nesting_level;
	ip = &gd->gd_ipiq[target->gd_cpuid];

	/*
	 * If the FIFO is too full send the IPI actively.
	 *
	 * WARNING! This level must be low enough not to trigger a wait loop
	 *	    in the active sending code since we are not signalling the
	 *	    target cpu.
	 */
	if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO / 4) {
		--gd->gd_intr_nesting_level;
		crit_exit_gd(gd);
		return lwkt_send_ipiq3(target, func, arg1, arg2);
	}

	/*
	 * Else we can do it passively.
	 */
	logipiq(send_pasv, func, arg1, arg2, gd, target);
	++ipiq_stat(gd).ipiq_count;
	++ipiq_stat(gd).ipiq_passive;

	/*
	 * Queue the new message
	 */
	windex = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_info[windex].func = func;
	ip->ip_info[windex].arg1 = arg1;
	ip->ip_info[windex].arg2 = arg2;
	cpu_sfence();
	++ip->ip_windex;
	ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
	--gd->gd_intr_nesting_level;

	/*
	 * Do not signal the target cpu, it will pick up the IPI when it next
	 * polls (typically on the next tick).
	 */
	crit_exit();
	logipiq(send_end, func, arg1, arg2, gd, target);

	return(ip->ip_windex);
}

/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 *
 * To prevent treating low-numbered cpus as favored sons, the IPIs are
 * issued in order starting at mycpu upward, then from 0 through mycpu.
 * This is particularly important to prevent random scheduler pickups
 * from favoring cpu 0.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
	int cpuid;
	int count = 0;
	cpumask_t amask;

	CPUMASK_NANDMASK(mask, stopped_cpus);

	/*
	 * All cpus in mask which are >= mycpu
	 */
	CPUMASK_ASSBMASK(amask, mycpu->gd_cpuid);
	CPUMASK_INVMASK(amask);
	CPUMASK_ANDMASK(amask, mask);
	while (CPUMASK_TESTNZERO(amask)) {
		cpuid = BSFCPUMASK(amask);
		lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
		CPUMASK_NANDBIT(amask, cpuid);
		++count;
	}

	/*
	 * All cpus in mask which are < mycpu
	 */
	CPUMASK_ASSBMASK(amask, mycpu->gd_cpuid);
	CPUMASK_ANDMASK(amask, mask);
	while (CPUMASK_TESTNZERO(amask)) {
		cpuid = BSFCPUMASK(amask);
		lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
		CPUMASK_NANDBIT(amask, cpuid);
		++count;
	}
	return(count);
}
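
/*
 * Usage sketch (illustrative, not part of the original source): broadcast
 * a message to every other running cpu.  "example_do_work" is the same
 * hypothetical ipifunc3_t callback used in the earlier sketch.
 *
 *	cpumask_t mask = smp_active_mask;
 *
 *	CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
 *	lwkt_send_ipiq3_mask(mask, example_do_work, NULL, 0);
 *
 * Stopped cpus are filtered out internally, and delivery starts at the
 * current cpu's id and wraps around so low-numbered cpus are not favored.
 */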

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
	lwkt_ipiq_t ip;

	if (target != mycpu) {
		ip = &mycpu->gd_ipiq[target->gd_cpuid];
		if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__x86_64__)
			unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif
			int64_t time_tgt = tsc_get_target(1000000000LL);
			int time_loops = 10;
			int benice = 0;
#ifdef _KERNEL_VIRTUAL
			int repeating = 0;
#endif

			cpu_enable_intr();
			DEBUG_PUSH_INFO("wait_ipiq");
			while ((int)(ip->ip_xindex - seq) < 0) {
				crit_enter();
				lwkt_process_ipiq();
				crit_exit();
#ifdef _KERNEL_VIRTUAL
				if (repeating++ > 10)
					pthread_yield();
#endif

				/*
				 * IPIQs must be handled within 10 seconds and this code
				 * will warn after one second.
				 */
				if ((benice & 255) == 0 && tsc_test_target(time_tgt) > 0) {
					kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
						mycpu->gd_cpuid, target->gd_cpuid,
						ip->ip_xindex - seq);
					if (--time_loops == 0)
						panic("LWKT_WAIT_IPIQ");
					time_tgt = tsc_get_target(1000000000LL);
				}
				++benice;

				/*
				 * xindex may be modified by another cpu, use a load fence
				 * to ensure that the loop does not use a speculative value
				 * (which may improve performance).
				 */
				cpu_pause();
				cpu_lfence();
			}
			DEBUG_POP_INFO();
#if defined(__x86_64__)
			write_rflags(rflags);
#else
#error "no write_*flags"
#endif
		}
	}
}
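
/*
 * Usage sketch (illustrative, not part of the original source): pair a send
 * with a wait when the caller must not proceed until the remote function
 * has finished.  lwkt_send_ipiq3() returns the queue sequence number that
 * lwkt_wait_ipiq() polls for, and the wait must be made from a critical
 * section.  "example_do_work" is again a hypothetical ipifunc3_t callback.
 *
 *	int seq;
 *
 *	crit_enter();
 *	seq = lwkt_send_ipiq3(target, example_do_work, NULL, 0);
 *	lwkt_wait_ipiq(target, seq);
 *	crit_exit();
 */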

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
	globaldata_t gd = mycpu;
	globaldata_t sgd;
	lwkt_ipiq_t ip;
	cpumask_t mask;
	int n;

	++gd->gd_processing_ipiq;
again:
	mask = gd->gd_ipimask;
	cpu_ccfence();
	while (CPUMASK_TESTNZERO(mask)) {
		n = BSFCPUMASK(mask);
		if (n != gd->gd_cpuid) {
			sgd = globaldata_find(n);
			ip = sgd->gd_ipiq;
			if (ip != NULL) {
				ip += gd->gd_cpuid;
				while (lwkt_process_ipiq_core(sgd, ip, NULL, 0))
					;
				ATOMIC_CPUMASK_NANDBIT(gd->gd_ipimask, n);
				if (ip->ip_rindex != ip->ip_windex)
					ATOMIC_CPUMASK_ORBIT(gd->gd_ipimask, n);
			}
		}
		CPUMASK_NANDBIT(mask, n);
	}

	/*
	 * Process pending cpusyncs.  If the current thread has an active
	 * cpusync we only run the list once and do not re-flag it, as the
	 * thread itself is processing its interlock.
	 */
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL, 0)) {
		if (gd->gd_curthread->td_cscount == 0)
			goto again;
		/* need_ipiq(); do not reflag */
	}

	/*
	 * Interlock to allow more IPI interrupts.
	 */
	--gd->gd_processing_ipiq;
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	globaldata_t sgd;
	lwkt_ipiq_t ip;
	cpumask_t mask;
	int n;

	++gd->gd_processing_ipiq;
again:
	mask = gd->gd_ipimask;
	cpu_ccfence();
	while (CPUMASK_TESTNZERO(mask)) {
		n = BSFCPUMASK(mask);
		if (n != gd->gd_cpuid) {
			sgd = globaldata_find(n);
			ip = sgd->gd_ipiq;
			if (ip != NULL) {
				ip += gd->gd_cpuid;
				while (lwkt_process_ipiq_core(sgd, ip, frame, 0))
					;
				ATOMIC_CPUMASK_NANDBIT(gd->gd_ipimask, n);
				if (ip->ip_rindex != ip->ip_windex)
					ATOMIC_CPUMASK_ORBIT(gd->gd_ipimask, n);
			}
		}
		CPUMASK_NANDBIT(mask, n);
	}
	if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
		if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame, 0)) {
			if (gd->gd_curthread->td_cscount == 0)
				goto again;
			/* need_ipiq(); do not reflag */
		}
	}
	--gd->gd_processing_ipiq;
}

/*
 * Only process incoming IPIQs from draining senders and only process them
 * to the point where the draining sender is able to continue.  This is
 * necessary to avoid deadlocking the IPI subsystem because we are acting on
 * incoming messages and the callback may queue additional messages.
 *
 * We only want to have to act on senders that are blocked to limit the
 * number of additional messages sent.  At the same time, recipients are
 * trying to drain our own queue.  Theoretically this creates a pipeline
 * that cannot deadlock.
 */
static void
lwkt_process_ipiq_nested(void)
{
	globaldata_t gd = mycpu;
	globaldata_t sgd;
	lwkt_ipiq_t ip;
	cpumask_t mask;
	int n;
	int limit;

	++gd->gd_processing_ipiq;
again:
	mask = gd->gd_ipimask;
	cpu_ccfence();
	while (CPUMASK_TESTNZERO(mask)) {
		n = BSFCPUMASK(mask);
		if (n != gd->gd_cpuid) {
			sgd = globaldata_find(n);
			ip = sgd->gd_ipiq;

			/*
			 * NOTE: We do not mess with the cpumask at all, instead we let
			 *	 the top-level ipiq processor deal with it.
			 */
			if (ip != NULL) {
				ip += gd->gd_cpuid;
				if ((limit = ip->ip_drain) != 0) {
					lwkt_process_ipiq_core(sgd, ip, NULL, limit);
					/* no gd_ipimask when doing limited processing */
				}
			}
		}
		CPUMASK_NANDBIT(mask, n);
	}

	/*
	 * Process pending cpusyncs.  If the current thread has an active
	 * cpusync we only run the list once and do not re-flag it, as the
	 * thread itself is processing its interlock.
	 */
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL, 0)) {
		if (gd->gd_curthread->td_cscount == 0)
			goto again;
		/* need_ipiq(); do not reflag */
	}
	--gd->gd_processing_ipiq;
}

/*
 * Process incoming IPI requests until only <limit> are left (0 to exhaust
 * all incoming IPI requests).
 */
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame, int limit)
{
	globaldata_t mygd = mycpu;
	int ri;
	int wi;
	ipifunc3_t copy_func;
	void *copy_arg1;
	int copy_arg2;

	/*
	 * Clear the originating core from our ipimask; we will process all
	 * incoming messages.
	 *
	 * Obtain the current write index, which is modified by a remote cpu.
	 * Issue a load fence to prevent speculative reads of e.g. data written
	 * by the other cpu prior to it updating the index.
	 */
	KKASSERT(curthread->td_critcount);
	wi = ip->ip_windex;
	cpu_lfence();
	++mygd->gd_intr_nesting_level;

	/*
	 * NOTE: xindex is only updated after we are sure the function has
	 *	 finished execution.  Beware lwkt_process_ipiq() reentrancy!
	 *	 The function may send an IPI which may block/drain.
	 *
	 * NOTE: Due to additional IPI operations that the callback function
	 *	 may make, it is possible for both rindex and windex to advance
	 *	 and thus for rindex to advance past our cached windex.
	 *
	 * NOTE: A load fence is required to prevent speculative loads prior
	 *	 to the loading of ip_rindex.  Even though stores might be
	 *	 ordered, loads are probably not.  A memory fence is required
	 *	 to prevent reordering of the loads after the ip_rindex update.
	 *
	 * NOTE: Single pass only.  Returns non-zero if the queue is not empty
	 *	 on return.
	 */
	while (wi - (ri = ip->ip_rindex) > limit) {
		ri &= MAXCPUFIFO_MASK;
		cpu_lfence();
		copy_func = ip->ip_info[ri].func;
		copy_arg1 = ip->ip_info[ri].arg1;
		copy_arg2 = ip->ip_info[ri].arg2;
		cpu_mfence();
		++ip->ip_rindex;
		KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
			 ((ri + 1) & MAXCPUFIFO_MASK));
		logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
		if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
			kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
				mycpu->gd_cpuid,
				copy_func, copy_arg1, copy_arg2,
#if defined(__x86_64__)
				(frame ? (void *)frame->if_rip : NULL));
#else
				NULL);
#endif
		}
#endif
		copy_func(copy_arg1, copy_arg2, frame);
		cpu_sfence();
		ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
		/*
		 * Simulate panics during the processing of an IPI
		 */
		if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
			if (--panic_ipiq_count == 0) {
#ifdef DDB
				Debugger("PANIC_DEBUG");
#else
				panic("PANIC_DEBUG");
#endif
			}
		}
#endif
	}
	--mygd->gd_intr_nesting_level;

	/*
	 * Return non-zero if there is still more in the queue.  Don't worry
	 * about fencing, we will get another interrupt if necessary.
	 */
	return (ip->ip_rindex != ip->ip_windex);
}
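
/*
 * Worked example (added for clarity, not part of the original source):
 * ip_windex and ip_rindex increment without ever being reset; only the low
 * bits, (index & MAXCPUFIFO_MASK), select a slot.  The difference
 * ip_windex - ip_rindex is therefore the number of in-flight messages as
 * long as it stays below MAXCPUFIFO.  Assuming MAXCPUFIFO is 32 purely for
 * illustration: windex = 37 and rindex = 34 means 3 messages are queued,
 * occupying slots 34 & 31 == 2, then 3 and 4, and the consumer above copies
 * each entry out before bumping ip_rindex so the producer can reuse the
 * slot.
 */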

static void
lwkt_sync_ipiq(void *arg)
{
	volatile cpumask_t *cpumask = arg;

	ATOMIC_CPUMASK_NANDBIT(*cpumask, mycpu->gd_cpuid);
	if (CPUMASK_TESTZERO(*cpumask))
		wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
	volatile cpumask_t other_cpumask;

	other_cpumask = smp_active_mask;
	CPUMASK_ANDMASK(other_cpumask, mycpu->gd_other_cpus);
	lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			    __DEVOLATILE(void *, &other_cpumask));

	while (CPUMASK_TESTNZERO(other_cpumask)) {
		tsleep_interlock(&other_cpumask, 0);
		if (CPUMASK_TESTNZERO(other_cpumask))
			tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
	}
}

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
	struct lwkt_cpusync cs;

	lwkt_cpusync_init(&cs, mask, func, arg);
	lwkt_cpusync_interlock(&cs);
	lwkt_cpusync_deinterlock(&cs);
}

void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
	globaldata_t gd = mycpu;
	cpumask_t mask;

	/*
	 * mask acknowledge (cs_mack): 0->mask for stage 1
	 *
	 * mack does not include the current cpu.
	 */
	mask = cs->cs_mask;
	CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);
	CPUMASK_ASSZERO(cs->cs_mack);

	crit_enter_id("cpusync");
	if (CPUMASK_TESTNZERO(mask)) {
		DEBUG_PUSH_INFO("cpusync_interlock");
		++ipiq_stat(gd).ipiq_cscount;
		++gd->gd_curthread->td_cscount;
		lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
		logipiq2(sync_start, (long)CPUMASK_LOWMASK(mask));
		while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
			lwkt_process_ipiq();
			cpu_pause();
#ifdef _KERNEL_VIRTUAL
			pthread_yield();
#endif
		}
		DEBUG_POP_INFO();
	}
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
	globaldata_t gd = mycpu;
	cpumask_t mask;

	/*
	 * mask acknowledge (cs_mack): mack->0->mack for stage 2
	 *
	 * Clearing cpu bits for polling cpus in cs_mack will cause them to
	 * execute stage 2, which executes the cs_func(cs_data) and then sets
	 * their bit in cs_mack again.
	 *
	 * mack does not include the current cpu.
	 */
	mask = cs->cs_mack;
	cpu_ccfence();
	CPUMASK_ASSZERO(cs->cs_mack);
	cpu_ccfence();
	if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
		cs->cs_func(cs->cs_data);
	if (CPUMASK_TESTNZERO(mask)) {
		DEBUG_PUSH_INFO("cpusync_deinterlock");
		while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
			lwkt_process_ipiq();
			cpu_pause();
#ifdef _KERNEL_VIRTUAL
			pthread_yield();
#endif
		}
		DEBUG_POP_INFO();

		/*
		 * cpusyncq ipis may be left queued without the RQF flag set due to
		 * a non-zero td_cscount, so be sure to process any laggards after
		 * decrementing td_cscount.
		 */
		--gd->gd_curthread->td_cscount;
		lwkt_process_ipiq();
		logipiq2(sync_end, (long)CPUMASK_LOWMASK(mask));
	}
	crit_exit_id("cpusync");
}
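
/*
 * Usage sketch (illustrative, not part of the original source): quiesce a
 * set of cpus, mutate some shared state while they spin, then have every
 * cpu in the set run a function before they are released.
 * "example_sync_func" is a hypothetical cpusync_func_t callback.
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, example_sync_func, NULL);
 *	lwkt_cpusync_interlock(&cs);	// targets are now spinning quietly
 *	// ... modify state that must not be raced against ...
 *	lwkt_cpusync_deinterlock(&cs);	// runs example_sync_func everywhere
 *
 * lwkt_cpusync_simple() above is exactly this sequence with no work done
 * between the interlock and the deinterlock.
 */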

/*
 * The quick version does not quiesce the target cpu(s) but instead executes
 * the function on the target cpu(s) and waits for all to acknowledge.  This
 * avoids spinning on the target cpus.
 *
 * This function is typically only used for kernel_pmap updates.  User pmaps
 * have to be quiesced.
 */
void
lwkt_cpusync_quick(lwkt_cpusync_t cs)
{
	globaldata_t gd = mycpu;
	cpumask_t mask;

	/*
	 * stage-2 cs_mack only.
	 */
	mask = cs->cs_mask;
	CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);
	CPUMASK_ASSZERO(cs->cs_mack);

	crit_enter_id("cpusync");
	if (CPUMASK_TESTNZERO(mask)) {
		DEBUG_PUSH_INFO("cpusync_interlock");
		++ipiq_stat(gd).ipiq_cscount;
		++gd->gd_curthread->td_cscount;
		lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote2, cs);
		logipiq2(sync_quick, (long)CPUMASK_LOWMASK(mask));
		while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
			lwkt_process_ipiq();
			cpu_pause();
#ifdef _KERNEL_VIRTUAL
			pthread_yield();
#endif
		}

		/*
		 * cpusyncq ipis may be left queued without the RQF flag set due to
		 * a non-zero td_cscount, so be sure to process any laggards after
		 * decrementing td_cscount.
		 */
		DEBUG_POP_INFO();
		--gd->gd_curthread->td_cscount;
		lwkt_process_ipiq();
	}
	if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
		cs->cs_func(cs->cs_data);
	crit_exit_id("cpusync");
}

/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_mack, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
	globaldata_t gd = mycpu;

	ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
	lwkt_cpusync_remote2(cs);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
	globaldata_t gd = mycpu;

	if (CPUMASK_TESTMASK(cs->cs_mack, gd->gd_cpumask) == 0) {
		if (cs->cs_func)
			cs->cs_func(cs->cs_data);
		ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
		/* cs can be ripped out at this point */
	} else {
		lwkt_ipiq_t ip;
		int wi;

		cpu_pause();
#ifdef _KERNEL_VIRTUAL
		pthread_yield();
#endif
		cpu_lfence();

		/*
		 * Requeue our IPI to avoid a deep stack recursion.  If no other
		 * IPIs are pending we can just loop up, which should help VMs
		 * better-detect spin loops.
		 */
		ip = &gd->gd_cpusyncq;

		wi = ip->ip_windex & MAXCPUFIFO_MASK;
		ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
		ip->ip_info[wi].arg1 = cs;
		ip->ip_info[wi].arg2 = 0;
		cpu_sfence();
		KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);
		++ip->ip_windex;
		if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
			kprintf("cpu %d cm=%016jx %016jx f=%p\n",
				gd->gd_cpuid,
				(intmax_t)CPUMASK_LOWMASK(cs->cs_mask),
				(intmax_t)CPUMASK_LOWMASK(cs->cs_mack),
				cs->cs_func);
		}
	}
}

#define LWKT_IPIQ_NLATENCY	8
#define LWKT_IPIQ_NLATENCY_MASK	(LWKT_IPIQ_NLATENCY - 1)

struct lwkt_ipiq_latency_log {
	int		idx;	/* unmasked index */
	int		pad;
	uint64_t	latency[LWKT_IPIQ_NLATENCY];
};

static struct lwkt_ipiq_latency_log lwkt_ipiq_latency_logs[MAXCPU];
static uint64_t save_tsc;

/*
 * IPI callback (already in a critical section)
 */
static void
lwkt_ipiq_latency_testfunc(void *arg __unused)
{
	uint64_t delta_tsc;
	struct globaldata *gd;
	struct lwkt_ipiq_latency_log *lat;

	/*
	 * Get delta TSC (assume TSCs are synchronized) as quickly as
	 * possible and then convert to nanoseconds.
	 */
	delta_tsc = rdtsc_ordered() - save_tsc;
	delta_tsc = delta_tsc * 1000000000LU / tsc_frequency;

	/*
	 * Record in our save array.
	 */
	gd = mycpu;
	lat = &lwkt_ipiq_latency_logs[gd->gd_cpuid];
	lat->latency[lat->idx & LWKT_IPIQ_NLATENCY_MASK] = delta_tsc;
	++lat->idx;
}

/*
 * Send IPI from cpu0 to other cpus
 *
 * NOTE: Machine must be idle for the test to run dependably, and it is
 *	 also probably a good idea not to be running powerd.
 *
 * NOTE: Caller should use 'usched :1 <command>' to lock itself to cpu 0.
 *	 See 'ipitest' script in /usr/src/test/sysperf/ipitest
 */
static int
lwkt_ipiq_latency_test(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int cpu = 0, orig_cpu, error;

	error = sysctl_handle_int(oidp, &cpu, arg2, req);
	if (error || req->newptr == NULL)
		return error;

	if (cpu == 0)
		return 0;
	else if (cpu >= ncpus || cpu < 0)
		return EINVAL;

	orig_cpu = mycpuid;
	lwkt_migratecpu(0);

	gd = globaldata_find(cpu);

	save_tsc = rdtsc_ordered();
	lwkt_send_ipiq(gd, lwkt_ipiq_latency_testfunc, NULL);

	lwkt_migratecpu(orig_cpu);
	return 0;
}

SYSCTL_NODE(_debug, OID_AUTO, ipiq, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_debug_ipiq, OID_AUTO, latency_test, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, lwkt_ipiq_latency_test, "I",
    "ipi latency test, arg: remote cpuid");

static int
lwkt_ipiq_latency(SYSCTL_HANDLER_ARGS)
{
	struct lwkt_ipiq_latency_log *latency = arg1;
	uint64_t lat[LWKT_IPIQ_NLATENCY];
	int i;

	for (i = 0; i < LWKT_IPIQ_NLATENCY; ++i)
		lat[i] = latency->latency[i];

	return sysctl_handle_opaque(oidp, lat, sizeof(lat), req);
}

static void
lwkt_ipiq_latency_init(void *dummy __unused)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		char name[32];

		ksnprintf(name, sizeof(name), "latency%d", cpu);
		SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_debug_ipiq),
		    OID_AUTO, name, CTLTYPE_OPAQUE | CTLFLAG_RD,
		    &lwkt_ipiq_latency_logs[cpu], 0, lwkt_ipiq_latency,
		    "LU", "8 latest ipi latency measurement results");
	}
}
SYSINIT(lwkt_ipiq_latency, SI_SUB_CONFIGURE, SI_ORDER_ANY,
    lwkt_ipiq_latency_init, NULL);
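
/*
 * Usage note (illustrative, not part of the original source): the debug
 * sysctls declared above can be exercised from userland roughly as follows,
 * where "2" is just an example remote cpu id:
 *
 *	sysctl debug.ipiq.latency_test=2   # send a test IPI from cpu0 to cpu2
 *	sysctl debug.ipiq.latency2	   # cpu2's last 8 measured latencies
 *
 * The reported values are the per-IPI deltas recorded by
 * lwkt_ipiq_latency_testfunc(), converted to nanoseconds, and assume the
 * TSCs are synchronized across cpus.
 */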