/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#ifdef SMP
static __int64_t ipiq_count;      /* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;   /* number of fifo full conditions detected */
static __int64_t ipiq_avoided;    /* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;    /* passive IPI messages */
static __int64_t ipiq_cscount;    /* number of cpu synchronizations */
static int ipiq_debug;            /* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif
#endif

#ifdef SMP
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
    "Number of IPI's sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
    "Number of fifo full conditions detected");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
    "Number of passive IPI messages sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
    "Number of cpu synchronizations");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING    "func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE  (sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ    KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd) \
    KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg) \
    KTR_LOG(ipiq_ ## name, arg)

#endif  /* SMP */

#ifdef SMP

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
                                  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu, so the FIFO is only ever written by its owning
 * (source) cpu.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
        func(arg1, arg2, NULL);
        logipiq(send_end, func, arg1, arg2, gd, target);
        return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
        panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * The target ipiq may have gotten filled up due to passive IPIs and thus
     * not be aware that its queue is too full, so be sure to issue an
     * ipiq interrupt to the target cpu.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
        unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
        unsigned long rflags = read_rflags();
#endif

        cpu_enable_intr();
        ++ipiq_fifofull;
        DEBUG_PUSH_INFO("send_ipiq3");
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            if (atomic_poll_acquire_int(&target->gd_npoll)) {
                logipiq(cpu_send, func, arg1, arg2, gd, target);
                cpu_send_ipiq(target->gd_cpuid);
            }
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
            cpu_pause();
        }
        DEBUG_POP_INFO();
#if defined(__i386__)
        write_eflags(eflags);
#elif defined(__x86_64__)
        write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);

    /*
     * Signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&target->gd_npoll)) {
        logipiq(cpu_send, func, arg1, arg2, gd, target);
        cpu_send_ipiq(target->gd_cpuid);
    } else {
        ++ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}

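/*
 * Illustrative sketch only (kept under #if 0, not compiled): how a caller
 * might queue a function on another cpu with lwkt_send_ipiq3().  The
 * callback and wrapper names are hypothetical.  The callback runs on the
 * target cpu, inside a critical section, from its IPI processing loop.
 */
#if 0
static void
example_ipi_func(void *arg1, int arg2, struct intrframe *frame)
{
    /* runs on the target cpu; keep the work short and non-blocking */
}

static void
example_send(int dcpu, void *ptr)
{
    lwkt_send_ipiq3(globaldata_find(dcpu), example_ipi_func, ptr, 0);
}
#endif
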
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
                        void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    ++gd->gd_intr_nesting_level;
    logipiq(send_pasv, func, arg1, arg2, gd, target);
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
        panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
        unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
        unsigned long rflags = read_rflags();
#endif

        cpu_enable_intr();
        ++ipiq_fifofull;
        DEBUG_PUSH_INFO("send_ipiq3_passive");
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            if (atomic_poll_acquire_int(&target->gd_npoll)) {
                logipiq(cpu_send, func, arg1, arg2, gd, target);
                cpu_send_ipiq(target->gd_cpuid);
            }
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
            cpu_pause();
        }
        DEBUG_POP_INFO();
#if defined(__i386__)
        write_eflags(eflags);
#elif defined(__x86_64__)
        write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}

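/*
 * Illustrative sketch only (kept under #if 0, not compiled): a passive
 * send for non-critical work, e.g. handing a buffer back to the cpu that
 * owns it for deallocation.  The callback and the owner-cpu free routine
 * are hypothetical names.  No hardware IPI is normally generated; the
 * target cpu drains the queue on its next poll (typically the next tick).
 */
#if 0
static void
example_remote_free(void *arg1, int arg2, struct intrframe *frame)
{
    example_free(arg1);     /* hypothetical owner-cpu free routine */
}

static void
example_queue_free(globaldata_t owner, void *ptr)
{
    lwkt_send_ipiq3_passive(owner, example_remote_free, ptr, 0);
}
#endif
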
/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
                       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
        func(arg1, arg2, NULL);
        logipiq(send_end, func, arg1, arg2, gd, target);
        return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
        logipiq(send_fail, func, arg1, arg2, gd, target);
        --gd->gd_intr_nesting_level;
        crit_exit();
        return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&target->gd_npoll)) {
        logipiq(cpu_send, func, arg1, arg2, gd, target);
        cpu_send_ipiq(target->gd_cpuid);
    } else {
        ++ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}

/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
        cpuid = BSFCPUMASK(mask);
        lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
        mask &= ~CPUMASK(cpuid);
        ++count;
    }
    return(count);
}

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
        ip = &mycpu->gd_ipiq[target->gd_cpuid];
        if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
            unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
            unsigned long rflags = read_rflags();
#endif
            cpu_enable_intr();
            DEBUG_PUSH_INFO("wait_ipiq");
            while ((int)(ip->ip_xindex - seq) < 0) {
                crit_enter();
                lwkt_process_ipiq();
                crit_exit();
                if (--maxc == 0)
                    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
                            mycpu->gd_cpuid, target->gd_cpuid,
                            ip->ip_xindex - seq);
                if (maxc < -1000000)
                    panic("LWKT_WAIT_IPIQ");
                /*
                 * xindex may be modified by another cpu, use a load fence
                 * to ensure that the loop does not use a speculative value
                 * (which may improve performance).
                 */
                cpu_lfence();
            }
            DEBUG_POP_INFO();
#if defined(__i386__)
            write_eflags(eflags);
#elif defined(__x86_64__)
            write_rflags(rflags);
#endif
        }
    }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

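/*
 * Illustrative sketch only (kept under #if 0, not compiled): pairing the
 * sequence number returned by lwkt_send_ipiq3() with lwkt_wait_ipiq() to
 * spin until the target cpu has executed the queued function.  The
 * callback and wrapper names are hypothetical; lwkt_wait_ipiq() must be
 * called from a critical section (see above).
 */
#if 0
static void
example_send_and_wait(globaldata_t target, void *ptr)
{
    int seq;

    seq = lwkt_send_ipiq3(target, example_ipi_func, ptr, 0);
    crit_enter();
    lwkt_wait_ipiq(target, seq);    /* spin until the target has run it */
    crit_exit();
}
#endif
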
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    ++gd->gd_processing_ipiq;
again:
    cpu_lfence();
    mask = gd->gd_ipimask;
    atomic_clear_cpumask(&gd->gd_ipimask, mask);
    while (mask) {
        n = BSFCPUMASK(mask);
        if (n != gd->gd_cpuid) {
            sgd = globaldata_find(n);
            ip = sgd->gd_ipiq;
            if (ip != NULL) {
                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
                    ;
            }
        }
        mask &= ~CPUMASK(n);
    }
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
        if (gd->gd_curthread->td_cscount == 0)
            goto again;
        /* need_ipiq(); do not reflag */
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (gd->gd_ipimask)
        goto again;
    atomic_poll_release_int(&gd->gd_npoll);
    cpu_mfence();
    if (gd->gd_ipimask)
        goto again;
    --gd->gd_processing_ipiq;
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

again:
    cpu_lfence();
    mask = gd->gd_ipimask;
    atomic_clear_cpumask(&gd->gd_ipimask, mask);
    while (mask) {
        n = BSFCPUMASK(mask);
        if (n != gd->gd_cpuid) {
            sgd = globaldata_find(n);
            ip = sgd->gd_ipiq;
            if (ip != NULL) {
                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
                    ;
            }
        }
        mask &= ~CPUMASK(n);
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
        if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
            if (gd->gd_curthread->td_cscount == 0)
                goto again;
            /* need_ipiq(); do not reflag */
        }
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (gd->gd_ipimask)
        goto again;
    atomic_poll_release_int(&gd->gd_npoll);
    cpu_mfence();
    if (gd->gd_ipimask)
        goto again;
}

#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
                       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

#if 0
    if (iqticks[mygd->gd_cpuid] != ticks) {
        iqticks[mygd->gd_cpuid] = ticks;
        iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
        kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
                mygd->gd_cpuid,
                mygd->gd_curthread->td_cscount,
                mygd->gd_spinlocks_wr);
        iqcount[mygd->gd_cpuid] = 0;
#if 0
        if (++iqterm[mygd->gd_cpuid] > 10)
            panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
        int i;
        for (i = 0; i < ncpus; ++i) {
            if (globaldata_find(i)->gd_infomsg)
                kprintf(" %s", globaldata_find(i)->gd_infomsg);
        }
        kprintf("\n");
    }
#endif

    /*
     * Clear the originating core from our ipimask; we will process all
     * incoming messages.
     *
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *       finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *       The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *       may make, it is possible for both rindex and windex to advance
     *       and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *       to the loading of ip_rindex.  Even though stores might be
     *       ordered, loads are probably not.  A memory fence is required
     *       to prevent reordering of the loads after the ip_rindex update.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
        ri &= MAXCPUFIFO_MASK;
        cpu_lfence();
        copy_func = ip->ip_info[ri].func;
        copy_arg1 = ip->ip_info[ri].arg1;
        copy_arg2 = ip->ip_info[ri].arg2;
        cpu_mfence();
        ++ip->ip_rindex;
        KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
                 ((ri + 1) & MAXCPUFIFO_MASK));
        logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
        if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
            kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
                    mycpu->gd_cpuid,
                    copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
                    (frame ? (void *)frame->if_eip : NULL));
#elif defined(__x86_64__)
                    (frame ? (void *)frame->if_rip : NULL));
#else
                    NULL);
#endif
        }
#endif
        copy_func(copy_arg1, copy_arg2, frame);
        cpu_sfence();
        ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
        /*
         * Simulate panics during the processing of an IPI
         */
        if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
            if (--panic_ipiq_count == 0) {
#ifdef DDB
                Debugger("PANIC_DEBUG");
#else
                panic("PANIC_DEBUG");
#endif
            }
        }
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there is still more in the queue.
     */
    cpu_lfence();
    return (ip->ip_rindex != ip->ip_windex);
}

static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
        wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
                        __DEVOLATILE(void *, &other_cpumask));

    while (other_cpumask != 0) {
        tsleep_interlock(&other_cpumask, 0);
        if (other_cpumask != 0)
            tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}

#endif

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}

void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
#ifdef SMP
#if 0
    const char *smsg = "SMPSYNL";
#endif
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
    cs->cs_mack = 0;
    crit_enter_id("cpusync");
    if (mask) {
        DEBUG_PUSH_INFO("cpusync_interlock");
        ++ipiq_cscount;
        ++gd->gd_curthread->td_cscount;
        lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
        logipiq2(sync_start, mask);
#if 0
        if (gd->gd_curthread->td_wmesg == NULL)
            gd->gd_curthread->td_wmesg = smsg;
#endif
        while (cs->cs_mack != mask) {
            lwkt_process_ipiq();
            cpu_pause();
        }
#if 0
        if (gd->gd_curthread->td_wmesg == smsg)
            gd->gd_curthread->td_wmesg = NULL;
#endif
        DEBUG_POP_INFO();
    }
#else
    cs->cs_mack = 0;
#endif
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
#ifdef SMP
#if 0
    const char *smsg = "SMPSYNU";
#endif
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cpu_ccfence();
    cs->cs_mack = 0;
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
        cs->cs_func(cs->cs_data);
    if (mask) {
        DEBUG_PUSH_INFO("cpusync_deinterlock");
#if 0
        if (gd->gd_curthread->td_wmesg == NULL)
            gd->gd_curthread->td_wmesg = smsg;
#endif
        while (cs->cs_mack != mask) {
            lwkt_process_ipiq();
            cpu_pause();
        }
#if 0
        if (gd->gd_curthread->td_wmesg == smsg)
            gd->gd_curthread->td_wmesg = NULL;
#endif
        DEBUG_POP_INFO();
        /*
         * cpusyncq ipis may be left queued without the RQF flag set due to
         * a non-zero td_cscount, so be sure to process any laggards after
         * decrementing td_cscount.
         */
        --gd->gd_curthread->td_cscount;
        lwkt_process_ipiq();
        logipiq2(sync_end, mask);
    }
    crit_exit_id("cpusync");
#else
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
        cs->cs_func(cs->cs_data);
#endif
}

#ifdef SMP

/*
 * helper IPI remote messaging function.
 *
 * Called on a remote cpu when a new cpu synchronization request has been
 * sent to us.  Set our acknowledge bit in cs_mack and fall into the stage 2
 * poll loop (lwkt_cpusync_remote2).
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    lwkt_cpusync_remote2(cs);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if ((cs->cs_mack & gd->gd_cpumask) == 0) {
        if (cs->cs_func)
            cs->cs_func(cs->cs_data);
        atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    } else {
        lwkt_ipiq_t ip;
        int wi;

        ip = &gd->gd_cpusyncq;
        wi = ip->ip_windex & MAXCPUFIFO_MASK;
        ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
        ip->ip_info[wi].arg1 = cs;
        ip->ip_info[wi].arg2 = 0;
        cpu_sfence();
        ++ip->ip_windex;
        if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
            kprintf("cpu %d cm=%016jx %016jx f=%p\n",
                    gd->gd_cpuid,
                    (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
                    cs->cs_func);
        }
    }
}

#endif

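/*
 * Illustrative sketch only (kept under #if 0, not compiled): the two-stage
 * cpusync protocol described above.  lwkt_cpusync_interlock() quiesces the
 * cpus in the mask, the caller may then manipulate shared state, and
 * lwkt_cpusync_deinterlock() runs cs_func on the interlocked cpus (and on
 * the current cpu if it is in cs_mask) before releasing them.  The function
 * and wrapper names are hypothetical; lwkt_cpusync_simple() above performs
 * the same sequence without the window in between.
 */
#if 0
static void
example_sync_func(void *arg)
{
    /* runs on each cpu named in the cpusync mask */
}

static void
example_cpusync(cpumask_t mask, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, example_sync_func, arg);
    lwkt_cpusync_interlock(&cs);
    /* target cpus are now quiesced; modify shared state here */
    lwkt_cpusync_deinterlock(&cs);
}
#endif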