/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.27 2008/05/18 20:57:56 nth Exp $
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#ifdef SMP
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
static __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;	/* passive IPI messages */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */
static int ipiq_optimized = 1;	/* XXX temporary sysctl */
static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int	panic_ipiq_cpu = -1;
static int	panic_ipiq_count = 100;
#endif
#endif

#ifdef SMP
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
    "Number of IPI's sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
    "Number of fifo full conditions detected");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
    "Number of passive IPI messages sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
    "Number of cpu synchronizations");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0,
    "Avoid hardware IPI when target cpu is already processing its queue");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "Set to 1 to enable IPI queue debugging");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE	(sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)

#endif	/* SMP */

#ifdef SMP

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  A given FIFO is written only by the originating
 * cpu and drained only by the target cpu, so queueing a message requires
 * no locks.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * The target ipiq may have gotten filled up due to passive IPIs and thus
     * not be aware that its queue is too full, so be sure to issue an
     * ipiq interrupt to the target cpu.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	cpu_enable_intr();
	++ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}

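/*
 * A minimal usage sketch of lwkt_send_ipiq3().  The handler name and its
 * arguments are hypothetical; any ipifunc3_t-compatible function works.
 * The handler runs on the target cpu from its IPI processing loop, inside
 * a critical section.
 */
#if 0
static void
example_remote_handler(void *arg1, int arg2, struct intrframe *frame)
{
    /* Executes on the target cpu; arg1/arg2 were queued by the sender. */
    kprintf("cpu %d got arg2=%d\n", mycpu->gd_cpuid, arg2);
}

static void
example_send(globaldata_t target)
{
    /* Fire-and-forget; returns a sequence usable with lwkt_wait_ipiq(). */
    lwkt_send_ipiq3(target, example_remote_handler, NULL, 42);
}
#endif
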
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    ++gd->gd_intr_nesting_level;
    logipiq(send_pasv, func, arg1, arg2, gd, target);
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	cpu_enable_intr();
	++ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}

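/*
 * A minimal usage sketch of the passive variant.  The handler and malloc
 * type below are hypothetical.  Because no hardware IPI is normally
 * generated, this path is only suitable for requests that can tolerate a
 * tick of latency, e.g. returning memory to the cpu that owns it.
 */
#if 0
static void
example_deferred_free(void *arg1, int arg2, struct intrframe *frame)
{
    kfree(arg1, M_TEMP);	/* runs on the cpu that owns the memory */
}

static void
example_queue_free(globaldata_t owner, void *ptr)
{
    lwkt_send_ipiq3_passive(owner, example_deferred_free, ptr, 0);
}
#endif
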
/*
 * Send an IPI request without blocking; return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs, but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	--gd->gd_intr_nesting_level;
	crit_exit();
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}

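/*
 * A minimal usage sketch of the nonblocking variant, with a hypothetical
 * fallback.  The caller must already be in a critical section.  On ENOENT
 * the message was not queued and the caller must retry, defer, or fall
 * back to the blocking lwkt_send_ipiq3().
 */
#if 0
static void
example_send_nowait(globaldata_t target, ipifunc3_t func, void *arg)
{
    crit_enter();
    if (lwkt_send_ipiq3_nowait(target, func, arg, 0) == ENOENT) {
	/* Target FIFO was too full; use the blocking path instead. */
	lwkt_send_ipiq3(target, func, arg, 0);
    }
    crit_exit();
}
#endif
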
/*
 * Deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	mask &= ~CPUMASK(cpuid);
	++count;
    }
    return(count);
}

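/*
 * A minimal usage sketch of the mask variant, broadcasting a caller-supplied
 * handler to every other active cpu.  The example function is hypothetical;
 * the gd_other_cpus/smp_active_mask idiom mirrors the one used by
 * lwkt_synchronize_ipiqs() below.
 */
#if 0
static void
example_broadcast(ipifunc3_t func, void *arg)
{
    cpumask_t mask;

    mask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq3_mask(mask, func, arg, 0);
}
#endif
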
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0) {
			kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
				mycpu->gd_cpuid, target->gd_cpuid,
				ip->ip_xindex - seq);
		}
		if (maxc < -1000000)
			panic("LWKT_WAIT_IPIQ");
		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

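/*
 * A minimal sketch of a synchronous round trip, pairing the sequence
 * number returned by lwkt_send_ipiq3() with lwkt_wait_ipiq().  The wrapper
 * is hypothetical; func is any caller-supplied handler.  lwkt_wait_ipiq()
 * must be called from within a critical section.
 */
#if 0
static void
example_send_and_wait(globaldata_t target, ipifunc3_t func, void *arg)
{
    int seq;

    crit_enter();
    seq = lwkt_send_ipiq3(target, func, arg, 0);
    lwkt_wait_ipiq(target, seq);	/* returns once func has executed */
    crit_exit();
}
#endif
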
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions: one where no interrupt frame is available
 * (when called from the send code and from splz), and one where an
 * interrupt frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

    ++gd->gd_processing_ipiq;
again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	if (gd->gd_curthread->td_cscount == 0)
	    goto again;
	/* need_ipiq(); do not reflag */
    }
    --gd->gd_processing_ipiq;
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    /* need_ipiq(); do not reflag */
	}
    }
}

#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

#if 0
    if (iqticks[mygd->gd_cpuid] != ticks) {
	    iqticks[mygd->gd_cpuid] = ticks;
	    iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks_wr);
	iqcount[mygd->gd_cpuid] = 0;
#if 0
	if (++iqterm[mygd->gd_cpuid] > 10)
		panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
	int i;
	for (i = 0; i < ncpus; ++i) {
		if (globaldata_find(i)->gd_infomsg)
			kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }
#endif

    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance and
     *	     thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_func[ri];
	copy_arg1 = ip->ip_arg1[ri];
	copy_arg2 = ip->ip_arg2[ri];
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
		kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
			mycpu->gd_cpuid,
			copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
			(frame ? (void *)frame->if_eip : NULL));
#elif defined(__x86_64__)
			(frame ? (void *)frame->if_rip : NULL));
#else
			NULL);
#endif
	}
#endif
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
		if (--panic_ipiq_count == 0) {
#ifdef DDB
			Debugger("PANIC_DEBUG");
#else
			panic("PANIC_DEBUG");
#endif
		}
	}
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * If the queue is empty release ip_npoll to enable the other cpu to
     * send us an IPI interrupt again.
     *
     * Return non-zero if there is still more in the queue.  Note that we
     * must re-check the indexes after potentially releasing ip_npoll.  The
     * caller must loop or otherwise ensure that a loop will occur prior to
     * blocking.
     */
    if (ip->ip_rindex == ip->ip_windex)
	    atomic_poll_release_int(&ip->ip_npoll);
    cpu_lfence();
    return (ip->ip_rindex != ip->ip_windex);
}

static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
	wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
	__DEVOLATILE(void *, &other_cpumask));

    while (other_cpumask != 0) {
	tsleep_interlock(&other_cpumask, 0);
	if (other_cpumask != 0)
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}

#endif

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}

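/*
 * A minimal sketch of the interlock/deinterlock pairing that
 * lwkt_cpusync_simple() wraps.  The callback and the work done between the
 * two calls are hypothetical.  While interlocked, the specified cpus spin
 * quiescently in lwkt_cpusync_remote2(), which is what makes it safe to
 * modify structures they might otherwise be traversing.
 */
#if 0
static void
example_cs_func(void *data)
{
    /* Runs on every cpu in the mask (and the current cpu if included). */
}

static void
example_quiesce(cpumask_t mask, void *data)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, example_cs_func, data);
    lwkt_cpusync_interlock(&cs);	/* remote cpus now quiescent */
    /* ... modify shared structures safely here ... */
    lwkt_cpusync_deinterlock(&cs);	/* runs cs_func, releases cpus */
}
#endif
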
void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
#ifdef SMP
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack):  0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
    cs->cs_mack = 0;
    crit_enter_id("cpusync");
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, mask);
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
    }
#else
    cs->cs_mack = 0;
#endif
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
#ifdef SMP
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack):  mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cpu_ccfence();
    cs->cs_mack = 0;
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	    cs->cs_func(cs->cs_data);
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, mask);
    }
    crit_exit_id("cpusync");
#else
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	cs->cs_func(cs->cs_data);
#endif
}

#ifdef SMP

/*
 * Helper IPI remote messaging function.
 *
 * Called on the remote cpu when a new cpu synchronization request has been
 * sent to us.  Acknowledge the interlock by setting our bit in cs_mack,
 * then drop into the stage 2 polling loop.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    lwkt_cpusync_remote2(cs);
}

/*
 * Helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if ((cs->cs_mack & gd->gd_cpumask) == 0) {
	if (cs->cs_func)
		cs->cs_func(cs->cs_data);
	atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    } else {
	lwkt_ipiq_t ip;
	int wi;

	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_arg1[wi] = cs;
	ip->ip_arg2[wi] = 0;
	cpu_sfence();
	++ip->ip_windex;
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
		kprintf("cpu %d cm=%016jx %016jx f=%p\n",
			gd->gd_cpuid,
			(intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
			cs->cs_func);
	}
    }
}

#endif
853