/*	$NetBSD: subr_ipi.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inter-processor interrupt (IPI) interface: asynchronous IPIs to
 * invoke functions with a constant argument and synchronous IPIs
 * with the cross-call support.
 */
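
/*
 * Example (a sketch, not part of this file): a subsystem can register an
 * asynchronous handler once and later trigger it on a remote CPU; the
 * handler then runs on the target CPU from the IPI interrupt.  The names
 * example_handler/example_ipi_id and the target CPU index are
 * hypothetical, and the target must not be curcpu().
 *
 *	static u_int example_ipi_id;
 *
 *	static void
 *	example_handler(void *arg)
 *	{
 *		printf("IPI on CPU %u\n", cpu_index(curcpu()));
 *	}
 *
 *	example_ipi_id = ipi_register(example_handler, NULL);
 *	KASSERT(example_ipi_id != 0);
 *
 *	kpreempt_disable();
 *	ipi_trigger(example_ipi_id, cpu_lookup(1));
 *	kpreempt_enable();
 */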

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/ipi.h>
#include <sys/intr.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * An array of the IPI handlers used for asynchronous invocation.
 * The lock protects the slot allocation.
 */

typedef struct {
	ipi_func_t	func;
	void *		arg;
} ipi_intr_t;

static kmutex_t		ipi_mngmt_lock;
static ipi_intr_t	ipi_intrs[IPI_MAXREG]	__cacheline_aligned;

/*
 * Per-CPU mailbox for IPI messages: it is a single cache line storing
 * up to IPI_MSG_MAX messages.  This interface is built on top of the
 * synchronous IPIs.
 */

#define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
#define	IPI_MSG_MAX	IPI_MSG_SLOTS

typedef struct {
	ipi_msg_t *	msg[IPI_MSG_SLOTS];
} ipi_mbox_t;
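
/*
 * For example, with a 64-byte cache line and 8-byte pointers this yields
 * eight message slots per mailbox; the actual count follows the
 * platform's CACHE_LINE_SIZE.
 */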

/* Mailboxes for the synchronous IPIs. */
static ipi_mbox_t *	ipi_mboxes	__read_mostly;
static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;
static void		ipi_msg_cpu_handler(void *);

/* ID of the handler for the synchronous IPIs - it must be zero. */
#define	IPI_SYNCH_ID	0

#ifndef MULTIPROCESSOR
#define	cpu_ipi(ci)	KASSERT(ci == NULL)
#endif

void
ipi_sysinit(void)
{

	mutex_init(&ipi_mngmt_lock, MUTEX_DEFAULT, IPL_NONE);
	memset(ipi_intrs, 0, sizeof(ipi_intrs));

	/*
	 * Register the handler for synchronous IPIs.  This mechanism
	 * is built on top of the asynchronous interface.  Slot zero is
	 * reserved permanently; it is also handy to have zero as the
	 * failure value for other registrations (which is potentially
	 * less error-prone).
	 */
	ipi_intrs[IPI_SYNCH_ID].func = ipi_msg_cpu_handler;

	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
	    "ipi", "full");
}

void
ipi_percpu_init(void)
{
	const size_t len = ncpu * sizeof(ipi_mbox_t);

	/* Initialise the per-CPU bit fields. */
	for (u_int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		memset(&ci->ci_ipipend, 0, sizeof(ci->ci_ipipend));
	}

	/* Allocate per-CPU IPI mailboxes. */
	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);
	KASSERT(ipi_mboxes != NULL);
}

/*
 * ipi_register: register an asynchronous IPI handler.
 *
 * => Returns an IPI ID greater than zero; zero on failure.
 */
u_int
ipi_register(ipi_func_t func, void *arg)
{
	mutex_enter(&ipi_mngmt_lock);
	for (u_int i = 0; i < IPI_MAXREG; i++) {
		if (ipi_intrs[i].func == NULL) {
			/* Register the function. */
			ipi_intrs[i].func = func;
			ipi_intrs[i].arg = arg;
			mutex_exit(&ipi_mngmt_lock);

			KASSERT(i != IPI_SYNCH_ID);
			return i;
		}
	}
	mutex_exit(&ipi_mngmt_lock);
	printf("WARNING: ipi_register: table full, increase IPI_MAXREG\n");
	return 0;
}

/*
 * ipi_unregister: release the IPI handler given the ID.
 */
void
ipi_unregister(u_int ipi_id)
{
	ipi_msg_t ipimsg = { .func = __FPTRCAST(ipi_func_t, nullop) };

	KASSERT(ipi_id != IPI_SYNCH_ID);
	KASSERT(ipi_id < IPI_MAXREG);

	/* Release the slot. */
	mutex_enter(&ipi_mngmt_lock);
	KASSERT(ipi_intrs[ipi_id].func != NULL);
	ipi_intrs[ipi_id].func = NULL;

	/* Ensure that there are no IPIs in flight. */
	kpreempt_disable();
	ipi_broadcast(&ipimsg, false);
	ipi_wait(&ipimsg);
	kpreempt_enable();
	mutex_exit(&ipi_mngmt_lock);
}

/*
 * ipi_mark_pending: internal routine to mark an IPI pending on the
 * specified CPU (which might be curcpu()).
 */
static bool
ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)
{
	const u_int i = ipi_id >> IPI_BITW_SHIFT;
	const uint32_t bitm = 1U << (ipi_id & IPI_BITW_MASK);

	KASSERT(ipi_id < IPI_MAXREG);
	KASSERT(kpreempt_disabled());

	/* Mark as pending and return true if not previously marked. */
	if ((atomic_load_acquire(&ci->ci_ipipend[i]) & bitm) == 0) {
		membar_release();
		atomic_or_32(&ci->ci_ipipend[i], bitm);
		return true;
	}
	return false;
}
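
/*
 * For example, with the 32-bit pend words used here (an IPI_BITW_SHIFT
 * of 5 and a mask of 31; the exact constants live in sys/ipi.h), IPI ID 7
 * maps to word 0, bit 7, i.e. the bit tested above is
 * ci_ipipend[0] & (1U << 7); an ID of 32 + n (where IPI_MAXREG permits)
 * would map to word 1, bit n.
 */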

/*
 * ipi_trigger: asynchronously send an IPI to the specified CPU.
 */
void
ipi_trigger(u_int ipi_id, struct cpu_info *ci)
{

	KASSERT(curcpu() != ci);
	if (ipi_mark_pending(ipi_id, ci)) {
		cpu_ipi(ci);
	}
}

/*
 * ipi_trigger_multi_internal: the guts of ipi_trigger_multi() and
 * ipi_trigger_broadcast().
 */
static void
ipi_trigger_multi_internal(u_int ipi_id, const kcpuset_t *target,
    bool skip_self)
{
	const cpuid_t selfid = cpu_index(curcpu());
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());
	KASSERT(target != NULL);

	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t cpuid = cpu_index(ci);

		if (!kcpuset_isset(target, cpuid) || cpuid == selfid) {
			continue;
		}
		ipi_trigger(ipi_id, ci);
	}
	if (!skip_self && kcpuset_isset(target, selfid)) {
		ipi_mark_pending(ipi_id, curcpu());
		int s = splhigh();
		ipi_cpu_handler();
		splx(s);
	}
}

/*
 * ipi_trigger_multi: same as ipi_trigger(), but sends to the multiple
 * CPUs in the given target CPU set.
 */
void
ipi_trigger_multi(u_int ipi_id, const kcpuset_t *target)
{
	ipi_trigger_multi_internal(ipi_id, target, false);
}

/*
 * ipi_trigger_broadcast: same as ipi_trigger_multi() applied to
 * kcpuset_attached, optionally skipping the sending CPU.
 */
void
ipi_trigger_broadcast(u_int ipi_id, bool skip_self)
{
	ipi_trigger_multi_internal(ipi_id, kcpuset_attached, skip_self);
}
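
/*
 * Example (a sketch): triggering a previously registered IPI on a set
 * of CPUs.  The example_ipi_id and the chosen CPU indexes are
 * hypothetical.
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);
 *	kcpuset_set(kcp, 1);
 *	kcpuset_set(kcp, 2);
 *
 *	kpreempt_disable();
 *	ipi_trigger_multi(example_ipi_id, kcp);
 *	kpreempt_enable();
 *
 *	kcpuset_destroy(kcp);
 */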

/*
 * put_msg: insert message into the mailbox.
 *
 * Caller is responsible for issuing membar_release first.
 */
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}

/*
 * ipi_cpu_handler: the IPI handler.
 */
void
ipi_cpu_handler(void)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * Handle asynchronous IPIs: inspect per-CPU bit field, extract
	 * IPI ID numbers and execute functions in those slots.
	 */
	for (u_int i = 0; i < IPI_BITWORDS; i++) {
		uint32_t pending, bit;

		if (atomic_load_relaxed(&ci->ci_ipipend[i]) == 0) {
			continue;
		}
		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
		membar_acquire();
		while ((bit = ffs(pending)) != 0) {
			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];

			pending &= ~(1U << bit);
			KASSERT(ipi_hdl->func != NULL);
			ipi_hdl->func(ipi_hdl->arg);
		}
	}
}

/*
 * ipi_msg_cpu_handler: handle synchronous IPIs - iterate mailbox,
 * execute the passed functions and acknowledge the messages.
 */
static void
ipi_msg_cpu_handler(void *arg __unused)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = atomic_load_acquire(&mbox->msg[i])) == NULL) {
			continue;
		}
		atomic_store_relaxed(&mbox->msg[i], NULL);

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
		membar_release();
		atomic_dec_uint(&msg->_pending);
	}
}

/*
 * ipi_unicast: send an IPI to a single CPU.
 *
 * => The target CPU must be remote, i.e. not the local CPU.
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
	membar_release();

	put_msg(&ipi_mboxes[id], msg);
	ipi_trigger(IPI_SYNCH_ID, ci);
}
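
/*
 * Example (a sketch): the synchronous pattern pairs ipi_unicast() (or
 * one of the multicast/broadcast variants) with ipi_wait() on a
 * caller-owned message.  The handler example_handler and the remote
 * target ci are hypothetical.
 *
 *	ipi_msg_t msg = { .func = example_handler, .arg = NULL };
 *
 *	kpreempt_disable();
 *	ipi_unicast(&msg, ci);
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 */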

/*
 * ipi_multicast: send an IPI to each CPU in the specified set.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	local = !!kcpuset_isset(target, cpu_index(self));
	msg->_pending = kcpuset_countset(target) - local;
	membar_release();

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		if (!kcpuset_isset(target, id)) {
			continue;
		}
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}
	if (local) {
		msg->func(msg->arg);
	}
}

/*
 * ipi_broadcast: send an IPI to all CPUs.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_broadcast(ipi_msg_t *msg, bool skip_self)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	msg->_pending = ncpu - 1;
	membar_release();

	/* Broadcast IPIs for remote CPUs. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}

	if (!skip_self) {
		/* Finally, execute locally. */
		msg->func(msg->arg);
	}
}

/*
 * ipi_wait: spin-wait until the message is processed.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (atomic_load_acquire(&msg->_pending)) {
		KASSERT(atomic_load_relaxed(&msg->_pending) < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}