/*	$NetBSD: subr_ipi.c,v 1.8 2020/09/08 16:00:35 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inter-processor interrupt (IPI) interface: asynchronous IPIs to
 * invoke functions with a constant argument and synchronous IPIs
 * with cross-call support.
 */
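
/*
 * Usage overview (added commentary, not part of the original source):
 * the asynchronous interface is ipi_register()/ipi_unregister() plus
 * ipi_trigger()/ipi_trigger_multi()/ipi_trigger_broadcast(); the
 * synchronous cross-call interface is ipi_unicast()/ipi_multicast()/
 * ipi_broadcast() followed by ipi_wait().  Usage sketches appear next
 * to the respective functions below.
 */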

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.8 2020/09/08 16:00:35 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/ipi.h>
#include <sys/intr.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * An array of the IPI handlers used for asynchronous invocation.
 * The lock protects the slot allocation.
 */

typedef struct {
	ipi_func_t	func;
	void *		arg;
} ipi_intr_t;

static kmutex_t		ipi_mngmt_lock;
static ipi_intr_t	ipi_intrs[IPI_MAXREG]	__cacheline_aligned;

/*
 * Per-CPU mailbox for IPI messages: it is a single cache line storing
 * up to IPI_MSG_MAX messages.  This interface is built on top of the
 * synchronous IPIs.
 */

#define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
#define	IPI_MSG_MAX	IPI_MSG_SLOTS
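
/*
 * Worked example (added commentary): on a typical LP64 platform with
 * 64-byte cache lines and 8-byte pointers, IPI_MSG_SLOTS is
 * 64 / 8 = 8, so each per-CPU mailbox holds up to 8 messages.
 */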

typedef struct {
	ipi_msg_t *	msg[IPI_MSG_SLOTS];
} ipi_mbox_t;


/* Mailboxes for the synchronous IPIs. */
static ipi_mbox_t *	ipi_mboxes	__read_mostly;
static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;
static void		ipi_msg_cpu_handler(void *);

/* ID reserved for the synchronous IPI handler; it must be zero. */
#define	IPI_SYNCH_ID	0

#ifndef MULTIPROCESSOR
#define	cpu_ipi(ci)	KASSERT(ci == NULL)
#endif

void
ipi_sysinit(void)
{

	mutex_init(&ipi_mngmt_lock, MUTEX_DEFAULT, IPL_NONE);
	memset(ipi_intrs, 0, sizeof(ipi_intrs));

	/*
	 * Register the handler for synchronous IPIs.  This mechanism
	 * is built on top of the asynchronous interface.  Slot zero is
	 * reserved permanently; it is also handy to use zero as a
	 * failure value for other registrations (as it is potentially
	 * less error-prone).
	 */
	ipi_intrs[IPI_SYNCH_ID].func = ipi_msg_cpu_handler;

	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
	   "ipi", "full");
}

void
ipi_percpu_init(void)
{
	const size_t len = ncpu * sizeof(ipi_mbox_t);

	/* Initialise the per-CPU bit fields. */
	for (u_int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		memset(&ci->ci_ipipend, 0, sizeof(ci->ci_ipipend));
	}

	/* Allocate per-CPU IPI mailboxes. */
	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);
	KASSERT(ipi_mboxes != NULL);
}

/*
 * ipi_register: register an asynchronous IPI handler.
 *
 * => Returns an IPI ID greater than zero on success; zero on failure.
 */
u_int
ipi_register(ipi_func_t func, void *arg)
{
	mutex_enter(&ipi_mngmt_lock);
	for (u_int i = 0; i < IPI_MAXREG; i++) {
		if (ipi_intrs[i].func == NULL) {
			/* Register the function. */
			ipi_intrs[i].func = func;
			ipi_intrs[i].arg = arg;
			mutex_exit(&ipi_mngmt_lock);

			KASSERT(i != IPI_SYNCH_ID);
			return i;
		}
	}
	mutex_exit(&ipi_mngmt_lock);
	printf("WARNING: ipi_register: table full, increase IPI_MAXREG\n");
	return 0;
}
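
/*
 * Usage sketch for the asynchronous interface (added illustration, not
 * part of the original source; example_handler is a hypothetical name):
 *
 *	static void
 *	example_handler(void *arg)
 *	{
 *		... runs on the target CPU in interrupt context ...
 *	}
 *
 *	u_int id = ipi_register(example_handler, NULL);
 *	KASSERT(id != 0);
 *
 *	kpreempt_disable();
 *	ipi_trigger(id, ci);	... ci must be a remote CPU ...
 *	kpreempt_enable();
 *
 *	ipi_unregister(id);
 */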

/*
 * ipi_unregister: release the IPI handler given the ID.
 */
void
ipi_unregister(u_int ipi_id)
{
	ipi_msg_t ipimsg = { .func = __FPTRCAST(ipi_func_t, nullop) };

	KASSERT(ipi_id != IPI_SYNCH_ID);
	KASSERT(ipi_id < IPI_MAXREG);

	/* Release the slot. */
	mutex_enter(&ipi_mngmt_lock);
	KASSERT(ipi_intrs[ipi_id].func != NULL);
	ipi_intrs[ipi_id].func = NULL;

	/* Ensure that there are no IPIs in flight. */
	kpreempt_disable();
	ipi_broadcast(&ipimsg, false);
	ipi_wait(&ipimsg);
	kpreempt_enable();
	mutex_exit(&ipi_mngmt_lock);
}

/*
 * ipi_mark_pending: internal routine to mark an IPI pending on the
 * specified CPU (which might be curcpu()).
 */
static bool
ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)
{
	const u_int i = ipi_id >> IPI_BITW_SHIFT;
	const uint32_t bitm = 1U << (ipi_id & IPI_BITW_MASK);

	KASSERT(ipi_id < IPI_MAXREG);
	KASSERT(kpreempt_disabled());

	/*
	 * Mark the IPI as pending unless it is already marked, in which
	 * case it is already on its way and the caller need not send
	 * another.  The membar_consumer() is a read barrier issued
	 * before the test.
	 */
	if (membar_consumer(), (ci->ci_ipipend[i] & bitm) == 0) {
		atomic_or_32(&ci->ci_ipipend[i], bitm);
		return true;
	}
	return false;
}
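
/*
 * Worked example (added illustration, assuming 32-bit pending words,
 * i.e. IPI_BITW_SHIFT == 5 and IPI_BITW_MASK == 31): ipi_id 9 maps to
 * word i = 9 >> 5 = 0 and bit 9 & 31 = 9, so bitm == 0x200 is set in
 * ci->ci_ipipend[0].
 */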

/*
 * ipi_trigger: asynchronously send an IPI to the specified CPU.
 */
void
ipi_trigger(u_int ipi_id, struct cpu_info *ci)
{

	KASSERT(curcpu() != ci);
	if (ipi_mark_pending(ipi_id, ci)) {
		cpu_ipi(ci);
	}
}

/*
 * ipi_trigger_multi_internal: the guts of ipi_trigger_multi() and
 * ipi_trigger_broadcast().
 */
static void
ipi_trigger_multi_internal(u_int ipi_id, const kcpuset_t *target,
    bool skip_self)
{
	const cpuid_t selfid = cpu_index(curcpu());
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());
	KASSERT(target != NULL);

	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t cpuid = cpu_index(ci);

		if (!kcpuset_isset(target, cpuid) || cpuid == selfid) {
			continue;
		}
		ipi_trigger(ipi_id, ci);
	}
	if (!skip_self && kcpuset_isset(target, selfid)) {
		/* Emulate delivery to the local CPU: run the handler here. */
		ipi_mark_pending(ipi_id, curcpu());
		int s = splhigh();
		ipi_cpu_handler();
		splx(s);
	}
}

/*
 * ipi_trigger_multi: same as ipi_trigger(), but sends to the multiple
 * CPUs in the given target CPU set.
 */
void
ipi_trigger_multi(u_int ipi_id, const kcpuset_t *target)
{
	ipi_trigger_multi_internal(ipi_id, target, false);
}

/*
 * ipi_trigger_broadcast: same as ipi_trigger_multi(), but targets
 * kcpuset_attached, optionally skipping the sending CPU.
 */
void
ipi_trigger_broadcast(u_int ipi_id, bool skip_self)
{
	ipi_trigger_multi_internal(ipi_id, kcpuset_attached, skip_self);
}

/*
 * put_msg: insert message into the mailbox.
 */
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (__predict_true(mbox->msg[i] == NULL) &&
		    atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}
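
/*
 * Design note (added commentary): a slot is claimed with a CAS, and
 * only the target CPU clears a slot (in ipi_msg_cpu_handler), so
 * concurrent senders may race for a slot observed as NULL; the CAS
 * plus the backoff retry loop resolve that race without a lock.
 */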

/*
 * ipi_cpu_handler: the IPI handler; runs the pending asynchronous
 * IPIs on the current CPU.
 */
void
ipi_cpu_handler(void)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * Handle asynchronous IPIs: inspect per-CPU bit field, extract
	 * IPI ID numbers and execute functions in those slots.
	 */
	for (u_int i = 0; i < IPI_BITWORDS; i++) {
		uint32_t pending, bit;

		if (ci->ci_ipipend[i] == 0) {
			continue;
		}
		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_producer();
#endif
		/* ffs() returns a 1-based bit number; --bit makes it 0-based. */
		while ((bit = ffs(pending)) != 0) {
			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];

			pending &= ~(1U << bit);
			KASSERT(ipi_hdl->func != NULL);
			ipi_hdl->func(ipi_hdl->arg);
		}
	}
}

/*
 * ipi_msg_cpu_handler: handle synchronous IPIs - iterate the mailbox,
 * execute the passed functions and acknowledge the messages.
 */
static void
ipi_msg_cpu_handler(void *arg __unused)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = mbox->msg[i]) == NULL) {
			continue;
		}
		mbox->msg[i] = NULL;

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_producer();
#endif
		atomic_dec_uint(&msg->_pending);
	}
}

/*
 * ipi_unicast: send an IPI to a single CPU.
 *
 * => The target CPU must be remote, i.e. it must not be the local CPU.
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
	membar_producer();

	put_msg(&ipi_mboxes[id], msg);
	ipi_trigger(IPI_SYNCH_ID, ci);
}
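
/*
 * Usage sketch (added illustration, not part of the original source;
 * example_func and data are hypothetical):
 *
 *	ipi_msg_t msg = { .func = example_func, .arg = &data };
 *
 *	kpreempt_disable();
 *	ipi_unicast(&msg, ci);	... ci is a remote CPU ...
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 */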

/*
 * ipi_multicast: send an IPI to each CPU in the specified set.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	local = !!kcpuset_isset(target, cpu_index(self));
	/* Count remote CPUs only: the local CPU runs the handler inline. */
	msg->_pending = kcpuset_countset(target) - local;
	membar_producer();

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		if (!kcpuset_isset(target, id)) {
			continue;
		}
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}
	if (local) {
		msg->func(msg->arg);
	}
}

/*
 * ipi_broadcast: send an IPI to all CPUs.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_broadcast(ipi_msg_t *msg, bool skip_self)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	msg->_pending = ncpu - 1;
	membar_producer();

	/* Broadcast IPIs for remote CPUs. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}

	if (!skip_self) {
		/* Finally, execute locally. */
		msg->func(msg->arg);
	}
}
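
/*
 * Example cross-call (added illustration, not part of the original
 * source; count_up and counter are hypothetical):
 *
 *	static void
 *	count_up(void *arg)
 *	{
 *		atomic_inc_uint(arg);
 *	}
 *
 *	unsigned int counter = 0;
 *	ipi_msg_t msg = { .func = count_up, .arg = &counter };
 *
 *	kpreempt_disable();
 *	ipi_broadcast(&msg, false);
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 *
 * Afterwards counter equals ncpu: each CPU, including the local one,
 * has run count_up() once.
 */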

/*
 * ipi_wait: spin-wait until the message is processed.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (msg->_pending) {
		KASSERT(msg->_pending < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}
456