/*	$NetBSD: subr_xcall.c,v 1.38 2024/03/01 04:32:38 mrg Exp $	*/

/*-
 * Copyright (c) 2007-2010, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cross call support
 *
 * Background
 *
 *	Sometimes it is necessary to modify hardware state that is tied
 *	directly to individual CPUs (such as a CPU's local timer), and
 *	these updates cannot be done remotely by another CPU.  The LWP
 *	requesting the update may be unable to guarantee that it will be
 *	running on the CPU where the update must occur at the time the
 *	update takes place.
 *
 *	Additionally, it's sometimes necessary to modify per-CPU software
 *	state from a remote CPU.  Where these update operations are so
 *	rare or the access to the per-CPU data so frequent that the cost
 *	of using locking or atomic operations to provide coherency is
 *	prohibitive, another way must be found.
 *
 *	Cross calls help to solve these kinds of problems by allowing
 *	any LWP in the system to request that an arbitrary function be
 *	executed on a specific CPU.
 *
 * Implementation
 *
 *	A slow mechanism for making low priority cross calls is
 *	provided.  The function to be executed runs on the remote CPU
 *	within a bound kthread.  No queueing is provided, and the
 *	implementation uses global state.  The function being called may
 *	block briefly on locks, but in doing so must be careful not to
 *	interfere with other cross calls in the system.  The function is
 *	called in thread context and not from a soft interrupt, so it
 *	can ensure that it is not interrupting other code running on the
 *	CPU, and so has exclusive access to the CPU.  Since this facility
 *	is heavyweight, it's expected that it will not be used often.
 *
 *	Cross calls must not allocate memory, as the pagedaemon uses cross
 *	calls (and memory allocation may need to wait on the pagedaemon).
 *
 *	A low-overhead mechanism for high priority calls (XC_HIGHPRI) is
 *	also provided.  The function to be executed runs in software
 *	interrupt context, by default at IPL_SOFTSERIAL, and is expected
 *	to be very lightweight, e.g. it should avoid blocking.
 */
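
/*
 * Usage sketch (illustrative only, not part of the original file):
 * a minimal picture of the API described above.  example_xcfunc and
 * example_caller are hypothetical names; the calls follow the
 * xc_broadcast/xc_unicast/xc_wait signatures defined below.
 */
#if 0
static void
example_xcfunc(void *arg1, void *arg2)
{
	/* Hypothetical callback: per-CPU work goes here. */
}

static void
example_caller(struct cpu_info *ci)
{
	uint64_t where;

	/* Low priority: runs in each CPU's bound xcall kthread. */
	where = xc_broadcast(0, example_xcfunc, NULL, NULL);
	xc_wait(where);

	/*
	 * High priority: runs in a software interrupt on the target
	 * CPU, here at IPL_SOFTNET instead of the default
	 * IPL_SOFTSERIAL.
	 */
	where = xc_unicast(XC_HIGHPRI | xc_encode_ipl(IPL_SOFTNET),
	    example_xcfunc, NULL, NULL, ci);
	xc_wait(where);
}
#endif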

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.38 2024/03/01 04:32:38 mrg Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/xcall.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/evcnt.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#ifdef _RUMPKERNEL
#include "rump_private.h"
#endif

/* Cross-call state box. */
typedef struct {
	kmutex_t	xc_lock;	/* serializes access to this box */
	kcondvar_t	xc_busy;	/* signalled as requests complete */
	xcfunc_t	xc_func;	/* function to execute */
	void *		xc_arg1;	/* first argument for xc_func */
	void *		xc_arg2;	/* second argument for xc_func */
	uint64_t	xc_headp;	/* ticket: requests issued so far */
	uint64_t	xc_donep;	/* ticket: requests completed so far */
	unsigned int	xc_ipl;		/* high priority only: xc_sihs index */
} xc_state_t;

/* Bit indicating high (1) or low (0) priority. */
#define	XC_PRI_BIT	(1ULL << 63)

/* Low priority xcall structures. */
static xc_state_t	xc_low_pri	__cacheline_aligned;

/* High priority xcall structures. */
static xc_state_t	xc_high_pri	__cacheline_aligned;
static void *		xc_sihs[4]	__cacheline_aligned;

/* Event counters. */
static struct evcnt	xc_unicast_ev	__cacheline_aligned;
static struct evcnt	xc_broadcast_ev	__cacheline_aligned;

static void		xc_init(void);
static void		xc_thread(void *);

static inline uint64_t	xc_highpri(xcfunc_t, void *, void *, struct cpu_info *,
			    unsigned int);
static inline uint64_t	xc_lowpri(xcfunc_t, void *, void *, struct cpu_info *);

/* The internal form of IPL */
#define XC_IPL_MASK		0xff00
/*
 * Assign 0 to XC_IPL_SOFTSERIAL to treat IPL_SOFTSERIAL as the default value
 * (just XC_HIGHPRI).
 */
#define XC_IPL_SOFTSERIAL	0
#define XC_IPL_SOFTNET		1
#define XC_IPL_SOFTBIO		2
#define XC_IPL_SOFTCLOCK	3
#define XC_IPL_MAX		XC_IPL_SOFTCLOCK

CTASSERT(XC_IPL_MAX < __arraycount(xc_sihs));

/*
 * xc_init:
 *
 *	Initialize low and high priority cross-call structures.
 */
static void
xc_init(void)
{
	xc_state_t *xclo = &xc_low_pri, *xchi = &xc_high_pri;

	memset(xclo, 0, sizeof(xc_state_t));
	mutex_init(&xclo->xc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&xclo->xc_busy, "xclow");

	memset(xchi, 0, sizeof(xc_state_t));
	mutex_init(&xchi->xc_lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&xchi->xc_busy, "xchigh");

	/* Set up a softint for each IPL_SOFT*. */
#define SETUP_SOFTINT(xipl, sipl) do {					\
		xc_sihs[(xipl)] = softint_establish((sipl) | SOFTINT_MPSAFE,\
		    xc__highpri_intr, NULL);				\
		KASSERT(xc_sihs[(xipl)] != NULL);			\
	} while (0)

	SETUP_SOFTINT(XC_IPL_SOFTSERIAL, SOFTINT_SERIAL);
	/*
	 * If an IPL_SOFT* level has the same value as the previous one,
	 * we don't use that IPL (see xc_encode_ipl), so we don't need to
	 * allocate a softint for it.
	 */
#if IPL_SOFTNET != IPL_SOFTSERIAL
	SETUP_SOFTINT(XC_IPL_SOFTNET, SOFTINT_NET);
#endif
#if IPL_SOFTBIO != IPL_SOFTNET
	SETUP_SOFTINT(XC_IPL_SOFTBIO, SOFTINT_BIO);
#endif
#if IPL_SOFTCLOCK != IPL_SOFTBIO
	SETUP_SOFTINT(XC_IPL_SOFTCLOCK, SOFTINT_CLOCK);
#endif

#undef SETUP_SOFTINT

	evcnt_attach_dynamic(&xc_unicast_ev, EVCNT_TYPE_MISC, NULL,
	   "crosscall", "unicast");
	evcnt_attach_dynamic(&xc_broadcast_ev, EVCNT_TYPE_MISC, NULL,
	   "crosscall", "broadcast");
}

/*
 * Encode an IPL to a form that can be embedded into flags of xc_broadcast
 * or xc_unicast.
 */
unsigned int
xc_encode_ipl(int ipl)
{

	switch (ipl) {
	case IPL_SOFTSERIAL:
		return __SHIFTIN(XC_IPL_SOFTSERIAL, XC_IPL_MASK);
	/* IPL_SOFT* can be the same value (e.g., on sparc or mips). */
#if IPL_SOFTNET != IPL_SOFTSERIAL
	case IPL_SOFTNET:
		return __SHIFTIN(XC_IPL_SOFTNET, XC_IPL_MASK);
#endif
#if IPL_SOFTBIO != IPL_SOFTNET
	case IPL_SOFTBIO:
		return __SHIFTIN(XC_IPL_SOFTBIO, XC_IPL_MASK);
#endif
#if IPL_SOFTCLOCK != IPL_SOFTBIO
	case IPL_SOFTCLOCK:
		return __SHIFTIN(XC_IPL_SOFTCLOCK, XC_IPL_MASK);
#endif
	}

	panic("Invalid IPL: %d", ipl);
}

/*
 * Extract an XC_IPL from flags of xc_broadcast or xc_unicast.
 */
static inline unsigned int
xc_extract_ipl(unsigned int flags)
{

	return __SHIFTOUT(flags, XC_IPL_MASK);
}
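
/*
 * Worked example (illustrative): on a port where the IPL_SOFT* values
 * are distinct, xc_encode_ipl(IPL_SOFTNET) returns
 * __SHIFTIN(XC_IPL_SOFTNET, XC_IPL_MASK) == 0x0100, and
 * xc_extract_ipl(0x0100) recovers XC_IPL_SOFTNET (1), the index of the
 * matching softint in xc_sihs[].
 */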

/*
 * xc_init_cpu:
 *
 *	Initialize the cross-call subsystem.  Called once for each CPU
 *	in the system as they are attached.
 */
void
xc_init_cpu(struct cpu_info *ci)
{
	static bool again = false;
	int error __diagused;

	if (!again) {
		/* Autoconfiguration will prevent re-entry. */
		xc_init();
		again = true;
	}
	cv_init(&ci->ci_data.cpu_xcall, "xcall");
	error = kthread_create(PRI_XCALL, KTHREAD_MPSAFE, ci, xc_thread,
	    NULL, NULL, "xcall/%u", ci->ci_index);
	KASSERT(error == 0);
}

/*
 * xc_broadcast:
 *
 *	Trigger a call on all CPUs in the system.
 */
uint64_t
xc_broadcast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		int s, bound;

		if (flags & XC_HIGHPRI)
			s = splsoftserial();
		else
			bound = curlwp_bind();
		(*func)(arg1, arg2);
		if (flags & XC_HIGHPRI)
			splx(s);
		else
			curlwp_bindx(bound);
		return 0;
	}

	if ((flags & XC_HIGHPRI) != 0) {
		int ipl = xc_extract_ipl(flags);
		return xc_highpri(func, arg1, arg2, NULL, ipl);
	} else {
		return xc_lowpri(func, arg1, arg2, NULL);
	}
}

static void
xc_nop(void *arg1, void *arg2)
{

	return;
}

/*
 * xc_barrier:
 *
 *	Broadcast a nop to all CPUs in the system.
 */
void
xc_barrier(unsigned int flags)
{
	uint64_t where;

	where = xc_broadcast(flags, xc_nop, NULL, NULL);
	xc_wait(where);
}
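
/*
 * Illustrative use (a sketch, not the only pattern): after unlinking a
 * data structure that cross-called code may still be referencing,
 * xc_barrier(0) does not return until every running CPU has executed
 * the nop in its xcall kthread, so the caller can then assume no CPU
 * is still inside a previously issued low priority cross call.
 */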

/*
 * xc_unicast:
 *
 *	Trigger a call on one CPU.
 */
uint64_t
xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2,
    struct cpu_info *ci)
{

	KASSERT(ci != NULL);
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		int s, bound;

		KASSERT(ci == curcpu());

		if (flags & XC_HIGHPRI)
			s = splsoftserial();
		else
			bound = curlwp_bind();
		(*func)(arg1, arg2);
		if (flags & XC_HIGHPRI)
			splx(s);
		else
			curlwp_bindx(bound);

		return 0;
	}

	if ((flags & XC_HIGHPRI) != 0) {
		int ipl = xc_extract_ipl(flags);
		return xc_highpri(func, arg1, arg2, ci, ipl);
	} else {
		return xc_lowpri(func, arg1, arg2, ci);
	}
}

/*
 * xc_wait:
 *
 *	Wait for a cross call to complete.
 */
void
xc_wait(uint64_t where)
{
	xc_state_t *xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		return;
	}

	/* Determine whether it is a high or low priority cross-call. */
	if ((where & XC_PRI_BIT) != 0) {
		xc = &xc_high_pri;
		where &= ~XC_PRI_BIT;
	} else {
		xc = &xc_low_pri;
	}

#ifdef __HAVE_ATOMIC64_LOADSTORE
	/* Fast path, if already done. */
	if (atomic_load_acquire(&xc->xc_donep) >= where) {
		return;
	}
#endif

	/* Slow path: block until awoken. */
	mutex_enter(&xc->xc_lock);
	while (xc->xc_donep < where) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	mutex_exit(&xc->xc_lock);
}
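
/*
 * Illustrative ticket arithmetic: on a system with 4 running CPUs, a
 * low priority broadcast advances xc_headp by 4 and returns where ==
 * xc_headp; each CPU's xc_thread() increments xc_donep as it finishes,
 * and xc_wait(where) returns once xc_donep has caught up to where.
 */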

/*
 * xc_lowpri:
 *
 *	Trigger a low priority call on one or more CPUs.
 */
static inline uint64_t
xc_lowpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci)
{
	xc_state_t *xc = &xc_low_pri;
	CPU_INFO_ITERATOR cii;
	uint64_t where;

	mutex_enter(&xc->xc_lock);
	while (xc->xc_headp != xc->xc_donep) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	xc->xc_arg1 = arg1;
	xc->xc_arg2 = arg2;
	xc->xc_func = func;
	if (ci == NULL) {
		xc_broadcast_ev.ev_count++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_RUNNING) == 0)
				continue;
			xc->xc_headp += 1;
			ci->ci_data.cpu_xcall_pending = true;
			cv_signal(&ci->ci_data.cpu_xcall);
		}
	} else {
		xc_unicast_ev.ev_count++;
		xc->xc_headp += 1;
		ci->ci_data.cpu_xcall_pending = true;
		cv_signal(&ci->ci_data.cpu_xcall);
	}
	KASSERT(xc->xc_donep < xc->xc_headp);
	where = xc->xc_headp;
	mutex_exit(&xc->xc_lock);

	/* Return a low priority ticket. */
	KASSERT((where & XC_PRI_BIT) == 0);
	return where;
}

/*
 * xc_thread:
 *
 *	One thread per CPU, used to dispatch low priority calls.
 */
static void
xc_thread(void *cookie)
{
	struct cpu_info *ci = curcpu();
	xc_state_t *xc = &xc_low_pri;
	void *arg1, *arg2;
	xcfunc_t func;
	struct lwp *l = curlwp;

	KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d",
	    l, l->l_nopreempt);

	mutex_enter(&xc->xc_lock);
	for (;;) {
		while (!ci->ci_data.cpu_xcall_pending) {
			if (xc->xc_headp == xc->xc_donep) {
				cv_broadcast(&xc->xc_busy);
			}
			cv_wait(&ci->ci_data.cpu_xcall, &xc->xc_lock);
			KASSERT(ci == curcpu());
		}
		ci->ci_data.cpu_xcall_pending = false;
		func = xc->xc_func;
		arg1 = xc->xc_arg1;
		arg2 = xc->xc_arg2;
		mutex_exit(&xc->xc_lock);

		KASSERT(func != NULL);
		(*func)(arg1, arg2);

		KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d func %p",
		    l, l->l_nopreempt, func);

		mutex_enter(&xc->xc_lock);
#ifdef __HAVE_ATOMIC64_LOADSTORE
		atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
		xc->xc_donep++;
#endif
	}
	/* NOTREACHED */
}

/*
 * xc_ipi_handler:
 *
 *	Handler of cross-call IPI.
 */
void
xc_ipi_handler(void)
{
	xc_state_t *xc = &xc_high_pri;

	KASSERT(xc->xc_ipl < __arraycount(xc_sihs));
	KASSERT(xc_sihs[xc->xc_ipl] != NULL);

	/* Executes xc__highpri_intr() via software interrupt. */
	softint_schedule(xc_sihs[xc->xc_ipl]);
}

/*
 * xc__highpri_intr:
 *
 *	A software interrupt handler for high priority calls.
 */
void
xc__highpri_intr(void *dummy)
{
	xc_state_t *xc = &xc_high_pri;
	void *arg1, *arg2;
	xcfunc_t func;

	KASSERTMSG(!cpu_intr_p(), "high priority xcall for function %p",
	    xc->xc_func);
	/*
	 * Lock-less fetch of the function and its arguments.
	 * Safe since they cannot change at this point.
	 */
	func = xc->xc_func;
	arg1 = xc->xc_arg1;
	arg2 = xc->xc_arg2;

	KASSERT(func != NULL);
	(*func)(arg1, arg2);

	/*
	 * Mark the request as done, and if we have reached the head,
	 * the cross-call has been processed - notify waiters, if any.
	 */
	mutex_enter(&xc->xc_lock);
	KASSERT(xc->xc_donep < xc->xc_headp);
#ifdef __HAVE_ATOMIC64_LOADSTORE
	atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
	xc->xc_donep++;
#endif
	if (xc->xc_donep == xc->xc_headp) {
		cv_broadcast(&xc->xc_busy);
	}
	mutex_exit(&xc->xc_lock);
}

/*
 * xc_highpri:
 *
 *	Trigger a high priority call on one or more CPUs.
 */
static inline uint64_t
xc_highpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci,
    unsigned int ipl)
{
	xc_state_t *xc = &xc_high_pri;
	uint64_t where;

	mutex_enter(&xc->xc_lock);
	while (xc->xc_headp != xc->xc_donep) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	xc->xc_func = func;
	xc->xc_arg1 = arg1;
	xc->xc_arg2 = arg2;
	xc->xc_headp += (ci ? 1 : ncpu);
	xc->xc_ipl = ipl;
	where = xc->xc_headp;
	mutex_exit(&xc->xc_lock);

	/*
	 * Send the IPI once the lock is released.
	 * Note: it will handle the local CPU case.
	 */

#ifdef _RUMPKERNEL
	rump_xc_highpri(ci);
#else
#ifdef MULTIPROCESSOR
	kpreempt_disable();
	if (curcpu() == ci) {
		/* Unicast: local CPU. */
		xc_ipi_handler();
	} else if (ci) {
		/* Unicast: remote CPU. */
		xc_send_ipi(ci);
	} else {
		/* Broadcast: all, including local. */
		xc_send_ipi(NULL);
		xc_ipi_handler();
	}
	kpreempt_enable();
#else
	KASSERT(ci == NULL || curcpu() == ci);
	xc_ipi_handler();
#endif
#endif

	/* Indicate a high priority ticket. */
	return (where | XC_PRI_BIT);
}
595