/*	$NetBSD: subr_xcall.c,v 1.37 2023/08/06 17:50:20 riastradh Exp $	*/

/*-
 * Copyright (c) 2007-2010, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cross call support
 *
 * Background
 *
 *	Sometimes it is necessary to modify hardware state that is tied
 *	directly to individual CPUs (such as a CPU's local timer), and
 *	these updates cannot be done remotely by another CPU.  The LWP
 *	requesting the update may be unable to guarantee that it will be
 *	running on the CPU where the update must occur, when the update
 *	occurs.
 *
 *	Additionally, it's sometimes necessary to modify per-CPU software
 *	state from a remote CPU.  Where these update operations are so
 *	rare or the access to the per-CPU data so frequent that the cost
 *	of using locking or atomic operations to provide coherency is
 *	prohibitive, another way must be found.
 *
 *	Cross calls help to solve these types of problems by allowing
 *	any LWP in the system to request that an arbitrary function be
 *	executed on a specific CPU.
 *
 * Implementation
 *
 *	A slow mechanism for making low priority cross calls is
 *	provided.  The function to be executed runs on the remote CPU
 *	within a bound kthread.  No queueing is provided, and the
 *	implementation uses global state.  The function being called may
 *	block briefly on locks, but in doing so must be careful to not
 *	interfere with other cross calls in the system.  The function is
 *	called in thread context and not from a soft interrupt, so it
 *	can ensure that it is not interrupting other code running on the
 *	CPU, and so has exclusive access to the CPU.  Since this facility
 *	is heavyweight, it's expected that it will not be used often.
 *
 *	Cross calls must not allocate memory, as the pagedaemon uses cross
 *	calls (and memory allocation may need to wait on the pagedaemon).
 *
 *	A low-overhead mechanism for high priority calls (XC_HIGHPRI) is
 *	also provided.  The function to be executed runs in software
 *	interrupt context at IPL_SOFTSERIAL level, and is expected to
 *	be very lightweight, e.g. avoid blocking.
 */
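
/*
 * Usage sketch (illustrative only, not compiled as part of this file;
 * the function "update_local_state" and the variables "sc" and "where"
 * are hypothetical).  A typical low priority cross call broadcasts a
 * function to every CPU and then waits on the returned ticket until
 * all CPUs have run it:
 *
 *	static void
 *	update_local_state(void *arg1, void *arg2)
 *	{
 *		struct mydriver_softc *sc = arg1;
 *
 *		... update this CPU's private view of sc; may block
 *		... briefly on locks, must not allocate memory ...
 *	}
 *
 *	uint64_t where;
 *
 *	where = xc_broadcast(0, update_local_state, sc, NULL);
 *	xc_wait(where);
 */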

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.37 2023/08/06 17:50:20 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/xcall.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/evcnt.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#ifdef _RUMPKERNEL
#include "rump_private.h"
#endif

/* Cross-call state box. */
typedef struct {
	kmutex_t	xc_lock;
	kcondvar_t	xc_busy;
	xcfunc_t	xc_func;
	void *		xc_arg1;
	void *		xc_arg2;
	uint64_t	xc_headp;
	uint64_t	xc_donep;
	unsigned int	xc_ipl;
} xc_state_t;

/* Bit indicating high (1) or low (0) priority. */
#define	XC_PRI_BIT	(1ULL << 63)

/* Low priority xcall structures. */
static xc_state_t	xc_low_pri	__cacheline_aligned;

/* High priority xcall structures. */
static xc_state_t	xc_high_pri	__cacheline_aligned;
static void *		xc_sihs[4]	__cacheline_aligned;

/* Event counters. */
static struct evcnt	xc_unicast_ev	__cacheline_aligned;
static struct evcnt	xc_broadcast_ev	__cacheline_aligned;

static void		xc_init(void);
static void		xc_thread(void *);

static inline uint64_t	xc_highpri(xcfunc_t, void *, void *, struct cpu_info *,
			    unsigned int);
static inline uint64_t	xc_lowpri(xcfunc_t, void *, void *, struct cpu_info *);

/* The internal form of IPL */
#define	XC_IPL_MASK		0xff00
/*
 * Assign 0 to XC_IPL_SOFTSERIAL to treat IPL_SOFTSERIAL as the default value
 * (just XC_HIGHPRI).
 */
#define	XC_IPL_SOFTSERIAL	0
#define	XC_IPL_SOFTNET		1
#define	XC_IPL_SOFTBIO		2
#define	XC_IPL_SOFTCLOCK	3
#define	XC_IPL_MAX		XC_IPL_SOFTCLOCK

CTASSERT(XC_IPL_MAX <= __arraycount(xc_sihs));

/*
 * xc_init:
 *
 *	Initialize low and high priority cross-call structures.
 */
static void
xc_init(void)
{
	xc_state_t *xclo = &xc_low_pri, *xchi = &xc_high_pri;

	memset(xclo, 0, sizeof(xc_state_t));
	mutex_init(&xclo->xc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&xclo->xc_busy, "xclow");

	memset(xchi, 0, sizeof(xc_state_t));
	mutex_init(&xchi->xc_lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&xchi->xc_busy, "xchigh");

	/* Set up a softint for each IPL_SOFT*. */
#define	SETUP_SOFTINT(xipl, sipl) do {					\
		xc_sihs[(xipl)] = softint_establish((sipl) | SOFTINT_MPSAFE,\
		    xc__highpri_intr, NULL);				\
		KASSERT(xc_sihs[(xipl)] != NULL);			\
	} while (0)

	SETUP_SOFTINT(XC_IPL_SOFTSERIAL, SOFTINT_SERIAL);
	/*
	 * If an IPL_SOFTXXX has the same value as the previous one, we
	 * don't use that IPL (see xc_encode_ipl), so we don't need to
	 * allocate a softint for it.
	 */
#if IPL_SOFTNET != IPL_SOFTSERIAL
	SETUP_SOFTINT(XC_IPL_SOFTNET, SOFTINT_NET);
#endif
#if IPL_SOFTBIO != IPL_SOFTNET
	SETUP_SOFTINT(XC_IPL_SOFTBIO, SOFTINT_BIO);
#endif
#if IPL_SOFTCLOCK != IPL_SOFTBIO
	SETUP_SOFTINT(XC_IPL_SOFTCLOCK, SOFTINT_CLOCK);
#endif

#undef SETUP_SOFTINT

	evcnt_attach_dynamic(&xc_unicast_ev, EVCNT_TYPE_MISC, NULL,
	   "crosscall", "unicast");
	evcnt_attach_dynamic(&xc_broadcast_ev, EVCNT_TYPE_MISC, NULL,
	   "crosscall", "broadcast");
}

/*
 * Encode an IPL to a form that can be embedded into the flags of
 * xc_broadcast or xc_unicast.
 */
unsigned int
xc_encode_ipl(int ipl)
{

	switch (ipl) {
	case IPL_SOFTSERIAL:
		return __SHIFTIN(XC_IPL_SOFTSERIAL, XC_IPL_MASK);
	/* IPL_SOFT* can be the same value (e.g., on sparc or mips). */
#if IPL_SOFTNET != IPL_SOFTSERIAL
	case IPL_SOFTNET:
		return __SHIFTIN(XC_IPL_SOFTNET, XC_IPL_MASK);
#endif
#if IPL_SOFTBIO != IPL_SOFTNET
	case IPL_SOFTBIO:
		return __SHIFTIN(XC_IPL_SOFTBIO, XC_IPL_MASK);
#endif
#if IPL_SOFTCLOCK != IPL_SOFTBIO
	case IPL_SOFTCLOCK:
		return __SHIFTIN(XC_IPL_SOFTCLOCK, XC_IPL_MASK);
#endif
	}

	panic("Invalid IPL: %d", ipl);
}

/*
 * Extract an XC_IPL from the flags of xc_broadcast or xc_unicast.
 */
static inline unsigned int
xc_extract_ipl(unsigned int flags)
{

	return __SHIFTOUT(flags, XC_IPL_MASK);
}
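
/*
 * Flag encoding sketch (illustrative; "fn", "arg1" and "arg2" are
 * hypothetical).  On a port where IPL_SOFTNET is distinct from
 * IPL_SOFTSERIAL, xc_encode_ipl(IPL_SOFTNET) yields
 * __SHIFTIN(XC_IPL_SOFTNET, XC_IPL_MASK) = (1 << 8) = 0x0100, which a
 * caller combines with XC_HIGHPRI; xc_extract_ipl then recovers the
 * value 1 from the flags:
 *
 *	uint64_t where;
 *
 *	where = xc_broadcast(XC_HIGHPRI | xc_encode_ipl(IPL_SOFTNET),
 *	    fn, arg1, arg2);
 *	xc_wait(where);
 */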

/*
 * xc_init_cpu:
 *
 *	Initialize the cross-call subsystem.  Called once for each CPU
 *	in the system as they are attached.
 */
void
xc_init_cpu(struct cpu_info *ci)
{
	static bool again = false;
	int error __diagused;

	if (!again) {
		/* Autoconfiguration will prevent re-entry. */
		xc_init();
		again = true;
	}
	cv_init(&ci->ci_data.cpu_xcall, "xcall");
	error = kthread_create(PRI_XCALL, KTHREAD_MPSAFE, ci, xc_thread,
	    NULL, NULL, "xcall/%u", ci->ci_index);
	KASSERT(error == 0);
}

/*
 * xc_broadcast:
 *
 *	Trigger a call on all CPUs in the system.
 */
uint64_t
xc_broadcast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		int s, bound;

		if (flags & XC_HIGHPRI)
			s = splsoftserial();
		else
			bound = curlwp_bind();
		(*func)(arg1, arg2);
		if (flags & XC_HIGHPRI)
			splx(s);
		else
			curlwp_bindx(bound);
		return 0;
	}

	if ((flags & XC_HIGHPRI) != 0) {
		int ipl = xc_extract_ipl(flags);
		return xc_highpri(func, arg1, arg2, NULL, ipl);
	} else {
		return xc_lowpri(func, arg1, arg2, NULL);
	}
}

static void
xc_nop(void *arg1, void *arg2)
{

	return;
}

/*
 * xc_barrier:
 *
 *	Broadcast a nop to all CPUs in the system.
 */
void
xc_barrier(unsigned int flags)
{
	uint64_t where;

	where = xc_broadcast(flags, xc_nop, NULL, NULL);
	xc_wait(where);
}

/*
 * xc_unicast:
 *
 *	Trigger a call on one CPU.
 */
uint64_t
xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2,
    struct cpu_info *ci)
{

	KASSERT(ci != NULL);
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		int s, bound;

		KASSERT(ci == curcpu());

		if (flags & XC_HIGHPRI)
			s = splsoftserial();
		else
			bound = curlwp_bind();
		(*func)(arg1, arg2);
		if (flags & XC_HIGHPRI)
			splx(s);
		else
			curlwp_bindx(bound);

		return 0;
	}

	if ((flags & XC_HIGHPRI) != 0) {
		int ipl = xc_extract_ipl(flags);
		return xc_highpri(func, arg1, arg2, ci, ipl);
	} else {
		return xc_lowpri(func, arg1, arg2, ci);
	}
}

/*
 * xc_wait:
 *
 *	Wait for a cross call to complete.
 */
void
xc_wait(uint64_t where)
{
	xc_state_t *xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		return;
	}

	/* Determine whether it is a high or low priority cross-call. */
	if ((where & XC_PRI_BIT) != 0) {
		xc = &xc_high_pri;
		where &= ~XC_PRI_BIT;
	} else {
		xc = &xc_low_pri;
	}

#ifdef __HAVE_ATOMIC64_LOADSTORE
	/* Fast path, if already done. */
	if (atomic_load_acquire(&xc->xc_donep) >= where) {
		return;
	}
#endif

	/* Slow path: block until awoken. */
	mutex_enter(&xc->xc_lock);
	while (xc->xc_donep < where) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	mutex_exit(&xc->xc_lock);
}
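
/*
 * Barrier usage sketch (illustrative; "mydriver_detach", "sc",
 * "unpublish_pointers" and "free_resources" are hypothetical).  A
 * common pattern is to unhook a data structure from the readers' view
 * and then use xc_barrier() so that every CPU has passed through a
 * cross call, and hence can no longer be running code that saw the old
 * state, before the structure is freed:
 *
 *	static void
 *	mydriver_detach(struct mydriver_softc *sc)
 *	{
 *		unpublish_pointers(sc);
 *		xc_barrier(0);
 *		free_resources(sc);
 *	}
 */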

/*
 * xc_lowpri:
 *
 *	Trigger a low priority call on one or more CPUs.
 */
static inline uint64_t
xc_lowpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci)
{
	xc_state_t *xc = &xc_low_pri;
	CPU_INFO_ITERATOR cii;
	uint64_t where;

	mutex_enter(&xc->xc_lock);
	while (xc->xc_headp != xc->xc_donep) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	xc->xc_arg1 = arg1;
	xc->xc_arg2 = arg2;
	xc->xc_func = func;
	if (ci == NULL) {
		xc_broadcast_ev.ev_count++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_RUNNING) == 0)
				continue;
			xc->xc_headp += 1;
			ci->ci_data.cpu_xcall_pending = true;
			cv_signal(&ci->ci_data.cpu_xcall);
		}
	} else {
		xc_unicast_ev.ev_count++;
		xc->xc_headp += 1;
		ci->ci_data.cpu_xcall_pending = true;
		cv_signal(&ci->ci_data.cpu_xcall);
	}
	KASSERT(xc->xc_donep < xc->xc_headp);
	where = xc->xc_headp;
	mutex_exit(&xc->xc_lock);

	/* Return a low priority ticket. */
	KASSERT((where & XC_PRI_BIT) == 0);
	return where;
}

/*
 * xc_thread:
 *
 *	One thread per CPU to dispatch low priority calls.
 */
static void
xc_thread(void *cookie)
{
	struct cpu_info *ci = curcpu();
	xc_state_t *xc = &xc_low_pri;
	void *arg1, *arg2;
	xcfunc_t func;

	mutex_enter(&xc->xc_lock);
	for (;;) {
		while (!ci->ci_data.cpu_xcall_pending) {
			if (xc->xc_headp == xc->xc_donep) {
				cv_broadcast(&xc->xc_busy);
			}
			cv_wait(&ci->ci_data.cpu_xcall, &xc->xc_lock);
			KASSERT(ci == curcpu());
		}
		ci->ci_data.cpu_xcall_pending = false;
		func = xc->xc_func;
		arg1 = xc->xc_arg1;
		arg2 = xc->xc_arg2;
		mutex_exit(&xc->xc_lock);

		KASSERT(func != NULL);
		(*func)(arg1, arg2);

		mutex_enter(&xc->xc_lock);
#ifdef __HAVE_ATOMIC64_LOADSTORE
		atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
		xc->xc_donep++;
#endif
	}
	/* NOTREACHED */
}
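
/*
 * Ticket arithmetic sketch (a worked example of the counters above,
 * with hypothetical starting values).  On an idle 4-CPU system with
 * xc_headp == xc_donep == 10, a low priority broadcast bumps xc_headp
 * once per running CPU, so the caller's ticket is where = 14.  Each
 * xc_thread increments xc_donep by one after running the function, and
 * xc_wait(14) returns once xc_donep reaches 14, i.e. once all four
 * CPUs have finished.
 */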

/*
 * xc_ipi_handler:
 *
 *	Handler of cross-call IPI.
 */
void
xc_ipi_handler(void)
{
	xc_state_t *xc = &xc_high_pri;

	KASSERT(xc->xc_ipl < __arraycount(xc_sihs));
	KASSERT(xc_sihs[xc->xc_ipl] != NULL);

	/* Executes xc__highpri_intr() via software interrupt. */
	softint_schedule(xc_sihs[xc->xc_ipl]);
}

/*
 * xc__highpri_intr:
 *
 *	A software interrupt handler for high priority calls.
 */
void
xc__highpri_intr(void *dummy)
{
	xc_state_t *xc = &xc_high_pri;
	void *arg1, *arg2;
	xcfunc_t func;

	KASSERTMSG(!cpu_intr_p(), "high priority xcall for function %p",
	    xc->xc_func);
	/*
	 * Lockless fetch of the function and its arguments.
	 * Safe since they cannot change at this point.
	 */
	func = xc->xc_func;
	arg1 = xc->xc_arg1;
	arg2 = xc->xc_arg2;

	KASSERT(func != NULL);
	(*func)(arg1, arg2);

	/*
	 * Note the request as done, and if we have reached the head,
	 * the cross-call has been processed; notify waiters, if any.
	 */
	mutex_enter(&xc->xc_lock);
	KASSERT(xc->xc_donep < xc->xc_headp);
#ifdef __HAVE_ATOMIC64_LOADSTORE
	atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
	xc->xc_donep++;
#endif
	if (xc->xc_donep == xc->xc_headp) {
		cv_broadcast(&xc->xc_busy);
	}
	mutex_exit(&xc->xc_lock);
}

/*
 * xc_highpri:
 *
 *	Trigger a high priority call on one or more CPUs.
 */
static inline uint64_t
xc_highpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci,
    unsigned int ipl)
{
	xc_state_t *xc = &xc_high_pri;
	uint64_t where;

	mutex_enter(&xc->xc_lock);
	while (xc->xc_headp != xc->xc_donep) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	xc->xc_func = func;
	xc->xc_arg1 = arg1;
	xc->xc_arg2 = arg2;
	xc->xc_headp += (ci ? 1 : ncpu);
	xc->xc_ipl = ipl;
	where = xc->xc_headp;
	mutex_exit(&xc->xc_lock);

	/*
	 * Send the IPI once the lock is released.
	 * Note: it will handle the local CPU case.
	 */

#ifdef _RUMPKERNEL
	rump_xc_highpri(ci);
#else
#ifdef MULTIPROCESSOR
	kpreempt_disable();
	if (curcpu() == ci) {
		/* Unicast: local CPU. */
		xc_ipi_handler();
	} else if (ci) {
		/* Unicast: remote CPU. */
		xc_send_ipi(ci);
	} else {
		/* Broadcast: all, including local. */
		xc_send_ipi(NULL);
		xc_ipi_handler();
	}
	kpreempt_enable();
#else
	KASSERT(ci == NULL || curcpu() == ci);
	xc_ipi_handler();
#endif
#endif

	/* Indicate a high priority ticket. */
	return (where | XC_PRI_BIT);
}
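
/*
 * High priority usage sketch (illustrative; "reload_local_timer", "sc"
 * and "ci" are hypothetical).  A high priority call runs in software
 * interrupt context on the target CPU, so the function must be very
 * lightweight and must not block:
 *
 *	static void
 *	reload_local_timer(void *arg1, void *arg2)
 *	{
 *		struct mytimer_softc *sc = arg1;
 *
 *		... program this CPU's local timer from sc ...
 *	}
 *
 *	uint64_t where;
 *
 *	where = xc_unicast(XC_HIGHPRI, reload_local_timer, sc, NULL, ci);
 *	xc_wait(where);
 */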