/*	$NetBSD: subr_xcall.c,v 1.9 2008/04/28 20:24:04 martin Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cross call support
 *
 * Background
 *
 *      Sometimes it is necessary to modify hardware state that is tied
 *      directly to individual CPUs (such as a CPU's local timer), and
 *      these updates can not be done remotely by another CPU.  The LWP
 *      requesting the update may be unable to guarantee that it will be
 *      running on the CPU where the update must occur, when the update
 *      occurs.
 *
 *      Additionally, it's sometimes necessary to modify per-CPU software
 *      state from a remote CPU.  Where these update operations are so
 *      rare or the access to the per-CPU data so frequent that the cost
 *      of using locking or atomic operations to provide coherency is
 *      prohibitive, another way must be found.
 *
 *      Cross calls help to solve these types of problem by allowing
 *      any CPU in the system to request that an arbitrary function be
 *      executed on any other CPU.
 *
 * Implementation
 *
 *      A slow mechanism for making 'low priority' cross calls is
 *      provided.  The function to be executed runs on the remote CPU
 *      within a bound kthread.  No queueing is provided, and the
 *      implementation uses global state.  The function being called may
 *      block briefly on locks, but in doing so must be careful to not
 *      interfere with other cross calls in the system.  The function is
 *      called in thread context and not from a soft interrupt, so it
 *      can ensure that it is not interrupting other code running on the
 *      CPU, and so has exclusive access to the CPU.  Since this facility
 *      is heavyweight, it's expected that it will not be used often.
 *
 *      Cross calls must not allocate memory, as the pagedaemon uses
 *      them (and memory allocation may need to wait on the pagedaemon).
 *
 * Future directions
 *
 *      Add a low-overhead mechanism to run cross calls in interrupt
 *      context (XC_HIGHPRI).
 */
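
/*
 * Example usage (an illustrative sketch; example_func and example_sync
 * are hypothetical names, not part of this interface): a caller that
 * needs a function to run on every CPU and wants to wait for completion
 * would typically pair xc_broadcast() with xc_wait().  Passing 0 for the
 * flags selects the low priority path; the two opaque arguments are
 * handed unchanged to the xcfunc_t.
 *
 *      static void
 *      example_func(void *arg1, void *arg2)
 *      {
 *              ... runs once on each running CPU, in thread context ...
 *      }
 *
 *      void
 *      example_sync(void)
 *      {
 *              uint64_t where;
 *
 *              where = xc_broadcast(0, example_func, NULL, NULL);
 *              xc_wait(where);
 *      }
 */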

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.9 2008/04/28 20:24:04 martin Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/xcall.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/evcnt.h>
#include <sys/kthread.h>
#include <sys/cpu.h>

static void     xc_thread(void *);
static uint64_t xc_lowpri(u_int, xcfunc_t, void *, void *, struct cpu_info *);

static kmutex_t         xc_lock;
static xcfunc_t         xc_func;
static void             *xc_arg1;
static void             *xc_arg2;
static kcondvar_t       xc_busy;
static struct evcnt     xc_unicast_ev;
static struct evcnt     xc_broadcast_ev;
static uint64_t         xc_headp;
static uint64_t         xc_tailp;
static uint64_t         xc_donep;

/*
 * xc_init_cpu:
 *
 *      Initialize the cross-call subsystem.  Called once for each CPU
 *      in the system as they are attached.
 */
void
xc_init_cpu(struct cpu_info *ci)
{
        static bool again;
        int error;

        if (!again) {
                /* Autoconfiguration will prevent re-entry. */
                again = true;
                mutex_init(&xc_lock, MUTEX_DEFAULT, IPL_NONE);
                cv_init(&xc_busy, "xcallbsy");
                evcnt_attach_dynamic(&xc_unicast_ev, EVCNT_TYPE_MISC, NULL,
                    "crosscall", "unicast");
                evcnt_attach_dynamic(&xc_broadcast_ev, EVCNT_TYPE_MISC, NULL,
                    "crosscall", "broadcast");
        }

        cv_init(&ci->ci_data.cpu_xcall, "xcall");
        error = kthread_create(PRI_XCALL, KTHREAD_MPSAFE, ci, xc_thread,
            NULL, NULL, "xcall/%u", ci->ci_index);
        if (error != 0)
                panic("xc_init_cpu: error %d", error);
}

/*
 * xc_broadcast:
 *
 *      Trigger a call on all CPUs in the system.
 */
uint64_t
xc_broadcast(u_int flags, xcfunc_t func, void *arg1, void *arg2)
{

        if ((flags & XC_HIGHPRI) != 0) {
                panic("xc_broadcast: no high priority crosscalls yet");
        } else {
                return xc_lowpri(flags, func, arg1, arg2, NULL);
        }
}

/*
 * xc_unicast:
 *
 *      Trigger a call on one CPU.
 */
uint64_t
xc_unicast(u_int flags, xcfunc_t func, void *arg1, void *arg2,
    struct cpu_info *ci)
{

        if ((flags & XC_HIGHPRI) != 0) {
                panic("xc_unicast: no high priority crosscalls yet");
        } else {
                KASSERT(ci != NULL);
                return xc_lowpri(flags, func, arg1, arg2, ci);
        }
}

/*
 * xc_lowpri:
 *
 *      Trigger a low priority call on one or more CPUs.
 */
static uint64_t
xc_lowpri(u_int flags, xcfunc_t func, void *arg1, void *arg2,
    struct cpu_info *ci)
{
        CPU_INFO_ITERATOR cii;
        uint64_t where;

        mutex_enter(&xc_lock);
        while (xc_headp != xc_tailp)
                cv_wait(&xc_busy, &xc_lock);
        xc_arg1 = arg1;
        xc_arg2 = arg2;
        xc_func = func;
        if (ci == NULL) {
                xc_broadcast_ev.ev_count++;
                for (CPU_INFO_FOREACH(cii, ci)) {
                        if ((ci->ci_schedstate.spc_flags & SPCF_RUNNING) == 0)
                                continue;
                        xc_headp += 1;
                        ci->ci_data.cpu_xcall_pending = true;
                        cv_signal(&ci->ci_data.cpu_xcall);
                }
        } else {
                xc_unicast_ev.ev_count++;
                xc_headp += 1;
                ci->ci_data.cpu_xcall_pending = true;
                cv_signal(&ci->ci_data.cpu_xcall);
        }
        KASSERT(xc_tailp < xc_headp);
        where = xc_headp;
        mutex_exit(&xc_lock);

        return where;
}
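
/*
 * Note on the request counters: xc_headp counts call requests issued
 * (one per target CPU), xc_tailp counts requests picked up by the
 * per-CPU threads, and xc_donep counts requests that have finished
 * executing.  xc_lowpri() returns the value of xc_headp as a ticket;
 * xc_wait() returns once xc_donep has caught up with that ticket,
 * i.e. once every call issued up to that point has completed.
 */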

/*
 * xc_wait:
 *
 *      Wait for a cross call to complete.
 */
void
xc_wait(uint64_t where)
{

        if (xc_donep >= where)
                return;

        mutex_enter(&xc_lock);
        while (xc_donep < where)
                cv_wait(&xc_busy, &xc_lock);
        mutex_exit(&xc_lock);
}

/*
 * xc_thread:
 *
 *      One thread per-CPU to dispatch low priority calls.
 */
static void
xc_thread(void *cookie)
{
        void *arg1, *arg2;
        struct cpu_info *ci;
        xcfunc_t func;

        ci = curcpu();

        mutex_enter(&xc_lock);
        for (;;) {
                while (!ci->ci_data.cpu_xcall_pending) {
                        if (xc_headp == xc_tailp)
                                cv_broadcast(&xc_busy);
                        cv_wait(&ci->ci_data.cpu_xcall, &xc_lock);
                        KASSERT(ci == curcpu());
                }
                ci->ci_data.cpu_xcall_pending = false;
                func = xc_func;
                arg1 = xc_arg1;
                arg2 = xc_arg2;
                xc_tailp++;
                mutex_exit(&xc_lock);

                (*func)(arg1, arg2);

                mutex_enter(&xc_lock);
                xc_donep++;
        }
        /* NOTREACHED */
}