/*	$NetBSD: kern_cpu.c,v 1.41 2009/01/19 23:04:26 njoly Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.41 2009/01/19 23:04:26 njoly Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

#ifdef COMPAT_50
#include <compat/sys/cpuio.h>
#endif

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock;
int		ncpu;
int		ncpuonline;
bool		mp_online;
struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];
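/*
 * mi_cpu_attach: the machine-independent half of CPU attachment.
 * Registers the CPU in cpu_infos[] and on cpu_queue, initializes its
 * per-CPU subsystem state and creates its idle LWP.  Intended to be
 * called once per CPU by machine-dependent attachment code.
 */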
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
#ifdef IOC_CPU_OSETSTATE
	cpustate_t csb;

	case IOC_CPU_OSETSTATE: {
		cpustate50_t *ocs = data;
		cpustate50_to_cpustate(ocs, &csb);
		cs = &csb;
		error = 1;
		/*FALLTHROUGH*/
	}
#endif
	case IOC_CPU_SETSTATE:
		if (error == 0)
			cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= __arraycount(cpu_infos) ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setstate(ci, cs->cs_online);
		break;

#ifdef IOC_CPU_OGETSTATE
	case IOC_CPU_OGETSTATE: {
		cpustate50_t *ocs = data;
		cpustate50_to_cpustate(ocs, &csb);
		cs = &csb;
		error = 1;
		/*FALLTHROUGH*/
	}
#endif
	case IOC_CPU_GETSTATE:
		if (error == 0)
			cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= __arraycount(cpu_infos) ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
#ifdef IOC_CPU_OGETSTATE
		if (cmd == IOC_CPU_OGETSTATE) {
			cpustate50_t *ocs = data;
			cpustate_to_cpustate50(cs, ocs);
			error = 0;
		}
#endif
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
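/*
 * For illustration only: a minimal sketch of how a userland tool might
 * drive the ioctl interface above.  It assumes a /dev/cpuctl device node
 * wired to cpuctl_cdevsw and the cpustate_t layout from <sys/cpuio.h>;
 * error handling is elided and this snippet is not part of the kernel.
 *
 *	#include <sys/cpuio.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	cpustate_t cs;
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *
 *	cs.cs_id = 1;
 *	if (ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0 && cs.cs_online) {
 *		cs.cs_online = false;
 *		(void)ioctl(fd, IOC_CPU_SETSTATE, &cs);
 *	}
 *
 * Note that IOC_CPU_SETSTATE rejects requests with cs_intr false
 * (EOPNOTSUPP above); reusing the structure filled in by
 * IOC_CPU_GETSTATE, which sets cs_intr to true, satisfies that check.
 */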
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci = cpu_infos[idx];

	KASSERT(idx < __arraycount(cpu_infos));
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * The thread that sent the unicast cross-call (a separate
	 * context) is holding cpu_lock for us.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, so handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Normal case - no affinity. */
		if ((l->l_flag & LW_AFFINITY) == 0) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		KASSERT(l->l_affinity != NULL);
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(cpu_index(mci), l->l_affinity))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag; the caller will check. */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}
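/*
 * cpu_setstate: take a CPU online or offline.  The caller must hold
 * cpu_lock, as cpuctl_ioctl above does.  A hypothetical kernel-side
 * caller (for illustration only; "idx", "ci" and "error" are assumed
 * locals, and idx is assumed to be below __arraycount(cpu_infos)):
 *
 *	mutex_enter(&cpu_lock);
 *	if ((ci = cpu_lookup(idx)) != NULL)
 *		error = cpu_setstate(ci, false);
 *	mutex_exit(&cpu_lock);
 */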
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If it was not set offline, then it is busy. */
		return EBUSY;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}