/*	$NetBSD: kern_cpu.c,v 1.30 2008/05/06 18:40:57 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.30 2008/05/06 18:40:57 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock;
int		ncpu;
int		ncpuonline;
bool		mp_online;
struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];

/*
 * Attach a CPU: record it in the global CPU list, create its idle LWP
 * and initialize the machine-independent per-CPU state.
 */
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	sched_cpuattach(ci);
	uvm_cpu_attach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

}

/*
 * ioctl handler for the cpuctl device: set or query CPU state,
 * map an index to a CPU id, or report the CPU count.
 */
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if ((ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if ((ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

/* Look up a CPU by its machine-dependent CPU id. */
struct cpu_info *
cpu_lookup(cpuid_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == id)
			return ci;
	}

	return NULL;
}

/* Look up a CPU by its kernel-assigned index. */
struct cpu_info *
cpu_lookup_byindex(u_int idx)
{
	struct cpu_info *ci = cpu_infos[idx];

	KASSERT(idx < MAXCPUS);
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

/*
 * Cross-call handler: mark a CPU offline and migrate its unbound
 * LWPs to another online CPU.
 */
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that
	 * this runs from the xcall thread, thus handling of LSONPROC
	 * is not needed.
	 */
	mutex_enter(proc_lock);

	/*
	 * Note that threads on the runqueue might sleep after this, but
	 * sched_takecpu() would migrate such threads to the appropriate CPU.
	 */
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if (l->l_cpu == ci && (l->l_stat == LSSLEEP ||
		    l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED)) {
			KASSERT((l->l_flag & LW_RUNNING) == 0);
			l->l_cpu = mci;
		}
		lwp_unlock(l);
	}

	/* Double-lock the run-queues. */
	spc_dlock(ci, mci);

	/* Handle the LSRUN and LSIDL cases. */
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND))
			continue;
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
		} else if (l->l_stat == LSRUN || l->l_stat == LSIDL) {
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
		}
	}
	spc_dunlock(ci, mci);
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
}

/* Cross-call handler: clear the offline flag on a CPU. */
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

/*
 * Set a CPU online or offline via a cross-call to the target CPU.
 * Refuses to offline the last online CPU.  Called with cpu_lock held.
 */
int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			nonline += ((ci2->ci_schedstate.spc_flags &
			    SPCF_OFFLINE) == 0);
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}