/*	$NetBSD: kern_cpu.c,v 1.29 2008/04/28 20:24:02 martin Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.29 2008/04/28 20:24:02 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock;
int		ncpu;
int		ncpuonline;
bool		mp_online;
struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];

int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);

	sched_cpuattach(ci);
	uvm_cpu_attach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if ((ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if ((ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

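/*
 * Illustrative userland sketch (not part of the original file): a
 * cpuctl(8)-style tool could take a CPU offline through cpuctl_ioctl()
 * above roughly as follows.  The "/dev/cpuctl" path, the CPU id of 1 and
 * the error handling are assumptions for illustration only; the ioctl
 * command and the cpustate_t fields are the ones handled above.  Note
 * that cs_intr must be true, or the IOC_CPU_SETSTATE case returns
 * EOPNOTSUPP.
 *
 *	cpustate_t cs;
 *	int fd;
 *
 *	fd = open("/dev/cpuctl", O_RDWR);
 *	if (fd == -1)
 *		err(EXIT_FAILURE, "open");
 *	memset(&cs, 0, sizeof(cs));
 *	cs.cs_id = 1;
 *	cs.cs_online = false;
 *	cs.cs_intr = true;
 *	if (ioctl(fd, IOC_CPU_SETSTATE, &cs) == -1)
 *		err(EXIT_FAILURE, "IOC_CPU_SETSTATE");
 *	close(fd);
 */
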
struct cpu_info *
cpu_lookup(cpuid_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == id)
			return ci;
	}

	return NULL;
}

struct cpu_info *
cpu_lookup_byindex(u_int idx)
{
	struct cpu_info *ci = cpu_infos[idx];

	KASSERT(idx < MAXCPUS);
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.
	 * Note that this runs from the xcall thread, thus handling
	 * of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);

	/*
	 * Note that threads on the runqueue might sleep after this, but
	 * sched_takecpu() would migrate such threads to the appropriate CPU.
	 */
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if (l->l_cpu == ci && (l->l_stat == LSSLEEP ||
		    l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED)) {
			KASSERT((l->l_flag & LW_RUNNING) == 0);
			l->l_cpu = mci;
		}
		lwp_unlock(l);
	}

	/* Double-lock the run-queues. */
	spc_dlock(ci, mci);

	/* Handle the LSRUN and LSIDL cases. */
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND))
			continue;
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
		} else if (l->l_stat == LSRUN || l->l_stat == LSIDL) {
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
		}
	}
	spc_dunlock(ci, mci);
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			nonline += ((ci2->ci_schedstate.spc_flags &
			    SPCF_OFFLINE) == 0);
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}

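/*
 * Usage sketch (illustrative, not from the original file): kernel callers
 * of cpu_setonline() must hold cpu_lock, as the KASSERT above requires,
 * and should expect EBUSY when asked to offline the last online CPU.
 * The index passed to cpu_lookup_byindex() is an arbitrary example.
 *
 *	struct cpu_info *ci;
 *	int error;
 *
 *	mutex_enter(&cpu_lock);
 *	ci = cpu_lookup_byindex(1);
 *	if (ci != NULL)
 *		error = cpu_setonline(ci, false);
 *	mutex_exit(&cpu_lock);
 */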