/*	$NetBSD: kern_cpu.c,v 1.27 2008/04/22 11:45:28 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
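
/*
 * Overview (added for orientation): machine-independent CPU handling.
 * This file provides mi_cpu_attach() to hook a CPU into the kernel at
 * boot, lookup routines by CPU ID and by kernel index, and online/
 * offline state changes, driven either from the kernel or from
 * userland through the cpuctl pseudo-device and executed on the
 * target CPU via cross-calls (xcall).
 */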

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.27 2008/04/22 11:45:28 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock;
int		ncpu;
int		ncpuonline;
bool		mp_online;
struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];

/*
 * mi_cpu_attach: attach a CPU to the kernel.  Assigns the kernel index,
 * records the CPU in the global list and table, and initializes the
 * per-CPU state of the scheduler, UVM, soft interrupts, callouts,
 * cross-calls, pool caches, select and the name cache.
 */
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);

	sched_cpuattach(ci);
	uvm_cpu_attach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

}

/*
 * cpuctl_ioctl: handle control requests on the cpuctl device, all
 * serialized by cpu_lock.  Setting CPU state requires authorization
 * via kauth(9).
 */
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if ((ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if ((ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
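
/*
 * Illustrative sketch (not part of this file): how a userland tool
 * such as cpuctl(8) might drive the ioctls above to take CPU 1
 * offline.  Assumes the /dev/cpuctl device node, appropriate
 * privilege for the kauth(9) check, and <fcntl.h>, <sys/ioctl.h>
 * and <sys/cpuio.h>; error handling is omitted for brevity.  Note
 * that IOC_CPU_GETSTATE sets cs_intr, which IOC_CPU_SETSTATE
 * requires to be true.
 *
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *	cpustate_t cs;
 *
 *	memset(&cs, 0, sizeof(cs));
 *	cs.cs_id = 1;
 *	if (ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0 && cs.cs_online) {
 *		cs.cs_online = false;
 *		(void)ioctl(fd, IOC_CPU_SETSTATE, &cs);
 *	}
 *	close(fd);
 */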

/*
 * cpu_lookup: find a CPU by its machine-dependent ID.
 */
struct cpu_info *
cpu_lookup(cpuid_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == id)
			return ci;
	}

	return NULL;
}

/*
 * cpu_lookup_byindex: find a CPU by its kernel-assigned index.
 */
struct cpu_info *
cpu_lookup_byindex(u_int idx)
{
	struct cpu_info *ci = cpu_infos[idx];

	KASSERT(idx < MAXCPUS);
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

/*
 * cpu_xc_offline: cross-call handler run on the target CPU.  Marks
 * the CPU offline and migrates its non-bound LWPs to another CPU.
 */
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first online CPU as the migration target. */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that
	 * this runs from the xcall thread, so handling of LSONPROC
	 * is not needed.
	 */
	mutex_enter(&proclist_lock);

	/*
	 * Note that threads on the runqueue might sleep after this, but
	 * sched_takecpu() would migrate such threads to the appropriate CPU.
	 */
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if (l->l_cpu == ci && (l->l_stat == LSSLEEP ||
		    l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED)) {
			KASSERT((l->l_flag & LW_RUNNING) == 0);
			l->l_cpu = mci;
		}
		lwp_unlock(l);
	}

	/* Double-lock the run-queues */
	spc_dlock(ci, mci);

	/* Handle LSRUN and LSIDL cases */
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND))
			continue;
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
		} else if (l->l_stat == LSRUN || l->l_stat == LSIDL) {
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
		}
	}
	spc_dunlock(ci, mci);
	mutex_exit(&proclist_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
}

/*
 * cpu_xc_online: cross-call handler run on the target CPU.  Clears
 * the offline flag so the scheduler will use the CPU again.
 */
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

/*
 * cpu_setonline: set a CPU online or offline.  Refuses to offline the
 * last online CPU.  The state change itself is performed on the target
 * CPU by a cross-call.  Called with cpu_lock held.
 */
int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			nonline += ((ci2->ci_schedstate.spc_flags &
			    SPCF_OFFLINE) == 0);
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}
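
/*
 * Kernel-side usage sketch (derived from the KASSERT above, not a
 * verbatim caller): in-kernel callers of cpu_setonline() follow the
 * same locking pattern cpuctl_ioctl() uses, taking cpu_lock around
 * the call so the cross-call and the online-CPU count stay
 * consistent:
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setonline(ci, false);	(take ci offline)
 *	mutex_exit(&cpu_lock);
 */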