/*	$NetBSD: kern_cpu.c,v 1.36 2008/10/15 08:13:17 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
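/*
 * CPU related routines: machine-independent CPU attachment, the cpuctl
 * device used to query and change CPU state from userland, and support
 * for setting CPUs online and offline.
 */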
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.36 2008/10/15 08:13:17 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock;
int		ncpu;
int		ncpuonline;
bool		mp_online;
struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];

/*
 * Attach the machine-independent state of a newly discovered CPU:
 * register it with the scheduler, create its idle LWP and initialize
 * the per-CPU state of the various kernel subsystems.
 */
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

/*
 * Attach routine for the cpuctl pseudo-device; nothing to do.
 */
void
cpuctlattach(int dummy)
{

}
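/*
 * Ioctl handler for the cpuctl device.  Supports setting a CPU online
 * or offline (IOC_CPU_SETSTATE), querying CPU state (IOC_CPU_GETSTATE),
 * mapping an iterator index to a CPU id (IOC_CPU_MAPID) and returning
 * the number of CPUs (IOC_CPU_GETCOUNT).  All operations are serialized
 * by cpu_lock.
 */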
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= __arraycount(cpu_infos) ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= __arraycount(cpu_infos) ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

/*
 * Look up a CPU by index.  Returns NULL if no CPU is present at that
 * index.
 */
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci = cpu_infos[idx];

	KASSERT(idx < __arraycount(cpu_infos));
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

/*
 * Cross-call handler: mark the target CPU offline and migrate all
 * non-bound LWPs away from it.
 */
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.
	 *
	 * Note that this runs from the xcall thread, so handling of
	 * LSONPROC is not needed.  Threads which change state will be
	 * handled by sched_takecpu().
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if ((l->l_pflag & LP_BOUND) == 0 && l->l_cpu == ci) {
			lwp_migrate(l, mci);
		} else {
			lwp_unlock(l);
		}
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
}

/*
 * Cross-call handler: clear the offline flag on the target CPU.
 */
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

/*
 * Set the given CPU online or offline.  Must be called with cpu_lock
 * held; the state change itself is performed via a cross-call to the
 * target CPU.
 */
int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}