/*	$NetBSD: kern_cpu.c,v 1.31 2008/05/29 22:33:27 rmind Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.31 2008/05/29 22:33:27 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t cpu_lock;
int	ncpu;
int	ncpuonline;
bool	mp_online;
struct cpuqueue cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];

/*
 * Attach a CPU to the machine-independent layer: register it with
 * the scheduler, UVM and the per-CPU subsystems, and create its idle
 * LWP.  Called once for each CPU as it attaches.
 */
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	sched_cpuattach(ci);
	uvm_cpu_attach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

}

/*
 * Handle ioctls on the cpuctl device: set or query the online state
 * of a CPU, map a CPU index to its id, or report the CPU count.
 */
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if ((ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		/* Disabling interrupt delivery is not supported. */
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if ((ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

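/*
 * Illustrative sketch only (not compiled into this file): the ioctls
 * above are normally driven from userland through the cpuctl device
 * node, assumed here to be /dev/cpuctl.  The helper name and error
 * handling below are hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/cpuio.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

static int
example_set_cpu_online(u_int id, bool online)
{
	cpustate_t cs;
	int fd, error;

	fd = open("/dev/cpuctl", O_RDWR);
	if (fd == -1)
		return -1;
	/* Fetch the current state so unrelated fields stay sane. */
	memset(&cs, 0, sizeof(cs));
	cs.cs_id = id;
	error = ioctl(fd, IOC_CPU_GETSTATE, &cs);
	if (error == 0) {
		cs.cs_online = online;
		cs.cs_intr = true;	/* clearing cs_intr yields EOPNOTSUPP */
		error = ioctl(fd, IOC_CPU_SETSTATE, &cs);
	}
	close(fd);
	return error;
}
#endif
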
/*
 * Look up a CPU by its hardware id.
 */
struct cpu_info *
cpu_lookup(cpuid_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == id)
			return ci;
	}

	return NULL;
}

/*
 * Look up a CPU by its machine-independent index.
 */
struct cpu_info *
cpu_lookup_byindex(u_int idx)
{
	struct cpu_info *ci;

	/* Validate the index before using it to subscript the array. */
	KASSERT(idx < MAXCPUS);
	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.
	 *
	 * Note that this runs from the xcall thread, so handling of
	 * LSONPROC is not needed.  Threads which change state will be
	 * handled by sched_takecpu().
	 */
	mutex_enter(proc_lock);
	spc_dlock(ci, mci);
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND) != 0) {
			lwp_unlock(l);
			continue;
		}
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
			/* Runnable, in-memory LWPs are moved directly. */
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
			lwp_unlock(l);
		} else {
			lwp_migrate(l, mci);
		}
	}
	spc_dunlock(ci, mci);
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

/*
 * Set a CPU online or offline.  The state change itself runs on the
 * target CPU via a cross-call; offlining the last online CPU is
 * refused with EBUSY.  Must be called with cpu_lock held.
 */
int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		/* Count the online CPUs; keep at least one running. */
		nonline = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			nonline += ((ci2->ci_schedstate.spc_flags &
			    SPCF_OFFLINE) == 0);
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}
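
/*
 * Illustrative sketch only (not compiled into this file): a kernel-side
 * caller of cpu_setonline() must hold cpu_lock across the call, just as
 * cpuctl_ioctl() does above.  The variable names are hypothetical.
 */
#if 0
	struct cpu_info *ci;
	int error;

	mutex_enter(&cpu_lock);
	ci = cpu_lookup_byindex(1);		/* second CPU, if attached */
	if (ci != NULL)
		error = cpu_setonline(ci, false);	/* take it offline */
	mutex_exit(&cpu_lock);
#endif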