/*	$NetBSD: kern_cpu.c,v 1.60 2013/08/22 19:50:55 drochner Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.60 2013/08/22 19:50:55 drochner Exp $");

#include "opt_cpu_ucode.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>

#include <uvm/uvm_extern.h>

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
        nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
        nullstop, notty, nopoll, nommap, nokqfilter,
        D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock	__cacheline_aligned;
int		ncpu		__read_mostly;
int		ncpuonline	__read_mostly;
bool		mp_online	__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly = NULL;
kcpuset_t *	kcpuset_running		__read_mostly = NULL;

struct cpuqueue	cpu_queue __cacheline_aligned
    = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info **cpu_infos __read_mostly;

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

        mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

        kcpuset_create(&kcpuset_attached, true);
        kcpuset_create(&kcpuset_running, true);
        kcpuset_set(kcpuset_running, 0);
}

int
mi_cpu_attach(struct cpu_info *ci)
{
        int error;

        KASSERT(maxcpus > 0);

        ci->ci_index = ncpu;
        kcpuset_set(kcpuset_attached, cpu_index(ci));

        /*
         * Create a convenience cpuset of just ourselves.
         */
        kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
        kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

        CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
        TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
        __cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

        /* This is useful for, e.g., per-CPU evcnt. */
        snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
            cpu_index(ci));

        if (__predict_false(cpu_infos == NULL)) {
                cpu_infos =
                    kmem_zalloc(sizeof(cpu_infos[0]) * maxcpus, KM_SLEEP);
        }
        cpu_infos[cpu_index(ci)] = ci;

        sched_cpuattach(ci);

        error = create_idle_lwp(ci);
        if (error != 0) {
                /* XXX revert sched_cpuattach */
                return error;
        }

        if (ci == curcpu())
                ci->ci_data.cpu_onproc = curlwp;
        else
                ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

        percpu_init_cpu(ci);
        softint_init(ci);
        callout_init_cpu(ci);
        xc_init_cpu(ci);
        pool_cache_cpu_init(ci);
        selsysinit(ci);
        cache_cpu_init(ci);
        TAILQ_INIT(&ci->ci_data.cpu_biodone);
        ncpu++;
        ncpuonline++;

        return 0;
}

void
cpuctlattach(int dummy)
{

        KASSERT(cpu_infos != NULL);
}

/*
 * cpuctl_ioctl: handle ioctl requests on the cpuctl device.
 */
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
        CPU_INFO_ITERATOR cii;
        cpustate_t *cs;
        struct cpu_info *ci;
        int error, i;
        u_int id;

        error = 0;

        mutex_enter(&cpu_lock);
        switch (cmd) {
        case IOC_CPU_SETSTATE:
                cs = data;
                error = kauth_authorize_system(l->l_cred,
                    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
                    NULL);
                if (error != 0)
                        break;
                if (cs->cs_id >= maxcpus ||
                    (ci = cpu_lookup(cs->cs_id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                cpu_setintr(ci, cs->cs_intr);
                error = cpu_setstate(ci, cs->cs_online);
                break;

        case IOC_CPU_GETSTATE:
                cs = data;
                id = cs->cs_id;
                memset(cs, 0, sizeof(*cs));
                cs->cs_id = id;
                if (cs->cs_id >= maxcpus ||
                    (ci = cpu_lookup(id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                        cs->cs_online = false;
                else
                        cs->cs_online = true;
                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
                        cs->cs_intr = false;
                else
                        cs->cs_intr = true;
                cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
                cs->cs_lastmodhi = (int32_t)
                    (ci->ci_schedstate.spc_lastmod >> 32);
                cs->cs_intrcnt = cpu_intr_count(ci) + 1;
                cs->cs_hwid = ci->ci_cpuid;
                break;

        case IOC_CPU_MAPID:
                i = 0;
                for (CPU_INFO_FOREACH(cii, ci)) {
                        if (i++ == *(int *)data)
                                break;
                }
                if (ci == NULL)
                        error = ESRCH;
                else
                        *(int *)data = cpu_index(ci);
                break;

        case IOC_CPU_GETCOUNT:
                *(int *)data = ncpu;
                break;

#ifdef CPU_UCODE
        case IOC_CPU_UCODE_GET_VERSION:
                error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
                break;

#ifdef COMPAT_60
        case OIOC_CPU_UCODE_GET_VERSION:
                error = compat6_cpu_ucode_get_version((struct compat6_cpu_ucode *)data);
                break;
#endif

        case IOC_CPU_UCODE_APPLY:
                error = kauth_authorize_machdep(l->l_cred,
                    KAUTH_MACHDEP_CPU_UCODE_APPLY,
                    NULL, NULL, NULL, NULL);
                if (error != 0)
                        break;
                error = cpu_ucode_apply((const struct cpu_ucode *)data);
                break;

#ifdef COMPAT_60
        case OIOC_CPU_UCODE_APPLY:
                error = kauth_authorize_machdep(l->l_cred,
                    KAUTH_MACHDEP_CPU_UCODE_APPLY,
                    NULL, NULL, NULL, NULL);
                if (error != 0)
                        break;
                error = compat6_cpu_ucode_apply((const struct compat6_cpu_ucode *)data);
                break;
#endif
#endif

        default:
                error = ENOTTY;
                break;
        }
        mutex_exit(&cpu_lock);

        return error;
}

/*
 * cpu_lookup: return the cpu_info for the CPU with the given index,
 * or NULL if no such CPU is attached.
 */
struct cpu_info *
cpu_lookup(u_int idx)
{
        struct cpu_info *ci;

        KASSERT(idx < maxcpus);

        if (__predict_false(cpu_infos == NULL)) {
                KASSERT(idx == 0);
                return curcpu();
        }

        ci = cpu_infos[idx];
        KASSERT(ci == NULL || cpu_index(ci) == idx);

        return ci;
}

/*
 * cpu_xc_offline: cross-call handler that marks a CPU offline and
 * migrates its unbound threads to another online CPU.
 */
static void
cpu_xc_offline(struct cpu_info *ci)
{
        struct schedstate_percpu *spc, *mspc = NULL;
        struct cpu_info *target_ci;
        struct lwp *l;
        CPU_INFO_ITERATOR cii;
        int s;

        /*
         * Thread that made the cross call (separate context) holds
         * cpu_lock on our behalf.
         */
        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_OFFLINE;
        splx(s);

        /* Take the first available CPU for the migration. */
        for (CPU_INFO_FOREACH(cii, target_ci)) {
                mspc = &target_ci->ci_schedstate;
                if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
                        break;
        }
        KASSERT(target_ci != NULL);

        /*
         * Migrate all non-bound threads to the other CPU.  Note that this
         * runs from the xcall thread, thus handling of LSONPROC is not needed.
         */
        mutex_enter(proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                struct cpu_info *mci;

                lwp_lock(l);
                if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
                        lwp_unlock(l);
                        continue;
                }
                /* Regular case - no affinity. */
                if (l->l_affinity == NULL) {
                        lwp_migrate(l, target_ci);
                        continue;
                }
                /* Affinity is set, find an online CPU in the set. */
                for (CPU_INFO_FOREACH(cii, mci)) {
                        mspc = &mci->ci_schedstate;
                        if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
                            kcpuset_isset(l->l_affinity, cpu_index(mci)))
                                break;
                }
                if (mci == NULL) {
                        lwp_unlock(l);
                        mutex_exit(proc_lock);
                        goto fail;
                }
                lwp_migrate(l, mci);
        }
        mutex_exit(proc_lock);

#if PCU_UNIT_COUNT > 0
        pcu_save_all_on_cpu();
#endif

#ifdef __HAVE_MD_CPU_OFFLINE
        cpu_offline_md();
#endif
        return;
fail:
        /* Just unset the SPCF_OFFLINE flag, caller will check */
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}

/*
 * cpu_xc_online: cross-call handler that clears the offline flag.
 */
static void
cpu_xc_online(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}

/*
 * cpu_setstate: set a CPU online or offline.  Called with cpu_lock held.
 */
int
cpu_setstate(struct cpu_info *ci, bool online)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nonline;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (online) {
                if ((spc->spc_flags & SPCF_OFFLINE) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_online;
                ncpuonline++;
        } else {
                if ((spc->spc_flags & SPCF_OFFLINE) != 0)
                        return 0;
                nonline = 0;
                /*
                 * Ensure that at least one CPU within the processor set
                 * stays online.  Revisit this later.
                 */
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                                continue;
                        if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
                                continue;
                        nonline++;
                }
                if (nonline == 1)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_offline;
                ncpuonline--;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (online) {
                KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
        } else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
                /* If the CPU was not marked offline, then it is busy. */
                return EBUSY;
        }

        spc->spc_lastmod = time_second;
        return 0;
}

#ifdef __HAVE_INTR_CONTROL
static void
cpu_xc_intr(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_NOINTR;
        splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_NOINTR;
        splx(s);
}

/*
 * cpu_setintr: enable or disable hardware interrupt handling on a CPU.
 * Called with cpu_lock held.
 */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nintr;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (intr) {
                if ((spc->spc_flags & SPCF_NOINTR) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_intr;
        } else {
                if ((spc->spc_flags & SPCF_NOINTR) != 0)
                        return 0;
                /*
                 * Ensure that at least one CPU within the system
                 * is handling device interrupts.
                 */
                nintr = 0;
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
                                continue;
                        if (ci2 == ci)
                                continue;
                        nintr++;
                }
                if (nintr == 0)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_nointr;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (intr) {
                KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
        } else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
                /* If the NOINTR flag was not set, then the CPU is busy. */
                return EBUSY;
        }

        /* Direct interrupts away from the CPU and record the change. */
        cpu_intr_redistribute();
        spc->spc_lastmod = time_second;
        return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

        return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

        return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

bool
cpu_softintr_p(void)
{

        return (curlwp->l_pflag & LP_INTR) != 0;
}

#ifdef CPU_UCODE
/*
 * cpu_ucode_load: fetch a microcode image via the firmware interface and
 * attach it to the softc, releasing any previously loaded blob.
 */
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
        firmware_handle_t fwh;
        int error;

        if (sc->sc_blob != NULL) {
                firmware_free(sc->sc_blob, 0);
                sc->sc_blob = NULL;
                sc->sc_blobsize = 0;
        }

        error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
        if (error != 0) {
                aprint_error("ucode: firmware_open failed: %i\n", error);
                goto err0;
        }

        sc->sc_blobsize = firmware_get_size(fwh);
        sc->sc_blob = firmware_malloc(sc->sc_blobsize);
        if (sc->sc_blob == NULL) {
                error = ENOMEM;
                firmware_close(fwh);
                goto err0;
        }

        error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
        firmware_close(fwh);
        if (error != 0)
                goto err1;

        return 0;

 err1:
        firmware_free(sc->sc_blob, 0);
        sc->sc_blob = NULL;
        sc->sc_blobsize = 0;
 err0:
        return error;
}
#endif
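
/*
 * Illustrative sketch only, kept as a comment so it has no effect on the
 * kernel build: a minimal userland consumer of the cpuctl ioctls handled
 * by cpuctl_ioctl() above.  The ioctl names and cpustate_t fields come
 * from this file and <sys/cpuio.h>; the /dev/cpuctl path and the sample
 * program itself are assumptions for illustration (the canonical consumer
 * is cpuctl(8)), and error handling is abbreviated.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/cpuio.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		cpustate_t cs;
 *		int fd, n;
 *
 *		if ((fd = open("/dev/cpuctl", O_RDONLY)) == -1)
 *			return 1;
 *		if (ioctl(fd, IOC_CPU_GETCOUNT, &n) == 0)
 *			printf("%d CPUs attached\n", n);
 *		cs.cs_id = 0;
 *		if (ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0)
 *			printf("cpu0 is %s, interrupts %s\n",
 *			    cs.cs_online ? "online" : "offline",
 *			    cs.cs_intr ? "enabled" : "disabled");
 *		close(fd);
 *		return 0;
 *	}
 */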