/*	$NetBSD: kern_cpu.c,v 1.74 2018/07/04 07:25:47 msaitoh Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
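/*
 * Machine-independent CPU management: attachment of CPUs to the kernel
 * (mi_cpu_init, mi_cpu_attach), the cpuctl control device, setting CPUs
 * on-line and off-line, routing device interrupts away from individual
 * CPUs, and optional CPU microcode loading (CPU_UCODE).
 */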
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.74 2018/07/04 07:25:47 msaitoh Exp $");

#include "opt_cpu_ucode.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent the two from getting
 * out of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = nullread,
	.d_write = nullwrite,
	.d_ioctl = cpuctl_ioctl,
	.d_stop = nullstop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock	__cacheline_aligned;
int		ncpu		__read_mostly;
int		ncpuonline	__read_mostly;
bool		mp_online	__read_mostly;

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos	__read_mostly;

/* Note: set in mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly = NULL;
kcpuset_t *	kcpuset_running		__read_mostly = NULL;

int (*compat_cpuctl_ioctl)(struct lwp *, u_long, void *) = (void *)enosys;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block; the memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}
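/*
 * mi_cpu_attach: attach a CPU to the machine-independent kernel.  Sets
 * up the per-CPU data, records the CPU in cpu_infos[], creates the idle
 * LWP and initialises the per-CPU subsystems (scheduler, softints,
 * callouts, cross-calls, pool caches).  Called by MD code once per
 * discovered CPU.
 */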
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
	kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
		cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy __unused)
{

	KASSERT(cpu_infos != NULL);
}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;
#endif

	default:
		error = (*compat_cpuctl_ioctl)(l, cmd, data);
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
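/*
 * cpu_lookup: look up a CPU by index.  Returns NULL if no CPU with the
 * given index has attached.  Before cpu_infos[] has been allocated only
 * the primary CPU (index 0) can be looked up, via curcpu().
 */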
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set; find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#if PCU_UNIT_COUNT > 0
	pcu_save_all_on_cpu();
#endif

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag; the caller will check it. */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
			/* If the CPU did not go offline, it is busy. */
			return EBUSY;
		}
		ncpuonline--;
	}

	spc->spc_lastmod = time_second;
	return 0;
}
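/*
 * cpu_setmodel, cpu_getmodel: set and retrieve the printable CPU model
 * string.  cpu_setmodel() formats into a fixed 128 byte buffer and
 * returns the length vsnprintf() reported, so a return value of
 * sizeof(cpu_model) or more indicates the string was truncated.
 */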
int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

#ifdef __HAVE_INTR_CONTROL
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If interrupts were not disabled, the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;		/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

#ifdef CPU_UCODE
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, sc->sc_blobsize);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
		aprint_error("ucode: firmware_open failed: %i\n", error);
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	if (sc->sc_blobsize == 0) {
		error = EFTYPE;
		firmware_close(fwh);
		goto err0;
	}
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, sc->sc_blobsize);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif
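/*
 * Usage sketch (not part of the kernel): how a userland program might
 * query the state of cpu0 through the cpuctl pseudo-device declared
 * above.  This assumes the conventional /dev/cpuctl device node and
 * omits most error handling.
 *
 *	#include <sys/cpuio.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		cpustate_t cs = { .cs_id = 0 };
 *		int fd = open("/dev/cpuctl", O_RDONLY);
 *
 *		if (fd == -1 || ioctl(fd, IOC_CPU_GETSTATE, &cs) == -1)
 *			return 1;
 *		printf("cpu%u: %s, %s interrupts\n", (unsigned)cs.cs_id,
 *		    cs.cs_online ? "online" : "offline",
 *		    cs.cs_intr ? "handling" : "not handling");
 *		return 0;
 *	}
 */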