/*	$OpenBSD: kern_sched.c,v 1.49 2018/06/30 14:43:36 kettenis Exp $	*/
/*
 * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>
#include <sys/task.h>

#include <uvm/uvm_extern.h>

void sched_kthreads_create(void *);

int sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p);
struct proc *sched_steal_proc(struct cpu_info *);

/*
 * To help choose which cpu should run which process we keep track
 * of cpus which are currently idle and which cpus have processes
 * queued.
 */
struct cpuset sched_idle_cpus;
struct cpuset sched_queued_cpus;
struct cpuset sched_all_cpus;

/*
 * Some general scheduler counters.
 */
uint64_t sched_nmigrations;	/* Cpu migration counter */
uint64_t sched_nomigrations;	/* Cpu no migration counter */
uint64_t sched_noidle;		/* Times we didn't pick the idle task */
uint64_t sched_stolen;		/* Times we stole proc from other cpus */
uint64_t sched_choose;		/* Times we chose a cpu */
uint64_t sched_wasidle;		/* Times we came out of idle */

#ifdef MULTIPROCESSOR
struct taskq *sbartq;
#endif

int sched_smt;

/*
 * A few notes about cpu_switchto, which is implemented in MD code.
 *
 * cpu_switchto takes two arguments, the old proc and the proc
 * it should switch to. The new proc will never be NULL, so we always have
 * a saved state that we need to switch to. The old proc however can
 * be NULL if the process is exiting. NULL for the old proc simply
 * means "don't bother saving old state".
 *
 * cpu_switchto is supposed to atomically load the new state of the process,
 * including the pcb and pmap, and the setting of curproc, the p_cpu pointer
 * in the proc and p_stat to SONPROC. This only needs to be atomic with
 * respect to interrupts; other cpus in the system must not depend on this
 * state being consistent. Therefore no locking is necessary in cpu_switchto
 * other than blocking interrupts during the context switch.
 */
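
/*
 * Illustrative sketch (not part of the original file): the rough sequence
 * an MD cpu_switchto() implementation follows under the contract described
 * above.  The helper names here are hypothetical; the real work is machine
 * dependent and usually written in assembly.
 *
 *	s = disable_interrupts();	// only atomicity needed is vs. intrs
 *	if (old != NULL)
 *		save_context(old);	// register state into old's pcb
 *	switch_pmap(new);		// activate new proc's address space
 *	curcpu()->ci_curproc = new;	// set curproc ...
 *	new->p_cpu = curcpu();		// ... the p_cpu pointer ...
 *	new->p_stat = SONPROC;		// ... and p_stat
 *	restore_context(new);		// load new pcb, resume new proc
 *	restore_interrupts(s);
 */
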
/*
 * sched_init_cpu is called from main() for the boot cpu, then it's the
 * responsibility of the MD code to call it for all other cpus.
 */
void
sched_init_cpu(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int i;

	for (i = 0; i < SCHED_NQS; i++)
		TAILQ_INIT(&spc->spc_qs[i]);

	spc->spc_idleproc = NULL;

	kthread_create_deferred(sched_kthreads_create, ci);

	LIST_INIT(&spc->spc_deadproc);

	/*
	 * Slight hack here until the cpuset code handles cpu_info
	 * structures.
	 */
	cpuset_init_cpu(ci);

#ifdef __HAVE_CPU_TOPOLOGY
	if (!sched_smt && ci->ci_smt_id > 0)
		return;
#endif
	cpuset_add(&sched_all_cpus, ci);
}

void
sched_kthreads_create(void *v)
{
	struct cpu_info *ci = v;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	static int num;

	if (fork1(&proc0, FORK_SHAREVM|FORK_SHAREFILES|FORK_NOZOMBIE|
	    FORK_SYSTEM|FORK_SIGHAND|FORK_IDLE, sched_idle, ci, NULL,
	    &spc->spc_idleproc))
		panic("fork idle");

	/* Name it as specified. */
	snprintf(spc->spc_idleproc->p_p->ps_comm,
	    sizeof(spc->spc_idleproc->p_p->ps_comm),
	    "idle%d", num);

	num++;
}

void
sched_idle(void *v)
{
	struct schedstate_percpu *spc;
	struct proc *p = curproc;
	struct cpu_info *ci = v;
	int s;

	KERNEL_UNLOCK();

	spc = &ci->ci_schedstate;

	/*
	 * First time we enter here, we're not supposed to idle,
	 * just go away for a while.
	 */
	SCHED_LOCK(s);
	cpuset_add(&sched_idle_cpus, ci);
	p->p_stat = SSLEEP;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	mi_switch();
	cpuset_del(&sched_idle_cpus, ci);
	SCHED_UNLOCK(s);

	KASSERT(ci == curcpu());
	KASSERT(curproc == spc->spc_idleproc);

	while (1) {
		while (!cpu_is_idle(curcpu())) {
			struct proc *dead;

			SCHED_LOCK(s);
			p->p_stat = SSLEEP;
			mi_switch();
			SCHED_UNLOCK(s);

			while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
				LIST_REMOVE(dead, p_hash);
				exit2(dead);
			}
		}

		splassert(IPL_NONE);

		cpuset_add(&sched_idle_cpus, ci);
		cpu_idle_enter();
		while (spc->spc_whichqs == 0) {
#ifdef MULTIPROCESSOR
			if (spc->spc_schedflags & SPCF_SHOULDHALT &&
			    (spc->spc_schedflags & SPCF_HALTED) == 0) {
				cpuset_del(&sched_idle_cpus, ci);
				SCHED_LOCK(s);
				atomic_setbits_int(&spc->spc_schedflags,
				    spc->spc_whichqs ? 0 : SPCF_HALTED);
				SCHED_UNLOCK(s);
				wakeup(spc);
			}
#endif
			cpu_idle_cycle();
		}
		cpu_idle_leave();
		cpuset_del(&sched_idle_cpus, ci);
	}
}

/*
 * To free our address space we have to jump through a few hoops.
 * The freeing is done by the reaper, but until we have one reaper
 * per cpu, we have no way of putting this proc on the deadproc list
 * and waking up the reaper without risking having our address space and
 * stack torn from under us before we manage to switch to another proc.
 * Therefore we have a per-cpu list of dead processes where we put this
 * proc and have idle clean up that list and move it to the reaper list.
 * All this will be unnecessary once we can bind the reaper to this cpu
 * and not risk having it switch to another in case it sleeps.
 */
void
sched_exit(struct proc *p)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct timespec ts;
	struct proc *idle;
	int s;

	nanouptime(&ts);
	timespecsub(&ts, &spc->spc_runtime, &ts);
	timespecadd(&p->p_rtime, &ts, &p->p_rtime);

	LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);

	/* This process no longer needs to hold the kernel lock. */
	KERNEL_UNLOCK();

	SCHED_LOCK(s);
	idle = spc->spc_idleproc;
	idle->p_stat = SRUN;
	cpu_switchto(NULL, idle);
	panic("cpu_switchto returned");
}

/*
 * Run queue management.
 */
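
/*
 * Worked example for the queue layout used below (illustrative, not from
 * the original source): each cpu has SCHED_NQS run queues and a bitmap,
 * spc_whichqs, with one bit per non-empty queue.  setrunqueue() maps a
 * priority to a queue with "p_priority >> 2", so four adjacent priorities
 * share one queue; e.g. a proc with p_priority 50 lands on queue 12 and
 * sets bit 12 in spc_whichqs.  sched_chooseproc() later uses
 * ffs(spc_whichqs) - 1 to find the lowest-numbered, i.e. best priority,
 * non-empty queue.
 */
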
void
sched_init_runqueues(void)
{
}

void
setrunqueue(struct proc *p)
{
	struct schedstate_percpu *spc;
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun++;

	TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
	spc->spc_whichqs |= (1 << queue);
	cpuset_add(&sched_queued_cpus, p->p_cpu);

	if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
		cpu_unidle(p->p_cpu);
}

void
remrunqueue(struct proc *p)
{
	struct schedstate_percpu *spc;
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun--;

	TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
	if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
		spc->spc_whichqs &= ~(1 << queue);
		if (spc->spc_whichqs == 0)
			cpuset_del(&sched_queued_cpus, p->p_cpu);
	}
}

struct proc *
sched_chooseproc(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p;
	int queue;

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	if (spc->spc_schedflags & SPCF_SHOULDHALT) {
		if (spc->spc_whichqs) {
			for (queue = 0; queue < SCHED_NQS; queue++) {
				while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
					remrunqueue(p);
					p->p_cpu = sched_choosecpu(p);
					setrunqueue(p);
					if (p->p_cpu == curcpu()) {
						KASSERT(p->p_flag & P_CPUPEG);
						goto again;
					}
				}
			}
		}
		p = spc->spc_idleproc;
		KASSERT(p);
		KASSERT(p->p_wchan == NULL);
		p->p_stat = SRUN;
		return (p);
	}
#endif

again:
	if (spc->spc_whichqs) {
		queue = ffs(spc->spc_whichqs) - 1;
		p = TAILQ_FIRST(&spc->spc_qs[queue]);
		remrunqueue(p);
		sched_noidle++;
		KASSERT(p->p_stat == SRUN);
	} else if ((p = sched_steal_proc(curcpu())) == NULL) {
		p = spc->spc_idleproc;
		if (p == NULL) {
			int s;
			/*
			 * We get here if someone decides to switch during
			 * boot before forking kthreads, bleh.
			 * This is kind of like a stupid idle loop.
			 */
#ifdef MULTIPROCESSOR
			__mp_unlock(&sched_lock);
#endif
			spl0();
			delay(10);
			SCHED_LOCK(s);
			goto again;
		}
		KASSERT(p);
		p->p_stat = SRUN;
	}

	KASSERT(p->p_wchan == NULL);
	return (p);
}
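
/*
 * Illustrative note on the placement logic below (not from the original
 * source): sched_choosecpu_fork() first narrows the candidates to cpus
 * that are idle and have nothing queued, and only falls back to all cpus
 * if that set is empty.  Within the candidate set it prefers the fewest
 * queued procs and uses the load average only as a tie breaker, so a cpu
 * with spc_nrun == 0 and a high spc_ldavg still beats a cpu with
 * spc_nrun == 2 and a low spc_ldavg.
 */
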
struct cpu_info *
sched_choosecpu_fork(struct proc *parent, int flags)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	fixpt_t load, best_load = ~0;
	int run, best_run = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

#if 0
	/*
	 * XXX
	 * Don't do this until we have a painless way to move the cpu in exec.
	 * Preferably when nuking the old pmap and getting a new one on a
	 * new cpu.
	 */
	/*
	 * PPWAIT forks are simple. We know that the parent will not
	 * run until we exec and choose another cpu, so we just steal its
	 * cpu.
	 */
	if (flags & FORK_PPWAIT)
		return (parent->p_cpu);
#endif

	/*
	 * Look at all cpus that are currently idle and have nothing queued.
	 * If there are none, consider all cpus and pick the one with the
	 * fewest queued procs first, then the one with the lowest load
	 * average.
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);
	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		cpuset_del(&set, ci);

		load = ci->ci_schedstate.spc_ldavg;
		run = ci->ci_schedstate.spc_nrun;

		if (choice == NULL || run < best_run ||
		    (run == best_run && load < best_load)) {
			choice = ci;
			best_load = load;
			best_run = run;
		}
	}

	return (choice);
#else
	return (curcpu());
#endif
}

struct cpu_info *
sched_choosecpu(struct proc *p)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	int last_cost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	/*
	 * If pegged to a cpu, don't allow it to move.
	 */
	if (p->p_flag & P_CPUPEG)
		return (p->p_cpu);

	sched_choose++;

	/*
	 * Look at all cpus that are currently idle and have nothing queued.
	 * If there are none, pick the cheapest of all cpus.
	 * (A cpu that is both idle and queued may be handling an interrupt
	 * at this moment and simply hasn't had time to leave idle yet.)
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);

	/*
	 * First, just check if our current cpu is in that set, if it is,
	 * this is simple.
	 * Also, our cpu might not be idle, but if it's the current cpu
	 * and it has nothing else queued and we're curproc, take it.
	 */
	if (cpuset_isset(&set, p->p_cpu) ||
	    (p->p_cpu == curcpu() && p->p_cpu->ci_schedstate.spc_nrun == 0 &&
	    (p->p_cpu->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0 &&
	    curproc == p)) {
		sched_wasidle++;
		return (p->p_cpu);
	}

	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		int cost = sched_proc_to_cpu_cost(ci, p);

		if (choice == NULL || cost < last_cost) {
			choice = ci;
			last_cost = cost;
		}
		cpuset_del(&set, ci);
	}

	if (p->p_cpu != choice)
		sched_nmigrations++;
	else
		sched_nomigrations++;

	return (choice);
#else
	return (curcpu());
#endif
}
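
/*
 * Illustrative note (not from the original source) on the cpuset
 * arithmetic used above: cpuset_complement(&set, &sched_queued_cpus,
 * &sched_idle_cpus) computes "idle and not queued", i.e. per bitmap word
 * set = idle & ~queued.  With, say, idle = 0x6 (cpu1, cpu2) and
 * queued = 0x4 (cpu2), the candidate set becomes 0x2: only cpu1 is idle
 * with an empty run queue.  Intersecting with sched_all_cpus then drops
 * cpus that were halted or excluded by the sched_smt setting.
 */
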
/*
 * Attempt to steal a proc from some cpu.
 */
struct proc *
sched_steal_proc(struct cpu_info *self)
{
	struct proc *best = NULL;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int bestcost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	KASSERT((self->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0);

	/* Don't steal if we don't want to schedule processes on this CPU. */
	if (!cpuset_isset(&sched_all_cpus, self))
		return (NULL);

	cpuset_copy(&set, &sched_queued_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		struct proc *p;
		int queue;
		int cost;

		cpuset_del(&set, ci);

		spc = &ci->ci_schedstate;

		queue = ffs(spc->spc_whichqs) - 1;
		TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
			if (p->p_flag & P_CPUPEG)
				continue;

			cost = sched_proc_to_cpu_cost(self, p);

			if (best == NULL || cost < bestcost) {
				best = p;
				bestcost = cost;
			}
		}
	}
	if (best == NULL)
		return (NULL);

	spc = &best->p_cpu->ci_schedstate;
	remrunqueue(best);
	best->p_cpu = self;

	sched_stolen++;
#endif
	return (best);
}

#ifdef MULTIPROCESSOR
/*
 * Base 2 logarithm of an int. Returns 0 for 0 (yeye, I know).
 */
static int
log2(unsigned int i)
{
	int ret = 0;

	while (i >>= 1)
		ret++;

	return (ret);
}

/*
 * Calculate the cost of moving the proc to this cpu.
 *
 * What we want is some guesstimate of how much "performance" it will
 * cost us to move the proc here. Not just for caches and TLBs and NUMA
 * memory, but also for the proc itself. A highly loaded cpu might not
 * be the best candidate for this proc since it won't get run.
 *
 * Just total guesstimates for now.
 */

int sched_cost_load = 1;
int sched_cost_priority = 1;
int sched_cost_runnable = 3;
int sched_cost_resident = 1;
#endif

int
sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
{
	int cost = 0;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int l2resident = 0;

	spc = &ci->ci_schedstate;

	/*
	 * First, account for the priority of the proc we want to move.
	 * The lower the priority of what currently runs on the destination
	 * and the higher the priority of the proc, the more willing we are
	 * to move it.
	 */
	if (!cpuset_isset(&sched_idle_cpus, ci)) {
		cost += (p->p_priority - spc->spc_curpriority) *
		    sched_cost_priority;
		cost += sched_cost_runnable;
	}
	if (cpuset_isset(&sched_queued_cpus, ci))
		cost += spc->spc_nrun * sched_cost_runnable;

	/*
	 * Try to avoid the primary cpu as it handles hardware interrupts.
	 *
	 * XXX Needs to be revisited when we distribute interrupts
	 * over cpus.
	 */
	if (CPU_IS_PRIMARY(ci))
		cost += sched_cost_runnable;

	/*
	 * Higher load on the destination means we don't want to go there.
	 */
	cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);

	/*
	 * If the proc is on this cpu already, lower the cost by how much
	 * it has been running and an estimate of its footprint.
	 */
	if (p->p_cpu == ci && p->p_slptime == 0) {
		l2resident =
		    log2(pmap_resident_count(p->p_vmspace->vm_map.pmap));
		cost -= l2resident * sched_cost_resident;
	}
#endif
	return (cost);
}
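
/*
 * Worked example (illustrative, not from the original source) using the
 * default weights above: take a proc with p_priority 50 and a candidate
 * cpu that is neither idle nor the primary, runs something with
 * spc_curpriority 30, has spc_nrun == 2 queued, and a load average low
 * enough that the shifted load term rounds to 0.  Then
 *
 *	cost = (50 - 30) * sched_cost_priority	(20, priority penalty)
 *	     + sched_cost_runnable		( 3, cpu not idle)
 *	     + 2 * sched_cost_runnable		( 6, queued procs)
 *	     = 29
 *
 * whereas an idle, unqueued, non-primary cpu is only charged the load
 * term, so sched_choosecpu() prefers it.  The l2resident credit can make
 * the proc's current cpu cheaper still if it just ran there.
 */
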
/*
 * Peg a proc to a cpu.
 */
void
sched_peg_curproc(struct cpu_info *ci)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

#ifdef MULTIPROCESSOR

void
sched_start_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci))
			continue;
		atomic_clearbits_int(&spc->spc_schedflags,
		    SPCF_SHOULDHALT | SPCF_HALTED);
#ifdef __HAVE_CPU_TOPOLOGY
		if (!sched_smt && ci->ci_smt_id > 0)
			continue;
#endif
		cpuset_add(&sched_all_cpus, ci);
	}
}

void
sched_stop_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	/*
	 * Make sure we stop the secondary CPUs.
	 */
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci))
			continue;
		cpuset_del(&sched_all_cpus, ci);
		atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
	}
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;
		struct sleep_state sls;

		if (CPU_IS_PRIMARY(ci))
			continue;
		while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
			sleep_setup(&sls, spc, PZERO, "schedstate");
			sleep_finish(&sls,
			    (spc->spc_schedflags & SPCF_HALTED) == 0);
		}
	}
}

struct sched_barrier_state {
	struct cpu_info *ci;
	struct cond cond;
};

void
sched_barrier_task(void *arg)
{
	struct sched_barrier_state *sb = arg;
	struct cpu_info *ci = sb->ci;

	sched_peg_curproc(ci);
	cond_signal(&sb->cond);
	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}

void
sched_barrier(struct cpu_info *ci)
{
	struct sched_barrier_state sb;
	struct task task;
	CPU_INFO_ITERATOR cii;

	if (ci == NULL) {
		CPU_INFO_FOREACH(cii, ci) {
			if (CPU_IS_PRIMARY(ci))
				break;
		}
	}
	KASSERT(ci != NULL);

	if (ci == curcpu())
		return;

	sb.ci = ci;
	cond_init(&sb.cond);
	task_set(&task, sched_barrier_task, &sb);

	task_add(systqmp, &task);
	cond_wait(&sb.cond, "sbar");
}

#else

void
sched_barrier(struct cpu_info *ci)
{
}

#endif
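
/*
 * Illustrative usage sketch (not part of the original source): code that
 * must know a given cpu has context switched at least once, e.g. before
 * tearing down per-cpu state it might still reference, can call
 *
 *	sched_barrier(ci);
 *
 * which queues sched_barrier_task() and waits for it to run on ci.
 * sched_barrier_task() itself shows the peg/unpeg idiom:
 * sched_peg_curproc(ci) moves the calling proc to ci and sets P_CPUPEG so
 * the scheduler won't migrate it, and clearing P_CPUPEG afterwards makes
 * it movable again.
 */
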
/*
 * Functions to manipulate cpu sets.
 */
struct cpu_info *cpuset_infos[MAXCPUS];
static struct cpuset cpuset_all;

void
cpuset_init_cpu(struct cpu_info *ci)
{
	cpuset_add(&cpuset_all, ci);
	cpuset_infos[CPU_INFO_UNIT(ci)] = ci;
}

void
cpuset_clear(struct cpuset *cs)
{
	memset(cs, 0, sizeof(*cs));
}

void
cpuset_add(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_setbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

void
cpuset_del(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_clearbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

int
cpuset_isset(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	return (cs->cs_set[num/32] & (1 << (num % 32)));
}

void
cpuset_add_all(struct cpuset *cs)
{
	cpuset_copy(cs, &cpuset_all);
}

void
cpuset_copy(struct cpuset *to, struct cpuset *from)
{
	memcpy(to, from, sizeof(*to));
}

struct cpu_info *
cpuset_first(struct cpuset *cs)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		if (cs->cs_set[i])
			return (cpuset_infos[i * 32 + ffs(cs->cs_set[i]) - 1]);

	return (NULL);
}

void
cpuset_union(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] | b->cs_set[i];
}

void
cpuset_intersection(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] & b->cs_set[i];
}

void
cpuset_complement(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = b->cs_set[i] & ~a->cs_set[i];
}

#ifdef __HAVE_CPU_TOPOLOGY

#include <sys/sysctl.h>

int
sysctl_hwsmt(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int err, newsmt;

	newsmt = sched_smt;
	err = sysctl_int(oldp, oldlenp, newp, newlen, &newsmt);
	if (err)
		return err;
	if (newsmt > 1)
		newsmt = 1;
	if (newsmt < 0)
		newsmt = 0;
	if (newsmt == sched_smt)
		return 0;

	sched_smt = newsmt;
	CPU_INFO_FOREACH(cii, ci) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		if (ci->ci_smt_id == 0)
			continue;
		if (sched_smt)
			cpuset_add(&sched_all_cpus, ci);
		else
			cpuset_del(&sched_all_cpus, ci);
	}

	return 0;
}

#endif
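
/*
 * Usage note (illustrative, not part of the original source): in releases
 * carrying this code, sysctl_hwsmt() serves as the backend for the hw.smt
 * sysctl, which is wired up elsewhere in the sysctl MI code.  Assuming that
 * hookup, re-enabling scheduling on SMT siblings from userland is roughly
 *
 *	# sysctl hw.smt=1
 *
 * which flips sched_smt and adds the sibling cpus back to sched_all_cpus
 * as shown above.
 */
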