/*	$OpenBSD: kern_sched.c,v 1.76 2022/12/05 23:18:37 deraadt Exp $	*/
/*
 * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>

#include <uvm/uvm_extern.h>

void sched_kthreads_create(void *);

int sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p);
struct proc *sched_steal_proc(struct cpu_info *);

/*
 * To help choose which cpu should run which process we keep track
 * of cpus which are currently idle and which cpus have processes
 * queued.
 */
struct cpuset sched_idle_cpus;
struct cpuset sched_queued_cpus;
struct cpuset sched_all_cpus;

/*
 * Some general scheduler counters.
 */
uint64_t sched_nmigrations;	/* Times we migrated a proc to another cpu */
uint64_t sched_nomigrations;	/* Times we kept a proc on the same cpu */
uint64_t sched_noidle;		/* Times we didn't pick the idle task */
uint64_t sched_stolen;		/* Times we stole proc from other cpus */
uint64_t sched_choose;		/* Times we chose a cpu */
uint64_t sched_wasidle;		/* Times we came out of idle */

int sched_smt;

/*
 * A few notes about cpu_switchto, which is implemented in MD code.
 *
 * cpu_switchto takes two arguments, the old proc and the proc
 * it should switch to. The new proc will never be NULL, so we always have
 * a saved state that we need to switch to. The old proc however can
 * be NULL if the process is exiting. NULL for the old proc simply
 * means "don't bother saving old state".
 *
 * cpu_switchto is supposed to atomically load the new state of the process
 * including the pcb, pmap and setting curproc, the p_cpu pointer in the
 * proc and p_stat to SONPROC. This is atomic only with respect to
 * interrupts; other cpus in the system must not depend on this state
 * being consistent. Therefore no locking is necessary in cpu_switchto
 * other than blocking interrupts during the context switch.
 */
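
/*
 * Illustrative sketch only, not a real MD implementation: roughly the
 * shape cpu_switchto() takes under the contract above.  The helpers
 * md_save_context() and md_load_context() are hypothetical names.
 */
#if 0
void
cpu_switchto(struct proc *old, struct proc *new)
{
	/* Interrupts stay blocked for the whole switch. */
	if (old != NULL)
		md_save_context(old);	/* save pcb and register state */

	new->p_cpu = curcpu();		/* all updated while interrupts */
	new->p_stat = SONPROC;		/* are blocked, so the local cpu */
	curcpu()->ci_curproc = new;	/* never sees a half-done switch */

	md_load_context(new);		/* load pcb, switch pmap, resume */
}
#endif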

/*
 * sched_init_cpu is called from main() for the boot cpu, then it's the
 * responsibility of the MD code to call it for all other cpus.
 */
void
sched_init_cpu(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int i;

	for (i = 0; i < SCHED_NQS; i++)
		TAILQ_INIT(&spc->spc_qs[i]);

	spc->spc_idleproc = NULL;

	kthread_create_deferred(sched_kthreads_create, ci);

	LIST_INIT(&spc->spc_deadproc);
	SIMPLEQ_INIT(&spc->spc_deferred);

	/*
	 * Slight hack here until the cpuset code handles cpu_info
	 * structures.
	 */
	cpuset_init_cpu(ci);

#ifdef __HAVE_CPU_TOPOLOGY
	if (!sched_smt && ci->ci_smt_id > 0)
		return;
#endif
	cpuset_add(&sched_all_cpus, ci);
}
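
/*
 * Illustrative sketch only: as the comment above sched_init_cpu() says,
 * MD bring-up code is expected to call it for every secondary cpu
 * before that cpu enters the scheduler.  cpu_boot_secondary() here is
 * a hypothetical hook, not a real interface.
 */
#if 0
void
cpu_boot_secondary(struct cpu_info *ci)
{
	sched_init_cpu(ci);
	/* remaining MD setup, then let the cpu start scheduling */
}
#endif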

void
sched_kthreads_create(void *v)
{
	struct cpu_info *ci = v;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	static int num;

	if (fork1(&proc0, FORK_SHAREVM|FORK_SHAREFILES|FORK_NOZOMBIE|
	    FORK_SYSTEM|FORK_IDLE, sched_idle, ci, NULL,
	    &spc->spc_idleproc))
		panic("fork idle");

	/* Name it as specified. */
	snprintf(spc->spc_idleproc->p_p->ps_comm,
	    sizeof(spc->spc_idleproc->p_p->ps_comm),
	    "idle%d", num);

	num++;
}

void
sched_idle(void *v)
{
	struct schedstate_percpu *spc;
	struct proc *p = curproc;
	struct cpu_info *ci = v;
	int s;

	KERNEL_UNLOCK();

	spc = &ci->ci_schedstate;

	/*
	 * The first time we enter here we're not supposed to idle;
	 * just go away for a while.
	 */
	SCHED_LOCK(s);
	cpuset_add(&sched_idle_cpus, ci);
	p->p_stat = SSLEEP;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	mi_switch();
	cpuset_del(&sched_idle_cpus, ci);
	SCHED_UNLOCK(s);

	KASSERT(ci == curcpu());
	KASSERT(curproc == spc->spc_idleproc);

	while (1) {
		while (!cpu_is_idle(curcpu())) {
			struct proc *dead;

			SCHED_LOCK(s);
			p->p_stat = SSLEEP;
			mi_switch();
			SCHED_UNLOCK(s);

			while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
				LIST_REMOVE(dead, p_hash);
				exit2(dead);
			}
		}

		splassert(IPL_NONE);

		smr_idle();

		cpuset_add(&sched_idle_cpus, ci);
		cpu_idle_enter();
		while (spc->spc_whichqs == 0) {
#ifdef MULTIPROCESSOR
			if (spc->spc_schedflags & SPCF_SHOULDHALT &&
			    (spc->spc_schedflags & SPCF_HALTED) == 0) {
				cpuset_del(&sched_idle_cpus, ci);
				SCHED_LOCK(s);
				atomic_setbits_int(&spc->spc_schedflags,
				    spc->spc_whichqs ? 0 : SPCF_HALTED);
				SCHED_UNLOCK(s);
				wakeup(spc);
			}
#endif
			cpu_idle_cycle();
		}
		cpu_idle_leave();
		cpuset_del(&sched_idle_cpus, ci);
	}
}

/*
 * To free our address space we have to jump through a few hoops.
 * The freeing is done by the reaper, but until we have one reaper
 * per cpu, we have no way of putting this proc on the deadproc list
 * and waking up the reaper without risking having our address space and
 * stack torn from under us before we manage to switch to another proc.
 * Therefore we have a per-cpu list of dead processes where we put this
 * proc and have idle clean up that list and move it to the reaper list.
 * All this will be unnecessary once we can bind the reaper to this cpu
 * and not risk having it switch to another in case it sleeps.
 */
void
sched_exit(struct proc *p)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct timespec ts;
	struct proc *idle;
	int s;

	nanouptime(&ts);
	timespecsub(&ts, &spc->spc_runtime, &ts);
	timespecadd(&p->p_rtime, &ts, &p->p_rtime);

	LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);

#ifdef MULTIPROCESSOR
	/* This process no longer needs to hold the kernel lock. */
	KERNEL_ASSERT_LOCKED();
	__mp_release_all(&kernel_lock);
#endif

	SCHED_LOCK(s);
	idle = spc->spc_idleproc;
	idle->p_stat = SRUN;
	cpu_switchto(NULL, idle);
	panic("cpu_switchto returned");
}

/*
 * Run queue management.
 */
void
sched_init_runqueues(void)
{
}

void
setrunqueue(struct cpu_info *ci, struct proc *p, uint8_t prio)
{
	struct schedstate_percpu *spc;
	int queue = prio >> 2;

	if (ci == NULL)
		ci = sched_choosecpu(p);

	KASSERT(ci != NULL);
	SCHED_ASSERT_LOCKED();

	p->p_cpu = ci;
	p->p_stat = SRUN;
	p->p_runpri = prio;

	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun++;
	TRACEPOINT(sched, enqueue, p->p_tid + THREAD_PID_OFFSET,
	    p->p_p->ps_pid);

	TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
	spc->spc_whichqs |= (1U << queue);
	cpuset_add(&sched_queued_cpus, p->p_cpu);

	if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
		cpu_unidle(p->p_cpu);

	if (prio < spc->spc_curpriority)
		need_resched(ci);
}
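
/*
 * Worked example (illustrative): priorities map to run queues four at
 * a time, so a thread queued at prio 50 lands in queue 50 >> 2 == 12
 * and sets bit 12 of spc_whichqs:
 */
#if 0
	setrunqueue(NULL, p, 50);
	KASSERT(p->p_cpu->ci_schedstate.spc_whichqs & (1U << 12));
#endif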

void
remrunqueue(struct proc *p)
{
	struct schedstate_percpu *spc;
	int queue = p->p_runpri >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun--;
	TRACEPOINT(sched, dequeue, p->p_tid + THREAD_PID_OFFSET,
	    p->p_p->ps_pid);

	TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
	if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
		spc->spc_whichqs &= ~(1U << queue);
		if (spc->spc_whichqs == 0)
			cpuset_del(&sched_queued_cpus, p->p_cpu);
	}
}

struct proc *
sched_chooseproc(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p;
	int queue;

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	if (spc->spc_schedflags & SPCF_SHOULDHALT) {
		if (spc->spc_whichqs) {
			for (queue = 0; queue < SCHED_NQS; queue++) {
				while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
					remrunqueue(p);
					setrunqueue(NULL, p, p->p_runpri);
					if (p->p_cpu == curcpu()) {
						KASSERT(p->p_flag & P_CPUPEG);
						goto again;
					}
				}
			}
		}
		p = spc->spc_idleproc;
		KASSERT(p);
		KASSERT(p->p_wchan == NULL);
		p->p_stat = SRUN;
		return (p);
	}
#endif

again:
	if (spc->spc_whichqs) {
		queue = ffs(spc->spc_whichqs) - 1;
		p = TAILQ_FIRST(&spc->spc_qs[queue]);
		remrunqueue(p);
		sched_noidle++;
		if (p->p_stat != SRUN)
			panic("thread %d not in SRUN: %d", p->p_tid, p->p_stat);
	} else if ((p = sched_steal_proc(curcpu())) == NULL) {
		p = spc->spc_idleproc;
		if (p == NULL) {
			int s;
			/*
			 * We get here if someone decides to switch during
			 * boot before forking kthreads, bleh.
			 * This is kind of like a stupid idle loop.
			 */
#ifdef MULTIPROCESSOR
			__mp_unlock(&sched_lock);
#endif
			spl0();
			delay(10);
			SCHED_LOCK(s);
			goto again;
		}
		KASSERT(p);
		p->p_stat = SRUN;
	}

	KASSERT(p->p_wchan == NULL);
	return (p);
}
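
/*
 * Note (illustrative): lower numeric prio means higher priority and
 * fills lower queue bits, so ffs() on spc_whichqs above finds the
 * highest-priority non-empty queue.  E.g. whichqs == 0x1010 (queues 4
 * and 12 occupied) gives ffs(0x1010) - 1 == 4, so queue 4 is served.
 */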

struct cpu_info *
sched_choosecpu_fork(struct proc *parent, int flags)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	fixpt_t load, best_load = ~0;
	int run, best_run = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

#if 0
	/*
	 * XXX
	 * Don't do this until we have a painless way to move the cpu in exec.
	 * Preferably when nuking the old pmap and getting a new one on a
	 * new cpu.
	 */
	/*
	 * PPWAIT forks are simple. We know that the parent will not
	 * run until we exec and choose another cpu, so we just steal its
	 * cpu.
	 */
	if (flags & FORK_PPWAIT)
		return (parent->p_cpu);
#endif

	/*
	 * Look at all cpus that are currently idle and have nothing queued.
	 * If there are none, consider all cpus and pick the one with the
	 * fewest queued procs, breaking ties with the lowest load average.
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);
	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		cpuset_del(&set, ci);

		load = ci->ci_schedstate.spc_ldavg;
		run = ci->ci_schedstate.spc_nrun;

		if (choice == NULL || run < best_run ||
		    (run == best_run && load < best_load)) {
			choice = ci;
			best_load = load;
			best_run = run;
		}
	}

	return (choice);
#else
	return (curcpu());
#endif
}

struct cpu_info *
sched_choosecpu(struct proc *p)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	int last_cost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	/*
	 * If pegged to a cpu, don't allow it to move.
	 */
	if (p->p_flag & P_CPUPEG)
		return (p->p_cpu);

	sched_choose++;

	/*
	 * Look at all cpus that are currently idle and have nothing queued.
	 * If there are none, consider all cpus and pick the cheapest one.
	 * (A cpu that is both idle and queued could be handling an interrupt
	 * at this moment and hasn't had time to leave idle yet.)
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);

	/*
	 * First, just check if our current cpu is in that set, if it is,
	 * this is simple.
	 * Also, our cpu might not be idle, but if it's the current cpu
	 * and it has nothing else queued and we're curproc, take it.
	 */
	if (cpuset_isset(&set, p->p_cpu) ||
	    (p->p_cpu == curcpu() && p->p_cpu->ci_schedstate.spc_nrun == 0 &&
	    (p->p_cpu->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0 &&
	    curproc == p)) {
		sched_wasidle++;
		return (p->p_cpu);
	}

	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		int cost = sched_proc_to_cpu_cost(ci, p);

		if (choice == NULL || cost < last_cost) {
			choice = ci;
			last_cost = cost;
		}
		cpuset_del(&set, ci);
	}

	if (p->p_cpu != choice)
		sched_nmigrations++;
	else
		sched_nomigrations++;

	return (choice);
#else
	return (curcpu());
#endif
}

/*
 * Attempt to steal a proc from some cpu.
 */
struct proc *
sched_steal_proc(struct cpu_info *self)
{
	struct proc *best = NULL;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int bestcost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	KASSERT((self->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0);

	/* Don't steal if we don't want to schedule processes on this CPU. */
	if (!cpuset_isset(&sched_all_cpus, self))
		return (NULL);

	cpuset_copy(&set, &sched_queued_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		struct proc *p;
		int queue;
		int cost;

		cpuset_del(&set, ci);

		spc = &ci->ci_schedstate;

		queue = ffs(spc->spc_whichqs) - 1;
		TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
			if (p->p_flag & P_CPUPEG)
				continue;

			cost = sched_proc_to_cpu_cost(self, p);

			if (best == NULL || cost < bestcost) {
				best = p;
				bestcost = cost;
			}
		}
	}
	if (best == NULL)
		return (NULL);

	remrunqueue(best);
	best->p_cpu = self;

	sched_stolen++;
#endif
	return (best);
}

#ifdef MULTIPROCESSOR
/*
 * Base 2 logarithm of an int. Returns 0 for 0 (yes, yes, I know).
 */
static int
log2(unsigned int i)
{
	int ret = 0;

	while (i >>= 1)
		ret++;

	return (ret);
}
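
/*
 * Examples (illustrative): log2(1) == 0, log2(2) == 1, log2(1000) == 9,
 * and, as conceded above, log2(0) == 0 as well.
 */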

/*
 * Calculate the cost of moving the proc to this cpu.
 *
 * What we want is some guesstimate of how much "performance" it will
 * cost us to move the proc here. Not just for caches and TLBs and NUMA
 * memory, but also for the proc itself. A highly loaded cpu might not
 * be the best candidate for this proc since it won't get run.
 *
 * Just total guesstimates for now.
 */

int sched_cost_load = 1;
int sched_cost_priority = 1;
int sched_cost_runnable = 3;
int sched_cost_resident = 1;
#endif

int
sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
{
	int cost = 0;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int l2resident = 0;

	spc = &ci->ci_schedstate;

	/*
	 * First, account for the priority of the proc we want to move.
	 * We are more willing to move the lower the priority of the
	 * destination cpu and the higher the priority of the proc.
	 */
	if (!cpuset_isset(&sched_idle_cpus, ci)) {
		cost += (p->p_usrpri - spc->spc_curpriority) *
		    sched_cost_priority;
		cost += sched_cost_runnable;
	}
	if (cpuset_isset(&sched_queued_cpus, ci))
		cost += spc->spc_nrun * sched_cost_runnable;

	/*
	 * Try to avoid the primary cpu as it handles hardware interrupts.
	 *
	 * XXX Needs to be revisited when we distribute interrupts
	 * over cpus.
	 */
	if (CPU_IS_PRIMARY(ci))
		cost += sched_cost_runnable;

	/*
	 * Higher load on the destination means we don't want to go there.
	 */
	cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);

	/*
	 * If the proc is on this cpu already, lower the cost by how much
	 * it has been running and an estimate of its footprint.
	 */
	if (p->p_cpu == ci && p->p_slptime == 0) {
		l2resident =
		    log2(pmap_resident_count(p->p_vmspace->vm_map.pmap));
		cost -= l2resident * sched_cost_resident;
	}
#endif
	return (cost);
}
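
/*
 * Worked example (illustrative, with the default weights above):
 * moving a prio-50 proc to a busy, queued, primary cpu running at
 * curpriority 30 with spc_nrun == 2 and spc_ldavg == FSCALE (a load
 * average of 1.0) costs:
 *
 *	(50 - 30) * sched_cost_priority		= 20
 *	+ sched_cost_runnable (cpu not idle)	=  3
 *	+ 2 * sched_cost_runnable (queued)	=  6
 *	+ sched_cost_runnable (primary cpu)	=  3
 *	+ (1 * FSCALE) >> FSHIFT (load)		=  1
 *						  --
 *						  33
 */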

/*
 * Peg a proc to a cpu.
 */
void
sched_peg_curproc(struct cpu_info *ci)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	setrunqueue(ci, p, p->p_usrpri);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}
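
/*
 * Usage note: sched_barrier_task() below shows the pegging pattern in
 * this file; the thread pegs itself to the target cpu, does its work
 * there, then clears P_CPUPEG to become migratable again.
 */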

#ifdef MULTIPROCESSOR

void
sched_start_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci) || !CPU_IS_RUNNING(ci))
			continue;
		atomic_clearbits_int(&spc->spc_schedflags,
		    SPCF_SHOULDHALT | SPCF_HALTED);
#ifdef __HAVE_CPU_TOPOLOGY
		if (!sched_smt && ci->ci_smt_id > 0)
			continue;
#endif
		cpuset_add(&sched_all_cpus, ci);
	}
}

void
sched_stop_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	/*
	 * Make sure we stop the secondary CPUs.
	 */
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci) || !CPU_IS_RUNNING(ci))
			continue;
		cpuset_del(&sched_all_cpus, ci);
		atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
	}
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;
		struct sleep_state sls;

		if (CPU_IS_PRIMARY(ci) || !CPU_IS_RUNNING(ci))
			continue;
		while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
			sleep_setup(&sls, spc, PZERO, "schedstate", 0);
			sleep_finish(&sls,
			    (spc->spc_schedflags & SPCF_HALTED) == 0);
		}
	}
}

struct sched_barrier_state {
	struct cpu_info *ci;
	struct cond cond;
};

void
sched_barrier_task(void *arg)
{
	struct sched_barrier_state *sb = arg;
	struct cpu_info *ci = sb->ci;

	sched_peg_curproc(ci);
	cond_signal(&sb->cond);
	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}

void
sched_barrier(struct cpu_info *ci)
{
	struct sched_barrier_state sb;
	struct task task;
	CPU_INFO_ITERATOR cii;

	if (ci == NULL) {
		CPU_INFO_FOREACH(cii, ci) {
			if (CPU_IS_PRIMARY(ci))
				break;
		}
	}
	KASSERT(ci != NULL);

	if (ci == curcpu())
		return;

	sb.ci = ci;
	cond_init(&sb.cond);
	task_set(&task, sched_barrier_task, &sb);

	task_add(systqmp, &task);
	cond_wait(&sb.cond, "sbar");
}

#else

void
sched_barrier(struct cpu_info *ci)
{
}

#endif

/*
 * Functions to manipulate cpu sets.
 */
struct cpu_info *cpuset_infos[MAXCPUS];
static struct cpuset cpuset_all;

void
cpuset_init_cpu(struct cpu_info *ci)
{
	cpuset_add(&cpuset_all, ci);
	cpuset_infos[CPU_INFO_UNIT(ci)] = ci;
}

void
cpuset_clear(struct cpuset *cs)
{
	memset(cs, 0, sizeof(*cs));
}

void
cpuset_add(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_setbits_int(&cs->cs_set[num/32], (1U << (num % 32)));
}

void
cpuset_del(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_clearbits_int(&cs->cs_set[num/32], (1U << (num % 32)));
}

int
cpuset_isset(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	return (cs->cs_set[num/32] & (1U << (num % 32)));
}
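
/*
 * Worked example (illustrative): a cpuset is an array of 32-bit words,
 * so cpu unit 37 lives in word 37 / 32 == 1 as bit 37 % 32 == 5, i.e.
 * cs_set[1] & (1U << 5).
 */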

void
cpuset_add_all(struct cpuset *cs)
{
	cpuset_copy(cs, &cpuset_all);
}

void
cpuset_copy(struct cpuset *to, struct cpuset *from)
{
	memcpy(to, from, sizeof(*to));
}

struct cpu_info *
cpuset_first(struct cpuset *cs)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		if (cs->cs_set[i])
			return (cpuset_infos[i * 32 + ffs(cs->cs_set[i]) - 1]);

	return (NULL);
}

void
cpuset_union(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] | b->cs_set[i];
}

void
cpuset_intersection(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] & b->cs_set[i];
}

void
cpuset_complement(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = b->cs_set[i] & ~a->cs_set[i];
}

int
cpuset_cardinality(struct cpuset *cs)
{
	int cardinality, i, n;

	cardinality = 0;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		for (n = cs->cs_set[i]; n != 0; n &= n - 1)
			cardinality++;

	return (cardinality);
}

int
sysctl_hwncpuonline(void)
{
	return cpuset_cardinality(&sched_all_cpus);
}

int
cpu_is_online(struct cpu_info *ci)
{
	return cpuset_isset(&sched_all_cpus, ci);
}

#ifdef __HAVE_CPU_TOPOLOGY

#include <sys/sysctl.h>

int
sysctl_hwsmt(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int err, newsmt;

	newsmt = sched_smt;
	err = sysctl_int_bounded(oldp, oldlenp, newp, newlen, &newsmt, 0, 1);
	if (err)
		return err;
	if (newsmt == sched_smt)
		return 0;

	sched_smt = newsmt;
	CPU_INFO_FOREACH(cii, ci) {
		if (CPU_IS_PRIMARY(ci) || !CPU_IS_RUNNING(ci))
			continue;
		if (ci->ci_smt_id == 0)
			continue;
		if (sched_smt)
			cpuset_add(&sched_all_cpus, ci);
		else
			cpuset_del(&sched_all_cpus, ci);
	}

	return 0;
}

#endif
885