/*	$OpenBSD: kern_sched.c,v 1.42 2016/03/17 13:18:47 mpi Exp $	*/
/*
 * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>
#include <sys/task.h>

#include <uvm/uvm_extern.h>

void sched_kthreads_create(void *);

int sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p);
struct proc *sched_steal_proc(struct cpu_info *);

/*
 * To help choose which cpu should run which process, we keep track
 * of the cpus which are currently idle and the cpus which have
 * processes queued.
 */
struct cpuset sched_idle_cpus;
struct cpuset sched_queued_cpus;
struct cpuset sched_all_cpus;

/*
 * Some general scheduler counters.
 */
uint64_t sched_nmigrations;	/* Cpu migration counter */
uint64_t sched_nomigrations;	/* Cpu no migration counter */
uint64_t sched_noidle;		/* Times we didn't pick the idle task */
uint64_t sched_stolen;		/* Times we stole proc from other cpus */
uint64_t sched_choose;		/* Times we chose a cpu */
uint64_t sched_wasidle;		/* Times we came out of idle */

#ifdef MULTIPROCESSOR
struct taskq *sbartq;
#endif

/*
 * A few notes about cpu_switchto, which is implemented in MD code.
 *
 * cpu_switchto takes two arguments, the old proc and the proc
 * it should switch to. The new proc will never be NULL, so we always have
 * a saved state that we need to switch to. The old proc however can
 * be NULL if the process is exiting. NULL for the old proc simply
 * means "don't bother saving old state".
 *
 * cpu_switchto is supposed to atomically load the new state of the
 * process, including the pcb and pmap, and to set curproc, the p_cpu
 * pointer in the proc and p_stat to SONPROC. This is atomic only with
 * respect to interrupts; other cpus in the system must not depend on
 * this state being consistent. Therefore no locking is necessary in
 * cpu_switchto other than blocking interrupts during the context switch.
 */
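
/*
 * To make the contract above concrete, here is a minimal sketch of the
 * shape an MD implementation takes. This is illustrative only and is not
 * compiled; the md_save_state/md_load_state helpers are hypothetical
 * stand-ins for the per-port assembly that saves and restores register
 * state.
 */
#if 0
void
cpu_switchto(struct proc *old, struct proc *new)
{
	/* Interrupts stay blocked for the whole switch (see above). */
	if (old != NULL)
		md_save_state(old);	/* hypothetical: save pcb state */

	md_load_state(new);		/* hypothetical: load pcb and pmap */
	new->p_cpu = curcpu();
	new->p_stat = SONPROC;
	curcpu()->ci_curproc = new;	/* new proc becomes curproc */
}
#endif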

/*
 * sched_init_cpu is called from main() for the boot cpu; it is then the
 * responsibility of the MD code to call it for all other cpus.
 */
void
sched_init_cpu(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int i;

	for (i = 0; i < SCHED_NQS; i++)
		TAILQ_INIT(&spc->spc_qs[i]);

	spc->spc_idleproc = NULL;

	kthread_create_deferred(sched_kthreads_create, ci);

	LIST_INIT(&spc->spc_deadproc);

	/*
	 * Slight hack here until the cpuset code handles cpu_info
	 * structures.
	 */
	cpuset_init_cpu(ci);
	cpuset_add(&sched_all_cpus, ci);
}

void
sched_kthreads_create(void *v)
{
	struct cpu_info *ci = v;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	static int num;

	if (fork1(&proc0, FORK_SHAREVM|FORK_SHAREFILES|FORK_NOZOMBIE|
	    FORK_SYSTEM|FORK_SIGHAND|FORK_IDLE, NULL, 0, sched_idle, ci, NULL,
	    &spc->spc_idleproc))
		panic("fork idle");

	/* Name it as specified. */
	snprintf(spc->spc_idleproc->p_comm, sizeof(spc->spc_idleproc->p_comm),
	    "idle%d", num);

	num++;
}

void
sched_idle(void *v)
{
	struct schedstate_percpu *spc;
	struct proc *p = curproc;
	struct cpu_info *ci = v;
	int s;

	KERNEL_UNLOCK();

	spc = &ci->ci_schedstate;

	/*
	 * The first time we enter here we're not supposed to idle;
	 * we just go away for a while.
	 */
	SCHED_LOCK(s);
	cpuset_add(&sched_idle_cpus, ci);
	p->p_stat = SSLEEP;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	mi_switch();
	cpuset_del(&sched_idle_cpus, ci);
	SCHED_UNLOCK(s);

	KASSERT(ci == curcpu());
	KASSERT(curproc == spc->spc_idleproc);

	while (1) {
		while (!cpu_is_idle(curcpu())) {
			struct proc *dead;

			SCHED_LOCK(s);
			p->p_stat = SSLEEP;
			mi_switch();
			SCHED_UNLOCK(s);

			while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
				LIST_REMOVE(dead, p_hash);
				exit2(dead);
			}
		}

		splassert(IPL_NONE);

		cpuset_add(&sched_idle_cpus, ci);
		cpu_idle_enter();
		while (spc->spc_whichqs == 0) {
#ifdef MULTIPROCESSOR
			if (spc->spc_schedflags & SPCF_SHOULDHALT &&
			    (spc->spc_schedflags & SPCF_HALTED) == 0) {
				cpuset_del(&sched_idle_cpus, ci);
				SCHED_LOCK(s);
				atomic_setbits_int(&spc->spc_schedflags,
				    spc->spc_whichqs ? 0 : SPCF_HALTED);
				SCHED_UNLOCK(s);
				wakeup(spc);
			}
#endif
			cpu_idle_cycle();
		}
		cpu_idle_leave();
		cpuset_del(&sched_idle_cpus, ci);
	}
}

/*
 * To free our address space we have to jump through a few hoops.
 * The freeing is done by the reaper, but until we have one reaper
 * per cpu, we have no way of putting this proc on the deadproc list
 * and waking up the reaper without risking having our address space and
 * stack torn from under us before we manage to switch to another proc.
 * Therefore we have a per-cpu list of dead processes where we put this
 * proc and have idle clean up that list and move it to the reaper list.
 * All this will be unnecessary once we can bind the reaper to this cpu
 * and not risk having it switch to another one in case it sleeps.
 */
void
sched_exit(struct proc *p)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct timespec ts;
	struct proc *idle;
	int s;

	nanouptime(&ts);
	timespecsub(&ts, &spc->spc_runtime, &ts);
	timespecadd(&p->p_rtime, &ts, &p->p_rtime);

	LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);

	/* This process no longer needs to hold the kernel lock. */
	KERNEL_UNLOCK();

	SCHED_LOCK(s);
	idle = spc->spc_idleproc;
	idle->p_stat = SRUN;
	cpu_switchto(NULL, idle);
	panic("cpu_switchto returned");
}

/*
 * Run queue management.
 */
void
sched_init_runqueues(void)
{
#ifdef MULTIPROCESSOR
	sbartq = taskq_create("sbar", 1, IPL_NONE,
	    TASKQ_MPSAFE | TASKQ_CANTSLEEP);
	if (sbartq == NULL)
		panic("unable to create sbar taskq");
#endif
}

void
setrunqueue(struct proc *p)
{
	struct schedstate_percpu *spc;
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun++;

	TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
	spc->spc_whichqs |= (1 << queue);
	cpuset_add(&sched_queued_cpus, p->p_cpu);

	if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
		cpu_unidle(p->p_cpu);
}
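
/*
 * A worked example of the mapping above: with SCHED_NQS run queues and
 * four priority values folded into each one by the >> 2, a proc with
 * p_priority 50 lands in queue 12 and sets bit 12 in spc_whichqs.
 * sched_chooseproc() later picks a queue with ffs(), which returns the
 * lowest set bit, i.e. the most urgent non-empty queue.
 */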

void
remrunqueue(struct proc *p)
{
	struct schedstate_percpu *spc;
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun--;

	TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
	if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
		spc->spc_whichqs &= ~(1 << queue);
		if (spc->spc_whichqs == 0)
			cpuset_del(&sched_queued_cpus, p->p_cpu);
	}
}

struct proc *
sched_chooseproc(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p;
	int queue;

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	if (spc->spc_schedflags & SPCF_SHOULDHALT) {
		if (spc->spc_whichqs) {
			for (queue = 0; queue < SCHED_NQS; queue++) {
				while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
					remrunqueue(p);
					p->p_cpu = sched_choosecpu(p);
					KASSERT(p->p_cpu != curcpu());
					setrunqueue(p);
				}
			}
		}
		p = spc->spc_idleproc;
		KASSERT(p);
		KASSERT(p->p_wchan == NULL);
		p->p_stat = SRUN;
		return (p);
	}
#endif

again:
	if (spc->spc_whichqs) {
		queue = ffs(spc->spc_whichqs) - 1;
		p = TAILQ_FIRST(&spc->spc_qs[queue]);
		remrunqueue(p);
		sched_noidle++;
		KASSERT(p->p_stat == SRUN);
	} else if ((p = sched_steal_proc(curcpu())) == NULL) {
		p = spc->spc_idleproc;
		if (p == NULL) {
			int s;
			/*
			 * We get here if someone decides to switch during
			 * boot before forking kthreads, bleh.
			 * This is kind of like a stupid idle loop.
			 */
#ifdef MULTIPROCESSOR
			__mp_unlock(&sched_lock);
#endif
			spl0();
			delay(10);
			SCHED_LOCK(s);
			goto again;
		}
		KASSERT(p);
		p->p_stat = SRUN;
	}

	KASSERT(p->p_wchan == NULL);
	return (p);
}

struct cpu_info *
sched_choosecpu_fork(struct proc *parent, int flags)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	fixpt_t load, best_load = ~0;
	int run, best_run = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

#if 0
	/*
	 * XXX
	 * Don't do this until we have a painless way to move the cpu in exec.
	 * Preferably when nuking the old pmap and getting a new one on a
	 * new cpu.
	 */
	/*
	 * PPWAIT forks are simple. We know that the parent will not
	 * run until we exec and choose another cpu, so we just steal its
	 * cpu.
	 */
	if (flags & FORK_PPWAIT)
		return (parent->p_cpu);
#endif

	/*
	 * Look at all cpus that are currently idle and have nothing queued.
	 * If there are none, pick the one with the fewest queued procs
	 * first, then the one with the lowest load average.
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);
	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		cpuset_del(&set, ci);

		load = ci->ci_schedstate.spc_ldavg;
		run = ci->ci_schedstate.spc_nrun;

		if (choice == NULL || run < best_run ||
		    (run == best_run && load < best_load)) {
			choice = ci;
			best_load = load;
			best_run = run;
		}
	}

	return (choice);
#else
	return (curcpu());
#endif
}

struct cpu_info *
sched_choosecpu(struct proc *p)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	int last_cost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	/*
	 * If pegged to a cpu, don't allow it to move.
	 */
	if (p->p_flag & P_CPUPEG)
		return (p->p_cpu);

	sched_choose++;

	/*
	 * Look at all cpus that are currently idle and have nothing queued.
	 * If there are none, pick the cheapest of all cpus.
	 * (idle + queued could mean that the cpu is handling an interrupt
	 * at this moment and hasn't had time to leave idle yet).
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);

	/*
	 * First, just check if our current cpu is in that set; if it is,
	 * this is simple.
	 * Also, our cpu might not be idle, but if it's the current cpu,
	 * has nothing else queued and we're curproc, take it.
	 */
	if (cpuset_isset(&set, p->p_cpu) ||
	    (p->p_cpu == curcpu() && p->p_cpu->ci_schedstate.spc_nrun == 0 &&
	    (p->p_cpu->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0 &&
	    curproc == p)) {
		sched_wasidle++;
		return (p->p_cpu);
	}

	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		int cost = sched_proc_to_cpu_cost(ci, p);

		if (choice == NULL || cost < last_cost) {
			choice = ci;
			last_cost = cost;
		}
		cpuset_del(&set, ci);
	}

	if (p->p_cpu != choice)
		sched_nmigrations++;
	else
		sched_nomigrations++;

	return (choice);
#else
	return (curcpu());
#endif
}

/*
 * Attempt to steal a proc from some cpu.
 */
struct proc *
sched_steal_proc(struct cpu_info *self)
{
	struct proc *best = NULL;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int bestcost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	KASSERT((self->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0);

	cpuset_copy(&set, &sched_queued_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		struct proc *p;
		int queue;
		int cost;

		cpuset_del(&set, ci);

		spc = &ci->ci_schedstate;

		queue = ffs(spc->spc_whichqs) - 1;
		TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
			if (p->p_flag & P_CPUPEG)
				continue;

			cost = sched_proc_to_cpu_cost(self, p);

			if (best == NULL || cost < bestcost) {
				best = p;
				bestcost = cost;
			}
		}
	}
	if (best == NULL)
		return (NULL);

	spc = &best->p_cpu->ci_schedstate;
	remrunqueue(best);
	best->p_cpu = self;

	sched_stolen++;
#endif
	return (best);
}

#ifdef MULTIPROCESSOR
/*
 * Base 2 logarithm of an int. Returns 0 for 0 (yes, I know).
 */
static int
log2(unsigned int i)
{
	int ret = 0;

	while (i >>= 1)
		ret++;

	return (ret);
}
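
/*
 * Worked values, for reference: log2(0) == 0 (the special case noted
 * above), log2(1) == 0, log2(8) == 3 and log2(1000) == 9. That is as
 * precise as the footprint estimate below needs.
 */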

/*
 * Calculate the cost of moving the proc to this cpu.
 *
 * What we want is some guesstimate of how much "performance" it will
 * cost us to move the proc here. Not just for caches and TLBs and NUMA
 * memory, but also for the proc itself. A highly loaded cpu might not
 * be the best candidate for this proc since it won't get run.
 *
 * Just total guesstimates for now.
 */

int sched_cost_load = 1;
int sched_cost_priority = 1;
int sched_cost_runnable = 3;
int sched_cost_resident = 1;
#endif

int
sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
{
	int cost = 0;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int l2resident = 0;

	spc = &ci->ci_schedstate;

	/*
	 * First, account for the priority of the proc we want to move.
	 * The lower the priority of the destination and the higher the
	 * priority of the proc, the more willing we are to move it.
	 */
	if (!cpuset_isset(&sched_idle_cpus, ci)) {
		cost += (p->p_priority - spc->spc_curpriority) *
		    sched_cost_priority;
		cost += sched_cost_runnable;
	}
	if (cpuset_isset(&sched_queued_cpus, ci))
		cost += spc->spc_nrun * sched_cost_runnable;

	/*
	 * Try to avoid the primary cpu as it handles hardware interrupts.
	 *
	 * XXX Needs to be revisited when we distribute interrupts
	 * over cpus.
	 */
	if (CPU_IS_PRIMARY(ci))
		cost += sched_cost_runnable;

	/*
	 * Higher load on the destination means we don't want to go there.
	 */
	cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);

	/*
	 * If the proc was last on this cpu and hasn't slept since,
	 * lower the cost by an estimate of its cache footprint
	 * (the log2 of its resident set size).
	 */
	if (p->p_cpu == ci && p->p_slptime == 0) {
		l2resident =
		    log2(pmap_resident_count(p->p_vmspace->vm_map.pmap));
		cost -= l2resident * sched_cost_resident;
	}
#endif
	return (cost);
}
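
/*
 * A worked example with the default weights above (the numbers are
 * made up for illustration): a busy, queued, primary cpu with two
 * procs on its run queues, spc_curpriority 30 and a load term that
 * rounds to 1, evaluated for a non-resident proc with p_priority 50,
 * costs (50 - 30) * 1 + 3 + 2 * 3 + 3 + 1 == 33. An idle secondary
 * cpu with nothing queued would cost just its load term.
 */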

/*
 * Peg a proc to a cpu.
 */
void
sched_peg_curproc(struct cpu_info *ci)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

#ifdef MULTIPROCESSOR

void
sched_start_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci))
			continue;
		cpuset_add(&sched_all_cpus, ci);
		atomic_clearbits_int(&spc->spc_schedflags,
		    SPCF_SHOULDHALT | SPCF_HALTED);
	}
}

void
sched_stop_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	/*
	 * Make sure we stop the secondary CPUs.
	 */
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci))
			continue;
		cpuset_del(&sched_all_cpus, ci);
		atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
	}
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;
		struct sleep_state sls;

		if (CPU_IS_PRIMARY(ci))
			continue;
		while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
			sleep_setup(&sls, spc, PZERO, "schedstate");
			sleep_finish(&sls,
			    (spc->spc_schedflags & SPCF_HALTED) == 0);
		}
	}
}

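/*
 * How the barrier below works: sched_barrier() queues
 * sched_barrier_task() on sbartq and the task pegs its thread to the
 * target cpu. Once the task has run there and set spc_barrier, the
 * target cpu is known to have context switched, so nothing that ran on
 * it before the barrier is still on that cpu.
 */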
void
sched_barrier_task(void *arg)
{
	struct cpu_info *ci = arg;

	sched_peg_curproc(ci);
	ci->ci_schedstate.spc_barrier = 1;
	wakeup(&ci->ci_schedstate.spc_barrier);
	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}

void
sched_barrier(struct cpu_info *ci)
{
	struct sleep_state sls;
	struct task task;
	CPU_INFO_ITERATOR cii;
	struct schedstate_percpu *spc;

	if (ci == NULL) {
		CPU_INFO_FOREACH(cii, ci) {
			if (CPU_IS_PRIMARY(ci))
				break;
		}
	}
	KASSERT(ci != NULL);

	if (ci == curcpu())
		return;

	task_set(&task, sched_barrier_task, ci);
	spc = &ci->ci_schedstate;
	spc->spc_barrier = 0;
	task_add(sbartq, &task);
	while (!spc->spc_barrier) {
		sleep_setup(&sls, &spc->spc_barrier, PWAIT, "sbar");
		sleep_finish(&sls, !spc->spc_barrier);
	}
}

#else

void
sched_barrier(struct cpu_info *ci)
{
}

#endif

/*
 * Functions to manipulate cpu sets.
 */
struct cpu_info *cpuset_infos[MAXCPUS];
static struct cpuset cpuset_all;

void
cpuset_init_cpu(struct cpu_info *ci)
{
	cpuset_add(&cpuset_all, ci);
	cpuset_infos[CPU_INFO_UNIT(ci)] = ci;
}

void
cpuset_clear(struct cpuset *cs)
{
	memset(cs, 0, sizeof(*cs));
}

void
cpuset_add(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_setbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

void
cpuset_del(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_clearbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

int
cpuset_isset(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	return (cs->cs_set[num/32] & (1 << (num % 32)));
}
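
/*
 * The word/bit split above in one concrete case: the cpu with
 * CPU_INFO_UNIT 37 lives in cs_set[1] (37 / 32) as bit 5 (37 % 32).
 * The atomic ops let the shared sched_* sets be updated from several
 * cpus without a lock.
 */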

void
cpuset_add_all(struct cpuset *cs)
{
	cpuset_copy(cs, &cpuset_all);
}

void
cpuset_copy(struct cpuset *to, struct cpuset *from)
{
	memcpy(to, from, sizeof(*to));
}

struct cpu_info *
cpuset_first(struct cpuset *cs)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		if (cs->cs_set[i])
			return (cpuset_infos[i * 32 + ffs(cs->cs_set[i]) - 1]);

	return (NULL);
}

void
cpuset_union(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] | b->cs_set[i];
}

void
cpuset_intersection(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] & b->cs_set[i];
}

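/*
 * Note the argument order below: the result is b & ~a, i.e. the
 * members of 'b' that are not in 'a'. sched_choosecpu() relies on this
 * to compute "idle and nothing queued" as the idle set minus the
 * queued set.
 */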
void
cpuset_complement(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = b->cs_set[i] & ~a->cs_set[i];
}
796