/*	$OpenBSD: kern_sched.c,v 1.38 2015/09/20 22:05:14 kettenis Exp $	*/
/*
 * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>
#include <sys/task.h>

#include <uvm/uvm_extern.h>

void sched_kthreads_create(void *);

int sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p);
struct proc *sched_steal_proc(struct cpu_info *);

/*
 * To help choose which cpu should run which process we keep track
 * of cpus which are currently idle and of cpus which have processes
 * queued.
 */
struct cpuset sched_idle_cpus;
struct cpuset sched_queued_cpus;
struct cpuset sched_all_cpus;

/*
 * Some general scheduler counters.
 */
uint64_t sched_nmigrations;	/* Cpu migration counter */
uint64_t sched_nomigrations;	/* Cpu no migration counter */
uint64_t sched_noidle;		/* Times we didn't pick the idle task */
uint64_t sched_stolen;		/* Times we stole proc from other cpus */
uint64_t sched_choose;		/* Times we chose a cpu */
uint64_t sched_wasidle;		/* Times we came out of idle */

/*
 * A few notes about cpu_switchto, which is implemented in MD code.
 *
 * cpu_switchto takes two arguments, the old proc and the proc
 * it should switch to. The new proc will never be NULL, so we always have
 * a saved state that we need to switch to. The old proc however can
 * be NULL if the process is exiting. NULL for the old proc simply
 * means "don't bother saving old state".
 *
 * cpu_switchto is supposed to atomically load the new state of the process
 * including the pcb, the pmap, curproc, the p_cpu pointer in the proc and
 * p_stat set to SONPROC. This only needs to be atomic with respect to
 * interrupts; other cpus in the system must not depend on this state being
 * consistent. Therefore no locking is necessary in cpu_switchto other than
 * blocking interrupts during the context switch.
 */

/*
 * sched_init_cpu is called from main() for the boot cpu, then it's the
 * responsibility of the MD code to call it for all other cpus.
 */
void
sched_init_cpu(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int i;

	for (i = 0; i < SCHED_NQS; i++)
		TAILQ_INIT(&spc->spc_qs[i]);

	spc->spc_idleproc = NULL;

	kthread_create_deferred(sched_kthreads_create, ci);

	LIST_INIT(&spc->spc_deadproc);

	/*
	 * Slight hack here until the cpuset code handles cpu_info
	 * structures.
	 */
	cpuset_init_cpu(ci);
	cpuset_add(&sched_all_cpus, ci);
}

void
sched_kthreads_create(void *v)
{
	struct cpu_info *ci = v;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	static int num;

	if (fork1(&proc0, FORK_SHAREVM|FORK_SHAREFILES|FORK_NOZOMBIE|
	    FORK_SYSTEM|FORK_SIGHAND|FORK_IDLE, NULL, 0, sched_idle, ci, NULL,
	    &spc->spc_idleproc))
		panic("fork idle");

	/* Name it as specified. */
	snprintf(spc->spc_idleproc->p_comm, sizeof(spc->spc_idleproc->p_comm),
	    "idle%d", num);

	num++;
}

void
sched_idle(void *v)
{
	struct schedstate_percpu *spc;
	struct proc *p = curproc;
	struct cpu_info *ci = v;
	int s;

	KERNEL_UNLOCK();

	spc = &ci->ci_schedstate;

	/*
	 * First time we enter here, we're not supposed to idle,
	 * just go away for a while.
	 */
	SCHED_LOCK(s);
	cpuset_add(&sched_idle_cpus, ci);
	p->p_stat = SSLEEP;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	mi_switch();
	cpuset_del(&sched_idle_cpus, ci);
	SCHED_UNLOCK(s);

	KASSERT(ci == curcpu());
	KASSERT(curproc == spc->spc_idleproc);

	while (1) {
		while (!curcpu_is_idle()) {
			struct proc *dead;

			SCHED_LOCK(s);
			p->p_stat = SSLEEP;
			mi_switch();
			SCHED_UNLOCK(s);

			while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
				LIST_REMOVE(dead, p_hash);
				exit2(dead);
			}
		}

		splassert(IPL_NONE);

		cpuset_add(&sched_idle_cpus, ci);
		cpu_idle_enter();
		while (spc->spc_whichqs == 0) {
#ifdef MULTIPROCESSOR
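			/*
			 * If this cpu is being taken offline, note that we
			 * have halted and wake up sched_stop_secondary_cpus(),
			 * which is sleeping on spc until SPCF_HALTED is set.
			 */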
			if (spc->spc_schedflags & SPCF_SHOULDHALT &&
			    (spc->spc_schedflags & SPCF_HALTED) == 0) {
				cpuset_del(&sched_idle_cpus, ci);
				SCHED_LOCK(s);
				atomic_setbits_int(&spc->spc_schedflags,
				    spc->spc_whichqs ? 0 : SPCF_HALTED);
				SCHED_UNLOCK(s);
				wakeup(spc);
			}
#endif
			cpu_idle_cycle();
		}
		cpu_idle_leave();
		cpuset_del(&sched_idle_cpus, ci);
	}
}

/*
 * To free our address space we have to jump through a few hoops.
 * The freeing is done by the reaper, but until we have one reaper
 * per cpu, we have no way of putting this proc on the deadproc list
 * and waking up the reaper without risking having our address space and
 * stack torn from under us before we manage to switch to another proc.
 * Therefore we have a per-cpu list of dead processes where we put this
 * proc and have idle clean up that list and move it to the reaper list.
 * All this will be unnecessary once we can bind the reaper to this cpu
 * and not risk having it switch to another cpu in case it sleeps.
 */
void
sched_exit(struct proc *p)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct timespec ts;
	struct proc *idle;
	int s;

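	/* Add the time since we last came on cpu to the proc's run time. */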
	nanouptime(&ts);
	timespecsub(&ts, &spc->spc_runtime, &ts);
	timespecadd(&p->p_rtime, &ts, &p->p_rtime);

	LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);

	/* This process no longer needs to hold the kernel lock. */
	KERNEL_UNLOCK();

	SCHED_LOCK(s);
	idle = spc->spc_idleproc;
	idle->p_stat = SRUN;
	cpu_switchto(NULL, idle);
	panic("cpu_switchto returned");
}

/*
 * Run queue management.
 */
void
sched_init_runqueues(void)
{
}

void
setrunqueue(struct proc *p)
{
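	/*
	 * Each of the SCHED_NQS run queues covers four priority levels,
	 * hence the shift by two when mapping p_priority to a queue.
	 */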
	struct schedstate_percpu *spc;
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun++;

	TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
	spc->spc_whichqs |= (1 << queue);
	cpuset_add(&sched_queued_cpus, p->p_cpu);

	if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
		cpu_unidle(p->p_cpu);
}

void
remrunqueue(struct proc *p)
{
	struct schedstate_percpu *spc;
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();
	spc = &p->p_cpu->ci_schedstate;
	spc->spc_nrun--;

	TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
	if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
		spc->spc_whichqs &= ~(1 << queue);
		if (spc->spc_whichqs == 0)
			cpuset_del(&sched_queued_cpus, p->p_cpu);
	}
}

struct proc *
sched_chooseproc(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p;
	int queue;

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
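	/*
	 * If this cpu is being halted, move everything still queued here
	 * to other cpus and hand back the idle proc so the halt can
	 * complete.
	 */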
	if (spc->spc_schedflags & SPCF_SHOULDHALT) {
		if (spc->spc_whichqs) {
			for (queue = 0; queue < SCHED_NQS; queue++) {
				while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
					remrunqueue(p);
					p->p_cpu = sched_choosecpu(p);
					KASSERT(p->p_cpu != curcpu());
					setrunqueue(p);
				}
			}
		}
		p = spc->spc_idleproc;
		KASSERT(p);
		KASSERT(p->p_wchan == NULL);
		p->p_stat = SRUN;
		return (p);
	}
#endif

again:
	if (spc->spc_whichqs) {
		queue = ffs(spc->spc_whichqs) - 1;
		p = TAILQ_FIRST(&spc->spc_qs[queue]);
		remrunqueue(p);
		sched_noidle++;
		KASSERT(p->p_stat == SRUN);
	} else if ((p = sched_steal_proc(curcpu())) == NULL) {
		p = spc->spc_idleproc;
		if (p == NULL) {
			int s;
			/*
			 * We get here if someone decides to switch during
			 * boot before forking kthreads, bleh.
			 * This is kind of like a stupid idle loop.
			 */
#ifdef MULTIPROCESSOR
			__mp_unlock(&sched_lock);
#endif
			spl0();
			delay(10);
			SCHED_LOCK(s);
			goto again;
		}
		KASSERT(p);
		p->p_stat = SRUN;
	}

	KASSERT(p->p_wchan == NULL);
	return (p);
}

struct cpu_info *
sched_choosecpu_fork(struct proc *parent, int flags)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	fixpt_t load, best_load = ~0;
	int run, best_run = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

#if 0
	/*
	 * XXX
	 * Don't do this until we have a painless way to move the cpu in exec.
	 * Preferably when nuking the old pmap and getting a new one on a
	 * new cpu.
	 */
	/*
	 * PPWAIT forks are simple. We know that the parent will not
	 * run until we exec and choose another cpu, so we just steal its
	 * cpu.
	 */
	if (flags & FORK_PPWAIT)
		return (parent->p_cpu);
#endif

	/*
	 * Look at all cpus that are currently idle and have nothing queued;
	 * if there are none, consider all cpus. Among the candidates, pick
	 * the one with the fewest queued procs first, then the one with the
	 * lowest load average.
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);
	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		cpuset_del(&set, ci);

		load = ci->ci_schedstate.spc_ldavg;
		run = ci->ci_schedstate.spc_nrun;

		if (choice == NULL || run < best_run ||
		    (run == best_run && load < best_load)) {
			choice = ci;
			best_load = load;
			best_run = run;
		}
	}

	return (choice);
#else
	return (curcpu());
#endif
}

struct cpu_info *
sched_choosecpu(struct proc *p)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *choice = NULL;
	int last_cost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	/*
	 * If pegged to a cpu, don't allow it to move.
	 */
	if (p->p_flag & P_CPUPEG)
		return (p->p_cpu);

	sched_choose++;

	/*
	 * Look at all cpus that are currently idle and have nothing queued;
	 * if there are none, fall back to all cpus and pick the cheapest one.
	 * (idle + queued could mean that the cpu is handling an interrupt
	 * at this moment and hasn't had time to leave idle yet).
	 */
	cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
	cpuset_intersection(&set, &set, &sched_all_cpus);

	/*
	 * First, just check if our current cpu is in that set; if it is,
	 * this is simple.
	 * Also, our cpu might not be idle, but if it is the current cpu,
	 * has nothing else queued and we are curproc, take it.
	 */
	if (cpuset_isset(&set, p->p_cpu) ||
	    (p->p_cpu == curcpu() && p->p_cpu->ci_schedstate.spc_nrun == 0 &&
	    (p->p_cpu->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0 &&
	    curproc == p)) {
		sched_wasidle++;
		return (p->p_cpu);
	}

	if (cpuset_first(&set) == NULL)
		cpuset_copy(&set, &sched_all_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		int cost = sched_proc_to_cpu_cost(ci, p);

		if (choice == NULL || cost < last_cost) {
			choice = ci;
			last_cost = cost;
		}
		cpuset_del(&set, ci);
	}

	if (p->p_cpu != choice)
		sched_nmigrations++;
	else
		sched_nomigrations++;

	return (choice);
#else
	return (curcpu());
#endif
}

/*
 * Attempt to steal a proc from some cpu.
 */
struct proc *
sched_steal_proc(struct cpu_info *self)
{
	struct proc *best = NULL;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int bestcost = INT_MAX;
	struct cpu_info *ci;
	struct cpuset set;

	KASSERT((self->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0);

	cpuset_copy(&set, &sched_queued_cpus);

	while ((ci = cpuset_first(&set)) != NULL) {
		struct proc *p;
		int queue;
		int cost;

		cpuset_del(&set, ci);

		spc = &ci->ci_schedstate;

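		/*
		 * Only the highest-priority non-empty queue on each cpu
		 * is considered for stealing.
		 */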
		queue = ffs(spc->spc_whichqs) - 1;
		TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
			if (p->p_flag & P_CPUPEG)
				continue;

			cost = sched_proc_to_cpu_cost(self, p);

			if (best == NULL || cost < bestcost) {
				best = p;
				bestcost = cost;
			}
		}
	}
	if (best == NULL)
		return (NULL);

	spc = &best->p_cpu->ci_schedstate;
	remrunqueue(best);
	best->p_cpu = self;

	sched_stolen++;
#endif
	return (best);
}

#ifdef MULTIPROCESSOR
/*
 * Base 2 logarithm of an int. Returns 0 for 0 (yes, I know).
 */
static int
log2(unsigned int i)
{
	int ret = 0;

	while (i >>= 1)
		ret++;

	return (ret);
}

/*
 * Calculate the cost of moving the proc to this cpu.
 *
 * What we want is some guesstimate of how much "performance" it will
 * cost us to move the proc here. Not just for caches and TLBs and NUMA
 * memory, but also for the proc itself. A highly loaded cpu might not
 * be the best candidate for this proc since it won't get run.
 *
 * Just total guesstimates for now.
 */

int sched_cost_load = 1;
int sched_cost_priority = 1;
int sched_cost_runnable = 3;
int sched_cost_resident = 1;
#endif

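/*
 * Illustrative numbers only: with the default weights above, moving a
 * proc with p_priority 50 to a busy cpu whose curpriority is 30, with
 * two procs queued and a load average of 1.0, costs roughly
 * (50 - 30) * 1 + 3 + 2 * 3 + 1 = 30, while an idle cpu with an empty
 * queue and no load costs 0.
 */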
int
sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
{
	int cost = 0;
#ifdef MULTIPROCESSOR
	struct schedstate_percpu *spc;
	int l2resident = 0;

	spc = &ci->ci_schedstate;

	/*
	 * First, account for the priority of the proc we want to move.
	 * The lower the priority of the destination and the higher the
	 * priority of the proc, the more willing we are to move it.
	 */
	if (!cpuset_isset(&sched_idle_cpus, ci)) {
		cost += (p->p_priority - spc->spc_curpriority) *
		    sched_cost_priority;
		cost += sched_cost_runnable;
	}
	if (cpuset_isset(&sched_queued_cpus, ci))
		cost += spc->spc_nrun * sched_cost_runnable;

	/*
	 * Higher load on the destination means we don't want to go there.
	 */
	cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);

	/*
	 * If the proc is on this cpu already, lower the cost by how much
	 * it has been running and an estimate of its footprint.
	 */
	if (p->p_cpu == ci && p->p_slptime == 0) {
		l2resident =
		    log2(pmap_resident_count(p->p_vmspace->vm_map.pmap));
		cost -= l2resident * sched_cost_resident;
	}
#endif
	return (cost);
}

/*
 * Peg a proc to a cpu.
 */
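/*
 * The peg stays in effect until the caller clears P_CPUPEG itself,
 * as sched_barrier_task() below does.
 */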
void
sched_peg_curproc(struct cpu_info *ci)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	p->p_cpu = ci;
	atomic_setbits_int(&p->p_flag, P_CPUPEG);
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

#ifdef MULTIPROCESSOR

void
sched_start_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci))
			continue;
		cpuset_add(&sched_all_cpus, ci);
		atomic_clearbits_int(&spc->spc_schedflags,
		    SPCF_SHOULDHALT | SPCF_HALTED);
	}
}

void
sched_stop_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	/*
	 * Make sure we stop the secondary CPUs.
	 */
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (CPU_IS_PRIMARY(ci))
			continue;
		cpuset_del(&sched_all_cpus, ci);
		atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
	}
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;
		struct sleep_state sls;

		if (CPU_IS_PRIMARY(ci))
			continue;
		while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
			sleep_setup(&sls, spc, PZERO, "schedstate");
			sleep_finish(&sls,
			    (spc->spc_schedflags & SPCF_HALTED) == 0);
		}
	}
}

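/*
 * sched_barrier() returns once the target cpu has gone through the
 * scheduler at least once: the helper task below pegs itself to that
 * cpu, sets spc_barrier and wakes up the waiter.
 */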
void
sched_barrier_task(void *arg)
{
	struct cpu_info *ci = arg;

	sched_peg_curproc(ci);
	ci->ci_schedstate.spc_barrier = 1;
	wakeup(&ci->ci_schedstate.spc_barrier);
	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}

void
sched_barrier(struct cpu_info *ci)
{
	struct sleep_state sls;
	struct task task;
	CPU_INFO_ITERATOR cii;
	struct schedstate_percpu *spc;

	if (ci == NULL) {
		CPU_INFO_FOREACH(cii, ci) {
			if (CPU_IS_PRIMARY(ci))
				break;
		}
	}
	KASSERT(ci != NULL);

	if (ci == curcpu())
		return;

	task_set(&task, sched_barrier_task, ci);
	spc = &ci->ci_schedstate;
	spc->spc_barrier = 0;
	task_add(systq, &task);
	while (!spc->spc_barrier) {
		sleep_setup(&sls, &spc->spc_barrier, PWAIT, "sbar");
		sleep_finish(&sls, !spc->spc_barrier);
	}
}

#else

void
sched_barrier(struct cpu_info *ci)
{
}

#endif

/*
 * Functions to manipulate cpu sets.
 */
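/*
 * A cpuset is a bitmap with one bit per cpu, packed into 32-bit words.
 * CPU_INFO_UNIT(ci) selects the bit, and cpuset_infos[] maps a bit
 * position back to its cpu_info pointer.
 */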
struct cpu_info *cpuset_infos[MAXCPUS];
static struct cpuset cpuset_all;

void
cpuset_init_cpu(struct cpu_info *ci)
{
	cpuset_add(&cpuset_all, ci);
	cpuset_infos[CPU_INFO_UNIT(ci)] = ci;
}

void
cpuset_clear(struct cpuset *cs)
{
	memset(cs, 0, sizeof(*cs));
}

void
cpuset_add(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_setbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

void
cpuset_del(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	atomic_clearbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

int
cpuset_isset(struct cpuset *cs, struct cpu_info *ci)
{
	unsigned int num = CPU_INFO_UNIT(ci);
	return (cs->cs_set[num/32] & (1 << (num % 32)));
}

void
cpuset_add_all(struct cpuset *cs)
{
	cpuset_copy(cs, &cpuset_all);
}

void
cpuset_copy(struct cpuset *to, struct cpuset *from)
{
	memcpy(to, from, sizeof(*to));
}

struct cpu_info *
cpuset_first(struct cpuset *cs)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		if (cs->cs_set[i])
			return (cpuset_infos[i * 32 + ffs(cs->cs_set[i]) - 1]);

	return (NULL);
}

void
cpuset_union(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] | b->cs_set[i];
}

void
cpuset_intersection(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = a->cs_set[i] & b->cs_set[i];
}

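/*
 * Note the argument order: "to" receives the members of "b" that are
 * not in "a".
 */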
void
cpuset_complement(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
	int i;

	for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
		to->cs_set[i] = b->cs_set[i] & ~a->cs_set[i];
}
777