/*	$NetBSD: kern_runq.c,v 1.23 2008/12/02 17:57:32 ad Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.23 2008/12/02 17:57:32 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/evcnt.h>

/*
 * Priority related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)

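/*
 * Each runqueue keeps a bitmap with one bit per priority level, set
 * while the corresponding queue is non-empty.  Priorities are stored
 * MSB-first within each 32-bit word, so ffs() on a word locates the
 * highest runnable priority present in it.
 */
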
/*
 * Structures, runqueue.
 */

const int	schedppq = 1;

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
	/* Event counters */
	struct evcnt	r_ev_pull;
	struct evcnt	r_ev_push;
	struct evcnt	r_ev_stay;
	struct evcnt	r_ev_localize;
} runqueue_t;
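
/*
 * One runqueue is attached to each CPU (spc_sched_info) and is
 * protected by that CPU's spc_mutex.
 */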

static void *	sched_getrq(runqueue_t *, const pri_t);
#ifdef MULTIPROCESSOR
static lwp_t	*sched_catchlwp(struct cpu_info *);
static void	sched_balance(void *);
#endif

/*
 * Preemption control.
 */
int		sched_upreempt_pri = PRI_KERNEL;
#if defined(__HAVE_PREEMPTION)
int		sched_kpreempt_pri = PRI_USER_RT;
#else
int		sched_kpreempt_pri = 1000;	/* no priority is ever this high */
#endif

/*
 * Migration and balancing.
 */
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */
static u_int	balance_period;		/* Balance period */
static struct cpu_info *worker_ci;	/* Victim CPU */
#ifdef MULTIPROCESSOR
static struct callout balance_ch;	/* Callout of balancer */
#endif

void
runq_init(void)
{

	/* Balancing */
	worker_ci = curcpu();
	cacheht_time = mstohz(3);		/*   ~3 ms */
	balance_period = mstohz(300);		/* ~300 ms */

	/* Minimal count of LWPs for catching */
	min_catch = 1;

	/* Initialize balancing callout and run it */
#ifdef MULTIPROCESSOR
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;
	char *cpuname;

	if (ci->ci_schedstate.spc_lwplock == NULL) {
		ci->ci_schedstate.spc_lwplock =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	}
	if (ci == lwp0.l_cpu) {
		/* Initialize the scheduler structure of the primary LWP */
		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
	}
	if (ci->ci_schedstate.spc_mutex != NULL) {
		/* Already initialized. */
		return;
	}

	/* Allocate the run queue, aligned to a cache line (coherency_unit) */
	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
	rq_ptr = kmem_zalloc(size, KM_SLEEP);
	if (rq_ptr == NULL) {
		panic("sched_cpuattach: could not allocate the runqueue");
	}
	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));

	/* Initialize run queues */
	ci->ci_schedstate.spc_mutex =
	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);

	ci->ci_schedstate.spc_sched_info = ci_rq;

	cpuname = kmem_alloc(8, KM_SLEEP);
	snprintf(cpuname, 8, "cpu%d", cpu_index(ci));

	evcnt_attach_dynamic(&ci_rq->r_ev_pull, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue pull");
	evcnt_attach_dynamic(&ci_rq->r_ev_push, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue push");
	evcnt_attach_dynamic(&ci_rq->r_ev_stay, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue stay");
	evcnt_attach_dynamic(&ci_rq->r_ev_localize, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue localize");
}

/*
 * Control of the runqueue.
 */

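/*
 * Map a priority to its queue head: time-sharing (user) priorities up
 * to PRI_HIGHEST_TS index r_ts_queue, anything above indexes the
 * real-time queues in r_rt_queue.
 */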
static void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);
	struct cpu_info *ci;
	int type;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true))
		l->l_rticksum += (hardclock_ticks - l->l_rticks);
	else if (l->l_rticks == 0)
		l->l_rticks = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_pflag & LP_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the cached highest priority of the runqueue if this
	 * thread's priority is higher.
	 */
	if (eprio > spc->spc_maxpriority)
		spc->spc_maxpriority = eprio;

	sched_newts(l);

	/*
	 * Wake the chosen CPU or cause a preemption if the newly
	 * enqueued thread has higher priority.  Don't cause a
	 * preemption if the thread is merely yielding (swtch is true).
	 */
	if (!swtch && eprio > spc->spc_curpriority) {
		if (eprio >= sched_kpreempt_pri)
			type = RESCHED_KPREEMPT;
		else if (eprio >= sched_upreempt_pri)
			type = RESCHED_IMMED;
		else
			type = RESCHED_LAZY;
		cpu_need_resched(ci, type);
	}
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	struct schedstate_percpu *spc;
	const pri_t eprio = lwp_eprio(l);

	spc = &l->l_cpu->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	KASSERT(lwp_locked(l, spc->spc_mutex));

	KASSERT(eprio <= spc->spc_maxpriority);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	if (spc->spc_migrating == l)
		spc->spc_migrating = NULL;

	ci_rq->r_count--;
	if ((l->l_pflag & LP_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the cached highest priority of the runqueue, in
		 * case this was the last thread in the highest priority
		 * queue.
		 */
		if (eprio != spc->spc_maxpriority)
			return;

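		/*
		 * Scan the bitmap words downwards from the current one.
		 * Priorities are stored MSB-first, so ffs() on a word
		 * yields the highest priority still present in it.
		 */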
		do {
			if (ci_rq->r_bitmap[i] != 0) {
				q = ffs(ci_rq->r_bitmap[i]);
				spc->spc_maxpriority =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If not found, set the lowest value */
		spc->spc_maxpriority = 0;
	}
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Estimate if LWP is cache-hot */
static inline bool
lwp_cache_hot(const struct lwp *l)
{

	if (l->l_slptime || l->l_rticks == 0)
		return false;

	return (hardclock_ticks - l->l_rticks <= cacheht_time);
}

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, struct cpu_info *ci)
{
	const struct schedstate_percpu *spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(__UNCONST(l), NULL));

	/* CPU is offline */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		return false;

	/* Affinity bind */
	if (__predict_false(l->l_flag & LW_AFFINITY))
		return kcpuset_isset(cpu_index(ci), l->l_affinity);

	/* Processor-set */
	return (spc->spc_psid == l->l_psid);
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Choose and return the target CPU if migration is needed.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci, *first, *next;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq, *ici_rq;
	pri_t eprio, lpri, pri;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/*
	 * If the thread is strictly bound, do not consider other CPUs.
	 * If this thread's CPU is idle, run there.
	 */
	if ((l->l_pflag & LP_BOUND) != 0 || ci_rq->r_count == 0) {
		ci_rq->r_ev_stay.ev_count++;
		return ci;
	}

	/* Stay if the thread is cache-hot. */
	eprio = lwp_eprio(l);
	if (__predict_true(l->l_stat != LSIDL) &&
	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority) {
		ci_rq->r_ev_stay.ev_count++;
		return ci;
	}

	/* Run on the current CPU if the thread's priority is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci)) {
		ci_rq->r_ev_localize.ev_count++;
		return ci;
	}

	/*
	 * Look for the CPU with the lowest priority thread.  In case of
	 * equal priority, choose the CPU with the fewest threads.
	 */
	first = l->l_cpu;
	ci = first;
	tci = first;
	lpri = PRI_COUNT;
	do {
		next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, spc->spc_maxpriority);
		if (pri > lpri)
			continue;

		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (!sched_migratable(l, ci))
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	} while (ci = next, ci != first);

	ci_rq = tci->ci_schedstate.spc_sched_info;
	ci_rq->r_ev_push.ev_count++;

	return tci;
}

/*
 * Try to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(struct cpu_info *ci)
{
	struct cpu_info *curci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	if (ci_rq->r_mcount < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		/* Check the first and then each next entry in the queue */
		if (l == NULL)
			break;
		KASSERT(l->l_stat == LSRUN);
		KASSERT(l->l_flag & LW_INMEM);

		/* Look for threads that are allowed to migrate */
		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
		    !sched_migratable(l, curci)) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}

		/* Grab the thread, and move it to the local run queue */
		sched_dequeue(l);
		l->l_cpu = curci;
		ci_rq->r_ev_pull.ev_count++;
		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
		sched_enqueue(l, false);
		return l;
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Make the counts without taking any locks */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Average count of the migratable threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}

/*
 * Called from each CPU's idle loop: finish any pending LWP migration,
 * then try to pull some work from another CPU if our runqueue is empty.
 */
void
sched_idle(void)
{
	struct cpu_info *ci = curcpu(), *tci = NULL;
	struct schedstate_percpu *spc, *tspc;
	runqueue_t *ci_rq;
	bool dlock = false;

	/* Check if there is a migrating LWP */
	spc = &ci->ci_schedstate;
	if (spc->spc_migrating == NULL)
		goto no_migration;

	spc_lock(ci);
	for (;;) {
		struct lwp *l;

		l = spc->spc_migrating;
		if (l == NULL)
			break;

		/*
		 * If this is a second attempt and the target CPU has
		 * changed, drop the old lock.
		 */
		if (dlock == true && tci != l->l_target_cpu) {
			KASSERT(tci != NULL);
			spc_unlock(tci);
			dlock = false;
		}

		/*
		 * Nothing to do if the destination has changed to the
		 * local CPU, or the migration was done by another CPU.
		 */
		tci = l->l_target_cpu;
		if (tci == NULL || tci == ci) {
			spc->spc_migrating = NULL;
			l->l_target_cpu = NULL;
			break;
		}
		tspc = &tci->ci_schedstate;

		/*
		 * Double-lock the runqueues.
		 * We do that only once.
		 */
		if (dlock == false) {
			dlock = true;
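			/*
			 * Locks must be taken in ascending address order
			 * to avoid a deadlock: if the target sorts above
			 * us, take its lock directly; otherwise try-lock
			 * and, on failure, drop our lock and re-take both
			 * in the proper order.
			 */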
			if (ci < tci) {
				spc_lock(tci);
			} else if (!mutex_tryenter(tspc->spc_mutex)) {
				spc_unlock(ci);
				spc_lock(tci);
				spc_lock(ci);
				/* Check the situation again. */
				continue;
			}
		}

		/* Migrate the thread */
		KASSERT(l->l_stat == LSRUN);
		spc->spc_migrating = NULL;
		l->l_target_cpu = NULL;
		sched_dequeue(l);
		l->l_cpu = tci;
		lwp_setlock(l, tspc->spc_mutex);
		sched_enqueue(l, false);
		break;
	}
	if (dlock == true) {
		KASSERT(tci != NULL);
		spc_unlock(tci);
	}
	spc_unlock(ci);

no_migration:
	ci_rq = spc->spc_sched_info;
	if ((spc->spc_flags & SPCF_OFFLINE) != 0 || ci_rq->r_count != 0) {
		return;
	}

	/* Reset the counter, and call the balancer */
	ci_rq->r_avgcount = 0;
	sched_balance(ci);
	tci = worker_ci;
	if (ci == tci)
		return;
	spc_dlock(ci, tci);
	(void)sched_catchlwp(tci);
	spc_unlock(ci);
}

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_idle(void)
{

}
#endif	/* MULTIPROCESSOR */

/*
 * Scheduling statistics and balancing.
 */
void
sched_lwp_stats(struct lwp *l)
{
	int batch;

	KASSERT(lwp_locked(l, NULL));

	/* Update sleep time */
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Consider the thread CPU-bound if its run time exceeds its sleep
	 * time.  The LW_BATCH flag records this; "batch" stays set only if
	 * the thread already looked CPU-bound on the previous check too.
	 */
	batch = (l->l_rticksum > l->l_slpticksum);
	if (batch != 0) {
		if ((l->l_flag & LW_BATCH) == 0)
			batch = 0;
		l->l_flag |= LW_BATCH;
	} else
		l->l_flag &= ~LW_BATCH;

	/*
	 * If the thread is CPU-bound and never sleeps, it would occupy the
	 * CPU.  In such a case reset the value of the last sleep and check
	 * it later: if it is still zero, set up a migration and clear the
	 * batch flag.
	 */
	if (batch && (l->l_slptime + l->l_slpticksum) == 0) {
		if (l->l_slpticks == 0) {
			if (l->l_target_cpu == NULL &&
			    (l->l_stat == LSRUN || l->l_stat == LSONPROC)) {
				struct cpu_info *ci = sched_takecpu(l);
				l->l_target_cpu = (ci != l->l_cpu) ? ci : NULL;
			}
			l->l_flag &= ~LW_BATCH;
		} else {
			l->l_slpticks = 0;
		}
	}

	/* Reset the time sums */
	l->l_slpticksum = 0;
	l->l_rticksum = 0;

	/* Scheduler-specific hook */
	sched_pstats_hook(l, batch);
}

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	/* Return to the idle LWP if there is a migrating thread */
	spc = &ci->ci_schedstate;
	if (__predict_false(spc->spc_migrating != NULL))
		return NULL;
	ci_rq = spc->spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (__predict_false(ci_rq->r_count == 0)) {
		struct cpu_info *cci;

		/* Offline CPUs should not perform this, however */
		if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
			return NULL;

		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);
		cci = worker_ci;
		if (ci == cci || !mutex_tryenter(cci->ci_schedstate.spc_mutex))
			return NULL;
		return sched_catchlwp(cci);
	}
#else
	if (__predict_false(ci_rq->r_count == 0))
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	sched_oncpu(l);
	l->l_rticks = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci;
	const struct schedstate_percpu *spc;
	const runqueue_t *ci_rq;
	bool rv;

	kpreempt_disable();
	ci = curcpu();
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints) {
		kpreempt_enable();
		return true;
	}
#endif

	rv = (ci_rq->r_count != 0);
	kpreempt_enable();

	return rv;
}

/*
 * Sysctl nodes and initialization.
 */

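/*
 * The tunables below appear under the "kern.sched" sysctl node and can
 * be changed at run time, e.g.: sysctl -w kern.sched.min_catch=2
 */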
SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "kpreempt_pri",
		SYSCTL_DESCR("Minimum priority to trigger kernel preemption"),
		NULL, 0, &sched_kpreempt_pri, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "upreempt_pri",
		SYSCTL_DESCR("Minimum priority to trigger user preemption"),
		NULL, 0, &sched_upreempt_pri, 0,
		CTL_CREATE, CTL_EOL);
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...)
    __attribute__((__format__(__printf__,1,2))))
{
	runqueue_t *ci_rq;
	struct cpu_info *ci, *tci;
	struct schedstate_percpu *spc;
	struct lwp *l;
	struct proc *p;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		int i;

		spc = &ci->ci_schedstate;
		ci_rq = spc->spc_sched_info;

		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
		(*pr)(" pid.lid = %d.%d, r_count = %u, r_avgcount = %u, "
		    "maxpri = %d, mlwp = %p\n",
#ifdef MULTIPROCESSOR
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
#else
		    curlwp->l_proc->p_pid, curlwp->l_lid,
#endif
		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority,
		    spc->spc_migrating);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %4s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "TCI", "LRTICKS");

	PROCLIST_FOREACH(p, &allproc) {
		if ((p->p_flag & PK_MARKER) != 0)
			continue;
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			ci = l->l_cpu;
			tci = l->l_target_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %4d %u\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    l, ci->ci_index, (tci ? tci->ci_index : -1),
			    (u_int)(hardclock_ticks - l->l_rticks));
		}
	}
}

#endif