/*	$NetBSD: kern_runq.c,v 1.16 2008/05/30 12:18:14 ad Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.16 2008/05/30 12:18:14 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/evcnt.h>

/*
 * Priority related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
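
/*
 * A sketch of the bitmap encoding (derived from the code below): a
 * priority maps to word (prio >> BITMAP_SHIFT) and to the bit mask
 * (BITMAP_MSB >> (prio & BITMAP_MASK)), so the MSB of each word holds
 * the lowest-numbered priority of that word.  For example, priority 37
 * lands in word 1, five bits below the MSB.
 */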

/*
 * Structures, runqueue.
 */

const int	schedppq = 1;

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

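/*
 * The per-CPU runqueue.  Note: sched_cpuattach() rounds the allocation
 * up to coherency_unit and aligns the structure on a cache line, so
 * the runqueues of different CPUs do not share cache lines.
 */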
typedef struct {
	/* Lock and bitmap */
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
	/* Event counters */
	struct evcnt	r_ev_pull;
	struct evcnt	r_ev_push;
	struct evcnt	r_ev_stay;
	struct evcnt	r_ev_localize;
} runqueue_t;

static void *	sched_getrq(runqueue_t *, const pri_t);
#ifdef MULTIPROCESSOR
static lwp_t	*sched_catchlwp(struct cpu_info *);
static void	sched_balance(void *);
#endif

/*
 * Preemption control.
 */
int		sched_upreempt_pri = PRI_KERNEL;
#if defined(__HAVE_PREEMPTION)
int		sched_kpreempt_pri = PRI_USER_RT;
#else
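/* Note: 1000 is above any valid priority, so kernel preemption never fires. */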
int		sched_kpreempt_pri = 1000;
#endif

/*
 * Migration and balancing.
 */
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */
static u_int	balance_period;		/* Balance period */
static struct cpu_info *worker_ci;	/* Victim CPU */
#ifdef MULTIPROCESSOR
static struct callout balance_ch;	/* Callout of balancer */
#endif

void
runq_init(void)
{

	/* Balancing */
	worker_ci = curcpu();
	cacheht_time = mstohz(3);		/* ~3 ms */
	balance_period = mstohz(300);		/* ~300 ms */

	/* Minimal count of LWPs for catching */
	min_catch = 1;

	/* Initialize balancing callout and run it */
#ifdef MULTIPROCESSOR
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;
	char *cpuname;

	if (ci->ci_schedstate.spc_lwplock == NULL) {
		ci->ci_schedstate.spc_lwplock =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	}
	if (ci == lwp0.l_cpu) {
		/* Initialize the scheduler structure of the primary LWP */
		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
	}
	if (ci->ci_schedstate.spc_mutex != NULL) {
		/* Already initialized. */
		return;
	}

	/* Allocate the run queue */
	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
	rq_ptr = kmem_zalloc(size, KM_SLEEP);
	if (rq_ptr == NULL) {
		panic("sched_cpuattach: could not allocate the runqueue");
	}
	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));

	/* Initialize run queues */
	ci->ci_schedstate.spc_mutex =
	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);

	ci->ci_schedstate.spc_sched_info = ci_rq;

	cpuname = kmem_alloc(8, KM_SLEEP);
	snprintf(cpuname, 8, "cpu%d", cpu_index(ci));

	evcnt_attach_dynamic(&ci_rq->r_ev_pull, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue pull");
	evcnt_attach_dynamic(&ci_rq->r_ev_push, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue push");
	evcnt_attach_dynamic(&ci_rq->r_ev_stay, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue stay");
	evcnt_attach_dynamic(&ci_rq->r_ev_localize, EVCNT_TYPE_MISC, NULL,
	   cpuname, "runqueue localize");
}

/*
 * Control of the runqueue.
 */

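/*
 * sched_getrq: map an effective priority to its queue head.  The
 * time-shared range (0 .. PRI_HIGHEST_TS) indexes r_ts_queue directly;
 * real-time priorities index r_rt_queue relative to PRI_HIGHEST_TS + 1.
 */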
static void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);
	struct cpu_info *ci;
	int type;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true))
		l->l_rticksum += (hardclock_ticks - l->l_rticks);
	else if (l->l_rticks == 0)
		l->l_rticks = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_pflag & LP_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of highest priority in the runqueue,
	 * if the priority of this thread is higher.
	 */
	if (eprio > spc->spc_maxpriority)
		spc->spc_maxpriority = eprio;

	sched_newts(l);

	/*
	 * Wake the chosen CPU or cause a preemption if the newly
	 * enqueued thread has higher priority.  Don't cause a
	 * preemption if the thread is yielding (swtch).
	 */
	if (!swtch && eprio > spc->spc_curpriority) {
		if (eprio >= sched_kpreempt_pri)
			type = RESCHED_KPREEMPT;
		else if (eprio >= sched_upreempt_pri)
			type = RESCHED_IMMED;
		else
			type = 0;
		cpu_need_resched(ci, type);
	}
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	struct schedstate_percpu *spc;
	const pri_t eprio = lwp_eprio(l);

	spc = &l->l_cpu->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	KASSERT(lwp_locked(l, spc->spc_mutex));

	KASSERT(eprio <= spc->spc_maxpriority);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	if (spc->spc_migrating == l)
		spc->spc_migrating = NULL;

	ci_rq->r_count--;
	if ((l->l_pflag & LP_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of highest priority in the runqueue, in
		 * case this was the last thread in the queue of highest
		 * priority.
		 */
		if (eprio != spc->spc_maxpriority)
			return;

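		/*
		 * Scan the bitmap words downwards, starting from the word
		 * that held eprio.  ffs() returns the 1-based index of the
		 * least significant set bit, which in this encoding is the
		 * highest priority present in the word, hence the
		 * (BITMAP_BITS - q) conversion.
		 */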
		do {
			if (ci_rq->r_bitmap[i] != 0) {
				q = ffs(ci_rq->r_bitmap[i]);
				spc->spc_maxpriority =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If nothing was found, fall back to the lowest value */
		spc->spc_maxpriority = 0;
	}
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/*
 * Estimate whether the LWP is cache-hot on its CPU: it has run
 * recently and has not been sleeping since.
 */
static inline bool
lwp_cache_hot(const struct lwp *l)
{

	if (l->l_slptime || l->l_rticks == 0)
		return false;

	return (hardclock_ticks - l->l_rticks <= cacheht_time);
}

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, struct cpu_info *ci)
{
	const struct schedstate_percpu *spc = &ci->ci_schedstate;

	/* CPU is offline */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		return false;

	/* Affinity bind */
	if (__predict_false(l->l_flag & LW_AFFINITY))
		return CPU_ISSET(cpu_index(ci), &l->l_affinity);

	/* Processor-set */
	return (spc->spc_psid == l->l_psid);
}

/*
 * Estimate whether the LWP should migrate to another CPU; return the
 * CPU to take it, if migration is needed.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci, *first, *next;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq, *ici_rq;
	pri_t eprio, lpri, pri;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/*
	 * If the thread is strictly bound, do not consider other CPUs.
	 * If the CPU of this thread is idling, run there.
	 */
	if ((l->l_pflag & LP_BOUND) != 0 || ci_rq->r_count == 0) {
		ci_rq->r_ev_stay.ev_count++;
		return ci;
	}

	/* Stay if the thread is cache-hot. */
	eprio = lwp_eprio(l);
	if (__predict_true(l->l_stat != LSIDL) &&
	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority) {
		ci_rq->r_ev_stay.ev_count++;
		return ci;
	}

	/* Run on the current CPU if the priority of the thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci)) {
		ci_rq->r_ev_localize.ev_count++;
		return ci;
	}

	/*
	 * Look for the CPU with the lowest priority thread.  In case of
	 * equal priority, choose the CPU with the fewest threads.
	 */
	first = l->l_cpu;
	ci = first;
	tci = first;
	lpri = PRI_COUNT;
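	/* Note: lpri == PRI_COUNT acts as a sentinel above any valid priority. */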
	do {
		next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, spc->spc_maxpriority);
		if (pri > lpri)
			continue;

		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (!sched_migratable(l, ci))
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	} while (ci = next, ci != first);

	ci_rq = tci->ci_schedstate.spc_sched_info;
	ci_rq->r_ev_push.ev_count++;

	return tci;
}

/*
 * Try to catch an LWP from the runqueue of another CPU.
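 * => Called with both the local and remote runqueue locks held; the
 *    remote lock is always released before returning.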
 */
static struct lwp *
sched_catchlwp(struct cpu_info *ci)
{
	struct cpu_info *curci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	if (ci_rq->r_mcount < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		/* Check the first and next result from the queue */
		if (l == NULL)
			break;
		KASSERT(l->l_stat == LSRUN);
		KASSERT(l->l_flag & LW_INMEM);

		/* Look only for threads that are allowed to migrate */
		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
		    !sched_migratable(l, curci)) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}

		/* Grab the thread, and move it to the local run queue */
		sched_dequeue(l);
		l->l_cpu = curci;
		ci_rq->r_ev_pull.ev_count++;
		lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
		sched_enqueue(l, false);
		return l;
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the counting locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Decaying average of migratable threads (weight 1/2) */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}

/*
 * Called from each CPU's idle loop.
 */
void
sched_idle(void)
{
	struct cpu_info *ci = curcpu(), *tci = NULL;
	struct schedstate_percpu *spc, *tspc;
	runqueue_t *ci_rq;
	bool dlock = false;

	/* Check if there is a migrating LWP */
	spc = &ci->ci_schedstate;
	if (spc->spc_migrating == NULL)
		goto no_migration;

	spc_lock(ci);
	for (;;) {
		struct lwp *l;

		l = spc->spc_migrating;
		if (l == NULL)
			break;

		/*
		 * If this is a second attempt and the target CPU has
		 * changed, drop the old lock.
		 */
		if (dlock == true && tci != l->l_target_cpu) {
			KASSERT(tci != NULL);
			spc_unlock(tci);
			dlock = false;
		}

		/*
		 * Nothing to do if the destination has changed to the
		 * local CPU, or the migration was done by another CPU.
		 */
		tci = l->l_target_cpu;
		if (tci == NULL || tci == ci) {
			spc->spc_migrating = NULL;
			l->l_target_cpu = NULL;
			break;
		}
		tspc = &tci->ci_schedstate;

		/*
		 * Double-lock the runqueues.
		 * We do that only once.
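		 * Note: the locks are always taken in ascending address
		 * order of the CPUs; if we already hold the higher one,
		 * we may only try-lock, or back off and relock in order.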
		 */
		if (dlock == false) {
			dlock = true;
			if (ci < tci) {
				spc_lock(tci);
			} else if (!mutex_tryenter(tspc->spc_mutex)) {
				spc_unlock(ci);
				spc_lock(tci);
				spc_lock(ci);
				/* Check the situation again. */
				continue;
			}
		}

		/* Migrate the thread */
		KASSERT(l->l_stat == LSRUN);
		spc->spc_migrating = NULL;
		l->l_target_cpu = NULL;
		sched_dequeue(l);
		l->l_cpu = tci;
		lwp_setlock(l, tspc->spc_mutex);
		sched_enqueue(l, false);
		break;
	}
	if (dlock == true) {
		KASSERT(tci != NULL);
		spc_unlock(tci);
	}
	spc_unlock(ci);

no_migration:
	ci_rq = spc->spc_sched_info;
	if ((spc->spc_flags & SPCF_OFFLINE) != 0 || ci_rq->r_count != 0) {
		return;
	}

	/* Reset the counter, and call the balancer */
	ci_rq->r_avgcount = 0;
	sched_balance(ci);
	tci = worker_ci;
	if (ci == tci)
		return;
	spc_dlock(ci, tci);
	(void)sched_catchlwp(tci);
	spc_unlock(ci);
}

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_idle(void)
{

}
#endif	/* MULTIPROCESSOR */

/*
 * Scheduling statistics and balancing.
 */
void
sched_lwp_stats(struct lwp *l)
{
	int batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound if its accumulated run time exceeds
	 * its accumulated sleep time.  Note that "batch" stays set only if
	 * the thread was already marked, i.e. this is not the first time
	 * it has appeared CPU-bound.
	 */
	batch = (l->l_rticksum > l->l_slpticksum);
	if (batch != 0) {
		if ((l->l_flag & LW_BATCH) == 0)
			batch = 0;
		l->l_flag |= LW_BATCH;
	} else
		l->l_flag &= ~LW_BATCH;

	/*
	 * A CPU-bound thread that never sleeps would occupy the CPU
	 * indefinitely.  In that case reset the last-sleep counter and
	 * check it again later: if it is still zero, pick a migration
	 * target and unmark the batch flag.
	 */
	if (batch && (l->l_slptime + l->l_slpticksum) == 0) {
		if (l->l_slpticks == 0) {
			if (l->l_target_cpu == NULL &&
			    (l->l_stat == LSRUN || l->l_stat == LSONPROC)) {
				struct cpu_info *ci = sched_takecpu(l);
				l->l_target_cpu = (ci != l->l_cpu) ? ci : NULL;
			}
			l->l_flag &= ~LW_BATCH;
		} else {
			l->l_slpticks = 0;
		}
	}

	/* Reset the time sums */
	l->l_slpticksum = 0;
	l->l_rticksum = 0;

	/* Scheduler-specific hook */
	sched_pstats_hook(l, batch);
}

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	/* Return to the idle LWP if there is a migrating thread */
	spc = &ci->ci_schedstate;
	if (__predict_false(spc->spc_migrating != NULL))
		return NULL;
	ci_rq = spc->spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		struct cpu_info *cci;
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);
		cci = worker_ci;
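		/*
		 * Try-lock the victim's runqueue: the local runqueue lock
		 * is held here, so blocking on the remote lock could
		 * deadlock against a CPU attempting the converse.
		 */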
		if (ci == cci || !mutex_tryenter(cci->ci_schedstate.spc_mutex))
			return NULL;
		return sched_catchlwp(cci);
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	sched_oncpu(l);
	l->l_rticks = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci;
	const struct schedstate_percpu *spc;
	const runqueue_t *ci_rq;
	bool rv;

	kpreempt_disable();
	ci = curcpu();
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints) {
		kpreempt_enable();
		return true;
	}
#endif

	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		rv = (ci_rq->r_count - ci_rq->r_mcount) != 0;
	else
		rv = ci_rq->r_count != 0;
	kpreempt_enable();

	return rv;
}

/*
 * Sysctl nodes and initialization.
 */

SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "kpreempt_pri",
		SYSCTL_DESCR("Minimum priority to trigger kernel preemption"),
		NULL, 0, &sched_kpreempt_pri, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "upreempt_pri",
		SYSCTL_DESCR("Minimum priority to trigger user preemption"),
		NULL, 0, &sched_upreempt_pri, 0,
		CTL_CREATE, CTL_EOL);
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...)
    __attribute__((__format__(__printf__,1,2))))
{
	runqueue_t *ci_rq;
	struct cpu_info *ci, *tci;
	struct schedstate_percpu *spc;
	struct lwp *l;
	struct proc *p;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		int i;

		spc = &ci->ci_schedstate;
		ci_rq = spc->spc_sched_info;

		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
		(*pr)(" pid.lid = %d.%d, r_count = %u, r_avgcount = %u, "
		    "maxpri = %d, mchain = %p\n",
#ifdef MULTIPROCESSOR
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
#else
		    curlwp->l_proc->p_pid, curlwp->l_lid,
#endif
		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority,
		    spc->spc_migrating);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %4s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "TCI", "LRTICKS");

	PROCLIST_FOREACH(p, &allproc) {
		if ((p->p_flag & PK_MARKER) != 0)
			continue;
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			ci = l->l_cpu;
			tci = l->l_target_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %4d %u\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    l, ci->ci_index, (tci ? tci->ci_index : -1),
			    (u_int)(hardclock_ticks - l->l_rticks));
		}
	}
}

#endif
885