/*	$NetBSD: kern_runq.c,v 1.41 2013/11/24 21:58:38 rmind Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.41 2013/11/24 21:58:38 rmind Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/evcnt.h>

/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

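/*
 * Illustrative note on the split above: priorities 0 .. PRI_HIGHEST_TS form
 * the time-sharing band and everything higher is handled as real-time by
 * this file.  Assuming the usual NetBSD values (NPRI_USER = 64, hence
 * MAXPRI_USER = 63), r_ts_queue[] gets 64 entries for priorities 0..63 and
 * r_rt_queue[] gets PRI_COUNT - 64 entries for the rest.  PRI_HTS_RANGE is
 * simply one tenth of the time-sharing range.
 */
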
/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)

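/*
 * Each 32-bit word of the runqueue bitmap covers 32 priority levels:
 * priority "prio" is tracked by bit (BITMAP_MSB >> (prio & BITMAP_MASK))
 * of word (prio >> BITMAP_SHIFT).  A worked example (illustrative only),
 * for prio = 37:
 *
 *	word = 37 >> BITMAP_SHIFT               = 1
 *	bit  = BITMAP_MSB >> (37 & BITMAP_MASK) = 0x80000000U >> 5 = 0x04000000
 *
 * Higher priorities within a word therefore sit closer to the least
 * significant bit, which is what lets sched_dequeue() use ffs() to find
 * the highest runnable priority.
 */
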
/*
 * Structures, runqueue.
 */

const int	schedppq = 1;

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Bitmap */
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
	/* Event counters */
	struct evcnt	r_ev_pull;
	struct evcnt	r_ev_push;
	struct evcnt	r_ev_stay;
	struct evcnt	r_ev_localize;
} runqueue_t;

static void *	sched_getrq(runqueue_t *, const pri_t);
#ifdef MULTIPROCESSOR
static lwp_t *	sched_catchlwp(struct cpu_info *);
static void	sched_balance(void *);
#endif

/*
 * Preemption control.
 */
int		sched_upreempt_pri = 0;
#ifdef __HAVE_PREEMPTION
# ifdef DEBUG
int		sched_kpreempt_pri = 0;
# else
int		sched_kpreempt_pri = PRI_USER_RT;
# endif
#else
int		sched_kpreempt_pri = 1000;
#endif

/*
 * Migration and balancing.
 */
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */
static u_int	balance_period;		/* Balance period */
static struct cpu_info *worker_ci;	/* Victim CPU */
#ifdef MULTIPROCESSOR
static struct callout balance_ch;	/* Callout of balancer */
#endif

void
runq_init(void)
{

	/* Balancing */
	worker_ci = curcpu();
	cacheht_time = mstohz(3);		/*   ~3 ms */
	balance_period = mstohz(300);		/* ~300 ms */

	/* Minimal count of LWPs for catching */
	min_catch = 1;

	/* Initialize balancing callout and run it */
#ifdef MULTIPROCESSOR
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	if (ci->ci_schedstate.spc_lwplock == NULL) {
		ci->ci_schedstate.spc_lwplock =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	}
	if (ci == lwp0.l_cpu) {
		/* Initialize the scheduler structure of the primary LWP */
		lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
	}
	if (ci->ci_schedstate.spc_mutex != NULL) {
		/* Already initialized. */
		return;
	}

	/* Allocate the run queue */
	size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
	rq_ptr = kmem_zalloc(size, KM_SLEEP);
	if (rq_ptr == NULL) {
		panic("sched_cpuattach: could not allocate the runqueue");
	}
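	/*
	 * Note: the runqueue is over-allocated by one coherency_unit and
	 * aligned below, so that each per-CPU runqueue starts on its own
	 * cache line and does not false-share with its neighbours.
	 */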
	ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));

	/* Initialize run queues */
	ci->ci_schedstate.spc_mutex =
	    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);

	ci->ci_schedstate.spc_sched_info = ci_rq;

	evcnt_attach_dynamic(&ci_rq->r_ev_pull, EVCNT_TYPE_MISC, NULL,
	   cpu_name(ci), "runqueue pull");
	evcnt_attach_dynamic(&ci_rq->r_ev_push, EVCNT_TYPE_MISC, NULL,
	   cpu_name(ci), "runqueue push");
	evcnt_attach_dynamic(&ci_rq->r_ev_stay, EVCNT_TYPE_MISC, NULL,
	   cpu_name(ci), "runqueue stay");
	evcnt_attach_dynamic(&ci_rq->r_ev_localize, EVCNT_TYPE_MISC, NULL,
	   cpu_name(ci), "runqueue localize");
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}
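
/*
 * Illustrative mapping, again assuming MAXPRI_USER = 63:
 * sched_getrq(ci_rq, 10) returns &r_ts_queue[10].q_head, while
 * sched_getrq(ci_rq, 70) returns &r_rt_queue[70 - 63 - 1].q_head,
 * i.e. the seventh real-time queue.
 */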

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);
	struct cpu_info *ci;
	int type;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true))
		l->l_rticksum += (hardclock_ticks - l->l_rticks);
	else if (l->l_rticks == 0)
		l->l_rticks = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_pflag & LP_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the cached value of the highest priority in the runqueue,
	 * if the priority of this thread is higher.
	 */
	if (eprio > spc->spc_maxpriority)
		spc->spc_maxpriority = eprio;

	sched_newts(l);

	/*
	 * Wake the chosen CPU or cause a preemption if the newly
	 * enqueued thread has higher priority.  Don't cause a
	 * preemption if the thread is yielding (swtch).
	 */
	if (!swtch && eprio > spc->spc_curpriority) {
		if (eprio >= sched_kpreempt_pri)
			type = RESCHED_KPREEMPT;
		else if (eprio >= sched_upreempt_pri)
			type = RESCHED_IMMED;
		else
			type = RESCHED_LAZY;
		cpu_need_resched(ci, type);
	}
}
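
/*
 * Note on the thresholds used above: with the defaults in this file
 * (sched_upreempt_pri = 0 and, without __HAVE_PREEMPTION,
 * sched_kpreempt_pri = 1000), any wakeup of a higher-priority LWP requests
 * an immediate user preemption, while kernel preemption is effectively
 * never requested unless the kernel is built with __HAVE_PREEMPTION.
 */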

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	struct schedstate_percpu *spc;
	const pri_t eprio = lwp_eprio(l);

	spc = &l->l_cpu->ci_schedstate;
	ci_rq = spc->spc_sched_info;
	KASSERT(lwp_locked(l, spc->spc_mutex));

	KASSERT(eprio <= spc->spc_maxpriority);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	if (spc->spc_migrating == l)
		spc->spc_migrating = NULL;

	ci_rq->r_count--;
	if ((l->l_pflag & LP_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the cached value of the highest priority in the
		 * runqueue, in case this was the last thread in the
		 * highest-priority queue.
		 */
		if (eprio != spc->spc_maxpriority)
			return;

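		/*
		 * Scan the bitmap downwards for the next set bit.  Since
		 * higher priorities sit closer to the LSB of each word,
		 * ffs() yields the highest priority present in a word.
		 * Example (illustrative): r_bitmap[1] = 0x04000001 has
		 * priorities 37 and 63 queued; ffs() == 1, so the new
		 * spc_maxpriority = (1 << 5) + (32 - 1) = 63.
		 */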
		do {
			if (ci_rq->r_bitmap[i] != 0) {
				q = ffs(ci_rq->r_bitmap[i]);
				spc->spc_maxpriority =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If nothing was found, set the lowest value */
		spc->spc_maxpriority = 0;
	}
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Estimate whether the LWP is cache-hot */
static inline bool
lwp_cache_hot(const struct lwp *l)
{

	if (__predict_false(l->l_slptime || l->l_rticks == 0))
		return false;

	return (hardclock_ticks - l->l_rticks <= cacheht_time);
}
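
/*
 * Note: with the default cacheht_time of mstohz(3) set in runq_init(),
 * an LWP counts as cache-hot only if it has not been sleeping and last
 * ran within roughly the past three milliseconds.
 */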

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, struct cpu_info *ci)
{
	const struct schedstate_percpu *spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(__UNCONST(l), NULL));

	/* Is CPU offline? */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		return false;

	/* Is affinity set? */
	if (__predict_false(l->l_affinity))
		return kcpuset_isset(l->l_affinity, cpu_index(ci));

	/* Is there a processor-set? */
	return (spc->spc_psid == l->l_psid);
}

/*
 * Estimate whether the LWP should migrate to another CPU and return the
 * chosen CPU, if migration is needed.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci, *pivot, *next;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq, *ici_rq;
	pri_t eprio, lpri, pri;

	KASSERT(lwp_locked(l, NULL));

	/* If the thread is strictly bound, do not consider other CPUs */
	ci = l->l_cpu;
	if (l->l_pflag & LP_BOUND)
		return ci;

	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* Make sure that the thread is in the appropriate processor-set */
	if (__predict_true(spc->spc_psid == l->l_psid)) {
		/* If this thread's CPU is idle, run there */
		if (ci_rq->r_count == 0) {
			ci_rq->r_ev_stay.ev_count++;
			return ci;
		}
		/* Stay if the thread is cache-hot */
		eprio = lwp_eprio(l);
		if (__predict_true(l->l_stat != LSIDL) &&
		    lwp_cache_hot(l) && eprio >= spc->spc_curpriority) {
			ci_rq->r_ev_stay.ev_count++;
			return ci;
		}
	} else {
		eprio = lwp_eprio(l);
	}

	/* Run on the current CPU if the thread's priority is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci)) {
		ci_rq = spc->spc_sched_info;
		ci_rq->r_ev_localize.ev_count++;
		return ci;
	}

	/*
	 * Look for the CPU with the lowest priority thread.  In case of
	 * equal priority, choose the CPU with the fewest threads.
	 */
	pivot = l->l_cpu;
	ci = pivot;
	tci = pivot;
	lpri = PRI_COUNT;
	do {
		if ((next = cpu_lookup(cpu_index(ci) + 1)) == NULL) {
			/* Reached the end, start from the beginning. */
			next = cpu_lookup(0);
		}
		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = MAX(spc->spc_curpriority, spc->spc_maxpriority);
		if (pri > lpri)
			continue;

		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (!sched_migratable(l, ci))
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	} while (ci = next, ci != pivot);

	ci_rq = tci->ci_schedstate.spc_sched_info;
	ci_rq->r_ev_push.ev_count++;

	return tci;
}

/*
 * Try to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(struct cpu_info *ci)
{
	struct cpu_info *curci = curcpu();
	struct schedstate_percpu *spc, *curspc;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	curspc = &curci->ci_schedstate;
	spc = &ci->ci_schedstate;
	KASSERT(curspc->spc_psid == spc->spc_psid);

	ci_rq = spc->spc_sched_info;
	if (ci_rq->r_mcount < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		/* Check the first and next result from the queue */
		if (l == NULL) {
			break;
		}
		KASSERTMSG(l->l_stat == LSRUN, "%s l %p (%s) l_stat %d",
		    ci->ci_data.cpu_name,
		    l, (l->l_name ? l->l_name : l->l_proc->p_comm), l->l_stat);

		/* Look for threads that are allowed to migrate */
		if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
		    !sched_migratable(l, curci)) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}

		/* Grab the thread, and move it to the local run queue */
		sched_dequeue(l);

		/*
		 * If the LWP is still context-switching, we may need to
		 * spin-wait before changing its CPU.
		 */
		if (__predict_false(l->l_ctxswtch != 0)) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (l->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}
		l->l_cpu = curci;
		ci_rq->r_ev_pull.ev_count++;
		lwp_unlock_to(l, curspc->spc_mutex);
		sched_enqueue(l, false);
		return l;
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the counting locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Update the average count of threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}
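
/*
 * The update above is a simple exponential moving average with a weight
 * of one half: r_avgcount = (r_avgcount + r_mcount) / 2.  As an
 * illustrative example, a CPU whose migratable count sits steadily at 8
 * converges towards that value over a few balance periods, while a
 * one-off spike from 0 to 8 only raises the average to 4.
 */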

/*
 * Called from each CPU's idle loop.
 */
void
sched_idle(void)
{
	struct cpu_info *ci = curcpu(), *tci = NULL;
	struct schedstate_percpu *spc, *tspc;
	runqueue_t *ci_rq;
	bool dlock = false;

	/* Check if there is a migrating LWP */
	spc = &ci->ci_schedstate;
	if (spc->spc_migrating == NULL)
		goto no_migration;

	spc_lock(ci);
	for (;;) {
		struct lwp *l;

		l = spc->spc_migrating;
		if (l == NULL)
			break;

		/*
		 * If this is a second attempt and the target CPU has
		 * changed, drop the old lock.
		 */
		if (dlock == true && tci != l->l_target_cpu) {
			KASSERT(tci != NULL);
			spc_unlock(tci);
			dlock = false;
		}

		/*
		 * Nothing to do if the destination has changed to the
		 * local CPU, or the migration was done by another CPU.
		 */
		tci = l->l_target_cpu;
		if (tci == NULL || tci == ci) {
			spc->spc_migrating = NULL;
			l->l_target_cpu = NULL;
			break;
		}
		tspc = &tci->ci_schedstate;

		/*
		 * Double-lock the runqueues.
		 * We do that only once.
		 */
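		/*
		 * Lock ordering note: the lower-addressed CPU's runqueue
		 * lock is taken first.  If this CPU is the higher one, the
		 * target is only try-locked and we otherwise back off by
		 * dropping our own lock, so two CPUs migrating towards
		 * each other cannot deadlock.
		 */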
		if (dlock == false) {
			dlock = true;
			if (ci < tci) {
				spc_lock(tci);
			} else if (!mutex_tryenter(tspc->spc_mutex)) {
				spc_unlock(ci);
				spc_lock(tci);
				spc_lock(ci);
				/* Check the situation again.. */
				continue;
			}
		}

		/* Migrate the thread */
		KASSERT(l->l_stat == LSRUN);
		spc->spc_migrating = NULL;
		l->l_target_cpu = NULL;
		sched_dequeue(l);
		l->l_cpu = tci;
		lwp_setlock(l, tspc->spc_mutex);
		sched_enqueue(l, false);
		break;
	}
	if (dlock == true) {
		KASSERT(tci != NULL);
		spc_unlock(tci);
	}
	spc_unlock(ci);

no_migration:
	ci_rq = spc->spc_sched_info;
	if ((spc->spc_flags & SPCF_OFFLINE) != 0 || ci_rq->r_count != 0) {
		return;
	}

	/* Reset the counter, and call the balancer */
	ci_rq->r_avgcount = 0;
	sched_balance(ci);
	tci = worker_ci;
	tspc = &tci->ci_schedstate;
	if (ci == tci || spc->spc_psid != tspc->spc_psid)
		return;
	spc_dlock(ci, tci);
	(void)sched_catchlwp(tci);
	spc_unlock(ci);
}

#else

/*
 * stubs for !MULTIPROCESSOR
 */

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_idle(void)
{

}
#endif	/* MULTIPROCESSOR */

/*
 * Scheduling statistics and balancing.
 */
void
sched_lwp_stats(struct lwp *l)
{
	int batch;

	KASSERT(lwp_locked(l, NULL));

	/* Update sleep time */
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound ("batch") if its accumulated run
	 * time exceeds its accumulated sleep time.  If the thread has only
	 * just become CPU-bound (LW_BATCH was not set yet), do not report
	 * it as batch this time around.
	 */
	batch = (l->l_rticksum > l->l_slpticksum);
	if (batch != 0) {
		if ((l->l_flag & LW_BATCH) == 0)
			batch = 0;
		l->l_flag |= LW_BATCH;
	} else
		l->l_flag &= ~LW_BATCH;

	/*
	 * If the thread is CPU-bound and never sleeps, it would occupy the
	 * CPU indefinitely.  In that case reset the last-sleep counter and
	 * check it again later: if it is still zero, pick a migration
	 * target for the thread and clear the batch flag.
	 */
	if (batch && (l->l_slptime + l->l_slpticksum) == 0) {
		if (l->l_slpticks == 0) {
			if (l->l_target_cpu == NULL &&
			    (l->l_stat == LSRUN || l->l_stat == LSONPROC)) {
				struct cpu_info *ci = sched_takecpu(l);
				l->l_target_cpu = (ci != l->l_cpu) ? ci : NULL;
			}
			l->l_flag &= ~LW_BATCH;
		} else {
			l->l_slpticks = 0;
		}
	}

	/* Reset the time sums */
	l->l_slpticksum = 0;
	l->l_rticksum = 0;

	/* Scheduler-specific hook */
	sched_pstats_hook(l, batch);
}

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	/* Return to the idle LWP if there is a migrating thread */
	spc = &ci->ci_schedstate;
	if (__predict_false(spc->spc_migrating != NULL))
		return NULL;
	ci_rq = spc->spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (__predict_false(ci_rq->r_count == 0)) {
		struct schedstate_percpu *cspc;
		struct cpu_info *cci;

		/* However, offline CPUs should not do this */
		if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
			return NULL;

		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);
		cci = worker_ci;
		cspc = &cci->ci_schedstate;
		if (ci == cci || spc->spc_psid != cspc->spc_psid ||
		    !mutex_tryenter(cci->ci_schedstate.spc_mutex))
			return NULL;
		return sched_catchlwp(cci);
	}
#else
	if (__predict_false(ci_rq->r_count == 0))
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	sched_oncpu(l);
	l->l_rticks = hardclock_ticks;

	return l;
}
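
/*
 * Note: thanks to the spc_maxpriority hint and the priority bitmap kept
 * up to date by sched_enqueue()/sched_dequeue(), picking the next LWP
 * above requires no scanning of the queues.
 */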

/*
 * sched_curcpu_runnable_p: return true if curcpu() should exit the idle loop.
 */

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci;
	const struct schedstate_percpu *spc;
	const runqueue_t *ci_rq;
	bool rv;

	kpreempt_disable();
	ci = curcpu();
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints) {
		kpreempt_enable();
		return true;
	}
#endif

	rv = (ci_rq->r_count != 0) ? true : false;
	kpreempt_enable();

	return rv;
}

/*
 * Sysctl nodes and initialization.
 */

SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "timesoftints",
		SYSCTL_DESCR("Track CPU time for soft interrupts"),
		NULL, 0, &softint_timing, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "kpreempt_pri",
		SYSCTL_DESCR("Minimum priority to trigger kernel preemption"),
		NULL, 0, &sched_kpreempt_pri, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "upreempt_pri",
		SYSCTL_DESCR("Minimum priority to trigger user preemption"),
		NULL, 0, &sched_upreempt_pri, 0,
		CTL_CREATE, CTL_EOL);
}
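
/*
 * The tunables above appear under "kern.sched".  A hypothetical session
 * (illustrative only; the values shown depend on HZ and configuration)
 * might look like:
 *
 *	# sysctl kern.sched.balance_period
 *	kern.sched.balance_period = 30
 *	# sysctl -w kern.sched.min_catch=4
 *	kern.sched.min_catch: 1 -> 4
 */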

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	struct cpu_info *ci, *tci;
	struct schedstate_percpu *spc;
	struct lwp *l;
	struct proc *p;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		int i;

		spc = &ci->ci_schedstate;
		ci_rq = spc->spc_sched_info;

		(*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
		(*pr)(" pid.lid = %d.%d, r_count = %u, r_avgcount = %u, "
		    "maxpri = %d, mlwp = %p\n",
#ifdef MULTIPROCESSOR
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
#else
		    curlwp->l_proc->p_pid, curlwp->l_lid,
#endif
		    ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority,
		    spc->spc_migrating);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %18s %4s %4s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "TCI", "LRTICKS");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			ci = l->l_cpu;
			tci = l->l_target_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %4d %u\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    l, ci->ci_index, (tci ? tci->ci_index : -1),
			    (u_int)(hardclock_ticks - l->l_rticks));
		}
	}
}

#endif