xref: /dflybsd-src/sys/kern/usched_dfly.c (revision 872a09d51adf63b4bdae6adb1d96a53f76e161e2)
1 /*
2  * Copyright (c) 2012-2017 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 
52 #include <sys/ktr.h>
53 
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
56 
57 /*
58  * Priorities.  Note that with 32 run queues per scheduler each queue
59  * represents four priority levels.
60  */
61 
62 int dfly_rebalanced;
63 
64 #define MAXPRI			128
65 #define PRIMASK			(MAXPRI - 1)
66 #define PRIBASE_REALTIME	0
67 #define PRIBASE_NORMAL		MAXPRI
68 #define PRIBASE_IDLE		(MAXPRI * 2)
69 #define PRIBASE_THREAD		(MAXPRI * 3)
70 #define PRIBASE_NULL		(MAXPRI * 4)
71 
72 #define NQS	32			/* 32 run queues. */
73 #define PPQ	(MAXPRI / NQS)		/* priorities per queue */
74 #define PPQMASK	(PPQ - 1)
75 
76 /*
77  * NICE_QS	- maximum queues nice can shift the process
78  * EST_QS	- maximum queues estcpu can shift the process
79  *
80  * ESTCPUPPQ	- number of estcpu units per priority queue
81  * ESTCPUMAX	- number of estcpu units
82  *
83  * Remember that NICE runs over the whole -20 to +20 range.
84  */
85 #define NICE_QS		24	/* -20 to +20 shift in whole queues */
86 #define EST_QS		20	/* 0-MAX shift in whole queues */
87 #define ESTCPUPPQ	512
88 #define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
89 #define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)
90 
91 #define ESTCPULIM(v)	min((v), ESTCPUMAX)
92 
93 TAILQ_HEAD(rq, lwp);
94 
95 #define lwp_priority	lwp_usdata.dfly.priority
96 #define lwp_forked	lwp_usdata.dfly.forked
97 #define lwp_rqindex	lwp_usdata.dfly.rqindex
98 #define lwp_estcpu	lwp_usdata.dfly.estcpu
99 #define lwp_estfast	lwp_usdata.dfly.estfast
100 #define lwp_uload	lwp_usdata.dfly.uload
101 #define lwp_rqtype	lwp_usdata.dfly.rqtype
102 #define lwp_qcpu	lwp_usdata.dfly.qcpu
103 #define lwp_rrcount	lwp_usdata.dfly.rrcount
104 
105 static __inline int
106 lptouload(struct lwp *lp)
107 {
108 	int uload;
109 
110 	uload = lp->lwp_estcpu / NQS;
111 	uload -= uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
112 
113 	return uload;
114 }
115 
116 /*
117  * DFly scheduler pcpu structure.  Note that the pcpu uload field must
118  * be 64-bits to avoid overflowing in the situation where more than 32768
119  * processes are on a single cpu's queue.  Since high-end systems can
120  * easily run 900,000+ processes, we have to deal with it.
121  */
122 struct usched_dfly_pcpu {
123 	struct spinlock spin;
124 	struct thread	*helper_thread;
125 	struct globaldata *gd;
126 	u_short		scancpu;
127 	short		upri;
128 	long		uload;		/* 64-bits to avoid overflow (1) */
129 	int		ucount;
130 	int		flags;
131 	struct lwp	*uschedcp;
132 	struct rq	queues[NQS];
133 	struct rq	rtqueues[NQS];
134 	struct rq	idqueues[NQS];
135 	u_int32_t	queuebits;
136 	u_int32_t	rtqueuebits;
137 	u_int32_t	idqueuebits;
138 	int		runqcount;
139 	int		cpuid;
140 	cpumask_t	cpumask;
141 	cpu_node_t	*cpunode;
142 } __cachealign;
143 
144 /*
145  * Reflecting bits in the global atomic masks allows us to avoid
146  * a certain degree of global ping-ponging.
147  */
148 #define DFLY_PCPU_RDYMASK	0x0001	/* reflect rdyprocmask */
149 #define DFLY_PCPU_CURMASK	0x0002	/* reflect curprocmask */
150 
151 typedef struct usched_dfly_pcpu	*dfly_pcpu_t;
152 
153 static void dfly_acquire_curproc(struct lwp *lp);
154 static void dfly_release_curproc(struct lwp *lp);
155 static void dfly_select_curproc(globaldata_t gd);
156 static void dfly_setrunqueue(struct lwp *lp);
157 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
158 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
159 				sysclock_t cpstamp);
160 static void dfly_recalculate_estcpu(struct lwp *lp);
161 static void dfly_resetpriority(struct lwp *lp);
162 static void dfly_forking(struct lwp *plp, struct lwp *lp);
163 static void dfly_exiting(struct lwp *lp, struct proc *);
164 static void dfly_uload_update(struct lwp *lp);
165 static void dfly_yield(struct lwp *lp);
166 static void dfly_changeqcpu_locked(struct lwp *lp,
167 				dfly_pcpu_t dd, dfly_pcpu_t rdd);
168 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
169 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit);
170 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
171 static void dfly_need_user_resched_remote(void *dummy);
172 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
173 					  struct lwp *chklp, int worst);
174 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
175 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
176 static void dfly_changedcpu(struct lwp *lp);
177 
178 struct usched usched_dfly = {
179 	{ NULL },
180 	"dfly", "Original DragonFly Scheduler",
181 	NULL,			/* default registration */
182 	NULL,			/* default deregistration */
183 	dfly_acquire_curproc,
184 	dfly_release_curproc,
185 	dfly_setrunqueue,
186 	dfly_schedulerclock,
187 	dfly_recalculate_estcpu,
188 	dfly_resetpriority,
189 	dfly_forking,
190 	dfly_exiting,
191 	dfly_uload_update,
192 	NULL,			/* setcpumask not supported */
193 	dfly_yield,
194 	dfly_changedcpu
195 };
196 
197 /*
198  * We have NQS (32) run queues per scheduling class.  For the normal
199  * class, there are 128 priorities scaled onto these 32 queues.  New
200  * processes are added to the last entry in each queue, and processes
201  * are selected for running by taking them from the head and maintaining
202  * a simple FIFO arrangement.  Realtime and Idle priority processes have
203  * and explicit 0-31 priority which maps directly onto their class queue
204  * index.  When a queue has something in it, the corresponding bit is
205  * set in the queuebits variable, allowing a single read to determine
206  * the state of all 32 queues and then a ffs() to find the first busy
207  * queue.
208  *
209  * curprocmask is used to publish cpus with assigned curprocs to the rest
210  * of the cpus.  In certain situations curprocmask may leave a bit set
211  * (e.g. a yield or a token-based yield) even though dd->uschedcp is
212  * NULL'd out temporarily).
213  */
214 					/* currently running a user process */
215 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
216 static cpumask_t dfly_rdyprocmask;	/* ready to accept a user process */
217 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
218 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
219 static struct sysctl_oid *usched_dfly_sysctl_tree;
220 static struct lock usched_dfly_config_lk = LOCK_INITIALIZER("usdfs", 0, 0);
221 
222 /* Debug info exposed through debug.* sysctl */
223 
224 static int usched_dfly_debug = -1;
225 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
226 	   &usched_dfly_debug, 0,
227 	   "Print debug information for this pid");
228 
229 static int usched_dfly_pid_debug = -1;
230 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
231 	   &usched_dfly_pid_debug, 0,
232 	   "Print KTR debug information for this pid");
233 
234 static int usched_dfly_chooser = 0;
235 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
236 	   &usched_dfly_chooser, 0,
237 	   "Print KTR debug information for this pid");
238 
239 /*
240  * WARNING!
241  *
242  * The fork bias can have a large effect on the system in the face of a
243  * make -j N or other high-forking applications.
244  *
245  * Larger values are much less invasive vs other things that
246  * might be running in the system, but can cause exec chains
247  * such as those typically generated by make to have higher
248  * latencies in the face of modest load.
249  *
250  * Lower values are more invasive but have reduced latencies
251  * for such exec chains.
252  *
253  *	make -j 10 buildkernel example, build times:
254  *
255  *	     +0	3:04
256  *	     +1 3:14	-5.2%	<-- default
257  *	     +2 3:22	-8.9%
258  *
259  * This issue occurs due to the way the scheduler affinity heuristics work.
260  * There is no way to really 'fix' the affinity heuristics because when it
261  * comes right down to it trying to instantly schedule a process on an
262  * available cpu (even if it will become unavailable a microsecond later)
263  * tends to cause processes to shift around between cpus and sockets too much
264  * and breaks the affinity.
265  *
266  * NOTE: Heavily concurrent builds typically have enough things on the pan
267  *	 that they remain time-efficient even with a higher bias.
268  */
269 static int usched_dfly_forkbias = 1;
270 SYSCTL_INT(_debug, OID_AUTO, dfly_forkbias, CTLFLAG_RW,
271 	   &usched_dfly_forkbias, 0,
272 	   "Fork bias for estcpu in whole queues");
273 
274 /*
275  * Tunning usched_dfly - configurable through kern.usched_dfly.
276  *
277  * weight1 - Tries to keep threads on their current cpu.  If you
278  *	     make this value too large the scheduler will not be
279  *	     able to load-balance large loads.
280  *
281  *	     Generally set to a fairly low value, but high enough
282  *	     such that estcpu jitter doesn't move threads around.
283  *
284  * weight2 - If non-zero, detects thread pairs undergoing synchronous
285  *	     communications and tries to move them closer together.
286  *	     The weight advantages the same package and socket and
287  *	     disadvantages the same core and same cpu.
288  *
289  *	     WARNING!  Weight2 is a ridiculously sensitive parameter,
290  *	     particularly against weight4.  change the default at your
291  *	     peril.
292  *
293  * weight3 - Weighting based on the number of recently runnable threads
294  *	     on the userland scheduling queue (ignoring their loads).
295  *
296  *	     A nominal value here prevents high-priority (low-load)
297  *	     threads from accumulating on one cpu core when other
298  *	     cores are available.
299  *
300  *	     This value should be left fairly small because low-load
301  *	     high priority threads can still be mostly idle and too
302  *	     high a value will kick cpu-bound processes off the cpu
303  *	     unnecessarily.
304  *
305  * weight4 - Weighting based on availability of other logical cpus running
306  *	     less important threads (by upri) than the thread we are trying
307  *	     to schedule.
308  *
309  *	     This allows a thread to migrate to another nearby cpu if it
310  *	     is unable to run on the current cpu based on the other cpu
311  *	     being idle or running a less important (higher lwp_priority)
312  *	     thread.  This value should be large enough to override weight1,
313  *	     but not so large as to override weight2.
314  *
315  *	     This parameter generally ensures fairness at the cost of some
316  *	     performance (if set to too high).  It should generally be just
317  *	     a tad lower than weight2.
318  *
319  * weight5 - Weighting based on the relative amount of ram connected
320  *	     to the node a cpu resides on.
321  *
322  *	     This value should remain fairly low to allow assymetric
323  *	     NUMA nodes to get threads scheduled to them.  Setting a very
324  *	     high level will prevent scheduling on assymetric NUMA nodes
325  *	     with low amounts of directly-attached memory.
326  *
327  *	     Note that when testing e.g. N threads on a machine with N
328  *	     cpu cores with assymtric NUMA nodes, a non-zero value will
329  *	     cause some cpu threads on the low-priority NUMA nodes to remain
330  *	     idle even when a few process threads are doubled-up on other
331  *	     cpus.  But this is typically more ideal because it deschedules
332  *	     low-priority NUMA nodes at lighter nodes.
333  *
334  *	     Values between 50 and 200 are recommended.  Default is 50.
335  *
336  * weight6 - rdd transfer weight hysteresis.  Defaults to 0, can be increased
337  *	     to improve stabillity at the cost of more mis-schedules.
338  *
339  * ipc_smt - If enabled, advantage IPC pairing to sibling cpu threads.
340  *	     If -1, automatic when load >= 1/2 ncpus (default).
341  *
342  * ipc_same- If enabled, advantage IPC pairing to the same logical cpu.
343  *	     If -1, automatic when load >= ncpus (default).
344  *
345  * features - These flags can be set or cleared to enable or disable various
346  *	      features.
347  *
348  *	      0x01	Enable idle-cpu pulling			(default)
349  *	      0x02	Enable proactive pushing		(default)
350  *	      0x04	Enable rebalancing rover		(default)
351  *	      0x08	Enable more proactive pushing		(default)
352  *	      0x10	(unassigned)
353  *	      0x20	choose best cpu for forked process	(default)
354  *	      0x40	choose current cpu for forked process
355  *	      0x80	choose random cpu for forked process
356  *
357  *	     NOTE - The idea behind forking mechanic 0x20 is that most
358  *		    fork()ing is either followed by an exec in the child,
359  *		    or the parent wait*()s.  If the child is short-lived,
360  *		    there is effectively an IPC dependency (td_wakefromcpu
361  *		    is also set in kern_fork.c) and we want to implement
362  *		    the weight2 behavior to reduce IPIs and to reduce CPU
363  *		    cache ping-ponging.
364  */
365 __read_mostly static int usched_dfly_smt = 0;
366 __read_mostly static int usched_dfly_cache_coherent = 0;
367 __read_mostly static int usched_dfly_weight1 = 30;  /* keep thread on cpu */
368 __read_mostly static int usched_dfly_weight2 = 180; /* IPC locality */
369 __read_mostly static int usched_dfly_weight3 = 10;  /* threads on queue */
370 __read_mostly static int usched_dfly_weight4 = 120; /* availability of cores */
371 __read_mostly static int usched_dfly_weight5 = 50;  /* node attached memory */
372 __read_mostly static int usched_dfly_weight6 = 0;   /* rdd transfer weight */
373 __read_mostly static int usched_dfly_features = 0x2f;	     /* allow pulls */
374 __read_mostly static int usched_dfly_fast_resched = PPQ / 2; /* delta pri */
375 __read_mostly static int usched_dfly_swmask = ~PPQMASK;	     /* allow pulls */
376 __read_mostly static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
377 __read_mostly static int usched_dfly_decay = 8;
378 __read_mostly static int usched_dfly_ipc_smt = -1;  /* IPC auto smt pair */
379 __read_mostly static int usched_dfly_ipc_same = -1; /* IPC auto same log cpu */
380 __read_mostly static int usched_dfly_poll_ticks = 0; /* helper polling ticks */
381 __read_mostly static long usched_dfly_node_mem;
382 
383 /* KTR debug printings */
384 
385 KTR_INFO_MASTER(usched);
386 
387 #if !defined(KTR_USCHED_DFLY)
388 #define	KTR_USCHED_DFLY	KTR_ALL
389 #endif
390 
391 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
392     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
393     pid_t pid, int old_cpuid, int curr);
394 
395 /*
396  * This function is called when the kernel intends to return to userland.
397  * It is responsible for making the thread the current designated userland
398  * thread for this cpu, blocking if necessary.
399  *
400  * The kernel will not depress our LWKT priority until after we return,
401  * in case we have to shove over to another cpu.
402  *
403  * We must determine our thread's disposition before we switch away.  This
404  * is very sensitive code.
405  *
406  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
407  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
408  * occur, this function is called only under very controlled circumstances.
409  */
410 static void
411 dfly_acquire_curproc(struct lwp *lp)
412 {
413 	globaldata_t gd;
414 	dfly_pcpu_t dd;
415 	dfly_pcpu_t rdd;
416 	thread_t td;
417 	int force_resched;
418 
419 	/*
420 	 * Make sure we aren't sitting on a tsleep queue.
421 	 */
422 	td = lp->lwp_thread;
423 	crit_enter_quick(td);
424 	if (td->td_flags & TDF_TSLEEPQ)
425 		tsleep_remove(td);
426 	dfly_recalculate_estcpu(lp);
427 
428 	gd = mycpu;
429 	dd = &dfly_pcpu[gd->gd_cpuid];
430 
431 	/*
432 	 * Process any pending interrupts/ipi's, then handle reschedule
433 	 * requests.  dfly_release_curproc() will try to assign a new
434 	 * uschedcp that isn't us and otherwise NULL it out.
435 	 */
436 	force_resched = 0;
437 	if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
438 	    lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
439 		force_resched = 1;
440 	}
441 
442 	if (user_resched_wanted()) {
443 		if (dd->uschedcp == lp)
444 			force_resched = 1;
445 		clear_user_resched();
446 		dfly_release_curproc(lp);
447 	}
448 
449 	/*
450 	 * Loop until we are the current user thread.
451 	 *
452 	 * NOTE: dd spinlock not held at top of loop.
453 	 */
454 	if (dd->uschedcp == lp)
455 		lwkt_yield_quick();
456 
457 	while (dd->uschedcp != lp) {
458 		/*
459 		 * Do not do a lwkt_yield_quick() here as it will prevent
460 		 * the lwp from being placed on the dfly_bsd runqueue for
461 		 * one cycle (possibly an entire round-robin), preventing
462 		 * it from being scheduled to another cpu.
463 		 */
464 		/* lwkt_yield_quick(); */
465 
466 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
467 			kprintf(" pid %d acquire curcpu %d (force %d) ",
468 				lp->lwp_proc->p_pid, gd->gd_cpuid,
469 				force_resched);
470 
471 
472 		spin_lock(&dd->spin);
473 
474 		/* This lwp is an outcast; force reschedule. */
475 		if (__predict_false(
476 		    CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
477 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
478 			dfly_changeqcpu_locked(lp, dd, rdd);
479 			spin_unlock(&dd->spin);
480 			lwkt_deschedule(lp->lwp_thread);
481 			dfly_setrunqueue_dd(rdd, lp);
482 			lwkt_switch();
483 			gd = mycpu;
484 			dd = &dfly_pcpu[gd->gd_cpuid];
485 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
486 				kprintf("SEL-A cpu %d\n", gd->gd_cpuid);
487 			continue;
488 		}
489 
490 		/*
491 		 * We are not or are no longer the current lwp and a forced
492 		 * reschedule was requested.  Figure out the best cpu to
493 		 * run on (our current cpu will be given significant weight).
494 		 *
495 		 * Doing this on many cpus simultaneously leads to
496 		 * instability so pace the operation.
497 		 *
498 		 * (if a reschedule was not requested we want to move this
499 		 * step after the uschedcp tests).
500 		 */
501 		if (force_resched &&
502 		   (usched_dfly_features & 0x08) &&
503 		   (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
504 			if ((rdd = dfly_choose_best_queue(lp)) != dd) {
505 				dfly_changeqcpu_locked(lp, dd, rdd);
506 				spin_unlock(&dd->spin);
507 				lwkt_deschedule(lp->lwp_thread);
508 				dfly_setrunqueue_dd(rdd, lp);
509 				lwkt_switch();
510 				gd = mycpu;
511 				dd = &dfly_pcpu[gd->gd_cpuid];
512 				if (usched_dfly_debug == lp->lwp_proc->p_pid)
513 					kprintf("SEL-B cpu %d\n", gd->gd_cpuid);
514 				continue;
515 			}
516 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
517 				kprintf("(SEL-B same cpu) ");
518 		}
519 
520 		/*
521 		 * Either no reschedule was requested or the best queue was
522 		 * dd, and no current process has been selected.  We can
523 		 * trivially become the current lwp on the current cpu.
524 		 */
525 		if (dd->uschedcp == NULL) {
526 			atomic_clear_int(&lp->lwp_thread->td_mpflags,
527 					 TDF_MP_DIDYIELD);
528 			if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
529 				ATOMIC_CPUMASK_ORBIT(dfly_curprocmask,
530 						     gd->gd_cpuid);
531 				dd->flags |= DFLY_PCPU_CURMASK;
532 			}
533 			dd->uschedcp = lp;
534 			dd->upri = lp->lwp_priority;
535 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
536 			spin_unlock(&dd->spin);
537 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
538 				kprintf("SEL-C cpu %d (same cpu)\n",
539 					gd->gd_cpuid);
540 			break;
541 		}
542 
543 		/*
544 		 * Can we steal the current designated user thread?
545 		 *
546 		 * If we do the other thread will stall when it tries to
547 		 * return to userland, possibly rescheduling elsewhere.
548 		 * Set need_user_resched() to get the thread to cycle soonest.
549 		 *
550 		 * It is important to do a masked test to avoid the edge
551 		 * case where two near-equal-priority threads are constantly
552 		 * interrupting each other.
553 		 *
554 		 * In the exact match case another thread has already gained
555 		 * uschedcp and lowered its priority, if we steal it the
556 		 * other thread will stay stuck on the LWKT runq and not
557 		 * push to another cpu.  So don't steal on equal-priority even
558 		 * though it might appear to be more beneficial due to not
559 		 * having to switch back to the other thread's context.
560 		 *
561 		 * usched_dfly_fast_resched requires that two threads be
562 		 * significantly far apart in priority in order to interrupt.
563 		 *
564 		 * If better but not sufficiently far apart, the current
565 		 * uschedcp will be interrupted at the next scheduler clock.
566 		 */
567 		if (dd->uschedcp &&
568 		   (dd->upri & ~PPQMASK) >
569 		   (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
570 			dd->uschedcp = lp;
571 			dd->upri = lp->lwp_priority;
572 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
573 			need_user_resched();
574 			spin_unlock(&dd->spin);
575 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
576 				kprintf("SEL-D cpu %d (same cpu)\n",
577 					gd->gd_cpuid);
578 			break;
579 		}
580 
581 		/*
582 		 * Requeue us at lwp_priority, which recalculate_estcpu()
583 		 * set for us.  Reset the rrcount to force placement
584 		 * at the end of the queue.
585 		 *
586 		 * We used to move ourselves to the worst queue, but
587 		 * this creates a fairly serious priority inversion
588 		 * problem.
589 		 */
590 		if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
591 			spin_unlock(&dd->spin);
592 			lp->lwp_rrcount = usched_dfly_rrinterval;
593 			lp->lwp_rqindex = (lp->lwp_priority & PRIMASK) / PPQ;
594 
595 			lwkt_deschedule(lp->lwp_thread);
596 			dfly_setrunqueue_dd(dd, lp);
597 			atomic_clear_int(&lp->lwp_thread->td_mpflags,
598 					 TDF_MP_DIDYIELD);
599 			lwkt_switch();
600 			gd = mycpu;
601 			dd = &dfly_pcpu[gd->gd_cpuid];
602 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
603 				kprintf("SEL-E cpu %d (requeue)\n",
604 					gd->gd_cpuid);
605 			continue;
606 		}
607 
608 		/*
609 		 * We are not the current lwp, figure out the best cpu
610 		 * to run on (our current cpu will be given significant
611 		 * weight).  Loop on cpu change.
612 		 */
613 		if ((usched_dfly_features & 0x02) &&
614 		    force_resched == 0 &&
615 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
616 			dfly_changeqcpu_locked(lp, dd, rdd);
617 			spin_unlock(&dd->spin);
618 			lwkt_deschedule(lp->lwp_thread);
619 			dfly_setrunqueue_dd(rdd, lp);
620 			lwkt_switch();
621 			gd = mycpu;
622 			dd = &dfly_pcpu[gd->gd_cpuid];
623 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
624 				kprintf("SEL-F cpu %d (requeue new cpu)\n",
625 					gd->gd_cpuid);
626 			continue;
627 		}
628 
629 		/*
630 		 * We cannot become the current lwp, place the lp on the
631 		 * run-queue of this or another cpu and deschedule ourselves.
632 		 *
633 		 * When we are reactivated we will have another chance.
634 		 *
635 		 * Reload after a switch or setrunqueue/switch possibly
636 		 * moved us to another cpu.
637 		 */
638 		spin_unlock(&dd->spin);
639 		lwkt_deschedule(lp->lwp_thread);
640 		dfly_setrunqueue_dd(dd, lp);
641 		lwkt_switch();
642 		gd = mycpu;
643 		dd = &dfly_pcpu[gd->gd_cpuid];
644 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
645 			kprintf("SEL-G cpu %d (fallback setrunq)\n",
646 				gd->gd_cpuid);
647 	}
648 	if (usched_dfly_debug == lp->lwp_proc->p_pid)
649 		kprintf(" pid %d acquire DONE cpu %d\n",
650 			lp->lwp_proc->p_pid, gd->gd_cpuid);
651 
652 	/*
653 	 * Make sure upri is synchronized, then yield to LWKT threads as
654 	 * needed before returning.  This could result in another reschedule.
655 	 * XXX
656 	 */
657 	crit_exit_quick(td);
658 
659 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
660 }
661 
662 /*
663  * DFLY_RELEASE_CURPROC
664  *
665  * This routine detaches the current thread from the userland scheduler,
666  * usually because the thread needs to run or block in the kernel (at
667  * kernel priority) for a while.
668  *
669  * This routine is also responsible for selecting a new thread to
670  * make the current thread.
671  *
672  * NOTE: This implementation differs from the dummy example in that
673  * dfly_select_curproc() is able to select the current process, whereas
674  * dummy_select_curproc() is not able to select the current process.
675  * This means we have to NULL out uschedcp.
676  *
677  * Additionally, note that we may already be on a run queue if releasing
678  * via the lwkt_switch() in dfly_setrunqueue().
679  */
680 static void
681 dfly_release_curproc(struct lwp *lp)
682 {
683 	globaldata_t gd = mycpu;
684 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
685 
686 	/*
687 	 * Make sure td_wakefromcpu is defaulted.  This will be overwritten
688 	 * by wakeup().
689 	 */
690 	if (dd->uschedcp == lp) {
691 		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
692 		spin_lock(&dd->spin);
693 		if (dd->uschedcp == lp) {
694 			dd->uschedcp = NULL;	/* don't let lp be selected */
695 			dd->upri = PRIBASE_NULL;
696 
697 			/*
698 			 * We're just going to set it again, avoid the global
699 			 * cache line ping-pong.
700 			 */
701 			if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0) {
702 				if (dd->flags & DFLY_PCPU_CURMASK) {
703 					ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask,
704 							       gd->gd_cpuid);
705 					dd->flags &= ~DFLY_PCPU_CURMASK;
706 				}
707 			}
708 			spin_unlock(&dd->spin);
709 			dfly_select_curproc(gd);
710 		} else {
711 			spin_unlock(&dd->spin);
712 		}
713 	}
714 }
715 
716 /*
717  * DFLY_SELECT_CURPROC
718  *
719  * Select a new current process for this cpu and clear any pending user
720  * reschedule request.  The cpu currently has no current process.
721  *
722  * This routine is also responsible for equal-priority round-robining,
723  * typically triggered from dfly_schedulerclock().  In our dummy example
724  * all the 'user' threads are LWKT scheduled all at once and we just
725  * call lwkt_switch().
726  *
727  * The calling process is not on the queue and cannot be selected.
728  */
729 static
730 void
731 dfly_select_curproc(globaldata_t gd)
732 {
733 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
734 	struct lwp *nlp;
735 	int cpuid = gd->gd_cpuid;
736 
737 	crit_enter_gd(gd);
738 
739 	spin_lock(&dd->spin);
740 	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
741 
742 	if (nlp) {
743 		if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
744 			ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
745 			dd->flags |= DFLY_PCPU_CURMASK;
746 		}
747 		dd->upri = nlp->lwp_priority;
748 		dd->uschedcp = nlp;
749 #if 0
750 		dd->rrcount = 0;		/* reset round robin */
751 #endif
752 		spin_unlock(&dd->spin);
753 		lwkt_acquire(nlp->lwp_thread);
754 		lwkt_schedule(nlp->lwp_thread);
755 	} else {
756 		spin_unlock(&dd->spin);
757 	}
758 	crit_exit_gd(gd);
759 }
760 
761 /*
762  * Place the specified lwp on the user scheduler's run queue.  This routine
763  * must be called with the thread descheduled.  The lwp must be runnable.
764  * It must not be possible for anyone else to explicitly schedule this thread.
765  *
766  * The thread may be the current thread as a special case.
767  */
768 static void
769 dfly_setrunqueue(struct lwp *lp)
770 {
771 	dfly_pcpu_t dd;
772 	dfly_pcpu_t rdd;
773 
774 	/*
775 	 * First validate the process LWKT state.
776 	 */
777 	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
778 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
779 	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
780 	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
781 	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
782 
783 	/*
784 	 * NOTE: dd/rdd do not necessarily represent the current cpu.
785 	 *	 Instead they may represent the cpu the thread was last
786 	 *	 scheduled on or inherited by its parent.
787 	 */
788 	dd = &dfly_pcpu[lp->lwp_qcpu];
789 	rdd = dd;
790 
791 	/*
792 	 * This process is not supposed to be scheduled anywhere or assigned
793 	 * as the current process anywhere.  Assert the condition.
794 	 */
795 	KKASSERT(rdd->uschedcp != lp);
796 
797 	/*
798 	 * Ok, we have to setrunqueue some target cpu and request a reschedule
799 	 * if necessary.
800 	 *
801 	 * We have to choose the best target cpu.  It might not be the current
802 	 * target even if the current cpu has no running user thread (for
803 	 * example, because the current cpu might be a hyperthread and its
804 	 * sibling has a thread assigned).
805 	 *
806 	 * If we just forked it is most optimal to run the child on the same
807 	 * cpu just in case the parent decides to wait for it (thus getting
808 	 * off that cpu).  As long as there is nothing else runnable on the
809 	 * cpu, that is.  If we did this unconditionally a parent forking
810 	 * multiple children before waiting (e.g. make -j N) leaves other
811 	 * cpus idle that could be working.
812 	 */
813 	if (lp->lwp_forked) {
814 		lp->lwp_forked = 0;
815 		if (usched_dfly_features & 0x20)
816 			rdd = dfly_choose_best_queue(lp);
817 		else if (usched_dfly_features & 0x40)
818 			rdd = &dfly_pcpu[lp->lwp_qcpu];
819 		else if (usched_dfly_features & 0x80)
820 			rdd = dfly_choose_queue_simple(rdd, lp);
821 		else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
822 			rdd = dfly_choose_best_queue(lp);
823 		else
824 			rdd = &dfly_pcpu[lp->lwp_qcpu];
825 	} else {
826 		rdd = dfly_choose_best_queue(lp);
827 		/* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
828 	}
829 	if (lp->lwp_qcpu != rdd->cpuid) {
830 		spin_lock(&dd->spin);
831 		dfly_changeqcpu_locked(lp, dd, rdd);
832 		spin_unlock(&dd->spin);
833 	}
834 	dfly_setrunqueue_dd(rdd, lp);
835 }
836 
837 /*
838  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
839  * spin-locked on-call.  rdd does not have to be.
840  */
841 static void
842 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
843 {
844 	if (lp->lwp_qcpu != rdd->cpuid) {
845 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
846 			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
847 			atomic_add_long(&dd->uload, -lp->lwp_uload);
848 			atomic_add_int(&dd->ucount, -1);
849 		}
850 		lp->lwp_qcpu = rdd->cpuid;
851 	}
852 }
853 
854 /*
855  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
856  * also performs all necessary ancillary notification actions.
857  */
858 static void
859 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
860 {
861 	globaldata_t rgd;
862 
863 	/*
864 	 * We might be moving the lp to another cpu's run queue, and once
865 	 * on the runqueue (even if it is our cpu's), another cpu can rip
866 	 * it away from us.
867 	 *
868 	 * TDF_MIGRATING might already be set if this is part of a
869 	 * remrunqueue+setrunqueue sequence.
870 	 */
871 	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
872 		lwkt_giveaway(lp->lwp_thread);
873 
874 	rgd = rdd->gd;
875 
876 	/*
877 	 * We lose control of the lp the moment we release the spinlock
878 	 * after having placed it on the queue.  i.e. another cpu could pick
879 	 * it up, or it could exit, or its priority could be further
880 	 * adjusted, or something like that.
881 	 *
882 	 * WARNING! rdd can point to a foreign cpu!
883 	 */
884 	spin_lock(&rdd->spin);
885 	dfly_setrunqueue_locked(rdd, lp);
886 
887 	/*
888 	 * Potentially interrupt the currently-running thread
889 	 */
890 	if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
891 		/*
892 		 * Currently running thread is better or same, do not
893 		 * interrupt.
894 		 */
895 		spin_unlock(&rdd->spin);
896 	} else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
897 		   usched_dfly_fast_resched) {
898 		/*
899 		 * Currently running thread is not better, but not so bad
900 		 * that we need to interrupt it.  Let it run for one more
901 		 * scheduler tick.
902 		 */
903 		if (rdd->uschedcp &&
904 		    rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
905 			rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
906 		}
907 		spin_unlock(&rdd->spin);
908 	} else if (rgd == mycpu) {
909 		/*
910 		 * We should interrupt the currently running thread, which
911 		 * is on the current cpu.  However, if DIDYIELD is set we
912 		 * round-robin unconditionally and do not interrupt it.
913 		 */
914 		spin_unlock(&rdd->spin);
915 		if (rdd->uschedcp == NULL)
916 			wakeup_mycpu(rdd->helper_thread); /* XXX */
917 		if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
918 			need_user_resched();
919 	} else {
920 		/*
921 		 * We should interrupt the currently running thread, which
922 		 * is on a different cpu.
923 		 */
924 		spin_unlock(&rdd->spin);
925 		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
926 	}
927 }
928 
929 /*
930  * This routine is called from a systimer IPI.  It MUST be MP-safe and
931  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
932  * each cpu.
933  */
934 static
935 void
936 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
937 {
938 	globaldata_t gd = mycpu;
939 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
940 
941 	/*
942 	 * Spinlocks also hold a critical section so there should not be
943 	 * any active.
944 	 */
945 	KKASSERT(gd->gd_spinlocks == 0 || dumping);
946 
947 	/*
948 	 * If lp is NULL we might be contended and lwkt_switch() may have
949 	 * cycled into the idle thread.  Apply the tick to the current
950 	 * process on this cpu if it is contended.
951 	 */
952 	if (gd->gd_curthread == &gd->gd_idlethread) {
953 		lp = dd->uschedcp;
954 		if (lp && (lp->lwp_thread == NULL ||
955 			   lp->lwp_thread->td_contended == 0)) {
956 			lp = NULL;
957 		}
958 	}
959 
960 	/*
961 	 * Dock thread for tick
962 	 */
963 	if (lp) {
964 		/*
965 		 * Do we need to round-robin?  We round-robin 10 times a
966 		 * second.  This should only occur for cpu-bound batch
967 		 * processes.
968 		 */
969 		if (++lp->lwp_rrcount >= usched_dfly_rrinterval)
970 			need_user_resched();
971 
972 		/*
973 		 * Adjust estcpu upward using a real time equivalent
974 		 * calculation, and recalculate lp's priority.  Estcpu
975 		 * is increased such that it will cap-out over a period
976 		 * of one second.
977 		 */
978 		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
979 					   ESTCPUMAX / ESTCPUFREQ + 1);
980 		dfly_resetpriority(lp);
981 	}
982 
983 	/*
984 	 * Rebalance two cpus every 8 ticks, pulling the worst thread
985 	 * from the worst cpu's queue into a rotating cpu number.
986 	 * Also require that the moving of the highest-load thread
987 	 * from rdd to dd does not cause the uload to cross over.
988 	 *
989 	 * This mechanic is needed because the push algorithms can
990 	 * steady-state in an non-optimal configuration.  We need to mix it
991 	 * up a little, even if it means breaking up a paired thread, so
992 	 * the push algorithms can rebalance the degenerate conditions.
993 	 * This portion of the algorithm exists to ensure stability at the
994 	 * selected weightings.
995 	 *
996 	 * Because we might be breaking up optimal conditions we do not want
997 	 * to execute this too quickly, hence we only rebalance approximately
998 	 * ~7-8 times per second.  The push's, on the otherhand, are capable
999 	 * moving threads to other cpus at a much higher rate.
1000 	 *
1001 	 * We choose the most heavily loaded thread from the worst queue
1002 	 * in order to ensure that multiple heavy-weight threads on the same
1003 	 * queue get broken up, and also because these threads are the most
1004 	 * likely to be able to remain in place.  Hopefully then any pairings,
1005 	 * if applicable, migrate to where these threads are.
1006 	 */
1007 	if ((usched_dfly_features & 0x04) &&
1008 	    ((u_int)sched_ticks & 7) == 0 &&
1009 	    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
1010 		/*
1011 		 * Our cpu is up.
1012 		 */
1013 		struct lwp *nlp;
1014 		dfly_pcpu_t rdd;
1015 
1016 		rdd = dfly_choose_worst_queue(dd, 1);
1017 		if (rdd && dd->uload + usched_dfly_weight6 / 2 < rdd->uload) {
1018 			spin_lock(&dd->spin);
1019 			if (spin_trylock(&rdd->spin)) {
1020 				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
1021 				spin_unlock(&rdd->spin);
1022 				if (nlp == NULL)
1023 					spin_unlock(&dd->spin);
1024 			} else {
1025 				spin_unlock(&dd->spin);
1026 				nlp = NULL;
1027 			}
1028 		} else {
1029 			nlp = NULL;
1030 		}
1031 		/* dd->spin held if nlp != NULL */
1032 
1033 		/*
1034 		 * Either schedule it or add it to our queue.
1035 		 */
1036 		if (nlp &&
1037 		    (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
1038 			if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
1039 				ATOMIC_CPUMASK_ORMASK(dfly_curprocmask,
1040 						      dd->cpumask);
1041 				dd->flags |= DFLY_PCPU_CURMASK;
1042 			}
1043 			dd->upri = nlp->lwp_priority;
1044 			dd->uschedcp = nlp;
1045 #if 0
1046 			dd->rrcount = 0;	/* reset round robin */
1047 #endif
1048 			spin_unlock(&dd->spin);
1049 			lwkt_acquire(nlp->lwp_thread);
1050 			lwkt_schedule(nlp->lwp_thread);
1051 		} else if (nlp) {
1052 			dfly_setrunqueue_locked(dd, nlp);
1053 			spin_unlock(&dd->spin);
1054 		}
1055 	}
1056 }
1057 
1058 /*
1059  * Called from acquire and from kern_synch's one-second timer (one of the
1060  * callout helper threads) with a critical section held.
1061  *
1062  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
1063  * overall system load.
1064  *
1065  * Note that no recalculation occurs for a process which sleeps and wakes
1066  * up in the same tick.  That is, a system doing thousands of context
1067  * switches per second will still only do serious estcpu calculations
1068  * ESTCPUFREQ times per second.
1069  */
1070 static
1071 void
1072 dfly_recalculate_estcpu(struct lwp *lp)
1073 {
1074 	globaldata_t gd = mycpu;
1075 	sysclock_t cpbase;
1076 	sysclock_t ttlticks;
1077 	int estcpu;
1078 	int decay_factor;
1079 	int ucount;
1080 
1081 	/*
1082 	 * We have to subtract periodic to get the last schedclock
1083 	 * timeout time, otherwise we would get the upcoming timeout.
1084 	 * Keep in mind that a process can migrate between cpus and
1085 	 * while the scheduler clock should be very close, boundary
1086 	 * conditions could lead to a small negative delta.
1087 	 */
1088 	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1089 
1090 	if (lp->lwp_slptime > 1) {
1091 		/*
1092 		 * Too much time has passed, do a coarse correction.
1093 		 */
1094 		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1095 		dfly_resetpriority(lp);
1096 		lp->lwp_cpbase = cpbase;
1097 		lp->lwp_cpticks = 0;
1098 		lp->lwp_estfast = 0;
1099 	} else if (lp->lwp_cpbase != cpbase) {
1100 		/*
1101 		 * Adjust estcpu if we are in a different tick.  Don't waste
1102 		 * time if we are in the same tick.
1103 		 *
1104 		 * First calculate the number of ticks in the measurement
1105 		 * interval.  The ttlticks calculation can wind up 0 due to
1106 		 * a bug in the handling of lwp_slptime  (as yet not found),
1107 		 * so make sure we do not get a divide by 0 panic.
1108 		 */
1109 		ttlticks = (cpbase - lp->lwp_cpbase) /
1110 			   gd->gd_schedclock.periodic;
1111 		if ((ssysclock_t)ttlticks < 0) {
1112 			ttlticks = 0;
1113 			lp->lwp_cpbase = cpbase;
1114 		}
1115 		if (ttlticks < 4)
1116 			return;
1117 		updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1118 
1119 		/*
1120 		 * Calculate instant estcpu based percentage of (one) cpu
1121 		 * used and exponentially average it into the current
1122 		 * lwp_estcpu.
1123 		 */
1124 		ucount = dfly_pcpu[lp->lwp_qcpu].ucount;
1125 		estcpu = lp->lwp_cpticks * ESTCPUMAX / ttlticks;
1126 
1127 		/*
1128 		 * The higher ttlticks gets, the more meaning the calculation
1129 		 * has and the smaller our decay_factor in the exponential
1130 		 * average.
1131 		 *
1132 		 * The uload calculation has been removed because it actually
1133 		 * makes things worse, causing processes which use less cpu
1134 		 * (such as a browser) to be pumped up and treated the same
1135 		 * as a cpu-bound process (such as a make).  The same effect
1136 		 * can occur with sufficient load without the uload
1137 		 * calculation, but occurs less quickly and takes more load.
1138 		 * In addition, the less cpu a process uses the smaller the
1139 		 * effect of the overload.
1140 		 */
1141 		if (ttlticks >= hz)
1142 			decay_factor = 1;
1143 		else
1144 			decay_factor = hz - ttlticks;
1145 
1146 		lp->lwp_estcpu = ESTCPULIM(
1147 				(lp->lwp_estcpu * ttlticks + estcpu) /
1148 				(ttlticks + 1));
1149 		dfly_resetpriority(lp);
1150 		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1151 		lp->lwp_cpticks = 0;
1152 	}
1153 }
1154 
1155 /*
1156  * Compute the priority of a process when running in user mode.
1157  * Arrange to reschedule if the resulting priority is better
1158  * than that of the current process.
1159  *
1160  * This routine may be called with any process.
1161  *
1162  * This routine is called by fork1() for initial setup with the process of
1163  * the run queue, and also may be called normally with the process on or
1164  * off the run queue.
1165  */
1166 static void
1167 dfly_resetpriority(struct lwp *lp)
1168 {
1169 	dfly_pcpu_t rdd;
1170 	int newpriority;
1171 	u_short newrqtype;
1172 	int rcpu;
1173 	int checkpri;
1174 	int estcpu;
1175 	int delta_uload;
1176 
1177 	crit_enter();
1178 
1179 	/*
1180 	 * Lock the scheduler (lp) belongs to.  This can be on a different
1181 	 * cpu.  Handle races.  This loop breaks out with the appropriate
1182 	 * rdd locked.
1183 	 */
1184 	for (;;) {
1185 		rcpu = lp->lwp_qcpu;
1186 		cpu_ccfence();
1187 		rdd = &dfly_pcpu[rcpu];
1188 		spin_lock(&rdd->spin);
1189 		if (rcpu == lp->lwp_qcpu)
1190 			break;
1191 		spin_unlock(&rdd->spin);
1192 	}
1193 
1194 	/*
1195 	 * Calculate the new priority and queue type
1196 	 */
1197 	newrqtype = lp->lwp_rtprio.type;
1198 
1199 	switch(newrqtype) {
1200 	case RTP_PRIO_REALTIME:
1201 	case RTP_PRIO_FIFO:
1202 		newpriority = PRIBASE_REALTIME +
1203 			     (lp->lwp_rtprio.prio & PRIMASK);
1204 		break;
1205 	case RTP_PRIO_NORMAL:
1206 		/*
1207 		 * Calculate the new priority.
1208 		 *
1209 		 * nice contributes up to NICE_QS queues (typ 32 - full range)
1210 		 * estcpu contributes up to EST_QS queues (typ 24)
1211 		 *
1212 		 * A nice +20 process receives 1/10 cpu vs nice+0.  Niced
1213 		 * process more than 20 apart may receive no cpu, so cpu
1214 		 * bound nice -20 can prevent a nice +5 from getting any
1215 		 * cpu.  A nice+0, being in the middle, always gets some cpu
1216 		 * no matter what.
1217 		 */
1218 		estcpu = lp->lwp_estcpu;
1219 		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
1220 			      (NICE_QS * PPQ) / PRIO_RANGE;
1221 		newpriority += estcpu * PPQ / ESTCPUPPQ;
1222 		if (newpriority < 0)
1223 			newpriority = 0;
1224 		if (newpriority >= MAXPRI)
1225 			newpriority = MAXPRI - 1;
1226 		newpriority += PRIBASE_NORMAL;
1227 		break;
1228 	case RTP_PRIO_IDLE:
1229 		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1230 		break;
1231 	case RTP_PRIO_THREAD:
1232 		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1233 		break;
1234 	default:
1235 		panic("Bad RTP_PRIO %d", newrqtype);
1236 		/* NOT REACHED */
1237 	}
1238 
1239 	/*
1240 	 * The LWKT scheduler doesn't dive usched structures, give it a hint
1241 	 * on the relative priority of user threads running in the kernel.
1242 	 * The LWKT scheduler will always ensure that a user thread running
1243 	 * in the kernel will get cpu some time, regardless of its upri,
1244 	 * but can decide not to instantly switch from one kernel or user
1245 	 * mode user thread to a kernel-mode user thread when it has a less
1246 	 * desireable user priority.
1247 	 *
1248 	 * td_upri has normal sense (higher values are more desireable), so
1249 	 * negate it (this is a different field lp->lwp_priority)
1250 	 */
1251 	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1252 
1253 	/*
1254 	 * The newpriority incorporates the queue type so do a simple masked
1255 	 * check to determine if the process has moved to another queue.  If
1256 	 * it has, and it is currently on a run queue, then move it.
1257 	 *
1258 	 * Since uload is ~PPQMASK masked, no modifications are necessary if
1259 	 * we end up in the same run queue.
1260 	 *
1261 	 * Reset rrcount if moving to a higher-priority queue, otherwise
1262 	 * retain rrcount.
1263 	 */
1264 	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1265 		if (lp->lwp_priority < newpriority)
1266 			lp->lwp_rrcount = 0;
1267 		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1268 			dfly_remrunqueue_locked(rdd, lp);
1269 			lp->lwp_priority = newpriority;
1270 			lp->lwp_rqtype = newrqtype;
1271 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1272 			dfly_setrunqueue_locked(rdd, lp);
1273 			checkpri = 1;
1274 		} else {
1275 			lp->lwp_priority = newpriority;
1276 			lp->lwp_rqtype = newrqtype;
1277 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1278 			checkpri = 0;
1279 		}
1280 	} else {
1281 		/*
1282 		 * In the same PPQ, uload cannot change.
1283 		 */
1284 		lp->lwp_priority = newpriority;
1285 		checkpri = 1;
1286 		rcpu = -1;
1287 	}
1288 
1289 	/*
1290 	 * Adjust effective load.
1291 	 *
1292 	 * Calculate load then scale up or down geometrically based on p_nice.
1293 	 * Processes niced up (positive) are less important, and processes
1294 	 * niced downard (negative) are more important.  The higher the uload,
1295 	 * the more important the thread.
1296 	 */
1297 	/* 0-511, 0-100% cpu */
1298 	delta_uload = lptouload(lp);
1299 	delta_uload -= lp->lwp_uload;
1300 	if (lp->lwp_uload + delta_uload < -32767) {
1301 		delta_uload = -32768 - lp->lwp_uload;
1302 	} else if (lp->lwp_uload + delta_uload > 32767) {
1303 		delta_uload = 32767 - lp->lwp_uload;
1304 	}
1305 	lp->lwp_uload += delta_uload;
1306 	if (lp->lwp_mpflags & LWP_MP_ULOAD)
1307 		atomic_add_long(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1308 
1309 	/*
1310 	 * Determine if we need to reschedule the target cpu.  This only
1311 	 * occurs if the LWP is already on a scheduler queue, which means
1312 	 * that idle cpu notification has already occured.  At most we
1313 	 * need only issue a need_user_resched() on the appropriate cpu.
1314 	 *
1315 	 * The LWP may be owned by a CPU different from the current one,
1316 	 * in which case dd->uschedcp may be modified without an MP lock
1317 	 * or a spinlock held.  The worst that happens is that the code
1318 	 * below causes a spurious need_user_resched() on the target CPU
1319 	 * and dd->pri to be wrong for a short period of time, both of
1320 	 * which are harmless.
1321 	 *
1322 	 * If checkpri is 0 we are adjusting the priority of the current
1323 	 * process, possibly higher (less desireable), so ignore the upri
1324 	 * check which will fail in that case.
1325 	 */
1326 	if (rcpu >= 0) {
1327 		if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1328 		    (checkpri == 0 ||
1329 		     (rdd->upri & ~PRIMASK) >
1330 		     (lp->lwp_priority & ~PRIMASK))) {
1331 			if (rcpu == mycpu->gd_cpuid) {
1332 				spin_unlock(&rdd->spin);
1333 				need_user_resched();
1334 			} else {
1335 				spin_unlock(&rdd->spin);
1336 				lwkt_send_ipiq(globaldata_find(rcpu),
1337 					       dfly_need_user_resched_remote,
1338 					       NULL);
1339 			}
1340 		} else {
1341 			spin_unlock(&rdd->spin);
1342 		}
1343 	} else {
1344 		spin_unlock(&rdd->spin);
1345 	}
1346 	crit_exit();
1347 }
1348 
1349 static
1350 void
1351 dfly_yield(struct lwp *lp)
1352 {
1353 	if (lp->lwp_qcpu != mycpu->gd_cpuid)
1354 		return;
1355 	KKASSERT(lp == curthread->td_lwp);
1356 
1357 	/*
1358 	 * Don't set need_user_resched() or mess with rrcount or anything.
1359 	 * the TDF flag will override everything as long as we release.
1360 	 */
1361 	atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1362 	dfly_release_curproc(lp);
1363 }
1364 
1365 /*
1366  * Thread was forcefully migrated to another cpu.  Normally forced migrations
1367  * are used for iterations and the kernel returns to the original cpu before
1368  * returning and this is not needed.  However, if the kernel migrates a
1369  * thread to another cpu and wants to leave it there, it has to call this
1370  * scheduler helper.
1371  *
1372  * Note that the lwkt_migratecpu() function also released the thread, so
1373  * we don't have to worry about that.
1374  */
1375 static
1376 void
1377 dfly_changedcpu(struct lwp *lp)
1378 {
1379 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1380 	dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1381 
1382 	if (dd != rdd) {
1383 		spin_lock(&dd->spin);
1384 		dfly_changeqcpu_locked(lp, dd, rdd);
1385 		spin_unlock(&dd->spin);
1386 	}
1387 }
1388 
1389 /*
1390  * Called from fork1() when a new child process is being created.
1391  *
1392  * Give the child process an initial estcpu that is more batch then
1393  * its parent and dock the parent for the fork (but do not
1394  * reschedule the parent).
1395  *
1396  * fast
1397  *
1398  * XXX lwp should be "spawning" instead of "forking"
1399  */
1400 static void
1401 dfly_forking(struct lwp *plp, struct lwp *lp)
1402 {
1403 	int estcpu;
1404 
1405 	/*
1406 	 * Put the child 4 queue slots (out of 32) higher than the parent
1407 	 * (less desireable than the parent).
1408 	 */
1409 	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu +
1410 				   ESTCPUPPQ * usched_dfly_forkbias);
1411 	lp->lwp_forked = 1;
1412 	lp->lwp_estfast = 0;
1413 
1414 	/*
1415 	 * Even though the lp will be scheduled specially the first time
1416 	 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1417 	 * to avoid favoring a fixed cpu.
1418 	 */
1419 #if 0
1420 	static uint16_t save_cpu;
1421 	lp->lwp_qcpu = ++save_cpu % ncpus;
1422 #else
1423 	lp->lwp_qcpu = plp->lwp_qcpu;
1424 	if (CPUMASK_TESTBIT(lp->lwp_cpumask, lp->lwp_qcpu) == 0)
1425 		lp->lwp_qcpu = BSFCPUMASK(lp->lwp_cpumask);
1426 #endif
1427 
1428 	/*
1429 	 * Dock the parent a cost for the fork, protecting us from fork
1430 	 * bombs.  If the parent is forking quickly this makes both the
1431 	 * parent and child more batchy.
1432 	 */
1433 	estcpu = plp->lwp_estcpu + ESTCPUPPQ / 16;
1434 	plp->lwp_estcpu = ESTCPULIM(estcpu);
1435 }
1436 
1437 /*
1438  * Called when a lwp is being removed from this scheduler, typically
1439  * during lwp_exit().  We have to clean out any ULOAD accounting before
1440  * we can let the lp go.  The dd->spin lock is not needed for uload
1441  * updates.
1442  *
1443  * Scheduler dequeueing has already occurred, no further action in that
1444  * regard is needed.
1445  */
1446 static void
1447 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1448 {
1449 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1450 
1451 	if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1452 		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1453 		atomic_add_long(&dd->uload, -lp->lwp_uload);
1454 		atomic_add_int(&dd->ucount, -1);
1455 	}
1456 }
1457 
1458 /*
1459  * This function cannot block in any way, but spinlocks are ok.
1460  *
1461  * Update the uload based on the state of the thread (whether it is going
1462  * to sleep or running again).  The uload is meant to be a longer-term
1463  * load and not an instantanious load.
1464  */
1465 static void
1466 dfly_uload_update(struct lwp *lp)
1467 {
1468 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1469 
1470 	if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1471 		if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1472 			spin_lock(&dd->spin);
1473 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1474 				atomic_set_int(&lp->lwp_mpflags,
1475 					       LWP_MP_ULOAD);
1476 				atomic_add_long(&dd->uload, lp->lwp_uload);
1477 				atomic_add_int(&dd->ucount, 1);
1478 			}
1479 			spin_unlock(&dd->spin);
1480 		}
1481 	} else if (lp->lwp_slptime > 0) {
1482 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1483 			spin_lock(&dd->spin);
1484 			if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1485 				atomic_clear_int(&lp->lwp_mpflags,
1486 						 LWP_MP_ULOAD);
1487 				atomic_add_long(&dd->uload, -lp->lwp_uload);
1488 				atomic_add_int(&dd->ucount, -1);
1489 			}
1490 			spin_unlock(&dd->spin);
1491 		}
1492 	}
1493 }
1494 
1495 /*
1496  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1497  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1498  * has a better or equal priority then the process that would otherwise be
1499  * chosen, NULL is returned.
1500  *
1501  * Until we fix the RUNQ code the chklp test has to be strict or we may
1502  * bounce between processes trying to acquire the current process designation.
1503  *
1504  * Must be called with rdd->spin locked.  The spinlock is left intact through
1505  * the entire routine.  dd->spin does not have to be locked.
1506  *
1507  * If worst is non-zero this function finds the worst thread instead of the
1508  * best thread (used by the schedulerclock-based rover).
1509  */
1510 static
1511 struct lwp *
1512 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1513 		       struct lwp *chklp, int worst)
1514 {
1515 	struct lwp *lp;
1516 	struct rq *q;
1517 	u_int32_t *which;
1518 	u_int32_t pri;
1519 	u_int32_t rtqbits;
1520 	u_int32_t tsqbits;
1521 	u_int32_t idqbits;
1522 
1523 	/*
1524 	 * Select best or worst process.  Once selected, clear the bit
1525 	 * in our local variable (idqbits, tsqbits, or rtqbits) just
1526 	 * in case we have to loop.
1527 	 */
1528 	rtqbits = rdd->rtqueuebits;
1529 	tsqbits = rdd->queuebits;
1530 	idqbits = rdd->idqueuebits;
1531 
1532 loopfar:
1533 	if (worst) {
1534 		if (idqbits) {
1535 			pri = bsrl(idqbits);
1536 			idqbits &= ~(1U << pri);
1537 			q = &rdd->idqueues[pri];
1538 			which = &rdd->idqueuebits;
1539 		} else if (tsqbits) {
1540 			pri = bsrl(tsqbits);
1541 			tsqbits &= ~(1U << pri);
1542 			q = &rdd->queues[pri];
1543 			which = &rdd->queuebits;
1544 		} else if (rtqbits) {
1545 			pri = bsrl(rtqbits);
1546 			rtqbits &= ~(1U << pri);
1547 			q = &rdd->rtqueues[pri];
1548 			which = &rdd->rtqueuebits;
1549 		} else {
1550 			return (NULL);
1551 		}
1552 		lp = TAILQ_LAST(q, rq);
1553 	} else {
1554 		if (rtqbits) {
1555 			pri = bsfl(rtqbits);
1556 			rtqbits &= ~(1U << pri);
1557 			q = &rdd->rtqueues[pri];
1558 			which = &rdd->rtqueuebits;
1559 		} else if (tsqbits) {
1560 			pri = bsfl(tsqbits);
1561 			tsqbits &= ~(1U << pri);
1562 			q = &rdd->queues[pri];
1563 			which = &rdd->queuebits;
1564 		} else if (idqbits) {
1565 			pri = bsfl(idqbits);
1566 			idqbits &= ~(1U << pri);
1567 			q = &rdd->idqueues[pri];
1568 			which = &rdd->idqueuebits;
1569 		} else {
1570 			return (NULL);
1571 		}
1572 		lp = TAILQ_FIRST(q);
1573 	}
1574 	KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1575 
1576 loopnear:
1577 	/*
1578 	 * If the passed lwp <chklp> is reasonably close to the selected
1579 	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1580 	 *
1581 	 * Note that we must error on the side of <chklp> to avoid bouncing
1582 	 * between threads in the acquire code.
1583 	 */
1584 	if (chklp) {
1585 		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1586 			return(NULL);
1587 	}
1588 
1589 	/*
1590 	 * When rdd != dd, we have to make sure that the process we
1591 	 * are pulling is allow to run on our cpu.  This alternative
1592 	 * path is a bit more expensive but its not considered to be
1593 	 * in the critical path.
1594 	 */
1595 	if (rdd != dd && CPUMASK_TESTBIT(lp->lwp_cpumask, dd->cpuid) == 0) {
1596 		if (worst)
1597 			lp = TAILQ_PREV(lp, rq, lwp_procq);
1598 		else
1599 			lp = TAILQ_NEXT(lp, lwp_procq);
1600 		if (lp)
1601 			goto loopnear;
1602 		goto loopfar;
1603 	}
1604 
1605 	KTR_COND_LOG(usched_chooseproc,
1606 	    lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1607 	    lp->lwp_proc->p_pid,
1608 	    lp->lwp_thread->td_gd->gd_cpuid,
1609 	    mycpu->gd_cpuid);
1610 
1611 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1612 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1613 	TAILQ_REMOVE(q, lp, lwp_procq);
1614 	--rdd->runqcount;
1615 	if (TAILQ_EMPTY(q))
1616 		*which &= ~(1 << pri);
1617 
1618 	/*
1619 	 * If we are choosing a process from rdd with the intent to
1620 	 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1621 	 * is still held.
1622 	 */
1623 	if (rdd != dd) {
1624 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1625 			atomic_add_long(&rdd->uload, -lp->lwp_uload);
1626 			atomic_add_int(&rdd->ucount, -1);
1627 		}
1628 		lp->lwp_qcpu = dd->cpuid;
1629 		atomic_add_long(&dd->uload, lp->lwp_uload);
1630 		atomic_add_int(&dd->ucount, 1);
1631 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1632 	}
1633 	return lp;
1634 }
1635 
1636 /*
1637  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1638  *
1639  * Choose a cpu node to schedule lp on, hopefully nearby its current
1640  * node.
1641  *
1642  * We give the current node a modest advantage for obvious reasons.
1643  *
1644  * We also give the node the thread was woken up FROM a slight advantage
1645  * in order to try to schedule paired threads which synchronize/block waiting
1646  * for each other fairly close to each other.  Similarly in a network setting
1647  * this feature will also attempt to place a user process near the kernel
1648  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1649  * algorithm as it heuristically groups synchronizing processes for locality
1650  * of reference in multi-socket systems.
1651  *
1652  * We check against running processes and give a big advantage if there
1653  * are none running.
1654  *
1655  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1656  *
1657  * When the topology is known choose a cpu whos group has, in aggregate,
1658  * has the lowest weighted load.
1659  */
1660 static
1661 dfly_pcpu_t
1662 dfly_choose_best_queue(struct lwp *lp)
1663 {
1664 	cpumask_t wakemask;
1665 	cpumask_t mask;
1666 	cpu_node_t *cpup;
1667 	cpu_node_t *cpun;
1668 	cpu_node_t *cpub;
1669 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1670 	dfly_pcpu_t rdd;
1671 	int wakecpu;
1672 	int cpuid;
1673 	int n;
1674 	int loadav;
1675 	long load;
1676 	long lowest_load;
1677 
1678 	/*
1679 	 * When the topology is unknown choose a random cpu that is hopefully
1680 	 * idle.
1681 	 */
1682 	if (dd->cpunode == NULL)
1683 		return (dfly_choose_queue_simple(dd, lp));
1684 
1685 	loadav = (averunnable.ldavg[0] + FSCALE / 2) >> FSHIFT;
1686 
1687 	/*
1688 	 * Pairing mask
1689 	 */
1690 	if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1691 		wakemask = dfly_pcpu[wakecpu].cpumask;
1692 	else
1693 		CPUMASK_ASSZERO(wakemask);
1694 
1695 	if (usched_dfly_debug == lp->lwp_proc->p_pid)
1696 		kprintf("choosebest wakefromcpu %d:\n",
1697 			lp->lwp_thread->td_wakefromcpu);
1698 
1699 	/*
1700 	 * When the topology is known choose a cpu whos group has, in
1701 	 * aggregate, has the lowest weighted load.
1702 	 */
1703 	cpup = root_cpu_node;
1704 	rdd = dd;
1705 
1706 	while (cpup) {
1707 		/*
1708 		 * Degenerate case super-root
1709 		 */
1710 		if (cpup->child_no == 1) {
1711 			cpup = cpup->child_node[0];
1712 			continue;
1713 		}
1714 
1715 		/*
1716 		 * Terminal cpunode
1717 		 */
1718 		if (cpup->child_no == 0) {
1719 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1720 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1721 				kprintf("  last cpu %d\n", rdd->cpuid);
1722 			break;
1723 		}
1724 
1725 		cpub = NULL;
1726 		lowest_load = 0x7FFFFFFFFFFFFFFFLL;
1727 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
1728 			kprintf("  reset lowest_load for scan\n");
1729 
1730 		for (n = 0; n < cpup->child_no; ++n) {
1731 			/*
1732 			 * Accumulate load information for all cpus
1733 			 * which are members of this node.
1734 			 */
1735 			int count;
1736 
1737 			cpun = cpup->child_node[n];
1738 			mask = cpun->members;
1739 			CPUMASK_ANDMASK(mask, usched_global_cpumask);
1740 			CPUMASK_ANDMASK(mask, smp_active_mask);
1741 			CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1742 			if (CPUMASK_TESTZERO(mask))
1743 				continue;
1744 
1745 			load = 0;
1746 			count = 0;
1747 
1748 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1749 				kprintf("  mask:");
1750 			while (CPUMASK_TESTNZERO(mask)) {
1751 				cpuid = BSFCPUMASK(mask);
1752 				rdd = &dfly_pcpu[cpuid];
1753 
1754 				if (usched_dfly_debug == lp->lwp_proc->p_pid)
1755 					kprintf(" %d", cpuid);
1756 
1757 				/*
1758 				 * Cumulative load for members.  Note that
1759 				 * if (lp) is part of the group, lp's
1760 				 * contribution will be backed out later.
1761 				 */
1762 				load += rdd->uload;
1763 				load += rdd->ucount *
1764 					usched_dfly_weight3;
1765 
1766 				/*
1767 				 * If the node is running a less important
1768 				 * thread than our thread, give it an
1769 				 * advantage.  Witha high-enough weighting
1770 				 * this can override most other considerations
1771 				 * to provide ultimate priority fairness at
1772 				 * the cost of localization.
1773 				 */
1774 				if ((rdd->upri & ~PPQMASK) >
1775 				    (lp->lwp_priority & ~PPQMASK)) {
1776 					load -= usched_dfly_weight4;
1777 				}
1778 
1779 #if 0
1780 				if (rdd->uschedcp == NULL &&
1781 				    rdd->runqcount == 0 &&
1782 				    rdd->gd->gd_tdrunqcount == 0
1783 				) {
1784 					load += rdd->uload / 2;
1785 					load += rdd->ucount *
1786 						usched_dfly_weight3 / 2;
1787 				} else {
1788 					load += rdd->uload;
1789 					load += rdd->ucount *
1790 						usched_dfly_weight3;
1791 				}
1792 #endif
1793 				CPUMASK_NANDBIT(mask, cpuid);
1794 				++count;
1795 			}
1796 
1797 			/*
1798 			 * Compensate if the lp is already accounted for in
1799 			 * the aggregate uload for this mask set.  We want
1800 			 * to calculate the loads as if lp were not present,
1801 			 * otherwise the calculation is bogus.
1802 			 */
1803 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1804 			    CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1805 				load -= lp->lwp_uload;
1806 				load -= usched_dfly_weight3;	/* ucount */
1807 			}
1808 
1809 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1810 				kprintf("\n  accum_start c=%d ld=%ld "
1811 					"cpu=%d ld/cnt=%ld ",
1812 					count, load, rdd->cpuid,
1813 					load / count);
1814 
1815 			/*
1816 			 * load is the aggregate load of count CPUs in the
1817 			 * group.  For the weightings to work as intended,
1818 			 * we want an average per-cpu load.
1819 			 */
1820 			load = load / count;
1821 
1822 			/*
1823 			 * Advantage the cpu group (lp) is already on.
1824 			 */
1825 			if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1826 				load -= usched_dfly_weight1;
1827 
1828 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1829 				kprintf("B:%ld ", load);
1830 
1831 			/*
1832 			 * Advantage nodes with more memory
1833 			 */
1834 			if (usched_dfly_node_mem) {
1835 				load -= cpun->phys_mem * usched_dfly_weight5 /
1836 					usched_dfly_node_mem;
1837 			}
1838 
1839 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1840 				kprintf("C:%ld ", load);
1841 
1842 			/*
1843 			 * Advantage the cpu group we desire to pair (lp)
1844 			 * to, but Disadvantage hyperthreads on the same
1845 			 * core, or the same thread as the ipc peer.
1846 			 *
1847 			 * Under very heavy loads it is usually beneficial
1848 			 * to set kern.usched_dfly.ipc_smt to 1, and under
1849 			 * extreme loads it might be beneficial to also set
1850 			 * kern.usched_dfly.ipc_same to 1.
1851 			 *
1852 			 * load+    disadvantage
1853 			 * load-    advantage
1854 			 */
1855 			if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1856 				if (cpun->child_no) {
1857 					if (cpun->type == CORE_LEVEL &&
1858 					    usched_dfly_ipc_smt < 0 &&
1859 					    loadav >= (ncpus >> 1)) {
1860 						/*
1861 						 * Advantage at higher levels
1862 						 * of the topology.
1863 						 */
1864 						load -= usched_dfly_weight2;
1865 					} else if (cpun->type == CORE_LEVEL &&
1866 						   usched_dfly_ipc_smt == 0) {
1867 						/*
1868 						 * Disadvantage the same core
1869 						 * when there are hyperthreads.
1870 						 */
1871 						load += usched_dfly_weight2;
1872 					} else {
1873 						/*
1874 						 * Advantage at higher levels
1875 						 * of the topology.
1876 						 */
1877 						load -= usched_dfly_weight2;
1878 					}
1879 				} else {
1880 					/*
1881 					 * Disadvantage the last level (core
1882 					 * or hyperthread).  Try to schedule
1883 					 * the ipc
1884 					 */
1885 					if (usched_dfly_ipc_same < 0 &&
1886 					    loadav >= ncpus) {
1887 						load -= usched_dfly_weight2;
1888 					} else if (usched_dfly_ipc_same) {
1889 						load -= usched_dfly_weight2;
1890 					} else {
1891 						load += usched_dfly_weight2;
1892 					}
1893 				}
1894 #if 0
1895 				if (cpun->child_no != 0) {
1896 					/* advantage */
1897 					load -= usched_dfly_weight2;
1898 				} else {
1899 					/*
1900 					 * 0x10 (disadvantage)
1901 					 * 0x00 (advantage)   - default
1902 					 */
1903 					if (usched_dfly_features & 0x10)
1904 						load += usched_dfly_weight2;
1905 					else
1906 						load -= usched_dfly_weight2;
1907 				}
1908 #endif
1909 			}
1910 
1911 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1912 				kprintf("D:%ld ", load);
1913 
1914 			/*
1915 			 * Calculate the best load
1916 			 */
1917 			if (cpub == NULL || lowest_load > load ||
1918 			    (lowest_load == load &&
1919 			     CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1920 			) {
1921 				lowest_load = load;
1922 				cpub = cpun;
1923 			}
1924 
1925 			if (usched_dfly_debug == lp->lwp_proc->p_pid)
1926 				kprintf("low=%ld]\n", lowest_load);
1927 		}
1928 		cpup = cpub;
1929 	}
1930 	/* Dispatch this outcast to a proper CPU. */
1931 	if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
1932 		rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];
1933 	if (usched_dfly_chooser > 0) {
1934 		--usched_dfly_chooser;		/* only N lines */
1935 		kprintf("lp %02d->%02d %s\n",
1936 			lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1937 	}
1938 	if (usched_dfly_debug == lp->lwp_proc->p_pid)
1939 		kprintf("final cpu %d\n", rdd->cpuid);
1940 	return (rdd);
1941 }
1942 
1943 /*
1944  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1945  *
1946  * Choose the worst queue close to dd's cpu node with a non-empty runq
1947  * that is NOT dd.
1948  *
1949  * This is used by the thread chooser when the current cpu's queues are
1950  * empty to steal a thread from another cpu's queue.  We want to offload
1951  * the most heavily-loaded queue.
1952  *
1953  * However, we do not want to steal from far-away nodes who themselves
1954  * have idle cpu's that are more suitable to distribute the far-away
1955  * thread to.
1956  */
1957 static
1958 dfly_pcpu_t
1959 dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit)
1960 {
1961 	cpumask_t mask;
1962 	cpu_node_t *cpup;
1963 	cpu_node_t *cpun;
1964 	cpu_node_t *cpub;
1965 	dfly_pcpu_t rdd;
1966 	int cpuid;
1967 	int n;
1968 	long load;
1969 	long highest_load;
1970 #if 0
1971 	int pri;
1972 	int hpri;
1973 #endif
1974 
1975 	/*
1976 	 * When the topology is unknown choose a random cpu that is hopefully
1977 	 * idle.
1978 	 */
1979 	if (dd->cpunode == NULL) {
1980 		return (NULL);
1981 	}
1982 
1983 	/*
1984 	 * When the topology is known choose a cpu whos group has, in
1985 	 * aggregate, has the highest weighted load.
1986 	 */
1987 	cpup = root_cpu_node;
1988 	rdd = dd;
1989 	while (cpup) {
1990 		/*
1991 		 * Degenerate case super-root
1992 		 */
1993 		if (cpup->child_no == 1) {
1994 			cpup = cpup->child_node[0];
1995 			continue;
1996 		}
1997 
1998 		/*
1999 		 * Terminal cpunode
2000 		 */
2001 		if (cpup->child_no == 0) {
2002 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
2003 			break;
2004 		}
2005 
2006 		cpub = NULL;
2007 		highest_load = -0x7FFFFFFFFFFFFFFFLL;
2008 
2009 		for (n = 0; n < cpup->child_no; ++n) {
2010 			/*
2011 			 * Accumulate load information for all cpus
2012 			 * which are members of this node.
2013 			 */
2014 			int count;
2015 
2016 			cpun = cpup->child_node[n];
2017 			mask = cpun->members;
2018 			CPUMASK_ANDMASK(mask, usched_global_cpumask);
2019 			CPUMASK_ANDMASK(mask, smp_active_mask);
2020 			if (CPUMASK_TESTZERO(mask))
2021 				continue;
2022 
2023 			load = 0;
2024 			count = 0;
2025 
2026 			while (CPUMASK_TESTNZERO(mask)) {
2027 				cpuid = BSFCPUMASK(mask);
2028 				rdd = &dfly_pcpu[cpuid];
2029 
2030 				load += rdd->uload;
2031 				load += rdd->ucount * usched_dfly_weight3;
2032 
2033 #if 0
2034 				if (rdd->uschedcp == NULL &&
2035 				    rdd->runqcount == 0 &&
2036 				    rdd->gd->gd_tdrunqcount == 0
2037 				) {
2038 					load += rdd->uload / 2;
2039 					load += rdd->ucount *
2040 						usched_dfly_weight3 / 2;
2041 				} else {
2042 					load += rdd->uload;
2043 					load += rdd->ucount *
2044 						usched_dfly_weight3;
2045 				}
2046 #endif
2047 				CPUMASK_NANDBIT(mask, cpuid);
2048 				++count;
2049 			}
2050 			load /= count;
2051 
2052 			/*
2053 			 * Advantage the cpu group (dd) is already on.
2054 			 *
2055 			 * When choosing the worst queue we reverse the
2056 			 * sign, but only count half the weight.
2057 			 *
2058 			 * weight1 needs to be high enough to be stable,
2059 			 * but this can also cause it to be too sticky,
2060 			 * so the iterator which rebalances the load sets
2061 			 * forceit to ignore it.
2062 			 */
2063 			if (forceit == 0 &&
2064 			    CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
2065 				load += usched_dfly_weight1 / 2;
2066 			}
2067 
2068 			/*
2069 			 * Disadvantage nodes with more memory (same sign).
2070 			 */
2071 			if (usched_dfly_node_mem) {
2072 				load -= cpun->phys_mem * usched_dfly_weight5 /
2073 					usched_dfly_node_mem;
2074 			}
2075 
2076 
2077 			/*
2078 			 * The best candidate is the one with the worst
2079 			 * (highest) load.
2080 			 */
2081 			if (cpub == NULL || highest_load < load ||
2082 			    (highest_load == load &&
2083 			     CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
2084 				highest_load = load;
2085 				cpub = cpun;
2086 			}
2087 		}
2088 		cpup = cpub;
2089 	}
2090 
2091 	/*
2092 	 * We never return our own node (dd), and only return a remote
2093 	 * node if it's load is significantly worse than ours (i.e. where
2094 	 * stealing a thread would be considered reasonable).
2095 	 *
2096 	 * This also helps us avoid breaking paired threads apart which
2097 	 * can have disastrous effects on performance.
2098 	 */
2099 	if (rdd == dd)
2100 		return(NULL);
2101 
2102 #if 0
2103 	hpri = 0;
2104 	if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
2105 		hpri = pri;
2106 	if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
2107 		hpri = pri;
2108 	if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
2109 		hpri = pri;
2110 	hpri *= PPQ;
2111 	if (rdd->uload - hpri < dd->uload + hpri)
2112 		return(NULL);
2113 #endif
2114 	return (rdd);
2115 }
2116 
2117 static
2118 dfly_pcpu_t
2119 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
2120 {
2121 	dfly_pcpu_t rdd;
2122 	cpumask_t tmpmask;
2123 	cpumask_t mask;
2124 	int cpubase;
2125 	int cpuid;
2126 
2127 	/*
2128 	 * Fallback to the original heuristic, select random cpu,
2129 	 * first checking the cpus not currently running a user thread.
2130 	 *
2131 	 * Use cpuid as the base cpu in our scan, first checking
2132 	 * cpuid...(ncpus-1), then 0...(cpuid-1).  This avoid favoring
2133 	 * lower-numbered cpus.
2134 	 */
2135 	++dd->scancpu;		/* SMP race ok */
2136 	mask = dfly_rdyprocmask;
2137 	CPUMASK_NANDMASK(mask, dfly_curprocmask);
2138 	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2139 	CPUMASK_ANDMASK(mask, smp_active_mask);
2140 	CPUMASK_ANDMASK(mask, usched_global_cpumask);
2141 
2142 	cpubase = (int)(dd->scancpu % ncpus);
2143 	CPUMASK_ASSBMASK(tmpmask, cpubase);
2144 	CPUMASK_INVMASK(tmpmask);
2145 	CPUMASK_ANDMASK(tmpmask, mask);
2146 	while (CPUMASK_TESTNZERO(tmpmask)) {
2147 		cpuid = BSFCPUMASK(tmpmask);
2148 		rdd = &dfly_pcpu[cpuid];
2149 
2150 		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2151 			goto found;
2152 		CPUMASK_NANDBIT(tmpmask, cpuid);
2153 	}
2154 
2155 	CPUMASK_ASSBMASK(tmpmask, cpubase);
2156 	CPUMASK_ANDMASK(tmpmask, mask);
2157 	while (CPUMASK_TESTNZERO(tmpmask)) {
2158 		cpuid = BSFCPUMASK(tmpmask);
2159 		rdd = &dfly_pcpu[cpuid];
2160 
2161 		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2162 			goto found;
2163 		CPUMASK_NANDBIT(tmpmask, cpuid);
2164 	}
2165 
2166 	/*
2167 	 * Then cpus which might have a currently running lp
2168 	 */
2169 	mask = dfly_rdyprocmask;
2170 	CPUMASK_ANDMASK(mask, dfly_curprocmask);
2171 	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2172 	CPUMASK_ANDMASK(mask, smp_active_mask);
2173 	CPUMASK_ANDMASK(mask, usched_global_cpumask);
2174 
2175 	CPUMASK_ASSBMASK(tmpmask, cpubase);
2176 	CPUMASK_INVMASK(tmpmask);
2177 	CPUMASK_ANDMASK(tmpmask, mask);
2178 	while (CPUMASK_TESTNZERO(tmpmask)) {
2179 		cpuid = BSFCPUMASK(tmpmask);
2180 		rdd = &dfly_pcpu[cpuid];
2181 
2182 		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2183 			goto found;
2184 		CPUMASK_NANDBIT(tmpmask, cpuid);
2185 	}
2186 
2187 	CPUMASK_ASSBMASK(tmpmask, cpubase);
2188 	CPUMASK_ANDMASK(tmpmask, mask);
2189 	while (CPUMASK_TESTNZERO(tmpmask)) {
2190 		cpuid = BSFCPUMASK(tmpmask);
2191 		rdd = &dfly_pcpu[cpuid];
2192 
2193 		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2194 			goto found;
2195 		CPUMASK_NANDBIT(tmpmask, cpuid);
2196 	}
2197 
2198 	/*
2199 	 * If we cannot find a suitable cpu we round-robin using scancpu.
2200 	 * Other cpus will pickup as they release their current lwps or
2201 	 * become ready.
2202 	 *
2203 	 * Avoid a degenerate system lockup case if usched_global_cpumask
2204 	 * is set to 0 or otherwise does not cover lwp_cpumask.
2205 	 *
2206 	 * We only kick the target helper thread in this case, we do not
2207 	 * set the user resched flag because
2208 	 */
2209 	cpuid = cpubase;
2210 	if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
2211 		cpuid = BSFCPUMASK(lp->lwp_cpumask);
2212 	else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
2213 		cpuid = 0;
2214 	rdd = &dfly_pcpu[cpuid];
2215 found:
2216 	return (rdd);
2217 }
2218 
2219 static
2220 void
2221 dfly_need_user_resched_remote(void *dummy)
2222 {
2223 	globaldata_t gd = mycpu;
2224 	dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
2225 
2226 	/*
2227 	 * Flag reschedule needed
2228 	 */
2229 	need_user_resched();
2230 
2231 	/*
2232 	 * If no user thread is currently running we need to kick the helper
2233 	 * on our cpu to recover.  Otherwise the cpu will never schedule
2234 	 * anything again.
2235 	 *
2236 	 * We cannot schedule the process ourselves because this is an
2237 	 * IPI callback and we cannot acquire spinlocks in an IPI callback.
2238 	 *
2239 	 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
2240 	 */
2241 	if (dd->uschedcp == NULL && (dd->flags & DFLY_PCPU_RDYMASK)) {
2242 		ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
2243 		dd->flags &= ~DFLY_PCPU_RDYMASK;
2244 		wakeup_mycpu(dd->helper_thread);
2245 	}
2246 }
2247 
2248 /*
2249  * dfly_remrunqueue_locked() removes a given process from the run queue
2250  * that it is on, clearing the queue busy bit if it becomes empty.
2251  *
2252  * Note that user process scheduler is different from the LWKT schedule.
2253  * The user process scheduler only manages user processes but it uses LWKT
2254  * underneath, and a user process operating in the kernel will often be
2255  * 'released' from our management.
2256  *
2257  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
2258  * to sleep or the lwp is moved to a different runq.
2259  */
2260 static void
2261 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2262 {
2263 	struct rq *q;
2264 	u_int32_t *which;
2265 	u_int8_t pri;
2266 
2267 	KKASSERT(rdd->runqcount >= 0);
2268 
2269 	pri = lp->lwp_rqindex;
2270 
2271 	switch(lp->lwp_rqtype) {
2272 	case RTP_PRIO_NORMAL:
2273 		q = &rdd->queues[pri];
2274 		which = &rdd->queuebits;
2275 		break;
2276 	case RTP_PRIO_REALTIME:
2277 	case RTP_PRIO_FIFO:
2278 		q = &rdd->rtqueues[pri];
2279 		which = &rdd->rtqueuebits;
2280 		break;
2281 	case RTP_PRIO_IDLE:
2282 		q = &rdd->idqueues[pri];
2283 		which = &rdd->idqueuebits;
2284 		break;
2285 	default:
2286 		panic("remrunqueue: invalid rtprio type");
2287 		/* NOT REACHED */
2288 	}
2289 	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
2290 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2291 	TAILQ_REMOVE(q, lp, lwp_procq);
2292 	--rdd->runqcount;
2293 	if (TAILQ_EMPTY(q)) {
2294 		KASSERT((*which & (1 << pri)) != 0,
2295 			("remrunqueue: remove from empty queue"));
2296 		*which &= ~(1 << pri);
2297 	}
2298 }
2299 
2300 /*
2301  * dfly_setrunqueue_locked()
2302  *
2303  * Add a process whos rqtype and rqindex had previously been calculated
2304  * onto the appropriate run queue.   Determine if the addition requires
2305  * a reschedule on a cpu and return the cpuid or -1.
2306  *
2307  * NOTE: 	  Lower priorities are better priorities.
2308  *
2309  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
2310  *		  sum of the rough lwp_priority for all running and runnable
2311  *		  processes.  Lower priority processes (higher lwp_priority
2312  *		  values) actually DO count as more load, not less, because
2313  *		  these are the programs which require the most care with
2314  *		  regards to cpu selection.
2315  */
2316 static void
2317 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2318 {
2319 	u_int32_t *which;
2320 	struct rq *q;
2321 	int pri;
2322 
2323 	KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2324 
2325 	if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2326 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2327 		atomic_add_long(&rdd->uload, lp->lwp_uload);
2328 		atomic_add_int(&rdd->ucount, 1);
2329 	}
2330 
2331 	pri = lp->lwp_rqindex;
2332 
2333 	switch(lp->lwp_rqtype) {
2334 	case RTP_PRIO_NORMAL:
2335 		q = &rdd->queues[pri];
2336 		which = &rdd->queuebits;
2337 		break;
2338 	case RTP_PRIO_REALTIME:
2339 	case RTP_PRIO_FIFO:
2340 		q = &rdd->rtqueues[pri];
2341 		which = &rdd->rtqueuebits;
2342 		break;
2343 	case RTP_PRIO_IDLE:
2344 		q = &rdd->idqueues[pri];
2345 		which = &rdd->idqueuebits;
2346 		break;
2347 	default:
2348 		panic("remrunqueue: invalid rtprio type");
2349 		/* NOT REACHED */
2350 	}
2351 
2352 	/*
2353 	 * Place us on the selected queue.  Determine if we should be
2354 	 * placed at the head of the queue or at the end.
2355 	 *
2356 	 * We are placed at the tail if our round-robin count has expired,
2357 	 * or is about to expire and the system thinks its a good place to
2358 	 * round-robin, or there is already a next thread on the queue
2359 	 * (it might be trying to pick up where it left off and we don't
2360 	 * want to interfere).
2361 	 */
2362 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2363 	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2364 	++rdd->runqcount;
2365 
2366 	if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2367 	    (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2368 	     (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2369 	) {
2370 		/*
2371 		 * Place on tail
2372 		 */
2373 		atomic_clear_int(&lp->lwp_thread->td_mpflags,
2374 				 TDF_MP_BATCH_DEMARC);
2375 		lp->lwp_rrcount = 0;
2376 		TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2377 	} else {
2378 		/*
2379 		 * Retain rrcount and place on head.  Count is retained
2380 		 * even if the queue is empty.
2381 		 */
2382 		TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2383 	}
2384 	*which |= 1 << pri;
2385 }
2386 
2387 /*
2388  * For SMP systems a user scheduler helper thread is created for each
2389  * cpu and is used to allow one cpu to wakeup another for the purposes of
2390  * scheduling userland threads from setrunqueue().
2391  *
2392  * UP systems do not need the helper since there is only one cpu.
2393  *
2394  * We can't use the idle thread for this because we might block.
2395  * Additionally, doing things this way allows us to HLT idle cpus
2396  * on MP systems.
2397  */
2398 static void
2399 dfly_helper_thread(void *dummy)
2400 {
2401     globaldata_t gd;
2402     dfly_pcpu_t dd;
2403     dfly_pcpu_t rdd;
2404     struct lwp *nlp;
2405     cpumask_t mask;
2406     int cpuid;
2407 
2408     gd = mycpu;
2409     cpuid = gd->gd_cpuid;	/* doesn't change */
2410     mask = gd->gd_cpumask;	/* doesn't change */
2411     dd = &dfly_pcpu[cpuid];
2412 
2413     /*
2414      * Initial interlock, make sure all dfly_pcpu[] structures have
2415      * been initialized before proceeding.
2416      */
2417     lockmgr(&usched_dfly_config_lk, LK_SHARED);
2418     lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2419 
2420     /*
2421      * Since we only want to be woken up only when no user processes
2422      * are scheduled on a cpu, run at an ultra low priority.
2423      */
2424     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2425 
2426     for (;;) {
2427 	/*
2428 	 * We use the LWKT deschedule-interlock trick to avoid racing
2429 	 * dfly_rdyprocmask.  This means we cannot block through to the
2430 	 * manual lwkt_switch() call we make below.
2431 	 */
2432 	crit_enter_gd(gd);
2433 	tsleep_interlock(dd->helper_thread, 0);
2434 
2435 	spin_lock(&dd->spin);
2436 	if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2437 		ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2438 		dd->flags |= DFLY_PCPU_RDYMASK;
2439 	}
2440 	clear_user_resched();	/* This satisfied the reschedule request */
2441 #if 0
2442 	dd->rrcount = 0;	/* Reset the round-robin counter */
2443 #endif
2444 
2445 	if (dd->runqcount || dd->uschedcp != NULL) {
2446 		/*
2447 		 * Threads are available.  A thread may or may not be
2448 		 * currently scheduled.  Get the best thread already queued
2449 		 * to this cpu.
2450 		 */
2451 		nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2452 		if (nlp) {
2453 			if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2454 				ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2455 				dd->flags |= DFLY_PCPU_CURMASK;
2456 			}
2457 			dd->upri = nlp->lwp_priority;
2458 			dd->uschedcp = nlp;
2459 #if 0
2460 			dd->rrcount = 0;	/* reset round robin */
2461 #endif
2462 			spin_unlock(&dd->spin);
2463 			lwkt_acquire(nlp->lwp_thread);
2464 			lwkt_schedule(nlp->lwp_thread);
2465 		} else {
2466 			/*
2467 			 * This situation should not occur because we had
2468 			 * at least one thread available.
2469 			 */
2470 			spin_unlock(&dd->spin);
2471 		}
2472 	} else if (usched_dfly_features & 0x01) {
2473 		/*
2474 		 * This cpu is devoid of runnable threads, steal a thread
2475 		 * from another cpu.  Since we're stealing, might as well
2476 		 * load balance at the same time.
2477 		 *
2478 		 * We choose the highest-loaded thread from the worst queue.
2479 		 *
2480 		 * NOTE! This function only returns a non-NULL rdd when
2481 		 *	 another cpu's queue is obviously overloaded.  We
2482 		 *	 do not want to perform the type of rebalancing
2483 		 *	 the schedclock does here because it would result
2484 		 *	 in insane process pulling when 'steady' state is
2485 		 *	 partially unbalanced (e.g. 6 runnables and only
2486 		 *	 4 cores).
2487 		 */
2488 		rdd = dfly_choose_worst_queue(dd, 0);
2489 		if (rdd && dd->uload + usched_dfly_weight6 < rdd->uload &&
2490 		    spin_trylock(&rdd->spin)) {
2491 			nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2492 			spin_unlock(&rdd->spin);
2493 		} else {
2494 			nlp = NULL;
2495 		}
2496 		if (nlp) {
2497 			if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2498 				ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2499 				dd->flags |= DFLY_PCPU_CURMASK;
2500 			}
2501 			dd->upri = nlp->lwp_priority;
2502 			dd->uschedcp = nlp;
2503 #if 0
2504 			dd->rrcount = 0;	/* reset round robin */
2505 #endif
2506 			spin_unlock(&dd->spin);
2507 			lwkt_acquire(nlp->lwp_thread);
2508 			lwkt_schedule(nlp->lwp_thread);
2509 		} else {
2510 			/*
2511 			 * Leave the thread on our run queue.  Another
2512 			 * scheduler will try to pull it later.
2513 			 */
2514 			spin_unlock(&dd->spin);
2515 		}
2516 	} else {
2517 		/*
2518 		 * devoid of runnable threads and not allowed to steal
2519 		 * any.
2520 		 */
2521 		spin_unlock(&dd->spin);
2522 	}
2523 
2524 	/*
2525 	 * We're descheduled unless someone scheduled us.  Switch away.
2526 	 * Exiting the critical section will cause splz() to be called
2527 	 * for us if interrupts and such are pending.
2528 	 */
2529 	crit_exit_gd(gd);
2530 	tsleep(dd->helper_thread, PINTERLOCKED, "schslp",
2531 	       usched_dfly_poll_ticks);
2532     }
2533 }
2534 
2535 #if 0
2536 static int
2537 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2538 {
2539 	int error, new_val;
2540 
2541 	new_val = usched_dfly_stick_to_level;
2542 
2543 	error = sysctl_handle_int(oidp, &new_val, 0, req);
2544         if (error != 0 || req->newptr == NULL)
2545 		return (error);
2546 	if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2547 		return (EINVAL);
2548 	usched_dfly_stick_to_level = new_val;
2549 	return (0);
2550 }
2551 #endif
2552 
2553 /*
2554  * Setup the queues and scheduler helpers (scheduler helpers are SMP only).
2555  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2556  * we should not mess with it further.
2557  */
2558 static void
2559 usched_dfly_cpu_init(void)
2560 {
2561 	int i;
2562 	int j;
2563 	int smt_not_supported = 0;
2564 	int cache_coherent_not_supported = 0;
2565 
2566 	if (bootverbose)
2567 		kprintf("Start usched_dfly helpers on cpus:\n");
2568 
2569 	sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2570 	usched_dfly_sysctl_tree =
2571 		SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2572 				SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2573 				"usched_dfly", CTLFLAG_RD, 0, "");
2574 
2575 	usched_dfly_node_mem = get_highest_node_memory();
2576 
2577 	lockmgr(&usched_dfly_config_lk, LK_EXCLUSIVE);
2578 
2579 	for (i = 0; i < ncpus; ++i) {
2580 		dfly_pcpu_t dd = &dfly_pcpu[i];
2581 		cpumask_t mask;
2582 
2583 		CPUMASK_ASSBIT(mask, i);
2584 		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2585 		    continue;
2586 
2587 		spin_init(&dd->spin, "uschedcpuinit");
2588 		dd->cpunode = get_cpu_node_by_cpuid(i);
2589 		dd->cpuid = i;
2590 		dd->gd = globaldata_find(i);
2591 		CPUMASK_ASSBIT(dd->cpumask, i);
2592 		for (j = 0; j < NQS; j++) {
2593 			TAILQ_INIT(&dd->queues[j]);
2594 			TAILQ_INIT(&dd->rtqueues[j]);
2595 			TAILQ_INIT(&dd->idqueues[j]);
2596 		}
2597 		ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2598 		if (i == 0)
2599 			dd->flags &= ~DFLY_PCPU_CURMASK;
2600 
2601 		if (dd->cpunode == NULL) {
2602 			smt_not_supported = 1;
2603 			cache_coherent_not_supported = 1;
2604 			if (bootverbose)
2605 				kprintf ("    cpu%d - WARNING: No CPU NODE "
2606 					 "found for cpu\n", i);
2607 		} else {
2608 			switch (dd->cpunode->type) {
2609 			case THREAD_LEVEL:
2610 				if (bootverbose)
2611 					kprintf ("    cpu%d - HyperThreading "
2612 						 "available. Core siblings: ",
2613 						 i);
2614 				break;
2615 			case CORE_LEVEL:
2616 				smt_not_supported = 1;
2617 
2618 				if (bootverbose)
2619 					kprintf ("    cpu%d - No HT available, "
2620 						 "multi-core/physical "
2621 						 "cpu. Physical siblings: ",
2622 						 i);
2623 				break;
2624 			case CHIP_LEVEL:
2625 				smt_not_supported = 1;
2626 
2627 				if (bootverbose)
2628 					kprintf ("    cpu%d - No HT available, "
2629 						 "single-core/physical cpu. "
2630 						 "Package siblings: ",
2631 						 i);
2632 				break;
2633 			default:
2634 				/* Let's go for safe defaults here */
2635 				smt_not_supported = 1;
2636 				cache_coherent_not_supported = 1;
2637 				if (bootverbose)
2638 					kprintf ("    cpu%d - Unknown cpunode->"
2639 						 "type=%u. siblings: ",
2640 						 i,
2641 						 (u_int)dd->cpunode->type);
2642 				break;
2643 			}
2644 
2645 			if (bootverbose) {
2646 				if (dd->cpunode->parent_node != NULL) {
2647 					kprint_cpuset(&dd->cpunode->
2648 							parent_node->members);
2649 					kprintf("\n");
2650 				} else {
2651 					kprintf(" no siblings\n");
2652 				}
2653 			}
2654 		}
2655 
2656 		lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2657 			    0, i, "usched %d", i);
2658 
2659 		/*
2660 		 * Allow user scheduling on the target cpu.  cpu #0 has already
2661 		 * been enabled in rqinit().
2662 		 */
2663 		if (i) {
2664 			ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2665 			dd->flags &= ~DFLY_PCPU_CURMASK;
2666 		}
2667 		if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2668 			ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2669 			dd->flags |= DFLY_PCPU_RDYMASK;
2670 		}
2671 		dd->upri = PRIBASE_NULL;
2672 
2673 	}
2674 
2675 	/* usched_dfly sysctl configurable parameters */
2676 
2677 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2678 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2679 		       OID_AUTO, "rrinterval", CTLFLAG_RW,
2680 		       &usched_dfly_rrinterval, 0, "");
2681 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2682 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2683 		       OID_AUTO, "decay", CTLFLAG_RW,
2684 		       &usched_dfly_decay, 0, "Extra decay when not running");
2685 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2686 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2687 		       OID_AUTO, "ipc_smt", CTLFLAG_RW,
2688 		       &usched_dfly_ipc_smt, 0, "Pair IPC on hyper-threads");
2689 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2690 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2691 		       OID_AUTO, "ipc_same", CTLFLAG_RW,
2692 		       &usched_dfly_ipc_same, 0, "Pair IPC on same thread");
2693 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2694 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2695 		       OID_AUTO, "poll_ticks", CTLFLAG_RW,
2696 		       &usched_dfly_poll_ticks, 0, "Poll for work (0 ok)");
2697 
2698 	/* Add enable/disable option for SMT scheduling if supported */
2699 	if (smt_not_supported) {
2700 		usched_dfly_smt = 0;
2701 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2702 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2703 				  OID_AUTO, "smt", CTLFLAG_RD,
2704 				  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2705 	} else {
2706 		usched_dfly_smt = 1;
2707 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2708 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2709 			       OID_AUTO, "smt", CTLFLAG_RW,
2710 			       &usched_dfly_smt, 0, "Enable SMT scheduling");
2711 	}
2712 
2713 	/*
2714 	 * Add enable/disable option for cache coherent scheduling
2715 	 * if supported
2716 	 */
2717 	if (cache_coherent_not_supported) {
2718 		usched_dfly_cache_coherent = 0;
2719 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2720 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2721 				  OID_AUTO, "cache_coherent", CTLFLAG_RD,
2722 				  "NOT SUPPORTED", 0,
2723 				  "Cache coherence NOT SUPPORTED");
2724 	} else {
2725 		usched_dfly_cache_coherent = 1;
2726 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2727 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2728 			       OID_AUTO, "cache_coherent", CTLFLAG_RW,
2729 			       &usched_dfly_cache_coherent, 0,
2730 			       "Enable/Disable cache coherent scheduling");
2731 
2732 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2733 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2734 			       OID_AUTO, "weight1", CTLFLAG_RW,
2735 			       &usched_dfly_weight1, 200,
2736 			       "Weight selection for current cpu");
2737 
2738 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2739 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2740 			       OID_AUTO, "weight2", CTLFLAG_RW,
2741 			       &usched_dfly_weight2, 180,
2742 			       "Weight selection for wakefrom cpu");
2743 
2744 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2745 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2746 			       OID_AUTO, "weight3", CTLFLAG_RW,
2747 			       &usched_dfly_weight3, 40,
2748 			       "Weight selection for num threads on queue");
2749 
2750 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2751 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2752 			       OID_AUTO, "weight4", CTLFLAG_RW,
2753 			       &usched_dfly_weight4, 160,
2754 			       "Availability of other idle cpus");
2755 
2756 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2757 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2758 			       OID_AUTO, "weight5", CTLFLAG_RW,
2759 			       &usched_dfly_weight5, 50,
2760 			       "Memory attached to node");
2761 
2762 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2763 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2764 			       OID_AUTO, "weight6", CTLFLAG_RW,
2765 			       &usched_dfly_weight6, 150,
2766 			       "Transfer weight");
2767 
2768 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2769 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2770 			       OID_AUTO, "fast_resched", CTLFLAG_RW,
2771 			       &usched_dfly_fast_resched, 0,
2772 			       "Availability of other idle cpus");
2773 
2774 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2775 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2776 			       OID_AUTO, "features", CTLFLAG_RW,
2777 			       &usched_dfly_features, 0x8F,
2778 			       "Allow pulls into empty queues");
2779 
2780 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2781 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2782 			       OID_AUTO, "swmask", CTLFLAG_RW,
2783 			       &usched_dfly_swmask, ~PPQMASK,
2784 			       "Queue mask to force thread switch");
2785 
2786 #if 0
2787 		SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2788 				SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2789 				OID_AUTO, "stick_to_level",
2790 				CTLTYPE_INT | CTLFLAG_RW,
2791 				NULL, sizeof usched_dfly_stick_to_level,
2792 				sysctl_usched_dfly_stick_to_level, "I",
2793 				"Stick a process to this level. See sysctl"
2794 				"paremter hw.cpu_topology.level_description");
2795 #endif
2796 	}
2797 	lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2798 }
2799 
2800 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2801 	usched_dfly_cpu_init, NULL);
2802