xref: /dflybsd-src/sys/kern/lwkt_thread.c (revision 90ea502b8c5d21f908cedff6680ee2bc9e74ce74)
1 /*
2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * Each cpu in a system has its own self-contained light weight kernel
37  * thread scheduler, which means that generally speaking we only need
38  * to use a critical section to avoid problems.  Foreign thread
39  * scheduling is queued via (async) IPIs.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/rtprio.h>
47 #include <sys/queue.h>
48 #include <sys/sysctl.h>
49 #include <sys/kthread.h>
50 #include <machine/cpu.h>
51 #include <sys/lock.h>
52 #include <sys/caps.h>
53 #include <sys/spinlock.h>
54 #include <sys/ktr.h>
55 
56 #include <sys/thread2.h>
57 #include <sys/spinlock2.h>
58 
59 #include <vm/vm.h>
60 #include <vm/vm_param.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_object.h>
63 #include <vm/vm_page.h>
64 #include <vm/vm_map.h>
65 #include <vm/vm_pager.h>
66 #include <vm/vm_extern.h>
67 
68 #include <machine/stdarg.h>
69 #include <machine/smp.h>
70 
71 #if !defined(KTR_CTXSW)
72 #define KTR_CTXSW KTR_ALL
73 #endif
74 KTR_INFO_MASTER(ctxsw);
75 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "sw  %p > %p", 2 * sizeof(struct thread *));
76 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "pre %p > %p", 2 * sizeof(struct thread *));
77 
78 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
79 
80 #ifdef SMP
81 static int mplock_countx = 0;
82 #endif
83 #ifdef	INVARIANTS
84 static int panic_on_cscount = 0;
85 #endif
86 static __int64_t switch_count = 0;
87 static __int64_t preempt_hit = 0;
88 static __int64_t preempt_miss = 0;
89 static __int64_t preempt_weird = 0;
90 static __int64_t token_contention_count __debugvar = 0;
91 static __int64_t mplock_contention_count __debugvar = 0;
92 static int lwkt_use_spin_port;
93 #ifdef SMP
94 static int chain_mplock = 0;
95 static int bgl_yield = 10;
96 #endif
97 static struct objcache *thread_cache;
98 
99 volatile cpumask_t mp_lock_contention_mask;
100 
101 #ifdef SMP
102 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
103 #endif
104 
105 extern void cpu_heavy_restore(void);
106 extern void cpu_lwkt_restore(void);
107 extern void cpu_kthread_restore(void);
108 extern void cpu_idle_restore(void);
109 
110 #ifdef __x86_64__
111 
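/*
 * Debug helper: sanity check the top of a switched-out thread's stack.
 * A saved td_sp should have one of the known restore functions as its
 * return address; anything else indicates a corrupted switch frame.
 */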
112 static int
113 jg_tos_ok(struct thread *td)
114 {
115 	void *tos;
116 	int tos_ok;
117 
118 	if (td == NULL) {
119 		return 1;
120 	}
121 	KKASSERT(td->td_sp != NULL);
122 	tos = ((void **)td->td_sp)[0];
123 	tos_ok = 0;
124 	if ((tos == cpu_heavy_restore) || (tos == cpu_lwkt_restore) ||
125 	    (tos == cpu_kthread_restore) || (tos == cpu_idle_restore)) {
126 		tos_ok = 1;
127 	}
128 	return tos_ok;
129 }
130 
131 #endif
132 
133 /*
134  * We can make all thread ports use the spin backend instead of the thread
135  * backend.  This should only be set to debug the spin backend.
136  */
137 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);
138 
139 #ifdef	INVARIANTS
140 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
141 #endif
142 #ifdef SMP
143 SYSCTL_INT(_lwkt, OID_AUTO, chain_mplock, CTLFLAG_RW, &chain_mplock, 0, "");
144 SYSCTL_INT(_lwkt, OID_AUTO, bgl_yield_delay, CTLFLAG_RW, &bgl_yield, 0, "");
145 #endif
146 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
147 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
148 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
149 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
150 #ifdef	INVARIANTS
151 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
152 	&token_contention_count, 0, "spinning due to token contention");
153 SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW,
154 	&mplock_contention_count, 0, "spinning due to MPLOCK contention");
155 #endif
156 
157 /*
158  * Kernel Trace
159  */
160 #if !defined(KTR_GIANT_CONTENTION)
161 #define KTR_GIANT_CONTENTION	KTR_ALL
162 #endif
163 
164 KTR_INFO_MASTER(giant);
165 KTR_INFO(KTR_GIANT_CONTENTION, giant, beg, 0, "thread=%p", sizeof(void *));
166 KTR_INFO(KTR_GIANT_CONTENTION, giant, end, 1, "thread=%p", sizeof(void *));
167 
168 #define loggiant(name)	KTR_LOG(giant_ ## name, curthread)
169 
170 /*
171  * These helper procedures handle the runq; they can only be called from
172  * within a critical section.
173  *
174  * WARNING!  Prior to SMP being brought up it is possible to enqueue and
175  * dequeue threads belonging to other cpus, so be sure to use td->td_gd
176  * instead of 'mycpu' when referencing the globaldata structure.   Once
177  * SMP is live, enqueuing and dequeuing only occur on the current cpu.
178  */
179 static __inline
180 void
181 _lwkt_dequeue(thread_t td)
182 {
183     if (td->td_flags & TDF_RUNQ) {
184 	int nq = td->td_pri & TDPRI_MASK;
185 	struct globaldata *gd = td->td_gd;
186 
187 	td->td_flags &= ~TDF_RUNQ;
188 	TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
189 	/* runqmask is passively cleaned up by the switcher */
190     }
191 }
192 
193 static __inline
194 void
195 _lwkt_enqueue(thread_t td)
196 {
197     if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
198 	int nq = td->td_pri & TDPRI_MASK;
199 	struct globaldata *gd = td->td_gd;
200 
201 	td->td_flags |= TDF_RUNQ;
202 	TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
203 	gd->gd_runqmask |= 1 << nq;
204     }
205 }
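/*
 * Illustrative sketch (#if 0'd out, not compiled): gd_tdrunq is an array
 * of TAILQs indexed by priority and gd_runqmask has a bit set for each
 * queue that may be non-empty.  Picking the next thread is therefore a
 * bsrl() on the mask, which is essentially what lwkt_switch() does below.
 * The helper name is hypothetical.
 */
#if 0
static thread_t
example_pick_next_thread(globaldata_t gd)
{
    thread_t ntd = NULL;

    while (gd->gd_runqmask) {
	int nq = bsrl(gd->gd_runqmask);		/* highest priority bit set */

	if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) != NULL)
	    break;
	gd->gd_runqmask &= ~(1 << nq);		/* queue empty, clear stale bit */
    }
    return (ntd);
}
#endif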
206 
207 static __boolean_t
208 _lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
209 {
210 	struct thread *td = (struct thread *)obj;
211 
212 	td->td_kstack = NULL;
213 	td->td_kstack_size = 0;
214 	td->td_flags = TDF_ALLOCATED_THREAD;
215 	return (1);
216 }
217 
218 static void
219 _lwkt_thread_dtor(void *obj, void *privdata)
220 {
221 	struct thread *td = (struct thread *)obj;
222 
223 	KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
224 	    ("_lwkt_thread_dtor: not allocated from objcache"));
225 	KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
226 		td->td_kstack_size > 0,
227 	    ("_lwkt_thread_dtor: corrupted stack"));
228 	kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
229 }
230 
231 /*
232  * Initialize the lwkt subsystem.
233  */
234 void
235 lwkt_init(void)
236 {
237     /* An objcache has 2 magazines per CPU so divide cache size by 2. */
238     thread_cache = objcache_create_mbacked(M_THREAD, sizeof(struct thread),
239 			NULL, CACHE_NTHREADS/2,
240 			_lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
241 }
242 
243 /*
244  * Schedule a thread to run.  As the current thread we can always safely
245  * schedule ourselves, and a shortcut procedure is provided for that
246  * function.
247  *
248  * (non-blocking, self contained on a per cpu basis)
249  */
250 void
251 lwkt_schedule_self(thread_t td)
252 {
253     crit_enter_quick(td);
254     KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
255     KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
256     _lwkt_enqueue(td);
257     crit_exit_quick(td);
258 }
259 
260 /*
261  * Deschedule a thread.
262  *
263  * (non-blocking, self contained on a per cpu basis)
264  */
265 void
266 lwkt_deschedule_self(thread_t td)
267 {
268     crit_enter_quick(td);
269     _lwkt_dequeue(td);
270     crit_exit_quick(td);
271 }
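/*
 * Illustrative sketch (#if 0'd out, not compiled): how the schedule /
 * deschedule primitives are typically combined to block on an ad-hoc
 * condition.  The flag, the saved thread pointer and both functions are
 * hypothetical, and cross-cpu memory-ordering details are glossed over;
 * real code would normally use tsleep()/wakeup() or tokens instead.
 */
#if 0
static volatile int example_done;
static thread_t example_waiter;

static void
example_wait(void)
{
    thread_t td = curthread;

    crit_enter();
    example_waiter = td;
    for (;;) {
	/*
	 * Deschedule BEFORE testing the flag so that a wakeup arriving
	 * between the test and the switch re-enqueues us rather than
	 * being lost.
	 */
	lwkt_deschedule_self(td);
	if (example_done)
	    break;
	lwkt_switch();
    }
    lwkt_schedule_self(td);		/* leave ourselves runnable again */
    crit_exit();
}

static void
example_wakeup(void)
{
    example_done = 1;
    if (example_waiter)
	lwkt_schedule(example_waiter);
}
#endif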
272 
273 /*
274  * LWKTs operate on a per-cpu basis
275  *
276  * WARNING!  Called from early boot, 'mycpu' may not work yet.
277  */
278 void
279 lwkt_gdinit(struct globaldata *gd)
280 {
281     int i;
282 
283     for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
284 	TAILQ_INIT(&gd->gd_tdrunq[i]);
285     gd->gd_runqmask = 0;
286     TAILQ_INIT(&gd->gd_tdallq);
287 }
288 
289 /*
290  * Create a new thread.  The thread must be associated with a process context
291  * or LWKT start address before it can be scheduled.  If the target cpu is
292  * -1 the thread will be created on the current cpu.
293  *
294  * If you intend to create a thread without a process context this function
295  * does everything except load the startup and switcher function.
296  */
297 thread_t
298 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
299 {
300     globaldata_t gd = mycpu;
301     void *stack;
302 
303     /*
304      * If static thread storage is not supplied allocate a thread.  Reuse
305      * a cached free thread if possible.  gd_freetd is used to keep an exiting
306      * thread intact through the exit.
307      */
308     if (td == NULL) {
309 	if ((td = gd->gd_freetd) != NULL)
310 	    gd->gd_freetd = NULL;
311 	else
312 	    td = objcache_get(thread_cache, M_WAITOK);
313     	KASSERT((td->td_flags &
314 		 (TDF_ALLOCATED_THREAD|TDF_RUNNING)) == TDF_ALLOCATED_THREAD,
315 		("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
316     	flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
317     }
318 
319     /*
320      * Try to reuse cached stack.
321      */
322     if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
323 	if (flags & TDF_ALLOCATED_STACK) {
324 	    kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
325 	    stack = NULL;
326 	}
327     }
328     if (stack == NULL) {
329 	stack = (void *)kmem_alloc(&kernel_map, stksize);
330 	flags |= TDF_ALLOCATED_STACK;
331     }
332     if (cpu < 0)
333 	lwkt_init_thread(td, stack, stksize, flags, gd);
334     else
335 	lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
336     return(td);
337 }
338 
339 /*
340  * Initialize a preexisting thread structure.  This function is used by
341  * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
342  *
343  * All threads start out in a critical section at a priority of
344  * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
345  * appropriate.  This function may send an IPI message when the
346  * requested cpu is not the current cpu and consequently gd_tdallq may
347  * not be initialized synchronously from the point of view of the originating
348  * cpu.
349  *
350  * NOTE! We have to be careful with regard to creating threads for other cpus
351  * if SMP has not yet been activated.
352  */
353 #ifdef SMP
354 
355 static void
356 lwkt_init_thread_remote(void *arg)
357 {
358     thread_t td = arg;
359 
360     /*
361      * Protected by critical section held by IPI dispatch
362      */
363     TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
364 }
365 
366 #endif
367 
368 void
369 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
370 		struct globaldata *gd)
371 {
372     globaldata_t mygd = mycpu;
373 
374     bzero(td, sizeof(struct thread));
375     td->td_kstack = stack;
376     td->td_kstack_size = stksize;
377     td->td_flags = flags;
378     td->td_gd = gd;
379     td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
380 #ifdef SMP
381     if ((flags & TDF_MPSAFE) == 0)
382 	td->td_mpcount = 1;
383 #endif
384     if (lwkt_use_spin_port)
385 	lwkt_initport_spin(&td->td_msgport);
386     else
387 	lwkt_initport_thread(&td->td_msgport, td);
388     pmap_init_thread(td);
389 #ifdef SMP
390     /*
391      * Normally initializing a thread for a remote cpu requires sending an
392      * IPI.  However, the idlethread is set up before the other cpus are
393      * activated so we have to treat it as a special case.  XXX manipulation
394      * of gd_tdallq requires the BGL.
395      */
396     if (gd == mygd || td == &gd->gd_idlethread) {
397 	crit_enter_gd(mygd);
398 	TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
399 	crit_exit_gd(mygd);
400     } else {
401 	lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
402     }
403 #else
404     crit_enter_gd(mygd);
405     TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
406     crit_exit_gd(mygd);
407 #endif
408 }
409 
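/*
 * Format and set the thread's command name (td_comm), as reported by
 * ps and the kernel debugger.
 */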
410 void
411 lwkt_set_comm(thread_t td, const char *ctl, ...)
412 {
413     __va_list va;
414 
415     __va_start(va, ctl);
416     kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
417     __va_end(va);
418 }
419 
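/*
 * Add or drop a reference on a thread structure.  A thread with
 * outstanding references is kept intact; lwkt_wait_free() below sleeps
 * until all references have been dropped.
 */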
420 void
421 lwkt_hold(thread_t td)
422 {
423     ++td->td_refs;
424 }
425 
426 void
427 lwkt_rele(thread_t td)
428 {
429     KKASSERT(td->td_refs > 0);
430     --td->td_refs;
431 }
432 
433 void
434 lwkt_wait_free(thread_t td)
435 {
436     while (td->td_refs)
437 	tsleep(td, 0, "tdreap", hz);
438 }
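/*
 * Illustrative sketch (#if 0'd out, not compiled): a hypothetical consumer
 * pins a thread structure with lwkt_hold() while it peeks at it, and a
 * hypothetical reaper waits for the references to drain before freeing.
 */
#if 0
static void
example_inspect_thread(thread_t td)
{
    lwkt_hold(td);				/* keep td from being reaped */
    kprintf("thread %p (%s)\n", td, td->td_comm);
    lwkt_rele(td);				/* drop our reference */
}

static void
example_reap_thread(thread_t td)
{
    lwkt_wait_free(td);				/* wait for td_refs to drain */
    lwkt_free_thread(td);
}
#endif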
439 
440 void
441 lwkt_free_thread(thread_t td)
442 {
443     KASSERT((td->td_flags & TDF_RUNNING) == 0,
444 	("lwkt_free_thread: did not exit! %p", td));
445 
446     if (td->td_flags & TDF_ALLOCATED_THREAD) {
447     	objcache_put(thread_cache, td);
448     } else if (td->td_flags & TDF_ALLOCATED_STACK) {
449 	/* client-allocated struct with internally allocated stack */
450 	KASSERT(td->td_kstack && td->td_kstack_size > 0,
451 	    ("lwkt_free_thread: corrupted stack"));
452 	kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
453 	td->td_kstack = NULL;
454 	td->td_kstack_size = 0;
455     }
456 }
457 
458 
459 /*
460  * Switch to the next runnable lwkt.  If no LWKTs are runnable then
461  * switch to the idlethread.  Switching must occur within a critical
462  * section to avoid races with the scheduling queue.
463  *
464  * We always have full control over our cpu's run queue.  Other cpus
465  * that wish to manipulate our queue must use the cpu_*msg() calls to
466  * talk to our cpu, so a critical section is all that is needed and
467  * the result is very, very fast thread switching.
468  *
469  * The LWKT scheduler uses a fixed priority model and round-robins at
470  * each priority level.  User process scheduling is a totally
471  * different beast and LWKT priorities should not be confused with
472  * user process priorities.
473  *
474  * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
475  * cleans it up.  Note that the td_switch() function cannot do anything that
476  * requires the MP lock since the MP lock will have already been setup for
477  * the target thread (not the current thread).  It's nice to have a scheduler
478  * that does not need the MP lock to work because it allows us to do some
479  * really cool high-performance MP lock optimizations.
480  *
481  * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
482  * is not called by the current thread in the preemption case, only when
483  * the preempting thread blocks (in order to return to the original thread).
484  */
485 void
486 lwkt_switch(void)
487 {
488     globaldata_t gd = mycpu;
489     thread_t td = gd->gd_curthread;
490     thread_t ntd;
491 #ifdef SMP
492     int mpheld;
493 #endif
494 
495     /*
496      * Switching from within a 'fast' (non thread switched) interrupt or IPI
497      * is illegal.  However, we may have to do it anyway if we hit a fatal
498      * kernel trap or we have panicked.
499      *
500      * If this case occurs, save and restore the interrupt nesting level.
501      */
502     if (gd->gd_intr_nesting_level) {
503 	int savegdnest;
504 	int savegdtrap;
505 
506 	if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
507 	    panic("lwkt_switch: cannot switch from within "
508 		  "a fast interrupt, yet, td %p\n", td);
509 	} else {
510 	    savegdnest = gd->gd_intr_nesting_level;
511 	    savegdtrap = gd->gd_trap_nesting_level;
512 	    gd->gd_intr_nesting_level = 0;
513 	    gd->gd_trap_nesting_level = 0;
514 	    if ((td->td_flags & TDF_PANICWARN) == 0) {
515 		td->td_flags |= TDF_PANICWARN;
516 		kprintf("Warning: thread switch from interrupt or IPI, "
517 			"thread %p (%s)\n", td, td->td_comm);
518 		print_backtrace();
519 	    }
520 	    lwkt_switch();
521 	    gd->gd_intr_nesting_level = savegdnest;
522 	    gd->gd_trap_nesting_level = savegdtrap;
523 	    return;
524 	}
525     }
526 
527     /*
528      * Passive release (used to transition from user to kernel mode
529      * when we block or switch rather than when we enter the kernel).
530      * This function is NOT called if we are switching into a preemption
531      * or returning from a preemption.  Typically this causes us to lose
532      * our current process designation (if we have one) and become a true
533      * LWKT thread, and may also hand the current process designation to
534      * another process and schedule its thread.
535      */
536     if (td->td_release)
537 	    td->td_release(td);
538 
539     crit_enter_gd(gd);
540     if (td->td_toks)
541 	    lwkt_relalltokens(td);
542 
543     /*
544      * We had better not be holding any spin locks, but don't get into an
545      * endless panic loop.
546      */
547     KASSERT(gd->gd_spinlock_rd == NULL || panicstr != NULL,
548 	    ("lwkt_switch: still holding a shared spinlock %p!",
549 	     gd->gd_spinlock_rd));
550     KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
551 	    ("lwkt_switch: still holding %d exclusive spinlocks!",
552 	     gd->gd_spinlocks_wr));
553 
554 
555 #ifdef SMP
556     /*
557      * td_mpcount cannot be used to determine if we currently hold the
558      * MP lock because get_mplock() will increment it prior to attempting
559      * to get the lock, and switch out if it can't.  Our ownership of
560      * the actual lock will remain stable while we are in a critical section
561      * (but, of course, another cpu may own or release the lock so the
562      * actual value of mp_lock is not stable).
563      */
564     mpheld = MP_LOCK_HELD();
565 #ifdef	INVARIANTS
566     if (td->td_cscount) {
567 	kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
568 		td);
569 	if (panic_on_cscount)
570 	    panic("switching while mastering cpusync");
571     }
572 #endif
573 #endif
574     if ((ntd = td->td_preempted) != NULL) {
575 	/*
576 	 * We had preempted another thread on this cpu, resume the preempted
577 	 * thread.  This occurs transparently, whether the preempted thread
578 	 * was scheduled or not (it may have been preempted after descheduling
579 	 * itself).
580 	 *
581 	 * We have to setup the MP lock for the original thread after backing
582 	 * out the adjustment that was made to curthread when the original
583 	 * was preempted.
584 	 */
585 	KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
586 #ifdef SMP
587 	if (ntd->td_mpcount && mpheld == 0) {
588 	    panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
589 	       td, ntd, td->td_mpcount, ntd->td_mpcount);
590 	}
591 	if (ntd->td_mpcount) {
592 	    td->td_mpcount -= ntd->td_mpcount;
593 	    KKASSERT(td->td_mpcount >= 0);
594 	}
595 #endif
596 	ntd->td_flags |= TDF_PREEMPT_DONE;
597 
598 	/*
599 	 * The interrupt may have woken a thread up, we need to properly
600 	 * set the reschedule flag if the originally interrupted thread is
601 	 * at a lower priority.
602 	 */
603 	if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
604 	    need_lwkt_resched();
605 	/* YYY release mp lock on switchback if original doesn't need it */
606     } else {
607 	/*
608 	 * Priority queue / round-robin at each priority.  Note that user
609 	 * processes run at a fixed, low priority and the user process
610 	 * scheduler deals with interactions between user processes
611 	 * by scheduling and descheduling them from the LWKT queue as
612 	 * necessary.
613 	 *
614 	 * We have to adjust the MP lock for the target thread.  If we
615 	 * need the MP lock and cannot obtain it we try to locate a
616 	 * thread that does not need the MP lock.  If we cannot, we spin
617 	 * instead of HLT.
618 	 *
619 	 * A similar issue exists for the tokens held by the target thread.
620 	 * If we cannot obtain ownership of the tokens we cannot immediately
621 	 * schedule the thread.
622 	 */
623 
624 	/*
625 	 * If an LWKT reschedule was requested, well that is what we are
626 	 * doing now so clear it.
627 	 */
628 	clear_lwkt_resched();
629 again:
630 	if (gd->gd_runqmask) {
631 	    int nq = bsrl(gd->gd_runqmask);
632 	    if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
633 		gd->gd_runqmask &= ~(1 << nq);
634 		goto again;
635 	    }
636 #ifdef SMP
637 	    /*
638 	     * THREAD SELECTION FOR AN SMP MACHINE BUILD
639 	     *
640 	     * If the target needs the MP lock and we couldn't get it,
641 	     * or if the target is holding tokens and we could not
642 	     * gain ownership of the tokens, continue looking for a
643 	     * thread to schedule and spin instead of HLT if we can't.
644 	     *
645 	     * NOTE: the mpheld variable is invalid after this conditional; it
646 	     * can change due to both cpu_try_mplock() returning success
647 	     * AND interactions in lwkt_getalltokens() due to the fact that
648 	     * we are trying to check the mpcount of a thread other than
649 	     * the current thread.  Because of this, if the current thread
650 	     * is not holding td_mpcount, an IPI indirectly run via
651 	     * lwkt_getalltokens() can obtain and release the MP lock and
652 	     * cause the core MP lock to be released.
653 	     */
654 	    if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
655 		(ntd->td_toks && lwkt_getalltokens(ntd) == 0)
656 	    ) {
657 		u_int32_t rqmask = gd->gd_runqmask;
658 
659 		mpheld = MP_LOCK_HELD();
660 		ntd = NULL;
661 		while (rqmask) {
662 		    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
663 			if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) {
664 			    /* spinning due to MP lock being held */
665 #ifdef	INVARIANTS
666 			    ++mplock_contention_count;
667 #endif
668 			    /* mplock still not held, 'mpheld' still valid */
669 			    continue;
670 			}
671 
672 			/*
673 			 * mpheld state is invalid after the getalltokens call returns
674 			 * failure, but the variable is only needed for
675 			 * the loop.
676 			 */
677 			if (ntd->td_toks && !lwkt_getalltokens(ntd)) {
678 			    /* spinning due to token contention */
679 #ifdef	INVARIANTS
680 			    ++token_contention_count;
681 #endif
682 			    mpheld = MP_LOCK_HELD();
683 			    continue;
684 			}
685 			break;
686 		    }
687 		    if (ntd)
688 			break;
689 		    rqmask &= ~(1 << nq);
690 		    nq = bsrl(rqmask);
691 
692 		    /*
693 		     * We have two choices. We can either refuse to run a
694 		     * user thread when a kernel thread needs the MP lock
695 		     * but could not get it, or we can allow it to run but
696 		     * then expect an IPI (hopefully) later on to force a
697 		     * reschedule when the MP lock might become available.
698 		     */
699 		    if (nq < TDPRI_KERN_LPSCHED) {
700 			if (chain_mplock == 0)
701 				break;
702 			atomic_set_int(&mp_lock_contention_mask,
703 				       gd->gd_cpumask);
704 			/* continue loop, allow user threads to be scheduled */
705 		    }
706 		}
707 		if (ntd == NULL) {
708 		    cpu_mplock_contested();
709 		    ntd = &gd->gd_idlethread;
710 		    ntd->td_flags |= TDF_IDLE_NOHLT;
711 		    goto using_idle_thread;
712 		} else {
713 		    ++gd->gd_cnt.v_swtch;
714 		    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
715 		    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
716 		}
717 	    } else {
718 		if (ntd->td_mpcount)
719 			++mplock_countx;
720 		++gd->gd_cnt.v_swtch;
721 		TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
722 		TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
723 	    }
724 #else
725 	    /*
726 	     * THREAD SELECTION FOR A UP MACHINE BUILD.  We don't have to
727 	     * worry about tokens or the BGL.  However, we still have
728 	     * to call lwkt_getalltokens() in order to properly detect
729 	     * stale tokens.  This call cannot fail for a UP build!
730 	     */
731 	    lwkt_getalltokens(ntd);
732 	    ++gd->gd_cnt.v_swtch;
733 	    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
734 	    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
735 #endif
736 	} else {
737 	    /*
738 	     * We have nothing to run, but only let the idle loop halt
739 	     * the cpu if there are no pending interrupts.
740 	     */
741 	    ntd = &gd->gd_idlethread;
742 	    if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
743 		ntd->td_flags |= TDF_IDLE_NOHLT;
744 #ifdef SMP
745 using_idle_thread:
746 	    /*
747 	     * The idle thread should not be holding the MP lock unless we
748 	     * are trapping in the kernel or in a panic.  Since we select the
749 	     * idle thread unconditionally when no other thread is available,
750 	     * if the MP lock is desired during a panic or kernel trap, we
751 	     * have to loop in the scheduler until we get it.
752 	     */
753 	    if (ntd->td_mpcount) {
754 		mpheld = MP_LOCK_HELD();
755 		if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
756 		    panic("Idle thread %p was holding the BGL!", ntd);
757 		} else if (mpheld == 0) {
758 		    cpu_mplock_contested();
759 		    goto again;
760 		}
761 	    }
762 #endif
763 	}
764     }
765     KASSERT(ntd->td_pri >= TDPRI_CRIT,
766 	("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));
767 
768     /*
769      * Do the actual switch.  If the new target does not need the MP lock
770      * and we are holding it, release the MP lock.  If the new target requires
771      * the MP lock we have already acquired it for the target.
772      */
773 #ifdef SMP
774     if (ntd->td_mpcount == 0 ) {
775 	if (MP_LOCK_HELD())
776 	    cpu_rel_mplock();
777     } else {
778 	ASSERT_MP_LOCK_HELD(ntd);
779     }
780 #endif
781     if (td != ntd) {
782 	++switch_count;
783 #ifdef __x86_64__
784     {
785 	int tos_ok __debugvar = jg_tos_ok(ntd);
786 	KKASSERT(tos_ok);
787     }
788 #endif
789 	KTR_LOG(ctxsw_sw, td, ntd);
790 	td->td_switch(ntd);
791     }
792     /* NOTE: current cpu may have changed after switch */
793     crit_exit_quick(td);
794 }
795 
796 /*
797  * Request that the target thread preempt the current thread.  Preemption
798  * only works under a specific set of conditions:
799  *
800  *	- We are not preempting ourselves
801  *	- The target thread is owned by the current cpu
802  *	- We are not currently being preempted
803  *	- The target is not currently being preempted
804  *	- We are not holding any spin locks
805  *	- The target thread is not holding any tokens
806  *	- We are able to satisfy the target's MP lock requirements (if any).
807  *
808  * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
809  * this is called via lwkt_schedule() through the td_preemptable callback.
810  * critpri is the managed critical priority that we should ignore in order
811  * to determine whether preemption is possible (aka usually just the crit
812  * priority of lwkt_schedule() itself).
813  *
814  * XXX at the moment we run the target thread in a critical section during
815  * the preemption in order to prevent the target from taking interrupts
816  * that *WE* can't.  Preemption is strictly limited to interrupt threads
817  * and interrupt-like threads, outside of a critical section, and the
818  * preempted source thread will be resumed the instant the target blocks
819  * whether or not the source is scheduled (i.e. preemption is supposed to
820  * be as transparent as possible).
821  *
822  * The target thread inherits our MP count (added to its own) for the
823  * duration of the preemption in order to preserve the atomicity of the
824  * MP lock during the preemption.  Therefore, any preempting targets must be
825  * careful in regards to MP assertions.  Note that the MP count may be
826  * out of sync with the physical mp_lock, but we do not have to preserve
827  * the original ownership of the lock if it was out of sync (that is, we
828  * can leave it synchronized on return).
829  */
830 void
831 lwkt_preempt(thread_t ntd, int critpri)
832 {
833     struct globaldata *gd = mycpu;
834     thread_t td;
835 #ifdef SMP
836     int mpheld;
837     int savecnt;
838 #endif
839 
840     /*
841      * The caller has put us in a critical section.  We can only preempt
842      * if the caller of the caller was not in a critical section (basically
843      * a local interrupt), as determined by the 'critpri' parameter.  We
844      * also can't preempt if the caller is holding any spinlocks (even if
845      * he isn't in a critical section).  This also handles the tokens test.
846      *
847      * YYY The target thread must be in a critical section (else it must
848      * inherit our critical section?  I dunno yet).
849      *
850      * Set need_lwkt_resched() unconditionally for now YYY.
851      */
852     KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));
853 
854     td = gd->gd_curthread;
855     if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
856 	++preempt_miss;
857 	return;
858     }
859     if ((td->td_pri & ~TDPRI_MASK) > critpri) {
860 	++preempt_miss;
861 	need_lwkt_resched();
862 	return;
863     }
864 #ifdef SMP
865     if (ntd->td_gd != gd) {
866 	++preempt_miss;
867 	need_lwkt_resched();
868 	return;
869     }
870 #endif
871     /*
872      * Take the easy way out and do not preempt if we are holding
873      * any spinlocks.  We could test whether the thread(s) being
874      * preempted interlock against the target thread's tokens and whether
875      * we can get all the target thread's tokens, but this situation
876      * should not occur very often so it's easier to simply not preempt.
877      * Also, plain spinlocks are impossible to figure out at this point so
878      * just don't preempt.
879      *
880      * Do not try to preempt if the target thread is holding any tokens.
881      * We could try to acquire the tokens but this case is so rare there
882      * is no need to support it.
883      */
884     if (gd->gd_spinlock_rd || gd->gd_spinlocks_wr) {
885 	++preempt_miss;
886 	need_lwkt_resched();
887 	return;
888     }
889     if (ntd->td_toks) {
890 	++preempt_miss;
891 	need_lwkt_resched();
892 	return;
893     }
894     if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
895 	++preempt_weird;
896 	need_lwkt_resched();
897 	return;
898     }
899     if (ntd->td_preempted) {
900 	++preempt_hit;
901 	need_lwkt_resched();
902 	return;
903     }
904 #ifdef SMP
905     /*
906      * note: an interrupt might have occurred just as we were transitioning
907      * to or from the MP lock.  In this case td_mpcount will be pre-disposed
908      * (non-zero) but not actually synchronized with the actual state of the
909      * lock.  We can use it to imply an MP lock requirement for the
910      * preemption but we cannot use it to test whether we hold the MP lock
911      * or not.
912      */
913     savecnt = td->td_mpcount;
914     mpheld = MP_LOCK_HELD();
915     ntd->td_mpcount += td->td_mpcount;
916     if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
917 	ntd->td_mpcount -= td->td_mpcount;
918 	++preempt_miss;
919 	need_lwkt_resched();
920 	return;
921     }
922 #endif
923 
924     /*
925      * Since we are able to preempt the current thread, there is no need to
926      * call need_lwkt_resched().
927      */
928     ++preempt_hit;
929     ntd->td_preempted = td;
930     td->td_flags |= TDF_PREEMPT_LOCK;
931     KTR_LOG(ctxsw_pre, td, ntd);
932     td->td_switch(ntd);
933 
934     KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
935 #ifdef SMP
936     KKASSERT(savecnt == td->td_mpcount);
937     mpheld = MP_LOCK_HELD();
938     if (mpheld && td->td_mpcount == 0)
939 	cpu_rel_mplock();
940     else if (mpheld == 0 && td->td_mpcount)
941 	panic("lwkt_preempt(): MP lock was not held through");
942 #endif
943     ntd->td_preempted = NULL;
944     td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
945 }
946 
947 /*
948  * Conditionally call splz() if gd_reqflags indicates work is pending.
949  *
950  * td_nest_count prevents deep nesting via splz() or doreti() which
951  * might otherwise blow out the kernel stack.  Note that except for
952  * this special case, we MUST call splz() here to handle any
953  * pending ints, particularly after we switch, or we might accidentally
954  * halt the cpu with interrupts pending.
955  *
956  * (self contained on a per cpu basis)
957  */
958 void
959 splz_check(void)
960 {
961     globaldata_t gd = mycpu;
962     thread_t td = gd->gd_curthread;
963 
964     if (gd->gd_reqflags && td->td_nest_count < 2)
965 	splz();
966 }
967 
968 /*
969  * This implements a normal yield which will yield to equal priority
970  * threads as well as higher priority threads.  Note that gd_reqflags
971  * tests will be handled by the crit_exit() call in lwkt_switch().
972  *
973  * (self contained on a per cpu basis)
974  */
975 void
976 lwkt_yield(void)
977 {
978     lwkt_schedule_self(curthread);
979     lwkt_switch();
980 }
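/*
 * Illustrative sketch (#if 0'd out, not compiled): a cpu-bound kernel loop
 * yielding every so often so that other runnable LWKTs at the same or a
 * higher priority are not starved.  The work loop and batch size are
 * hypothetical.
 */
#if 0
static void
example_bulk_work(int count)
{
    int i;

    for (i = 0; i < count; ++i) {
	/* ... one unit of cpu-bound work ... */
	if ((i & 255) == 0)
	    lwkt_yield();		/* round-robin with our peers */
    }
}
#endif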
981 
982 /*
983  * This function is used along with the lwkt_passive_recover() inline
984  * by the trap code to negotiate a passive release of the current
985  * process/lwp designation with the user scheduler.
986  */
987 void
988 lwkt_passive_release(struct thread *td)
989 {
990     struct lwp *lp = td->td_lwp;
991 
992     td->td_release = NULL;
993     lwkt_setpri_self(TDPRI_KERN_USER);
994     lp->lwp_proc->p_usched->release_curproc(lp);
995 }
996 
997 /*
998  * Make a kernel thread act as if it were in user mode with regard
999  * to scheduling, to avoid becoming cpu-bound in the kernel.  Kernel
1000  * loops which may be potentially cpu-bound can call lwkt_user_yield().
1001  *
1002  * The lwkt_user_yield() function is designed to have very low overhead
1003  * if no yield is determined to be needed.
1004  */
1005 void
1006 lwkt_user_yield(void)
1007 {
1008     thread_t td = curthread;
1009     struct lwp *lp = td->td_lwp;
1010 
1011 #ifdef SMP
1012     /*
1013      * XXX SEVERE TEMPORARY HACK.  A cpu-bound operation running in the
1014      * kernel can prevent other cpus from servicing interrupt threads
1015      * which still require the MP lock (which is a lot of them).  This
1016      * has a chaining effect since if the interrupt is blocked, so is
1017      * the event, so normal scheduling will not pick up on the problem.
1018      */
1019     if (mplock_countx && td->td_mpcount) {
1020 	int savecnt = td->td_mpcount;
1021 
1022 	td->td_mpcount = 1;
1023 	mplock_countx = 0;
1024 	rel_mplock();
1025 	DELAY(bgl_yield);
1026 	get_mplock();
1027 	td->td_mpcount = savecnt;
1028     }
1029 #endif
1030 
1031     /*
1032      * Another kernel thread wants the cpu
1033      */
1034     if (lwkt_resched_wanted())
1035 	lwkt_switch();
1036 
1037     /*
1038      * If the user scheduler has asynchronously determined that the current
1039      * process (when running in user mode) needs to lose the cpu then make
1040      * sure we are released.
1041      */
1042     if (user_resched_wanted()) {
1043 	if (td->td_release)
1044 	    td->td_release(td);
1045     }
1046 
1047     /*
1048      * If we are released reduce our priority
1049      */
1050     if (td->td_release == NULL) {
1051 	if (lwkt_check_resched(td) > 0)
1052 		lwkt_switch();
1053 	if (lp) {
1054 		lp->lwp_proc->p_usched->acquire_curproc(lp);
1055 		td->td_release = lwkt_passive_release;
1056 		lwkt_setpri_self(TDPRI_USER_NORM);
1057 	}
1058     }
1059 }
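/*
 * Illustrative sketch (#if 0'd out, not compiled): a kernel thread doing
 * potentially unbounded work on behalf of a user process calls
 * lwkt_user_yield() in its main loop so it competes at user priority
 * instead of monopolizing the cpu (and, on SMP, the MP lock).  The work
 * loop is hypothetical.
 */
#if 0
static void
example_user_paced_loop(void *arg __unused)
{
    for (;;) {
	/* ... one chunk of work on behalf of the current lwp ... */
	lwkt_user_yield();
    }
}
#endif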
1060 
1061 /*
1062  * Return 0 if no runnable threads are pending at the same or higher
1063  * priority as the passed thread.
1064  *
1065  * Return 1 if runnable threads are pending at the same priority.
1066  *
1067  * Return 2 if runnable threads are pending at a higher priority.
1068  */
1069 int
1070 lwkt_check_resched(thread_t td)
1071 {
1072 	int pri = td->td_pri & TDPRI_MASK;
1073 
1074 	if (td->td_gd->gd_runqmask > (2 << pri) - 1)
1075 		return(2);
1076 	if (TAILQ_NEXT(td, td_threadq))
1077 		return(1);
1078 	return(0);
1079 }
1080 
1081 /*
1082  * Generic schedule.  Possibly schedule threads belonging to other cpus and
1083  * deal with threads that might be blocked on a wait queue.
1084  *
1085  * We have a little helper inline function which does additional work after
1086  * the thread has been enqueued, including dealing with preemption and
1087  * setting need_lwkt_resched() (which prevents the kernel from returning
1088  * to userland until it has processed higher priority threads).
1089  *
1090  * It is possible for this routine to be called after a failed _enqueue
1091  * (due to the target thread migrating, sleeping, or otherwise blocked).
1092  * We have to check that the thread is actually on the run queue!
1093  *
1094  * reschedok is an optimized constant propagated from lwkt_schedule() or
1095  * lwkt_schedule_noresched().  By default it is non-zero, causing a
1096  * reschedule to be requested if the target thread has a higher priority.
1097  * The port messaging code will set MSG_NORESCHED and cause reschedok to
1098  * be 0, preventing undesired reschedules.
1099  */
1100 static __inline
1101 void
1102 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri, int reschedok)
1103 {
1104     thread_t otd;
1105 
1106     if (ntd->td_flags & TDF_RUNQ) {
1107 	if (ntd->td_preemptable && reschedok) {
1108 	    ntd->td_preemptable(ntd, cpri);	/* YYY +token */
1109 	} else if (reschedok) {
1110 	    otd = curthread;
1111 	    if ((ntd->td_pri & TDPRI_MASK) > (otd->td_pri & TDPRI_MASK))
1112 		need_lwkt_resched();
1113 	}
1114     }
1115 }
1116 
1117 static __inline
1118 void
1119 _lwkt_schedule(thread_t td, int reschedok)
1120 {
1121     globaldata_t mygd = mycpu;
1122 
1123     KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
1124     crit_enter_gd(mygd);
1125     KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
1126     if (td == mygd->gd_curthread) {
1127 	_lwkt_enqueue(td);
1128     } else {
1129 	/*
1130 	 * If we own the thread, there is no race (since we are in a
1131 	 * critical section).  If we do not own the thread there might
1132 	 * be a race but the target cpu will deal with it.
1133 	 */
1134 #ifdef SMP
1135 	if (td->td_gd == mygd) {
1136 	    _lwkt_enqueue(td);
1137 	    _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
1138 	} else {
1139 	    lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
1140 	}
1141 #else
1142 	_lwkt_enqueue(td);
1143 	_lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
1144 #endif
1145     }
1146     crit_exit_gd(mygd);
1147 }
1148 
1149 void
1150 lwkt_schedule(thread_t td)
1151 {
1152     _lwkt_schedule(td, 1);
1153 }
1154 
1155 void
1156 lwkt_schedule_noresched(thread_t td)
1157 {
1158     _lwkt_schedule(td, 0);
1159 }
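/*
 * Illustrative sketch (#if 0'd out, not compiled): waking a hypothetical
 * worker thread.  lwkt_schedule() also requests a reschedule if the
 * worker has a higher priority than the caller; lwkt_schedule_noresched()
 * merely enqueues it, as the port messaging code does for MSG_NORESCHED.
 */
#if 0
static thread_t example_worker_td;

static void
example_queue_work(void)
{
    /* ... append a work item to a (hypothetical) queue ... */
    lwkt_schedule(example_worker_td);
}
#endif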
1160 
1161 #ifdef SMP
1162 
1163 /*
1164  * When scheduled remotely, if frame != NULL then the IPIQ is being
1165  * run via doreti or an interrupt and preemption can be allowed.
1166  *
1167  * To allow preemption we have to drop the critical section so only
1168  * one is present in _lwkt_schedule_post.
1169  */
1170 static void
1171 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
1172 {
1173     thread_t td = curthread;
1174     thread_t ntd = arg;
1175 
1176     if (frame && ntd->td_preemptable) {
1177 	crit_exit_noyield(td);
1178 	_lwkt_schedule(ntd, 1);
1179 	crit_enter_quick(td);
1180     } else {
1181 	_lwkt_schedule(ntd, 1);
1182     }
1183 }
1184 
1185 /*
1186  * Thread migration using a 'Pull' method.  The thread may or may not be
1187  * the current thread.  It MUST be descheduled and in a stable state.
1188  * lwkt_giveaway() must be called on the cpu owning the thread.
1189  *
1190  * At any point after lwkt_giveaway() is called, the target cpu may
1191  * 'pull' the thread by calling lwkt_acquire().
1192  *
1193  * We have to make sure the thread is not sitting on a per-cpu tsleep
1194  * queue or it will blow up when it moves to another cpu.
1195  *
1196  * MPSAFE - must be called under very specific conditions.
1197  */
1198 void
1199 lwkt_giveaway(thread_t td)
1200 {
1201     globaldata_t gd = mycpu;
1202 
1203     crit_enter_gd(gd);
1204     if (td->td_flags & TDF_TSLEEPQ)
1205 	tsleep_remove(td);
1206     KKASSERT(td->td_gd == gd);
1207     TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
1208     td->td_flags |= TDF_MIGRATING;
1209     crit_exit_gd(gd);
1210 }
1211 
1212 void
1213 lwkt_acquire(thread_t td)
1214 {
1215     globaldata_t gd;
1216     globaldata_t mygd;
1217 
1218     KKASSERT(td->td_flags & TDF_MIGRATING);
1219     gd = td->td_gd;
1220     mygd = mycpu;
1221     if (gd != mycpu) {
1222 	cpu_lfence();
1223 	KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1224 	crit_enter_gd(mygd);
1225 	while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1226 #ifdef SMP
1227 	    lwkt_process_ipiq();
1228 #endif
1229 	    cpu_lfence();
1230 	}
1231 	td->td_gd = mygd;
1232 	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1233 	td->td_flags &= ~TDF_MIGRATING;
1234 	crit_exit_gd(mygd);
1235     } else {
1236 	crit_enter_gd(mygd);
1237 	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1238 	td->td_flags &= ~TDF_MIGRATING;
1239 	crit_exit_gd(mygd);
1240     }
1241 }
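/*
 * Illustrative sketch (#if 0'd out, not compiled): the two halves of the
 * 'pull' migration protocol.  How the thread pointer gets from the old
 * cpu to the new one is not shown and the helpers are hypothetical.
 */
#if 0
/* Runs on the cpu that currently owns the (descheduled, stable) thread */
static void
example_giveaway_side(thread_t td)
{
    lwkt_giveaway(td);			/* td is now marked TDF_MIGRATING */
}

/* Runs later on the target cpu */
static void
example_acquire_side(thread_t td)
{
    lwkt_acquire(td);			/* pull td onto this cpu's tdallq */
    lwkt_schedule(td);
}
#endif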
1242 
1243 #endif
1244 
1245 /*
1246  * Generic deschedule.  Descheduling threads other than your own should be
1247  * done only in carefully controlled circumstances.  Descheduling is
1248  * asynchronous.
1249  *
1250  * This function may block if the cpu has run out of messages.
1251  */
1252 void
1253 lwkt_deschedule(thread_t td)
1254 {
1255     crit_enter();
1256 #ifdef SMP
1257     if (td == curthread) {
1258 	_lwkt_dequeue(td);
1259     } else {
1260 	if (td->td_gd == mycpu) {
1261 	    _lwkt_dequeue(td);
1262 	} else {
1263 	    lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
1264 	}
1265     }
1266 #else
1267     _lwkt_dequeue(td);
1268 #endif
1269     crit_exit();
1270 }
1271 
1272 /*
1273  * Set the target thread's priority.  This routine does not automatically
1274  * switch to a higher priority thread, LWKT threads are not designed for
1275  * continuous priority changes.  Yield if you want to switch.
1276  *
1277  * We have to retain the critical section count which uses the high bits
1278  * of the td_pri field.  The specified priority may also indicate zero or
1279  * more critical sections by adding TDPRI_CRIT*N.
1280  *
1281  * Note that we requeue the thread whether it winds up on a different runq
1282  * or not.  uio_yield() depends on this and the routine is not normally
1283  * called with the same priority otherwise.
1284  */
1285 void
1286 lwkt_setpri(thread_t td, int pri)
1287 {
1288     KKASSERT(pri >= 0);
1289     KKASSERT(td->td_gd == mycpu);
1290     crit_enter();
1291     if (td->td_flags & TDF_RUNQ) {
1292 	_lwkt_dequeue(td);
1293 	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
1294 	_lwkt_enqueue(td);
1295     } else {
1296 	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
1297     }
1298     crit_exit();
1299 }
1300 
1301 /*
1302  * Set the initial priority for a thread prior to it being scheduled for
1303  * the first time.  The thread MUST NOT be scheduled before or during
1304  * this call.  The thread may be assigned to a cpu other than the current
1305  * cpu.
1306  *
1307  * Typically used after a thread has been created with TDF_STOPREQ,
1308  * and before the thread is initially scheduled.
1309  */
1310 void
1311 lwkt_setpri_initial(thread_t td, int pri)
1312 {
1313     KKASSERT(pri >= 0);
1314     KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1315     td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
1316 }
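/*
 * Illustrative sketch (#if 0'd out, not compiled): the intended use of
 * lwkt_setpri_initial().  A thread created with TDF_STOPREQ is not
 * scheduled by lwkt_create(), so its priority can be adjusted before it
 * ever runs.  The helper, the priority and the name are hypothetical.
 */
#if 0
static void
example_start_helper(void (*func)(void *), void *arg)
{
    thread_t td;

    lwkt_create(func, arg, &td, NULL, TDF_STOPREQ, -1, "examplehelper");
    lwkt_setpri_initial(td, TDPRI_KERN_DAEMON);
    lwkt_schedule(td);
}
#endif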
1317 
1318 void
1319 lwkt_setpri_self(int pri)
1320 {
1321     thread_t td = curthread;
1322 
1323     KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
1324     crit_enter();
1325     if (td->td_flags & TDF_RUNQ) {
1326 	_lwkt_dequeue(td);
1327 	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
1328 	_lwkt_enqueue(td);
1329     } else {
1330 	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
1331     }
1332     crit_exit();
1333 }
1334 
1335 /*
1336  * Migrate the current thread to the specified cpu.
1337  *
1338  * This is accomplished by descheduling ourselves from the current cpu,
1339  * moving our thread to the tdallq of the target cpu, IPI messaging the
1340  * target cpu, and switching out.  TDF_MIGRATING prevents scheduling
1341  * races while the thread is being migrated.
1342  *
1343  * We must be sure to remove ourselves from the current cpu's tsleepq
1344  * before potentially moving to another queue.  The thread can be on
1345  * a tsleepq due to a left-over tsleep_interlock().
1346  */
1347 #ifdef SMP
1348 static void lwkt_setcpu_remote(void *arg);
1349 #endif
1350 
1351 void
1352 lwkt_setcpu_self(globaldata_t rgd)
1353 {
1354 #ifdef SMP
1355     thread_t td = curthread;
1356 
1357     if (td->td_gd != rgd) {
1358 	crit_enter_quick(td);
1359 	if (td->td_flags & TDF_TSLEEPQ)
1360 	    tsleep_remove(td);
1361 	td->td_flags |= TDF_MIGRATING;
1362 	lwkt_deschedule_self(td);
1363 	TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1364 	lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
1365 	lwkt_switch();
1366 	/* we are now on the target cpu */
1367 	TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
1368 	crit_exit_quick(td);
1369     }
1370 #endif
1371 }
1372 
1373 void
1374 lwkt_migratecpu(int cpuid)
1375 {
1376 #ifdef SMP
1377 	globaldata_t rgd;
1378 
1379 	rgd = globaldata_find(cpuid);
1380 	lwkt_setcpu_self(rgd);
1381 #endif
1382 }
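/*
 * Illustrative sketch (#if 0'd out, not compiled): temporarily running on
 * a specific cpu for some per-cpu work and then migrating back.  The
 * helper and the work are hypothetical; the caller must be in a context
 * where it is allowed to block and migrate.
 */
#if 0
static void
example_run_on_cpu(int cpuid)
{
    globaldata_t ogd = mycpu;		/* remember where we started */

    lwkt_migratecpu(cpuid);		/* deschedule, IPI the target, switch */
    /* ... per-cpu work, now executing on 'cpuid' ... */
    lwkt_setcpu_self(ogd);		/* migrate back home */
}
#endif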
1383 
1384 /*
1385  * Remote IPI for cpu migration (called while in a critical section so we
1386  * do not have to enter another one).  The thread has already been moved to
1387  * our cpu's allq, but we must wait for the thread to be completely switched
1388  * out on the originating cpu before we schedule it on ours or the stack
1389  * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
1390  * change to main memory.
1391  *
1392  * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
1393  * against wakeups.  It is best if this interface is used only when there
1394  * are no pending events that might try to schedule the thread.
1395  */
1396 #ifdef SMP
1397 static void
1398 lwkt_setcpu_remote(void *arg)
1399 {
1400     thread_t td = arg;
1401     globaldata_t gd = mycpu;
1402 
1403     while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1404 #ifdef SMP
1405 	lwkt_process_ipiq();
1406 #endif
1407 	cpu_lfence();
1408     }
1409     td->td_gd = gd;
1410     cpu_sfence();
1411     td->td_flags &= ~TDF_MIGRATING;
1412     KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
1413     _lwkt_enqueue(td);
1414 }
1415 #endif
1416 
1417 struct lwp *
1418 lwkt_preempted_proc(void)
1419 {
1420     thread_t td = curthread;
1421     while (td->td_preempted)
1422 	td = td->td_preempted;
1423     return(td->td_lwp);
1424 }
1425 
1426 /*
1427  * Create a kernel process/thread/whatever.  It shares its address space
1428  * with proc0 - ie: kernel only.
1429  *
1430  * NOTE!  By default new threads are created with the MP lock held.  A
1431  * thread which does not require the MP lock should release it by calling
1432  * rel_mplock() at the start of the new thread.
1433  */
1434 int
1435 lwkt_create(void (*func)(void *), void *arg,
1436     struct thread **tdp, thread_t template, int tdflags, int cpu,
1437     const char *fmt, ...)
1438 {
1439     thread_t td;
1440     __va_list ap;
1441 
1442     td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
1443 			   tdflags);
1444     if (tdp)
1445 	*tdp = td;
1446     cpu_set_thread_handler(td, lwkt_exit, func, arg);
1447 
1448     /*
1449      * Set up arg0 for 'ps' etc
1450      */
1451     __va_start(ap, fmt);
1452     kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
1453     __va_end(ap);
1454 
1455     /*
1456      * Schedule the thread to run
1457      */
1458     if ((td->td_flags & TDF_STOPREQ) == 0)
1459 	lwkt_schedule(td);
1460     else
1461 	td->td_flags &= ~TDF_STOPREQ;
1462     return 0;
1463 }
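/*
 * Illustrative sketch (#if 0'd out, not compiled): a minimal kernel thread
 * started with lwkt_create().  Per the note above, new threads begin life
 * holding the MP lock, so an MP-safe thread drops it immediately with
 * rel_mplock().  The thread function, wait channel and name are
 * hypothetical.
 */
#if 0
static int example_wchan;

static void
example_kthread(void *arg __unused)
{
    rel_mplock();			/* this thread does not need the BGL */
    for (;;) {
	/* ... do the thread's work ... */
	tsleep(&example_wchan, 0, "exwait", hz);
    }
    /* not reached; a finite thread would fall through into lwkt_exit() */
}

static void
example_kthread_start(void)
{
    lwkt_create(example_kthread, NULL, NULL, NULL, 0, -1, "example");
}
#endif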
1464 
1465 /*
1466  * Destroy an LWKT thread.   Warning!  This function is not called when
1467  * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
1468  * uses a different reaping mechanism.
1469  */
1470 void
1471 lwkt_exit(void)
1472 {
1473     thread_t td = curthread;
1474     thread_t std;
1475     globaldata_t gd;
1476 
1477     if (td->td_flags & TDF_VERBOSE)
1478 	kprintf("kthread %p %s has exited\n", td, td->td_comm);
1479     caps_exit(td);
1480 
1481     /*
1482      * Get us into a critical section to interlock gd_freetd and loop
1483      * until we can get it freed.
1484      *
1485      * We have to cache the current td in gd_freetd because objcache_put()ing
1486      * it would rip it out from under us while our thread is still active.
1487      */
1488     gd = mycpu;
1489     crit_enter_quick(td);
1490     while ((std = gd->gd_freetd) != NULL) {
1491 	gd->gd_freetd = NULL;
1492 	objcache_put(thread_cache, std);
1493     }
1494 
1495     /*
1496      * Remove thread resources from kernel lists and deschedule us for
1497      * the last time.
1498      */
1499     if (td->td_flags & TDF_TSLEEPQ)
1500 	tsleep_remove(td);
1501     biosched_done(td);
1502     lwkt_deschedule_self(td);
1503     lwkt_remove_tdallq(td);
1504     if (td->td_flags & TDF_ALLOCATED_THREAD)
1505 	gd->gd_freetd = td;
1506     cpu_thread_exit();
1507 }
1508 
1509 void
1510 lwkt_remove_tdallq(thread_t td)
1511 {
1512     KKASSERT(td->td_gd == mycpu);
1513     TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1514 }
1515 
1516 void
1517 crit_panic(void)
1518 {
1519     thread_t td = curthread;
1520     int lpri = td->td_pri;
1521 
1522     td->td_pri = 0;
1523     panic("td_pri is/would-go negative! %p %d", td, lpri);
1524 }
1525 
1526 #ifdef SMP
1527 
1528 /*
1529  * Called from debugger/panic on cpus which have been stopped.  We must still
1530  * process the IPIQ while stopped, even if we were stopped while in a critical
1531  * section (XXX).
1532  *
1533  * If we are dumping also try to process any pending interrupts.  This may
1534  * or may not work depending on the state of the cpu at the point it was
1535  * stopped.
1536  */
1537 void
1538 lwkt_smp_stopped(void)
1539 {
1540     globaldata_t gd = mycpu;
1541 
1542     crit_enter_gd(gd);
1543     if (dumping) {
1544 	lwkt_process_ipiq();
1545 	splz();
1546     } else {
1547 	lwkt_process_ipiq();
1548     }
1549     crit_exit_gd(gd);
1550 }
1551 
1552 /*
1553  * get_mplock() calls this routine if it is unable to obtain the MP lock.
1554  * get_mplock() has already incremented td_mpcount.  We must block and
1555  * not return until giant is held.
1556  *
1557  * All we have to do is lwkt_switch() away.  The LWKT scheduler will not
1558  * reschedule the thread until it can obtain the giant lock for it.
1559  */
1560 void
1561 lwkt_mp_lock_contested(void)
1562 {
1563     ++mplock_countx;
1564     loggiant(beg);
1565     lwkt_switch();
1566     loggiant(end);
1567 }
1568 
1569 /*
1570  * The rel_mplock() code will call this function after releasing the
1571  * last reference on the MP lock if mp_lock_contention_mask is non-zero.
1572  *
1573  * We then chain an IPI to a single other cpu potentially needing the
1574  * lock.  This is a bit of a heuristic and we can wind up with IPIs flying
1575  * all over the place.
1576  */
1577 static void lwkt_mp_lock_uncontested_remote(void *arg __unused);
1578 
1579 void
1580 lwkt_mp_lock_uncontested(void)
1581 {
1582     globaldata_t gd;
1583     globaldata_t dgd;
1584     cpumask_t mask;
1585     cpumask_t tmpmask;
1586     int cpuid;
1587 
1588     if (chain_mplock) {
1589 	gd = mycpu;
1590 	atomic_clear_int(&mp_lock_contention_mask, gd->gd_cpumask);
1591 	mask = mp_lock_contention_mask;
1592 	tmpmask = ~((1 << gd->gd_cpuid) - 1);
1593 
1594 	if (mask) {
1595 	    if (mask & tmpmask)
1596 		    cpuid = bsfl(mask & tmpmask);
1597 	    else
1598 		    cpuid = bsfl(mask);
1599 	    atomic_clear_int(&mp_lock_contention_mask, 1 << cpuid);
1600 	    dgd = globaldata_find(cpuid);
1601 	    lwkt_send_ipiq(dgd, lwkt_mp_lock_uncontested_remote, NULL);
1602 	}
1603     }
1604 }
1605 
1606 /*
1607  * The idea is for this IPI to interrupt a potentially lower priority
1608  * thread, such as a user thread, to allow the scheduler to reschedule
1609  * a higher priority kernel thread that needs the MP lock.
1610  *
1611  * For now we set the LWKT reschedule flag which generates an AST in
1612  * doreti, though theoretically it is also possible to preempt
1613  * here if the underlying thread was operating in user mode.  Nah.
1614  */
1615 static void
1616 lwkt_mp_lock_uncontested_remote(void *arg __unused)
1617 {
1618 	need_lwkt_resched();
1619 }
1620 
1621 #endif
1622