xref: /dflybsd-src/sys/sys/thread2.h (revision b5516a553a649d22c5fc69edf6d01122399d2529)
1f1d1c3faSMatthew Dillon /*
2f1d1c3faSMatthew Dillon  * SYS/THREAD2.H
3f1d1c3faSMatthew Dillon  *
4f1d1c3faSMatthew Dillon  * Implements inline procedure support for the LWKT subsystem.
5f1d1c3faSMatthew Dillon  *
6f1d1c3faSMatthew Dillon  * Generally speaking these routines only operate on threads associated
7f1d1c3faSMatthew Dillon  * with the current cpu.  For example, a higher priority thread pending
8f1d1c3faSMatthew Dillon  * on a different cpu will not be immediately scheduled by a yield() on
9f1d1c3faSMatthew Dillon  * this cpu.
10f1d1c3faSMatthew Dillon  */
11f1d1c3faSMatthew Dillon 
12f1d1c3faSMatthew Dillon #ifndef _SYS_THREAD2_H_
13f1d1c3faSMatthew Dillon #define _SYS_THREAD2_H_
14f1d1c3faSMatthew Dillon 
1503d6a592SMatthew Dillon #ifndef _KERNEL
1603d6a592SMatthew Dillon #error "This file should not be included by userland programs."
1797b3c8acSSascha Wildner #endif
1803d6a592SMatthew Dillon 
19f1d1c3faSMatthew Dillon /*
2005220613SMatthew Dillon  * Userland will have its own globaldata which it includes prior to this.
2105220613SMatthew Dillon  */
2203d6a592SMatthew Dillon #ifndef _SYS_SYSTM_H_
2303d6a592SMatthew Dillon #include <sys/systm.h>
2403d6a592SMatthew Dillon #endif
2505220613SMatthew Dillon #ifndef _SYS_GLOBALDATA_H_
2605220613SMatthew Dillon #include <sys/globaldata.h>
2705220613SMatthew Dillon #endif
28da82a65aSzrj #ifndef _SYS_CPUMASK_H_
29da82a65aSzrj #include <sys/cpumask.h>
30da82a65aSzrj #endif
31853ae338SMatthew Dillon #include <machine/cpufunc.h>
3205220613SMatthew Dillon 
3305220613SMatthew Dillon /*
34a4d95680SMatthew Dillon  * Don't let GCC reorder critical section count adjustments, because it
35a4d95680SMatthew Dillon  * will BLOW US UP if it does.
36a4d95680SMatthew Dillon  */
/*
 * Increment the current thread's critical section count.
 *
 * The cpu_ccfence() calls bracket the increment so the compiler cannot
 * migrate code across the critical section boundary (see the warning
 * above: such reordering would be fatal).
 */
static __inline void
crit_enter_raw(thread_t td)
{
	cpu_ccfence();
	++td->td_critcount;
	cpu_ccfence();
}
44a4d95680SMatthew Dillon 
/*
 * Decrement the current thread's critical section count.
 *
 * As with crit_enter_raw(), the cpu_ccfence() calls prevent the
 * compiler from reordering code across the section boundary.
 */
static __inline void
crit_exit_raw(thread_t td)
{
	cpu_ccfence();
	--td->td_critcount;
	cpu_ccfence();
}
52a4d95680SMatthew Dillon 
53a4d95680SMatthew Dillon /*
5401672f8cSMatthew Dillon  * Is a token held either by the specified thread or held shared?
5501672f8cSMatthew Dillon  *
5601672f8cSMatthew Dillon  * We can't inexpensively validate the thread for a shared token
5701672f8cSMatthew Dillon  * without iterating td->td_toks, so this isn't a perfect test.
5801672f8cSMatthew Dillon  */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
	long count = tok->t_count;	/* snapshot; t_count can change */

	cpu_ccfence();
	/* Exclusively held by td if t_ref points into td's tokref stack */
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
		return TRUE;
	/*
	 * Token held shared (by some thread, not necessarily td) when not
	 * exclusive and count bits other than EXCLUSIVE/EXCLREQ are set.
	 */
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
		return TRUE;
	}
	return FALSE;
}
7301672f8cSMatthew Dillon 
7401672f8cSMatthew Dillon /*
75b5d16701SMatthew Dillon  * Is a token held by the specified thread?
76b5d16701SMatthew Dillon  */
77b5d16701SMatthew Dillon static __inline int
_lwkt_token_held_excl(lwkt_token_t tok,thread_t td)7801672f8cSMatthew Dillon _lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
79b5d16701SMatthew Dillon {
8001672f8cSMatthew Dillon 	return ((tok->t_ref >= &td->td_toks_base &&
8154341a3bSMatthew Dillon 		 tok->t_ref < td->td_toks_stop));
82b5d16701SMatthew Dillon }
83b5d16701SMatthew Dillon 
84b5d16701SMatthew Dillon /*
8502d8a449SMatthew Dillon  * Critical section debugging
8602d8a449SMatthew Dillon  */
#ifdef DEBUG_CRIT_SECTIONS
/*
 * Debug variants: thread an identifying string (the caller's __func__
 * or an explicit id) through the crit_enter/crit_exit inlines so that
 * _debug_crit_enter()/_debug_crit_exit() can track pairing.
 */
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__func__)
#else
/*
 * Non-debug variants: the id argument machinery compiles away entirely.
 * Note crit_exit() goes through crit_exit_wrapper() (a real function)
 * to avoid inline code bloat; see the NOTE above _crit_exit_noyield().
 */
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif
12602d8a449SMatthew Dillon 
127*b5516a55SSascha Wildner void crit_exit_wrapper(__DEBUG_CRIT_ARG__);
128b6468f56SMatthew Dillon 
12902d8a449SMatthew Dillon /*
13002d8a449SMatthew Dillon  * Track crit_enter()/crit_exit() pairs and warn on mismatches.
13102d8a449SMatthew Dillon  */
13202d8a449SMatthew Dillon #ifdef DEBUG_CRIT_SECTIONS
13302d8a449SMatthew Dillon 
13402d8a449SMatthew Dillon static __inline void
_debug_crit_enter(thread_t td,const char * id)13502d8a449SMatthew Dillon _debug_crit_enter(thread_t td, const char *id)
13602d8a449SMatthew Dillon {
13702d8a449SMatthew Dillon     int wi = td->td_crit_debug_index;
13802d8a449SMatthew Dillon 
13902d8a449SMatthew Dillon     td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
14002d8a449SMatthew Dillon     ++td->td_crit_debug_index;
14102d8a449SMatthew Dillon }
14202d8a449SMatthew Dillon 
14302d8a449SMatthew Dillon static __inline void
_debug_crit_exit(thread_t td,const char * id)14402d8a449SMatthew Dillon _debug_crit_exit(thread_t td, const char *id)
14502d8a449SMatthew Dillon {
14602d8a449SMatthew Dillon     const char *gid;
14702d8a449SMatthew Dillon     int wi;
14802d8a449SMatthew Dillon 
14902d8a449SMatthew Dillon     wi = td->td_crit_debug_index - 1;
15002d8a449SMatthew Dillon     if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
15102d8a449SMatthew Dillon 	if (td->td_in_crit_report == 0) {
15202d8a449SMatthew Dillon 	    td->td_in_crit_report = 1;
15326be20a0SSascha Wildner 	    kprintf("crit_exit(%s) expected id %s\n", id, gid);
15402d8a449SMatthew Dillon 	    td->td_in_crit_report = 0;
15502d8a449SMatthew Dillon 	}
15602d8a449SMatthew Dillon     }
15702d8a449SMatthew Dillon     --td->td_crit_debug_index;
15802d8a449SMatthew Dillon }
15902d8a449SMatthew Dillon 
16002d8a449SMatthew Dillon #endif
16102d8a449SMatthew Dillon 
16202d8a449SMatthew Dillon /*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
1664a28fe22SMatthew Dillon  * physically disabled.
167f1d1c3faSMatthew Dillon  *
1684a28fe22SMatthew Dillon  * Hard critical sections prevent preemption and disallow any blocking
1694a28fe22SMatthew Dillon  * or thread switching, and in addition will assert on any blockable
1704a28fe22SMatthew Dillon  * operation (acquire token not already held, lockmgr, mutex ops, or
1714a28fe22SMatthew Dillon  * splz).  Spinlocks can still be used in hard sections.
1727966cb69SMatthew Dillon  *
1734a28fe22SMatthew Dillon  * All critical section routines only operate on the current thread.
1744a28fe22SMatthew Dillon  * Passed gd or td arguments are simply optimizations when mycpu or
1754a28fe22SMatthew Dillon  * curthread is already available to the caller.
176f1d1c3faSMatthew Dillon  */
17757c254dbSMatthew Dillon 
1784a28fe22SMatthew Dillon /*
1794a28fe22SMatthew Dillon  * crit_enter
1804a28fe22SMatthew Dillon  */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    crit_enter_raw(td);		/* fenced ++td->td_critcount */
    __DEBUG_CRIT_ENTER(td);	/* no-op unless DEBUG_CRIT_SECTIONS */
}
187f1d1c3faSMatthew Dillon 
/*
 * Convenience form taking a globaldata pointer the caller already has.
 */
static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}
193f1d1c3faSMatthew Dillon 
/*
 * Hard critical section: also bump gd_intr_nesting_level so blockable
 * operations can assert (see the section comment below).
 */
static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
    ++gd->gd_intr_nesting_level;
    cpu_ccfence();	/* keep compiler from moving code out of the section */
}
20137af14feSMatthew Dillon 
202f1d1c3faSMatthew Dillon 
2034a28fe22SMatthew Dillon /*
2044a28fe22SMatthew Dillon  * crit_exit*()
2054a28fe22SMatthew Dillon  *
2064a28fe22SMatthew Dillon  * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
2074a28fe22SMatthew Dillon  *	 never true regardless of crit_count, should result in 100%
2084a28fe22SMatthew Dillon  *	 optimal code execution.  We don't check crit_count because
2094a28fe22SMatthew Dillon  *	 it just bloats the inline and does not improve performance.
210e4db4f52SMatthew Dillon  *
211e4db4f52SMatthew Dillon  * NOTE: This can produce a considerable amount of code despite the
212e4db4f52SMatthew Dillon  *	 relatively few lines of code so the non-debug case typically
213e4db4f52SMatthew Dillon  *	 just wraps it in a real function, crit_exit_wrapper().
2144a28fe22SMatthew Dillon  */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(td);	/* no-op unless DEBUG_CRIT_SECTIONS */
    crit_exit_raw(td);		/* fenced --td->td_critcount */
#ifdef INVARIANTS
    if (__predict_false(td->td_critcount < 0))
	crit_panic();		/* unbalanced crit_exit() */
#endif
}
2257966cb69SMatthew Dillon 
static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
    /* per NOTE above: test only gd_reqflags, not crit_count */
    if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
	lwkt_maybe_splz(td);
}
233f1d1c3faSMatthew Dillon 
/*
 * Convenience form taking a globaldata pointer the caller already has.
 */
static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}
2394a28fe22SMatthew Dillon 
/*
 * Exit a hard critical section, undoing _crit_enter_hard() in reverse
 * order: drop the interrupt nesting level, then exit the section.
 */
static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    cpu_ccfence();	/* keep compiler from moving code out of the section */
    --gd->gd_intr_nesting_level;
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}
24737af14feSMatthew Dillon 
248f1d1c3faSMatthew Dillon static __inline int
crit_test(thread_t td)249654a39f0SMatthew Dillon crit_test(thread_t td)
250654a39f0SMatthew Dillon {
251f9235b6dSMatthew Dillon     return(td->td_critcount);
252654a39f0SMatthew Dillon }
253654a39f0SMatthew Dillon 
25441a01a4dSMatthew Dillon /*
255b5d16701SMatthew Dillon  * Return whether any threads are runnable.
25696728c05SMatthew Dillon  */
2574b5f931bSMatthew Dillon static __inline int
lwkt_runnable(void)2584b5f931bSMatthew Dillon lwkt_runnable(void)
2594b5f931bSMatthew Dillon {
260f9235b6dSMatthew Dillon     return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
2614b5f931bSMatthew Dillon }
2624b5f931bSMatthew Dillon 
263234d4a62SMatthew Dillon static __inline int
lwkt_getpri(thread_t td)264234d4a62SMatthew Dillon lwkt_getpri(thread_t td)
265234d4a62SMatthew Dillon {
266f9235b6dSMatthew Dillon     return(td->td_pri);
267234d4a62SMatthew Dillon }
268234d4a62SMatthew Dillon 
269234d4a62SMatthew Dillon static __inline int
lwkt_getpri_self(void)270234d4a62SMatthew Dillon lwkt_getpri_self(void)
271234d4a62SMatthew Dillon {
272234d4a62SMatthew Dillon     return(lwkt_getpri(curthread));
273234d4a62SMatthew Dillon }
274234d4a62SMatthew Dillon 
2753824f392SMatthew Dillon /*
2763824f392SMatthew Dillon  * Reduce our priority in preparation for a return to userland.  If
2773824f392SMatthew Dillon  * our passive release function was still in place, our priority was
2783824f392SMatthew Dillon  * never raised and does not need to be reduced.
2793824f392SMatthew Dillon  *
2803824f392SMatthew Dillon  * See also lwkt_passive_release() and platform/blah/trap.c
2813824f392SMatthew Dillon  */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
    /*
     * td_release still set means the passive release function never
     * ran, so our priority was never raised and need not be lowered.
     */
    if (td->td_release == NULL)
	lwkt_setpri_self(TDPRI_USER_NORM);
    td->td_release = NULL;
#endif
}
2913824f392SMatthew Dillon 
292d5b2d319SMatthew Dillon /*
293d5b2d319SMatthew Dillon  * cpusync support
294d5b2d319SMatthew Dillon  */
295d5b2d319SMatthew Dillon static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs,cpumask_t mask,cpusync_func_t func,void * data)296d5b2d319SMatthew Dillon lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
297d5b2d319SMatthew Dillon 		  cpusync_func_t func, void *data)
298d5b2d319SMatthew Dillon {
299d5b2d319SMatthew Dillon 	cs->cs_mask = mask;
300d5b2d319SMatthew Dillon 	/* cs->cs_mack = 0; handled by _interlock */
301d5b2d319SMatthew Dillon 	cs->cs_func = func;
302d5b2d319SMatthew Dillon 	cs->cs_data = data;
303d5b2d319SMatthew Dillon }
304d5b2d319SMatthew Dillon 
305b8a98473SMatthew Dillon /*
306b8a98473SMatthew Dillon  * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
307b8a98473SMatthew Dillon  * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
308b8a98473SMatthew Dillon  * the trap frame is not known).  However, we wish to provide opaque
309b8a98473SMatthew Dillon  * interfaces for simpler callbacks... the basic IPI messaging function as
310b8a98473SMatthew Dillon  * used by the kernel takes a single argument.
311b8a98473SMatthew Dillon  */
312b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq(globaldata_t target,ipifunc1_t func,void * arg)313b8a98473SMatthew Dillon lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
314b8a98473SMatthew Dillon {
315b8a98473SMatthew Dillon     return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
316b8a98473SMatthew Dillon }
317b8a98473SMatthew Dillon 
318b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq2(globaldata_t target,ipifunc2_t func,void * arg1,int arg2)319fc17ad60SMatthew Dillon lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
320b8a98473SMatthew Dillon {
321b8a98473SMatthew Dillon     return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
322b8a98473SMatthew Dillon }
323b8a98473SMatthew Dillon 
324b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq_mask(cpumask_t mask,ipifunc1_t func,void * arg)325da23a592SMatthew Dillon lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
326b8a98473SMatthew Dillon {
327b8a98473SMatthew Dillon     return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
328b8a98473SMatthew Dillon }
329b8a98473SMatthew Dillon 
330b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask,ipifunc2_t func,void * arg1,int arg2)331da23a592SMatthew Dillon lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
332b8a98473SMatthew Dillon {
333b8a98473SMatthew Dillon     return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
334b8a98473SMatthew Dillon }
335b8a98473SMatthew Dillon 
336b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq_passive(globaldata_t target,ipifunc1_t func,void * arg)337b8a98473SMatthew Dillon lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
338b8a98473SMatthew Dillon {
339b8a98473SMatthew Dillon     return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
340b8a98473SMatthew Dillon }
341b8a98473SMatthew Dillon 
342b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq2_passive(globaldata_t target,ipifunc2_t func,void * arg1,int arg2)343fc17ad60SMatthew Dillon lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
344b8a98473SMatthew Dillon 		       void *arg1, int arg2)
345b8a98473SMatthew Dillon {
346b8a98473SMatthew Dillon     return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
347b8a98473SMatthew Dillon }
348b8a98473SMatthew Dillon 
349b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq_bycpu(int dcpu,ipifunc1_t func,void * arg)350b8a98473SMatthew Dillon lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
351b8a98473SMatthew Dillon {
352b8a98473SMatthew Dillon     return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
353b8a98473SMatthew Dillon }
354b8a98473SMatthew Dillon 
355b8a98473SMatthew Dillon static __inline int
lwkt_send_ipiq2_bycpu(int dcpu,ipifunc2_t func,void * arg1,int arg2)356b8a98473SMatthew Dillon lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
357b8a98473SMatthew Dillon {
358b8a98473SMatthew Dillon     return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
359b8a98473SMatthew Dillon }
360b8a98473SMatthew Dillon 
361e76d2ad3SSepherosa Ziehau static __inline int
lwkt_need_ipiq_process(globaldata_t gd)362e76d2ad3SSepherosa Ziehau lwkt_need_ipiq_process(globaldata_t gd)
363e76d2ad3SSepherosa Ziehau {
364e76d2ad3SSepherosa Ziehau     lwkt_ipiq_t ipiq;
365e76d2ad3SSepherosa Ziehau 
366e76d2ad3SSepherosa Ziehau     if (CPUMASK_TESTNZERO(gd->gd_ipimask))
367e76d2ad3SSepherosa Ziehau 	return 1;
368e76d2ad3SSepherosa Ziehau 
369e76d2ad3SSepherosa Ziehau     ipiq = &gd->gd_cpusyncq;
370e76d2ad3SSepherosa Ziehau     return (ipiq->ip_rindex != ipiq->ip_windex);
371e76d2ad3SSepherosa Ziehau }
372e76d2ad3SSepherosa Ziehau 
37303d6a592SMatthew Dillon #endif	/* _SYS_THREAD2_H_ */
374