/*	$NetBSD: lwp.h,v 1.231 2023/11/02 10:31:55 martin Exp $	*/

/*
 * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020, 2023
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_LWP_H_
#define _SYS_LWP_H_

#if defined(_KERNEL) || defined(_KMEMUSER)

#include <sys/param.h>

#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kcpuset.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/specificdata.h>
#include <sys/time.h>
#include <sys/wchan.h>

#if defined(_KERNEL)
struct lwp;
/* forward declare this for <machine/cpu.h> so it can get l_cpu. */
static __inline struct cpu_info *lwp_getcpu(struct lwp *);
#include <machine/cpu.h>		/* curcpu() and cpu_info */
#include <sys/atomic.h>
#ifdef _KERNEL_OPT
#include "opt_kcov.h"
#include "opt_kmsan.h"
#include "opt_maxlwp.h"
#endif
#endif

#include <machine/proc.h>		/* Machine-dependent proc substruct. */

/*
 * Lightweight process.  Field markings and the corresponding locks:
 *
 * a:	proc_lock
 * c:	condition variable interlock, passed to cv_wait()
 * l:	*l_mutex
 * p:	l_proc->p_lock
 * s:	spc_mutex, which may or may not be referenced by l_mutex
 * S:	l_selcluster->sc_lock
 * (:	unlocked, stable
 * !:	unlocked, may only be reliably accessed by the LWP itself
 *
 * Fields are clustered together by usage (to increase the likelihood
 * of cache hits) and by size (to reduce dead space in the structure).
 */
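
/*
 * For example, l_stat below is marked "l:", so it may only be examined
 * or changed with the LWP's l_mutex held (e.g. via lwp_lock()), while
 * l_lid is marked "(:", so it is stable for the LWP's lifetime and may
 * be read without locking.
 */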

#include <sys/pcu.h>

struct lockdebug;
struct sysent;

struct lwp {
	/* Must not be zeroed on free. */
	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
	struct turnstile *l_ts;		/* l: current turnstile */
	int		l_stat;		/* l: overall LWP status */
	int		l__reserved;	/*  : padding - reuse as needed */

	/* Scheduling and overall state. */
#define	l_startzero l_runq
	TAILQ_ENTRY(lwp) l_runq;	/* s: run queue */
	union {
		void *	info;		/* s: scheduler-specific structure */
		u_int	timeslice;	/* l: time-quantum for SCHED_M2 */
	} l_sched;
	void		*l_addr;	/* l: PCB address; use lwp_getpcb() */
	struct mdlwp	l_md;		/* l: machine-dependent fields. */
	struct bintime	l_rtime;	/* l: real time */
	struct bintime	l_stime;	/* l: start time (while ONPROC) */
	int		l_flag;		/* l: misc flag values */
	u_int		l_swtime;	/* l: time swapped in or out */
	u_int		l_rticks;	/* l: Saved start time of run */
	u_int		l_rticksum;	/* l: Sum of ticks spent running */
	u_int		l_slpticks;	/* l: Saved start time of sleep */
	u_int		l_slpticksum;	/* l: Sum of ticks spent sleeping */
	int		l_class;	/* l: scheduling class */
	pri_t		l_boostpri;	/* l: boosted priority after blocking */
	pri_t		l_priority;	/* l: scheduler priority */
	pri_t		l_inheritedprio;/* l: inherited priority */
	pri_t		l_protectprio;	/* l: for PTHREAD_PRIO_PROTECT */
	pri_t		l_auxprio;	/* l: max(inherit,protect) priority */
	int		l_protectdepth;	/* l: for PTHREAD_PRIO_PROTECT */
	u_int		l_cpticks;	/* (: Ticks of CPU time */
	psetid_t	l_psid;		/* l: assigned processor-set ID */
	fixpt_t		l_pctcpu;	/* p: %cpu during l_swtime */
	fixpt_t		l_estcpu;	/* l: cpu time for SCHED_4BSD */
	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
	struct cpu_info *l_target_cpu;	/* l: target CPU to migrate */
	struct lwpctl	*l_lwpctl;	/* p: lwpctl block kernel address */
	struct lcpage	*l_lcpage;	/* p: lwpctl containing page */
	kcpuset_t	*l_affinity;	/* l: CPU set for affinity */

	/* Synchronisation. */
	const struct syncobj *l_syncobj;/* l: sync object operations set */
	LIST_ENTRY(lwp) l_sleepchain;	/* l: sleep queue */
	wchan_t		l_wchan;	/* l: sleep address */
	const char	*l_wmesg;	/* l: reason for sleep */
	struct sleepq	*l_sleepq;	/* l: current sleep queue */
	callout_t	l_timeout_ch;	/* !: callout for tsleep */
	kcondvar_t	l_waitcv;	/* a: vfork() wait */
	u_int		l_slptime;	/* l: time since last blocked */
	bool		l_vforkwaiting;	/* a: vfork() waiting */

	/* User-space synchronization. */
	uintptr_t	l_robust_head;	/* !: list of robust futexes */
	uint32_t	l___rsvd1;	/* reserved for future use */

#if PCU_UNIT_COUNT > 0
	struct cpu_info	* volatile l_pcu_cpu[PCU_UNIT_COUNT];
	uint32_t	l_pcu_valid;
#endif

	/* Process level and global state, misc. */
	lwpid_t		l_lid;		/* (: LWP identifier; local to proc */
	LIST_ENTRY(lwp)	l_list;		/* a: entry on list of all LWPs */
	void		*l_ctxlink;	/* p: uc_link {get,set}context */
	struct proc	*l_proc;	/* p: parent process */
	LIST_ENTRY(lwp)	l_sibling;	/* p: entry on proc's list of LWPs */
	char		*l_name;	/* (: name, optional */
	lwpid_t		l_waiter;	/* p: first LWP waiting on us */
	lwpid_t		l_waitingfor;	/* p: specific LWP we are waiting on */
	int		l_prflag;	/* p: process level flags */
	u_int		l_refcnt;	/* p: reference count on this LWP */

	/* State of select() or poll(). */
	int		l_selflag;	/* S: polling state flags */
	int		l_selret;	/* S: return value of select/poll */
	SLIST_HEAD(,selinfo) l_selwait;	/* S: descriptors waited on */
	uintptr_t	l_selrec;	/* !: argument for selrecord() */
	struct selcluster *l_selcluster;/* !: associated cluster data */
	void *		l_selbits;	/* (: select() bit-field */
	size_t		l_selni;	/* (: size of a single bit-field */

	/* Signals. */
	int		l_sigrestore;	/* p: need to restore old sig mask */
	sigset_t	l_sigwaitset;	/* p: signals being waited for */
	kcondvar_t	l_sigcv;	/* p: for sigsuspend() */
	struct ksiginfo	*l_sigwaited;	/* p: delivered signals from set */
	sigpend_t	*l_sigpendset;	/* p: XXX issignal()/postsig() baton */
	LIST_ENTRY(lwp)	l_sigwaiter;	/* p: chain on list of waiting LWPs */
	stack_t		l_sigstk;	/* p: sp & on stack state variable */
	sigset_t	l_sigmask;	/* p: signal mask */
	sigpend_t	l_sigpend;	/* p: signals to this LWP */
	sigset_t	l_sigoldmask;	/* p: mask for sigpause */

	/* Private data. */
	specificdata_reference
		l_specdataref;		/* !: subsystem lwp-specific data */
	struct timespec l_ktrcsw;	/* !: for ktrace CSW trace XXX */
	void		*l_private;	/* !: svr4-style lwp-private data */
	struct lwp	*l_switchto;	/* !: mi_switch: switch to this LWP */
	struct kauth_cred *l_cred;	/* !: cached credentials */
	struct filedesc	*l_fd;		/* !: cached copy of proc::p_fd */
	void		*l_emuldata;	/* !: kernel lwp-private data */
	struct fstrans_lwp_info *l_fstrans; /* (: fstrans private data */
	u_short		l_shlocks;	/* !: lockdebug: shared locks held */
	u_short		l_exlocks;	/* !: lockdebug: excl. locks held */
	u_short		l_psrefs;	/* !: count of psref held */
	u_short		l_blcnt;	/* !: count of kernel_lock held */
	volatile int	l_nopreempt;	/* !: don't preempt me! */
	volatile u_int	l_dopreempt;	/* s: kernel preemption pending */
	int		l_pflag;	/* !: LWP private flags */
	int		l_dupfd;	/* !: side return from cloning devs XXX */
	const struct sysent * volatile l_sysent;/* !: currently active syscall */
	struct rusage	l_ru;		/* !: accounting information */
	uint64_t	l_pfailtime;	/* !: for kernel preemption */
	uintptr_t	l_pfailaddr;	/* !: for kernel preemption */
	uintptr_t	l_pfaillock;	/* !: for kernel preemption */
	_TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
	volatile void	*l_ld_wanted;	/* !: lock currently wanted by LWP */
	uintptr_t	l_rwcallsite;	/* !: rwlock actual callsite */
	int		l_tcgen;	/* !: for timecounter removal */

	/* These are only used by 'options SYSCALL_TIMES'. */
	uint32_t	l_syscall_time;	/* !: time epoch for current syscall */
	uint64_t	*l_syscall_counter; /* !: counter for current process */

	struct kdtrace_thread *l_dtrace; /* (: DTrace-specific data. */

#ifdef KMSAN
	void		*l_kmsan; /* !: KMSAN private data. */
#endif
#ifdef KCOV
	void		*l_kcov; /* !: KCOV private data. */
#endif
};

/*
 * UAREA_PCB_OFFSET: the offset of the PCB structure within the uarea.
 * MD code may define it in <machine/proc.h> to indicate a different
 * uarea layout.
 */
#ifndef UAREA_PCB_OFFSET
#define	UAREA_PCB_OFFSET	0
#endif
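
/*
 * Purely illustrative (hypothetical, not taken from any particular
 * port): an MD <machine/proc.h> that keeps the PCB at the top of the
 * uarea might define something like
 *
 *	#define UAREA_PCB_OFFSET	(USPACE - ALIGN(sizeof(struct pcb)))
 *
 * where USPACE and struct pcb are that port's own definitions.
 */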

LIST_HEAD(lwplist, lwp);		/* A list of LWPs. */

#ifdef _KERNEL
extern struct lwplist	alllwp;		/* List of all LWPs. */
extern lwp_t		lwp0;		/* LWP for proc0. */
extern int		maxlwp __read_mostly;	/* max number of lwps */
#ifndef MAXLWP
#define	MAXLWP		4096		/* default max */
#endif
#ifndef MAXMAXLWP
#define	MAXMAXLWP	65535		/* absolute max */
#endif
#endif

#endif /* _KERNEL || _KMEMUSER */

/*
 * These flags are kept in l_flag, and they are modified only with the LWP
 * locked.
 */
#define	LW_IDLE		0x00000001 /* Idle lwp. */
#define	LW_LWPCTL	0x00000002 /* Adjust lwpctl in userret */
#define	LW_STIMO	0x00000040 /* Sleep timed out */
#define	LW_SINTR	0x00000080 /* Sleep is interruptible. */
#define	LW_CATCHINTR	0x00000100 /* LW_SINTR intent; see sleepq_block(). */
#define	LW_SYSTEM	0x00000200 /* Kernel thread */
#define	LW_SYSTEM_FPU	0x00000400 /* Kernel thread with vector/FP enabled */
#define	LW_DBGSUSPEND	0x00010000 /* Suspend by debugger */
#define	LW_WSUSPEND	0x00020000 /* Suspend before return to user */
#define	LW_BATCH	0x00040000 /* LWP tends to hog CPU */
#define	LW_WCORE	0x00080000 /* Stop for core dump on return to user */
#define	LW_WEXIT	0x00100000 /* Exit before return to user */
#define	LW_PENDSIG	0x01000000 /* Pending signal for us */
#define	LW_CANCELLED	0x02000000 /* tsleep should not sleep */
#define	LW_CACHECRED	0x04000000 /* Cache new process credential */
#define	LW_WREBOOT	0x08000000 /* System is rebooting, please suspend */
#define	LW_UNPARKED	0x10000000 /* Unpark op pending */
#define	LW_RUMP_CLEAR	0x40000000 /* Clear curlwp in RUMP scheduler */
#define	LW_RUMP_QEXIT	0x80000000 /* LWP should exit ASAP */
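
/*
 * A minimal sketch of the locking protocol, assuming a valid lwp_t *l
 * (here requesting suspension at the next return to user space):
 *
 *	lwp_lock(l);
 *	l->l_flag |= LW_WSUSPEND;
 *	lwp_unlock(l);
 */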

/*
 * The second set of flags is kept in l_pflag, and they are modified only by
 * the LWP itself, or modified when it's known the LWP cannot be running.
 * LP_RUNNING is typically updated with the LWP locked, but not always in
 * the case of soft interrupt handlers.
 */
#define	LP_KTRACTIVE	0x00000001 /* Executing ktrace operation */
#define	LP_KTRCSW	0x00000002 /* ktrace context switch marker */
#define	LP_KTRCSWUSER	0x00000004 /* ktrace context switch marker */
	/* 		0x00000008    was LP_PIDLID */
#define	LP_OWEUPC	0x00000010 /* Owe user profiling tick */
#define	LP_MPSAFE	0x00000020 /* Starts life without kernel_lock */
#define	LP_INTR		0x00000040 /* Soft interrupt handler */
#define	LP_SYSCTLWRITE	0x00000080 /* sysctl write lock held */
#define	LP_MUSTJOIN	0x00000100 /* Must join kthread on exit */
#define	LP_SINGLESTEP	0x00000400 /* Single step thread in ptrace(2) */
#define	LP_TIMEINTR	0x00010000 /* Time this soft interrupt */
#define	LP_PREEMPTING	0x00020000 /* mi_switch called involuntarily */
#define	LP_RUNNING	0x20000000 /* Active on a CPU */
#define	LP_TELEPORT	0x40000000 /* Teleport to new CPU on preempt() */
#define	LP_BOUND	0x80000000 /* Bound to a CPU */

/*
 * The third set of flags is kept in l_prflag and they are modified only
 * with p_lock held.
 */
#define	LPR_DETACHED	0x00800000 /* Won't be waited for. */
#define	LPR_DRAINING	0x80000000 /* Draining references before exiting */

/*
 * Mask indicating that there is "exceptional" work to be done on return to
 * user.
 */
#define	LW_USERRET	(LW_WEXIT | LW_PENDSIG | LW_WREBOOT | LW_WSUSPEND \
    | LW_WCORE | LW_LWPCTL | LW_CACHECRED)
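
/*
 * A sketch of how the mask is consumed on the way back to user space;
 * the real MI/MD return path differs in detail:
 *
 *	if (__predict_false(l->l_flag & LW_USERRET))
 *		lwp_userret(l);
 */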

/*
 * Status values.
 *
 * A note about LSRUN and LSONPROC: LSRUN indicates that an LWP is
 * runnable but *not* yet running, i.e. it is on a run queue.  LSONPROC
 * indicates that the LWP is actually executing on a CPU, i.e. it is
 * no longer on a run queue.
 *
 * These values are set in stone and must not be reused with future changes.
 */
#define	LSIDL		1	/* LWP being created (not yet runnable). */
#define	LSRUN		2	/* Currently runnable. */
#define	LSSLEEP		3	/* Sleeping on an address. */
#define	LSSTOP		4	/* Stopped for debugging or suspension. */
#define	LSZOMB		5	/* Awaiting collection by parent. */
/* define	LSDEAD	6	Process is almost a zombie. (removed in 5.0) */
#define	LSONPROC	7	/* LWP is currently on a CPU. */
#define	LSSUSPENDED	8	/* Not running, not signalable. */
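
/*
 * For example, a newly created LWP starts out in LSIDL, moves to LSRUN
 * once it has been made runnable and placed on a run queue, becomes
 * LSONPROC when a CPU selects it to run, and enters LSSLEEP when it
 * blocks on a wait channel.
 */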

#if defined(_KERNEL) || defined(_KMEMUSER)
static __inline void *
lwp_getpcb(struct lwp *l)
{

	return l->l_addr;
}
#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL
void	lwpinit(void);
void	lwp0_init(void);

void	lwp_startup(lwp_t *, lwp_t *);
void	startlwp(void *);

void	lwp_lock(lwp_t *);
void	lwp_unlock(lwp_t *);
pri_t	lwp_eprio(lwp_t *);
int	lwp_locked(lwp_t *, kmutex_t *);
kmutex_t *lwp_setlock(lwp_t *, kmutex_t *);
void	lwp_unlock_to(lwp_t *, kmutex_t *);
int	lwp_trylock(lwp_t *);
void	lwp_changepri(lwp_t *, pri_t);
void	lwp_lendpri(lwp_t *, pri_t);
void	lwp_addref(lwp_t *);
void	lwp_delref(lwp_t *);
void	lwp_delref2(lwp_t *);
bool	lwp_drainrefs(lwp_t *);
bool	lwp_alive(lwp_t *);
lwp_t	*lwp_find_first(proc_t *);

int	lwp_wait(lwp_t *, lwpid_t, lwpid_t *, bool);
void	lwp_continue(lwp_t *);
void	lwp_unsleep(lwp_t *, bool);
void	lwp_unstop(lwp_t *);
void	lwp_exit(lwp_t *);
int	lwp_suspend(lwp_t *, lwp_t *);
int	lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
void	lwp_start(lwp_t *, int);
void	lwp_migrate(lwp_t *, struct cpu_info *);
lwp_t *	lwp_find2(pid_t, lwpid_t);
lwp_t *	lwp_find(proc_t *, int);
void	lwp_userret(lwp_t *);
void	lwp_need_userret(lwp_t *);
void	lwp_free(lwp_t *, bool, bool);
long	lwp_pctr(void);
int	lwp_setprivate(lwp_t *, void *);
int	do_lwp_create(lwp_t *, void *, u_long, lwp_t **, const sigset_t *,
    const stack_t *);

void	lwp_thread_cleanup(lwp_t *);

void	lwpinit_specificdata(void);
int	lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
void	lwp_specific_key_delete(specificdata_key_t);
void	lwp_initspecific(lwp_t *);
void	lwp_finispecific(lwp_t *);
void	*lwp_getspecific(specificdata_key_t);
#if defined(_LWP_API_PRIVATE)
void	*_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
#endif
void	lwp_setspecific(specificdata_key_t, void *);
void	lwp_setspecific_by_lwp(lwp_t *, specificdata_key_t, void *);

/* Syscalls. */
int	lwp_park(clockid_t, int, struct timespec *);
int	lwp_unpark(const lwpid_t *, const u_int);

/* DDB. */
void	lwp_whatis(uintptr_t, void (*)(const char *, ...) __printflike(1, 2));

int	lwp_create(lwp_t *, struct proc *, vaddr_t, int, void *, size_t,
    void (*)(void *), void *, lwp_t **, int, const sigset_t *, const stack_t *);

/*
 * XXX _MODULE
 * We should provide real stubs for the below that modules can use.
 */

static __inline void
spc_lock(struct cpu_info *ci)
{
	mutex_spin_enter(ci->ci_schedstate.spc_mutex);
}

static __inline void
spc_unlock(struct cpu_info *ci)
{
	mutex_spin_exit(ci->ci_schedstate.spc_mutex);
}

static __inline void
spc_dlock(struct cpu_info *ci1, struct cpu_info *ci2)
{
	struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
	struct schedstate_percpu *spc2 = &ci2->ci_schedstate;

	KASSERT(ci1 != ci2);
	if (ci1 < ci2) {
		mutex_spin_enter(spc1->spc_mutex);
		mutex_spin_enter(spc2->spc_mutex);
	} else {
		mutex_spin_enter(spc2->spc_mutex);
		mutex_spin_enter(spc1->spc_mutex);
	}
}
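
/*
 * Because spc_dlock() always takes the two spc_mutexes in a global
 * order (by cpu_info address), callers that need both run-queue locks,
 * e.g. to move an LWP between CPUs, cannot deadlock against each other.
 * A minimal usage sketch:
 *
 *	spc_dlock(ci1, ci2);
 *	... manipulate both CPUs' run queues ...
 *	spc_unlock(ci1);
 *	spc_unlock(ci2);
 */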

/*
 * Allow machine-dependent code to override curlwp in <machine/cpu.h> for
 * its own convenience.  Otherwise, we declare it as appropriate.
 */
#if !defined(curlwp)
#if defined(MULTIPROCESSOR)
#define	curlwp		curcpu()->ci_curlwp	/* Current running LWP */
#else
extern struct lwp	*curlwp;		/* Current running LWP */
#endif /* MULTIPROCESSOR */
#endif /* ! curlwp */
#define	curproc		(curlwp->l_proc)

/*
 * This provides a way for <machine/cpu.h> to get l_cpu for curlwp before
 * struct lwp is defined.
 */
static __inline struct cpu_info *
lwp_getcpu(struct lwp *l)
{
	return l->l_cpu;
}

static __inline bool
CURCPU_IDLE_P(void)
{
	struct cpu_info *ci = curcpu();
	return ci->ci_onproc == ci->ci_data.cpu_idlelwp;
}

/*
 * Disable and re-enable preemption.  Only for low-level kernel
 * use.  Device drivers and anything that could potentially be
 * compiled as a module should use kpreempt_disable() and
 * kpreempt_enable().
 */
static __inline void
KPREEMPT_DISABLE(lwp_t *l)
{
	struct lwp *l1 __diagused;

	KASSERTMSG(l == (l1 = curlwp), "l=%p curlwp=%p", l, l1);
	l->l_nopreempt++;
	__insn_barrier();
}

static __inline void
KPREEMPT_ENABLE(lwp_t *l)
{
	struct lwp *l1 __diagused;

	KASSERTMSG(l == (l1 = curlwp), "l=%p curlwp=%p", l, l1);
	KASSERT(l->l_nopreempt > 0);
	__insn_barrier();
	l->l_nopreempt--;
	__insn_barrier();
	if (__predict_false(l->l_dopreempt))
		kpreempt(0);
}
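
/*
 * A minimal usage sketch; the argument must be curlwp:
 *
 *	KPREEMPT_DISABLE(curlwp);
 *	... touch per-CPU state without fear of preemption ...
 *	KPREEMPT_ENABLE(curlwp);
 *
 * As noted above, modules and drivers should use kpreempt_disable()
 * and kpreempt_enable() instead.
 */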

/* For lwp::l_dopreempt */
#define	DOPREEMPT_ACTIVE	0x01
#define	DOPREEMPT_COUNTED	0x02

/*
 * Prevent curlwp from migrating between CPUs while code between
 * curlwp_bind() and curlwp_bindx() executes.  One use case is psref(9),
 * whose contract forbids migration.
 */
static __inline int
curlwp_bind(void)
{
	int bound;

	bound = curlwp->l_pflag & LP_BOUND;
	curlwp->l_pflag |= LP_BOUND;
	__insn_barrier();

	return bound;
}

static __inline void
curlwp_bindx(int bound)
{

	KASSERT(curlwp->l_pflag & LP_BOUND);
	__insn_barrier();
	curlwp->l_pflag ^= bound ^ LP_BOUND;
}
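
/*
 * A minimal usage sketch, e.g. around a psref(9) acquisition:
 *
 *	int bound = curlwp_bind();
 *	... acquire and use a passive reference ...
 *	curlwp_bindx(bound);
 */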

#endif /* _KERNEL */

/* Flags for _lwp_create(), as per Solaris. */
#define	LWP_DETACHED	0x00000040
#define	LWP_SUSPENDED	0x00000080

/* Kernel-internal flags for LWP creation. */
	/*		0x40000000	was LWP_PIDLID */
#define	LWP_VFORK	0x80000000

#endif	/* !_SYS_LWP_H_ */
537