xref: /netbsd-src/sys/kern/kern_threadpool.c (revision caa99e8b408b0483e1a9e4c77ceb0b30422e647f)
/*	$NetBSD: kern_threadpool.c,v 1.23 2021/01/23 16:33:49 riastradh Exp $	*/

/*-
 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Thread pools.
 *
 * A thread pool is a collection of worker threads, idle or running
 * jobs, together with a dispatcher thread that does not run jobs but
 * can be given jobs to assign to a worker thread.  Scheduling a job in
 * a thread pool does not allocate or even sleep at all, except perhaps
 * on an adaptive lock, unlike kthread_create.  Jobs reuse threads, so
 * they do not incur the expense of creating and destroying kthreads
 * unless there is not much work to be done.
 *
 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
 * pools, one per CPU bound to that CPU.  For each priority level in
 * use, there is one shared unbound thread pool (i.e., pool of threads
 * not bound to any CPU) and one shared per-CPU thread pool.
 *
 * To use the unbound thread pool at priority pri, call
 * threadpool_get(&pool, pri).  When you're done, call
 * threadpool_put(pool, pri).
 *
 * To use the per-CPU thread pools at priority pri, call
 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread
 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another
 * CPU.  When you're done, call threadpool_percpu_put(pool_percpu,
 * pri).
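 *
 * For example, here is a minimal sketch of an unbound-pool client at
 * PRI_NONE.  The names (example_job_fn, sc, sc_lock, sc_job) are
 * hypothetical, error handling is abbreviated, and sc_lock serves as
 * the job's lock, held where the API requires it:
 *
 *	struct threadpool *pool;
 *	int error;
 *
 *	error = threadpool_get(&pool, PRI_NONE);
 *	if (error)
 *		return error;
 *	threadpool_job_init(&sc->sc_job, &example_job_fn, &sc->sc_lock,
 *	    "example");
 *
 *	mutex_enter(&sc->sc_lock);
 *	threadpool_schedule_job(pool, &sc->sc_job);
 *	mutex_exit(&sc->sc_lock);
 *
 *	...
 *
 *	mutex_enter(&sc->sc_lock);
 *	threadpool_cancel_job(pool, &sc->sc_job);
 *	mutex_exit(&sc->sc_lock);
 *	threadpool_job_destroy(&sc->sc_job);
 *	threadpool_put(pool, PRI_NONE);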
 *
 * +--MACHINE-----------------------------------------------------+
 * | +--CPU 0---------+ +--CPU 1---------+     +--CPU n---------+ |
 * | | <dispatcher 0> | | <dispatcher 1> | ... | <dispatcher n> | |
 * | | <idle 0a>      | | <running 1a>   | ... | <idle na>      | |
 * | | <running 0b>   | | <running 1b>   | ... | <idle nb>      | |
 * | | .              | | .              | ... | .              | |
 * | | .              | | .              | ... | .              | |
 * | | .              | | .              | ... | .              | |
 * | +----------------+ +----------------+     +----------------+ |
 * |            +--unbound-----------+                            |
 * |            | <dispatcher n+1>   |                            |
 * |            | <idle (n+1)a>      |                            |
 * |            | <running (n+1)b>   |                            |
 * |            +--------------------+                            |
 * +--------------------------------------------------------------+
 *
 * XXX Why one dispatcher per CPU?  I did that originally to avoid
 * touching remote CPUs' memory when scheduling a job, but that still
 * requires interprocessor synchronization.  Perhaps we could get by
 * with a single dispatcher thread, at the expense of another pointer
 * in struct threadpool_job to identify the CPU on which it must run in
 * order for the dispatcher to schedule it correctly.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.23 2021/01/23 16:33:49 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/threadpool.h>

/* Probes */

SDT_PROBE_DEFINE1(sdt, kernel, threadpool, get,
    "pri_t"/*pri*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, get__create,
    "pri_t"/*pri*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, get__race,
    "pri_t"/*pri*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, put,
    "struct threadpool *"/*pool*/, "pri_t"/*pri*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, put__destroy,
    "struct threadpool *"/*pool*/, "pri_t"/*pri*/);

SDT_PROBE_DEFINE1(sdt, kernel, threadpool, percpu__get,
    "pri_t"/*pri*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, percpu__get__create,
    "pri_t"/*pri*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, percpu__get__race,
    "pri_t"/*pri*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, percpu__put,
    "struct threadpool *"/*pool*/, "pri_t"/*pri*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, percpu__put__destroy,
    "struct threadpool *"/*pool*/, "pri_t"/*pri*/);

SDT_PROBE_DEFINE2(sdt, kernel, threadpool, create,
    "struct cpu_info *"/*ci*/, "pri_t"/*pri*/);
SDT_PROBE_DEFINE3(sdt, kernel, threadpool, create__success,
    "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE3(sdt, kernel, threadpool, create__failure,
    "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "int"/*error*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, destroy,
    "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, destroy__wait,
    "struct threadpool *"/*pool*/, "uint64_t"/*refcnt*/);

SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job,
    "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job__running,
    "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job__dispatcher,
    "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
SDT_PROBE_DEFINE3(sdt, kernel, threadpool, schedule__job__thread,
    "struct threadpool *"/*pool*/,
    "struct threadpool_job *"/*job*/,
    "struct lwp *"/*thread*/);

SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__start,
    "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__dying,
    "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__spawn,
    "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, dispatcher__race,
    "struct threadpool *"/*pool*/,
    "struct threadpool_job *"/*job*/);
SDT_PROBE_DEFINE3(sdt, kernel, threadpool, dispatcher__assign,
    "struct threadpool *"/*pool*/,
    "struct threadpool_job *"/*job*/,
    "struct lwp *"/*thread*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__exit,
    "struct threadpool *"/*pool*/);

SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__start,
    "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__dying,
    "struct threadpool *"/*pool*/);
SDT_PROBE_DEFINE2(sdt, kernel, threadpool, thread__job,
    "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__exit,
    "struct threadpool *"/*pool*/);

/* Data structures */

TAILQ_HEAD(job_head, threadpool_job);
TAILQ_HEAD(thread_head, threadpool_thread);

struct threadpool_thread {
	struct lwp			*tpt_lwp;
	char				*tpt_lwp_savedname;
	struct threadpool		*tpt_pool;
	struct threadpool_job		*tpt_job;
	kcondvar_t			tpt_cv;
	TAILQ_ENTRY(threadpool_thread)	tpt_entry;
};

struct threadpool {
	kmutex_t			tp_lock;
	struct threadpool_thread	tp_dispatcher;
	struct job_head			tp_jobs;
	struct thread_head		tp_idle_threads;
	uint64_t			tp_refcnt;
	int				tp_flags;
#define	THREADPOOL_DYING	0x01
	struct cpu_info			*tp_cpu;
	pri_t				tp_pri;
};

static void	threadpool_hold(struct threadpool *);
static void	threadpool_rele(struct threadpool *);

static int	threadpool_percpu_create(struct threadpool_percpu **, pri_t);
static void	threadpool_percpu_destroy(struct threadpool_percpu *);
static void	threadpool_percpu_init(void *, void *, struct cpu_info *);
static void	threadpool_percpu_ok(void *, void *, struct cpu_info *);
static void	threadpool_percpu_fini(void *, void *, struct cpu_info *);

static threadpool_job_fn_t threadpool_job_dead;

static void	threadpool_job_hold(struct threadpool_job *);
static void	threadpool_job_rele(struct threadpool_job *);

static void	threadpool_dispatcher_thread(void *) __dead;
static void	threadpool_thread(void *) __dead;

static pool_cache_t	threadpool_thread_pc __read_mostly;

static kmutex_t		threadpools_lock __cacheline_aligned;

	/* Default to 30 second idle timeout for pool threads. */
static int	threadpool_idle_time_ms = 30 * 1000;

struct threadpool_unbound {
	struct threadpool		tpu_pool;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_unbound)	tpu_link;
	uint64_t			tpu_refcnt;
};

static LIST_HEAD(, threadpool_unbound) unbound_threadpools;

static struct threadpool_unbound *
threadpool_lookup_unbound(pri_t pri)
{
	struct threadpool_unbound *tpu;

	LIST_FOREACH(tpu, &unbound_threadpools, tpu_link) {
		if (tpu->tpu_pool.tp_pri == pri)
			return tpu;
	}
	return NULL;
}

static void
threadpool_insert_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == NULL);
	LIST_INSERT_HEAD(&unbound_threadpools, tpu, tpu_link);
}

static void
threadpool_remove_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == tpu);
	LIST_REMOVE(tpu, tpu_link);
}

struct threadpool_percpu {
	percpu_t *			tpp_percpu;
	pri_t				tpp_pri;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_percpu)	tpp_link;
	uint64_t			tpp_refcnt;
};

static LIST_HEAD(, threadpool_percpu) percpu_threadpools;

static struct threadpool_percpu *
threadpool_lookup_percpu(pri_t pri)
{
	struct threadpool_percpu *tpp;

	LIST_FOREACH(tpp, &percpu_threadpools, tpp_link) {
		if (tpp->tpp_pri == pri)
			return tpp;
	}
	return NULL;
}

static void
threadpool_insert_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == NULL);
	LIST_INSERT_HEAD(&percpu_threadpools, tpp, tpp_link);
}

static void
threadpool_remove_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == tpp);
	LIST_REMOVE(tpp, tpp_link);
}

static int
sysctl_kern_threadpool_idle_ms(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int val, error;

	node = *rnode;

	val = threadpool_idle_time_ms;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error == 0 && newp != NULL) {
		/* Disallow negative values and 0 (forever). */
		if (val < 1)
			error = EINVAL;
		else
			threadpool_idle_time_ms = val;
	}

	return error;
}

SYSCTL_SETUP_PROTO(sysctl_threadpool_setup);

SYSCTL_SETUP(sysctl_threadpool_setup,
    "sysctl kern.threadpool subtree setup")
{
	const struct sysctlnode *rnode, *cnode;
	int error __diagused;

	error = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "threadpool",
	    SYSCTL_DESCR("threadpool subsystem options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	KASSERT(error == 0);

	error = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "idle_ms",
	    SYSCTL_DESCR("idle thread timeout in ms"),
	    sysctl_kern_threadpool_idle_ms, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	KASSERT(error == 0);
}
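
/*
 * The knob created above appears as kern.threadpool.idle_ms and can
 * be tuned from userland; for example (a usage sketch):
 *
 *	# sysctl -w kern.threadpool.idle_ms=60000
 *
 * would let idle pool threads linger for a minute before exiting.
 */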

void
threadpools_init(void)
{

	threadpool_thread_pc =
	    pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0,
		"thplthrd", NULL, IPL_NONE, NULL, NULL, NULL);

	LIST_INIT(&unbound_threadpools);
	LIST_INIT(&percpu_threadpools);
	mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);
}

static void
threadnamesuffix(char *buf, size_t buflen, struct cpu_info *ci, int pri)
{

	buf[0] = '\0';
	if (ci)
		snprintf(buf + strlen(buf), buflen - strlen(buf), "/%d",
		    cpu_index(ci));
	if (pri != PRI_NONE)
		snprintf(buf + strlen(buf), buflen - strlen(buf), "@%d", pri);
}

/* Thread pool creation */

static bool
threadpool_pri_is_valid(pri_t pri)
{
	return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

static int
threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
    pri_t pri)
{
	struct lwp *lwp;
	char suffix[16];
	int ktflags;
	int error;

	KASSERT(threadpool_pri_is_valid(pri));

	SDT_PROBE2(sdt, kernel, threadpool, create,  ci, pri);

	mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
	/* XXX dispatcher */
	TAILQ_INIT(&pool->tp_jobs);
	TAILQ_INIT(&pool->tp_idle_threads);
	pool->tp_refcnt = 1;		/* dispatcher's reference */
	pool->tp_flags = 0;
	pool->tp_cpu = ci;
	pool->tp_pri = pri;

	pool->tp_dispatcher.tpt_lwp = NULL;
	pool->tp_dispatcher.tpt_pool = pool;
	pool->tp_dispatcher.tpt_job = NULL;
	cv_init(&pool->tp_dispatcher.tpt_cv, "pooldisp");

	ktflags = 0;
	ktflags |= KTHREAD_MPSAFE;
	if (pri < PRI_KERNEL)
		ktflags |= KTHREAD_TS;
	threadnamesuffix(suffix, sizeof(suffix), ci, pri);
	error = kthread_create(pri, ktflags, ci, &threadpool_dispatcher_thread,
	    &pool->tp_dispatcher, &lwp, "pooldisp%s", suffix);
	if (error)
		goto fail0;

	mutex_spin_enter(&pool->tp_lock);
	pool->tp_dispatcher.tpt_lwp = lwp;
	cv_broadcast(&pool->tp_dispatcher.tpt_cv);
	mutex_spin_exit(&pool->tp_lock);

	SDT_PROBE3(sdt, kernel, threadpool, create__success,  ci, pri, pool);
	return 0;

fail0:	KASSERT(error);
	KASSERT(pool->tp_dispatcher.tpt_job == NULL);
	KASSERT(pool->tp_dispatcher.tpt_pool == pool);
	KASSERT(pool->tp_flags == 0);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_dispatcher.tpt_cv));
	cv_destroy(&pool->tp_dispatcher.tpt_cv);
	mutex_destroy(&pool->tp_lock);
	SDT_PROBE3(sdt, kernel, threadpool, create__failure,  ci, pri, error);
	return error;
}

/* Thread pool destruction */

static void
threadpool_destroy(struct threadpool *pool)
{
	struct threadpool_thread *thread;

	SDT_PROBE1(sdt, kernel, threadpool, destroy,  pool);

	/* Mark the pool dying and wait for threads to commit suicide.  */
	mutex_spin_enter(&pool->tp_lock);
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	pool->tp_flags |= THREADPOOL_DYING;
	cv_broadcast(&pool->tp_dispatcher.tpt_cv);
	TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
		cv_broadcast(&thread->tpt_cv);
	while (0 < pool->tp_refcnt) {
		SDT_PROBE2(sdt, kernel, threadpool, destroy__wait,
		    pool, pool->tp_refcnt);
		cv_wait(&pool->tp_dispatcher.tpt_cv, &pool->tp_lock);
	}
	mutex_spin_exit(&pool->tp_lock);

	KASSERT(pool->tp_dispatcher.tpt_job == NULL);
	KASSERT(pool->tp_dispatcher.tpt_pool == pool);
	KASSERT(pool->tp_flags == THREADPOOL_DYING);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_dispatcher.tpt_cv));
	cv_destroy(&pool->tp_dispatcher.tpt_cv);
	mutex_destroy(&pool->tp_lock);
}

static void
threadpool_hold(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	pool->tp_refcnt++;
	KASSERT(pool->tp_refcnt != 0);
}

static void
threadpool_rele(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	KASSERT(0 < pool->tp_refcnt);
	if (--pool->tp_refcnt == 0)
		cv_broadcast(&pool->tp_dispatcher.tpt_cv);
}

/* Unbound thread pools */

int
threadpool_get(struct threadpool **poolp, pri_t pri)
{
	struct threadpool_unbound *tpu, *tmp = NULL;
	int error;

	ASSERT_SLEEPABLE();

	SDT_PROBE1(sdt, kernel, threadpool, get,  pri);

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	tpu = threadpool_lookup_unbound(pri);
	if (tpu == NULL) {
		mutex_exit(&threadpools_lock);
		SDT_PROBE1(sdt, kernel, threadpool, get__create,  pri);
		tmp = kmem_zalloc(sizeof(*tmp), KM_SLEEP);
		error = threadpool_create(&tmp->tpu_pool, NULL, pri);
		if (error) {
			kmem_free(tmp, sizeof(*tmp));
			return error;
		}
		mutex_enter(&threadpools_lock);
		tpu = threadpool_lookup_unbound(pri);
		if (tpu == NULL) {
			tpu = tmp;
			tmp = NULL;
			threadpool_insert_unbound(tpu);
		} else {
			SDT_PROBE1(sdt, kernel, threadpool, get__race,  pri);
		}
	}
	KASSERT(tpu != NULL);
	tpu->tpu_refcnt++;
	KASSERT(tpu->tpu_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL) {
		threadpool_destroy(&tmp->tpu_pool);
		kmem_free(tmp, sizeof(*tmp));
	}
	KASSERT(tpu != NULL);
	*poolp = &tpu->tpu_pool;
	return 0;
}

void
threadpool_put(struct threadpool *pool, pri_t pri)
{
	struct threadpool_unbound *tpu =
	    container_of(pool, struct threadpool_unbound, tpu_pool);

	ASSERT_SLEEPABLE();
	KASSERT(threadpool_pri_is_valid(pri));

	SDT_PROBE2(sdt, kernel, threadpool, put,  pool, pri);

	mutex_enter(&threadpools_lock);
	KASSERT(tpu == threadpool_lookup_unbound(pri));
	KASSERT(0 < tpu->tpu_refcnt);
	if (--tpu->tpu_refcnt == 0) {
		SDT_PROBE2(sdt, kernel, threadpool, put__destroy,  pool, pri);
		threadpool_remove_unbound(tpu);
	} else {
		tpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (tpu) {
		threadpool_destroy(&tpu->tpu_pool);
		kmem_free(tpu, sizeof(*tpu));
	}
}

/* Per-CPU thread pools */

int
threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu, *tmp = NULL;
	int error;

	ASSERT_SLEEPABLE();

	SDT_PROBE1(sdt, kernel, threadpool, percpu__get,  pri);

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	pool_percpu = threadpool_lookup_percpu(pri);
	if (pool_percpu == NULL) {
		mutex_exit(&threadpools_lock);
		SDT_PROBE1(sdt, kernel, threadpool, percpu__get__create,  pri);
		error = threadpool_percpu_create(&tmp, pri);
		if (error)
			return error;
		KASSERT(tmp != NULL);
		mutex_enter(&threadpools_lock);
		pool_percpu = threadpool_lookup_percpu(pri);
		if (pool_percpu == NULL) {
			pool_percpu = tmp;
			tmp = NULL;
			threadpool_insert_percpu(pool_percpu);
		} else {
			SDT_PROBE1(sdt, kernel, threadpool, percpu__get__race,
			    pri);
		}
	}
	KASSERT(pool_percpu != NULL);
	pool_percpu->tpp_refcnt++;
	KASSERT(pool_percpu->tpp_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL)
		threadpool_percpu_destroy(tmp);
	KASSERT(pool_percpu != NULL);
	*pool_percpup = pool_percpu;
	return 0;
}

void
threadpool_percpu_put(struct threadpool_percpu *pool_percpu, pri_t pri)
{

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	SDT_PROBE2(sdt, kernel, threadpool, percpu__put,  pool_percpu, pri);

	mutex_enter(&threadpools_lock);
	KASSERT(pool_percpu == threadpool_lookup_percpu(pri));
	KASSERT(0 < pool_percpu->tpp_refcnt);
	if (--pool_percpu->tpp_refcnt == 0) {
		SDT_PROBE2(sdt, kernel, threadpool, percpu__put__destroy,
		    pool_percpu, pri);
		threadpool_remove_percpu(pool_percpu);
	} else {
		pool_percpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (pool_percpu)
		threadpool_percpu_destroy(pool_percpu);
}

struct threadpool *
threadpool_percpu_ref(struct threadpool_percpu *pool_percpu)
{
	struct threadpool **poolp, *pool;

	poolp = percpu_getref(pool_percpu->tpp_percpu);
	pool = *poolp;
	percpu_putref(pool_percpu->tpp_percpu);

	return pool;
}

struct threadpool *
threadpool_percpu_ref_remote(struct threadpool_percpu *pool_percpu,
    struct cpu_info *ci)
{
	struct threadpool **poolp, *pool;

	/*
	 * As long as xcalls are blocked -- e.g., by kpreempt_disable
	 * -- the percpu object will not be swapped and destroyed.  We
	 * can't write to it, because the data may have already been
	 * moved to a new buffer, but we can safely read from it.
	 */
	kpreempt_disable();
	poolp = percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
	pool = *poolp;
	kpreempt_enable();

	return pool;
}

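/*
 * A rough sketch of per-CPU use, with the same hypothetical sc names
 * as in the comment at the top of this file and error handling
 * elided.  Threads in the pool returned by threadpool_percpu_ref()
 * are bound to the current CPU:
 *
 *	struct threadpool_percpu *pool_percpu;
 *	struct threadpool *pool;
 *
 *	error = threadpool_percpu_get(&pool_percpu, PRI_NONE);
 *	...
 *	pool = threadpool_percpu_ref(pool_percpu);
 *	mutex_enter(&sc->sc_lock);
 *	threadpool_schedule_job(pool, &sc->sc_job);
 *	mutex_exit(&sc->sc_lock);
 *	...
 *	threadpool_percpu_put(pool_percpu, PRI_NONE);
 */
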
static int
threadpool_percpu_create(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu;
	bool ok = true;

	pool_percpu = kmem_zalloc(sizeof(*pool_percpu), KM_SLEEP);
	pool_percpu->tpp_pri = pri;
	pool_percpu->tpp_percpu = percpu_create(sizeof(struct threadpool *),
	    threadpool_percpu_init, threadpool_percpu_fini,
	    (void *)(intptr_t)pri);

	/*
	 * Verify that all of the CPUs were initialized.
	 *
	 * XXX What to do if we add CPU hotplug?
	 */
	percpu_foreach(pool_percpu->tpp_percpu, &threadpool_percpu_ok, &ok);
	if (!ok)
		goto fail;

	/* Success!  */
	*pool_percpup = (struct threadpool_percpu *)pool_percpu;
	return 0;

fail:	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
	kmem_free(pool_percpu, sizeof(*pool_percpu));
	return ENOMEM;
}

static void
threadpool_percpu_destroy(struct threadpool_percpu *pool_percpu)
{

	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
	kmem_free(pool_percpu, sizeof(*pool_percpu));
}

static void
threadpool_percpu_init(void *vpoolp, void *vpri, struct cpu_info *ci)
{
	struct threadpool **const poolp = vpoolp;
	pri_t pri = (intptr_t)(void *)vpri;
	int error;

	*poolp = kmem_zalloc(sizeof(**poolp), KM_SLEEP);
	error = threadpool_create(*poolp, ci, pri);
	if (error) {
		KASSERT(error == ENOMEM);
		kmem_free(*poolp, sizeof(**poolp));
		*poolp = NULL;
	}
}

static void
threadpool_percpu_ok(void *vpoolp, void *vokp, struct cpu_info *ci)
{
	struct threadpool **const poolp = vpoolp;
	bool *okp = vokp;

	if (*poolp == NULL)
		atomic_store_relaxed(okp, false);
}

static void
threadpool_percpu_fini(void *vpoolp, void *vprip, struct cpu_info *ci)
{
	struct threadpool **const poolp = vpoolp;

	if (*poolp == NULL)	/* initialization failed */
		return;
	threadpool_destroy(*poolp);
	kmem_free(*poolp, sizeof(**poolp));
}

/* Thread pool jobs */
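
/*
 * A job function must eventually call threadpool_job_done() on the
 * job, with the job's lock held, before it returns.  A minimal
 * sketch, using the hypothetical names (example_job_fn, sc_job,
 * sc_lock) from the comment at the top of this file:
 *
 *	static void
 *	example_job_fn(struct threadpool_job *job)
 *	{
 *		struct example_softc *sc =
 *		    container_of(job, struct example_softc, sc_job);
 *
 *		... do the work ...
 *
 *		mutex_enter(&sc->sc_lock);
 *		threadpool_job_done(job);
 *		mutex_exit(&sc->sc_lock);
 *	}
 */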

void __printflike(4,5)
threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn,
    kmutex_t *lock, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(job->job_name, sizeof(job->job_name), fmt, ap);
	va_end(ap);

	job->job_lock = lock;
	job->job_thread = NULL;
	job->job_refcnt = 0;
	cv_init(&job->job_cv, job->job_name);
	job->job_fn = fn;
}

static void
threadpool_job_dead(struct threadpool_job *job)
{

	panic("threadpool job %p ran after destruction", job);
}

void
threadpool_job_destroy(struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((job->job_thread == NULL), "job %p still running", job);

	mutex_enter(job->job_lock);
	while (0 < atomic_load_relaxed(&job->job_refcnt))
		cv_wait(&job->job_cv, job->job_lock);
	mutex_exit(job->job_lock);

	job->job_lock = NULL;
	KASSERT(job->job_thread == NULL);
	KASSERT(job->job_refcnt == 0);
	KASSERT(!cv_has_waiters(&job->job_cv));
	cv_destroy(&job->job_cv);
	job->job_fn = threadpool_job_dead;
	(void)strlcpy(job->job_name, "deadjob", sizeof(job->job_name));
}

static void
threadpool_job_hold(struct threadpool_job *job)
{
	unsigned int refcnt __diagused;

	refcnt = atomic_inc_uint_nv(&job->job_refcnt);
	KASSERT(refcnt != 0);
}

static void
threadpool_job_rele(struct threadpool_job *job)
{
	unsigned int refcnt;

	KASSERT(mutex_owned(job->job_lock));

	refcnt = atomic_dec_uint_nv(&job->job_refcnt);
	KASSERT(refcnt != UINT_MAX);
	if (refcnt == 0)
		cv_broadcast(&job->job_cv);
}

void
threadpool_job_done(struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));
	KASSERT(job->job_thread != NULL);
	KASSERT(job->job_thread->tpt_lwp == curlwp);

	/*
	 * We can safely read this field; it's only modified right before
	 * we call the job work function, and we are only preserving it
	 * to use here; no one cares if it contains junk afterward.
	 */
	lwp_lock(curlwp);
	curlwp->l_name = job->job_thread->tpt_lwp_savedname;
	lwp_unlock(curlwp);

	/*
	 * Inline the work of threadpool_job_rele(); the job is already
	 * locked, the most likely scenario (XXXJRT only scenario?) is
	 * that we're dropping the last reference (the one taken in
	 * threadpool_schedule_job()), and we always do the cv_broadcast()
	 * anyway.
	 */
	KASSERT(0 < atomic_load_relaxed(&job->job_refcnt));
	unsigned int refcnt __diagused = atomic_dec_uint_nv(&job->job_refcnt);
	KASSERT(refcnt != UINT_MAX);
	cv_broadcast(&job->job_cv);
	job->job_thread = NULL;
}

void
threadpool_schedule_job(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	SDT_PROBE2(sdt, kernel, threadpool, schedule__job,  pool, job);

	/*
	 * If the job's already running, let it keep running.  The job
	 * is guaranteed by the interlock not to end early -- if it had
	 * ended early, threadpool_job_done would have set job_thread
	 * to NULL under the interlock.
	 */
	if (__predict_true(job->job_thread != NULL)) {
		SDT_PROBE2(sdt, kernel, threadpool, schedule__job__running,
		    pool, job);
		return;
	}

	threadpool_job_hold(job);

	/* Otherwise, try to assign a thread to the job.  */
	mutex_spin_enter(&pool->tp_lock);
	if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
		/* Nobody's idle.  Give it to the dispatcher.  */
		SDT_PROBE2(sdt, kernel, threadpool, schedule__job__dispatcher,
		    pool, job);
		job->job_thread = &pool->tp_dispatcher;
		TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
	} else {
		/* Assign it to the first idle thread.  */
		job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
		SDT_PROBE3(sdt, kernel, threadpool, schedule__job__thread,
		    pool, job, job->job_thread->tpt_lwp);
		TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
		    tpt_entry);
		job->job_thread->tpt_job = job;
	}

	/* Notify whomever we gave it to, dispatcher or idle thread.  */
	KASSERT(job->job_thread != NULL);
	cv_broadcast(&job->job_thread->tpt_cv);
	mutex_spin_exit(&pool->tp_lock);
}

bool
threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * XXXJRT This fails (albeit safely) when all of the following
	 * are true:
	 *
	 *	=> "pool" is something other than what the job was
	 *	   scheduled on.  This can legitimately occur if,
	 *	   for example, a job is percpu-scheduled on CPU0
	 *	   and then CPU1 attempts to cancel it without taking
	 *	   a remote pool reference.  (this might happen by
	 *	   "luck of the draw").
	 *
	 *	=> "job" is not yet running, but is assigned to the
	 *	   dispatcher.
	 *
	 * When this happens, this code makes the determination that
	 * the job is already running.  The failure mode is that the
	 * caller is told the job is running, and thus has to wait.
	 * The dispatcher will eventually get to it and the job will
	 * proceed as if it had been already running.
	 */

	if (job->job_thread == NULL) {
		/* Nothing to do.  Guaranteed not running.  */
		return true;
	} else if (job->job_thread == &pool->tp_dispatcher) {
		/* Take it off the list to guarantee it won't run.  */
		job->job_thread = NULL;
		mutex_spin_enter(&pool->tp_lock);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		mutex_spin_exit(&pool->tp_lock);
		threadpool_job_rele(job);
		return true;
	} else {
		/* Too late -- already running.  */
		return false;
	}
}

void
threadpool_cancel_job(struct threadpool *pool, struct threadpool_job *job)
{

	/*
	 * We may sleep here, but we can't ASSERT_SLEEPABLE() because
	 * the job lock (used to interlock the cv_wait()) may in fact
	 * legitimately be a spin lock, so the assertion would fire
	 * as a false-positive.
	 */

	KASSERT(mutex_owned(job->job_lock));

	if (threadpool_cancel_job_async(pool, job))
		return;

	/* Already running.  Wait for it to complete.  */
	while (job->job_thread != NULL)
		cv_wait(&job->job_cv, job->job_lock);
}

/* Thread pool dispatcher thread */

static void __dead
threadpool_dispatcher_thread(void *arg)
{
	struct threadpool_thread *const dispatcher = arg;
	struct threadpool *const pool = dispatcher->tpt_pool;
	struct lwp *lwp = NULL;
	int ktflags;
	char suffix[16];
	int error;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));
	KASSERT((pool->tp_cpu == NULL) || (curlwp->l_pflag & LP_BOUND));

	/* Wait until we're initialized.  */
	mutex_spin_enter(&pool->tp_lock);
	while (dispatcher->tpt_lwp == NULL)
		cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);

	SDT_PROBE1(sdt, kernel, threadpool, dispatcher__start,  pool);

	for (;;) {
		/* Wait until there's a job.  */
		while (TAILQ_EMPTY(&pool->tp_jobs)) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				SDT_PROBE1(sdt, kernel, threadpool,
				    dispatcher__dying,  pool);
				break;
			}
			cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);
		}
		if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
			break;

		/* If there are no threads, we'll have to try to start one.  */
		if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
			SDT_PROBE1(sdt, kernel, threadpool, dispatcher__spawn,
			    pool);
			threadpool_hold(pool);
			mutex_spin_exit(&pool->tp_lock);

			struct threadpool_thread *const thread =
			    pool_cache_get(threadpool_thread_pc, PR_WAITOK);
			thread->tpt_lwp = NULL;
			thread->tpt_pool = pool;
			thread->tpt_job = NULL;
			cv_init(&thread->tpt_cv, "pooljob");

			ktflags = 0;
			ktflags |= KTHREAD_MPSAFE;
			if (pool->tp_pri < PRI_KERNEL)
				ktflags |= KTHREAD_TS;
			threadnamesuffix(suffix, sizeof(suffix), pool->tp_cpu,
			    pool->tp_pri);
			error = kthread_create(pool->tp_pri, ktflags,
			    pool->tp_cpu, &threadpool_thread, thread, &lwp,
			    "poolthread%s", suffix);

			mutex_spin_enter(&pool->tp_lock);
			if (error) {
				pool_cache_put(threadpool_thread_pc, thread);
				threadpool_rele(pool);
				/* XXX What to do to wait for memory?  */
				(void)kpause("thrdplcr", false, hz,
				    &pool->tp_lock);
				continue;
			}
			/*
			 * New kthread now owns the reference to the pool
			 * taken above.
			 */
			KASSERT(lwp != NULL);
			TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread,
			    tpt_entry);
			thread->tpt_lwp = lwp;
			lwp = NULL;
			cv_broadcast(&thread->tpt_cv);
			continue;
		}

		/* There are idle threads, so try giving one a job.  */
		struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);

		/*
		 * Take an extra reference on the job temporarily so that
		 * it won't disappear on us while we have both locks dropped.
		 */
		threadpool_job_hold(job);
		mutex_spin_exit(&pool->tp_lock);

		mutex_enter(job->job_lock);
		/* If the job was cancelled, we'll no longer be its thread.  */
		if (__predict_true(job->job_thread == dispatcher)) {
			mutex_spin_enter(&pool->tp_lock);
			TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
			if (__predict_false(
				    TAILQ_EMPTY(&pool->tp_idle_threads))) {
				/*
				 * Someone else snagged the thread
				 * first.  We'll have to try again.
				 */
				SDT_PROBE2(sdt, kernel, threadpool,
				    dispatcher__race,  pool, job);
				TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
				    job_entry);
			} else {
				/*
				 * Assign the job to the thread and
				 * wake the thread so it starts work.
				 */
				struct threadpool_thread *const thread =
				    TAILQ_FIRST(&pool->tp_idle_threads);

				SDT_PROBE3(sdt, kernel, threadpool,
				    dispatcher__assign,
				    pool, job, thread->tpt_lwp);
				KASSERT(thread->tpt_job == NULL);
				TAILQ_REMOVE(&pool->tp_idle_threads, thread,
				    tpt_entry);
				thread->tpt_job = job;
				job->job_thread = thread;
				cv_broadcast(&thread->tpt_cv);
			}
			mutex_spin_exit(&pool->tp_lock);
		}
		threadpool_job_rele(job);
		mutex_exit(job->job_lock);

		mutex_spin_enter(&pool->tp_lock);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	SDT_PROBE1(sdt, kernel, threadpool, dispatcher__exit,  pool);

	kthread_exit(0);
}

/* Thread pool thread */

static void __dead
threadpool_thread(void *arg)
{
	struct threadpool_thread *const thread = arg;
	struct threadpool *const pool = thread->tpt_pool;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));
	KASSERT((pool->tp_cpu == NULL) || (curlwp->l_pflag & LP_BOUND));

	/* Wait until we're initialized and on the queue.  */
	mutex_spin_enter(&pool->tp_lock);
	while (thread->tpt_lwp == NULL)
		cv_wait(&thread->tpt_cv, &pool->tp_lock);

	SDT_PROBE1(sdt, kernel, threadpool, thread__start,  pool);

	KASSERT(thread->tpt_lwp == curlwp);
	for (;;) {
		/* Wait until we are assigned a job.  */
		while (thread->tpt_job == NULL) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				SDT_PROBE1(sdt, kernel, threadpool,
				    thread__dying,  pool);
				break;
			}
			if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
				mstohz(threadpool_idle_time_ms)))
				break;
		}
		if (__predict_false(thread->tpt_job == NULL)) {
			TAILQ_REMOVE(&pool->tp_idle_threads, thread,
			    tpt_entry);
			break;
		}

		struct threadpool_job *const job = thread->tpt_job;
		KASSERT(job != NULL);

		/* Set our lwp name to reflect what job we're doing.  */
		lwp_lock(curlwp);
		char *const lwp_name __diagused = curlwp->l_name;
		thread->tpt_lwp_savedname = curlwp->l_name;
		curlwp->l_name = job->job_name;
		lwp_unlock(curlwp);

		mutex_spin_exit(&pool->tp_lock);

		SDT_PROBE2(sdt, kernel, threadpool, thread__job,  pool, job);

		/* Run the job.  */
		(*job->job_fn)(job);

		/* lwp name restored in threadpool_job_done(). */
		KASSERTMSG((curlwp->l_name == lwp_name),
		    "someone forgot to call threadpool_job_done()!");

		/*
		 * We can compare pointers, but we can no longer dereference
		 * job after this because threadpool_job_done() drops the
		 * last reference on the job while the job is locked.
		 */

		mutex_spin_enter(&pool->tp_lock);
		KASSERT(thread->tpt_job == job);
		thread->tpt_job = NULL;
		TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	SDT_PROBE1(sdt, kernel, threadpool, thread__exit,  pool);

	KASSERT(!cv_has_waiters(&thread->tpt_cv));
	cv_destroy(&thread->tpt_cv);
	pool_cache_put(threadpool_thread_pc, thread);
	kthread_exit(0);
}