/*	$NetBSD: threadpool_tester.c,v 1.3 2018/12/26 22:21:10 thorpej Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threadpool_tester.c,v 1.3 2018/12/26 22:21:10 thorpej Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/threadpool.h>

MODULE(MODULE_CLASS_MISC, threadpool_tester, NULL);

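/*
 * This module exposes a sysctl interface for exercising the kernel
 * threadpool API (threadpool_get(), threadpool_schedule_job(), etc.).
 * All of the knobs live under kern.threadpool_tester and take a thread
 * priority as their value (PRI_NONE, i.e. -1, selects the
 * default-priority pool).  An illustrative session might look like:
 *
 *	sysctl -w kern.threadpool_tester.get_unbound=-1
 *	sysctl -w kern.threadpool_tester.run_unbound=-1
 *	sysctl kern.threadpool_tester.test_value
 *	sysctl -w kern.threadpool_tester.put_unbound=-1
 */
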
#ifdef THREADPOOL_VERBOSE
#define	TP_LOG(x)		printf x
#else
#define	TP_LOG(x)		/* nothing */
#endif /* THREADPOOL_VERBOSE */

static struct tester_context {
	kmutex_t ctx_mutex;
	struct sysctllog *ctx_sysctllog;
	struct threadpool *ctx_unbound[PRI_COUNT + 1];
	struct threadpool_percpu *ctx_percpu[PRI_COUNT + 1];
	unsigned int ctx_value;
	struct threadpool_job ctx_job;
} tester_ctx;

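/*
 * The pool arrays above have one slot per priority plus one extra;
 * pri_to_idx() maps PRI_NONE onto that extra slot at index PRI_COUNT.
 */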
#define	pri_to_idx(pri)		((pri) == PRI_NONE ? PRI_COUNT : (pri))

static bool
pri_is_valid(pri_t pri)
{
	return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

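/*
 * Acquire a reference to the unbound pool for the priority written to
 * the sysctl node and record it in the tester context.  If we already
 * hold that pool, drop the extra reference and return EEXIST.
 */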
static int
threadpool_tester_get_unbound(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool *pool, *opool = NULL;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	error = threadpool_get(&pool, val);
	if (error) {
		TP_LOG(("%s: threadpool_get(..., %d) failed -> %d\n",
		    __func__, val, error));
		return error;
	}

	mutex_enter(&ctx->ctx_mutex);
	if (ctx->ctx_unbound[pri_to_idx(val)] == NULL)
		ctx->ctx_unbound[pri_to_idx(val)] = pool;
	else
		opool = ctx->ctx_unbound[pri_to_idx(val)];
	mutex_exit(&ctx->ctx_mutex);

	if (opool != NULL) {
		/* Should have gotten reference to existing pool. */
		TP_LOG(("%s: found existing unbound pool for pri %d (%s)\n",
		    __func__, val, opool == pool ? "match" : "NO MATCH"));
		KASSERT(opool == pool);
		threadpool_put(pool, val);
		error = EEXIST;
	} else {
		TP_LOG(("%s: created unbound pool for pri %d\n",
		    __func__, val));
	}

	return error;
}

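/*
 * Release our reference to the unbound pool for the priority written
 * to the sysctl node.  Returns ENODEV if we do not hold one.
 */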
static int
threadpool_tester_put_unbound(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool *pool;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	/* We only ever maintain a single reference. */
	pool = ctx->ctx_unbound[pri_to_idx(val)];
	ctx->ctx_unbound[pri_to_idx(val)] = NULL;
	mutex_exit(&ctx->ctx_mutex);

	if (pool == NULL) {
		TP_LOG(("%s: no unbound pool for pri %d\n",
		    __func__, val));
		return ENODEV;
	}

	threadpool_put(pool, val);
	TP_LOG(("%s: released unbound pool for pri %d\n",
	    __func__, val));

	return 0;
}

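/*
 * Schedule the tester job on the unbound pool for the priority written
 * to the sysctl node.  Returns ENODEV if we do not hold that pool.
 */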
static int
threadpool_tester_run_unbound(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool *pool;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	pool = ctx->ctx_unbound[pri_to_idx(val)];
	if (pool == NULL) {
		TP_LOG(("%s: no unbound pool for pri %d\n",
		    __func__, val));
		mutex_exit(&ctx->ctx_mutex);
		return ENODEV;
	}

	threadpool_schedule_job(pool, &ctx->ctx_job);
	TP_LOG(("%s: scheduled job on unbound pool for pri %d\n",
	    __func__, val));
	mutex_exit(&ctx->ctx_mutex);

	return 0;
}

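/*
 * Per-CPU analogue of get_unbound: acquire a reference to the per-CPU
 * pool group for the priority written to the sysctl node.
 */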
static int
threadpool_tester_get_percpu(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool_percpu *pcpu, *opcpu = NULL;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	error = threadpool_percpu_get(&pcpu, val);
	if (error) {
		TP_LOG(("%s: threadpool_percpu_get(..., %d) failed -> %d\n",
		    __func__, val, error));
		return error;
	}

	mutex_enter(&ctx->ctx_mutex);
	if (ctx->ctx_percpu[pri_to_idx(val)] == NULL)
		ctx->ctx_percpu[pri_to_idx(val)] = pcpu;
	else
		opcpu = ctx->ctx_percpu[pri_to_idx(val)];
	mutex_exit(&ctx->ctx_mutex);

	if (opcpu != NULL) {
		/* Should have gotten reference to existing pool. */
		TP_LOG(("%s: found existing percpu pool for pri %d (%s)\n",
		    __func__, val, opcpu == pcpu ? "match" : "NO MATCH"));
		KASSERT(opcpu == pcpu);
		threadpool_percpu_put(pcpu, val);
		error = EEXIST;
	} else {
		TP_LOG(("%s: created percpu pool for pri %d\n",
		    __func__, val));
	}

	return error;
}

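/*
 * Release our reference to the per-CPU pool group for the priority
 * written to the sysctl node.  Returns ENODEV if we do not hold one.
 */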
static int
threadpool_tester_put_percpu(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool_percpu *pcpu;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	/* We only ever maintain a single reference. */
	pcpu = ctx->ctx_percpu[pri_to_idx(val)];
	ctx->ctx_percpu[pri_to_idx(val)] = NULL;
	mutex_exit(&ctx->ctx_mutex);

	if (pcpu == NULL) {
		TP_LOG(("%s: no percpu pool for pri %d\n",
		    __func__, val));
		return ENODEV;
	}

	threadpool_percpu_put(pcpu, val);
	TP_LOG(("%s: released percpu pool for pri %d\n",
	    __func__, val));

	return 0;
}

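/*
 * Schedule the tester job on the current CPU's pool taken from the
 * per-CPU pool group for the priority written to the sysctl node.
 */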
static int
threadpool_tester_run_percpu(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool_percpu *pcpu;
	struct threadpool *pool;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	pcpu = ctx->ctx_percpu[pri_to_idx(val)];
	if (pcpu == NULL) {
		TP_LOG(("%s: no percpu pool for pri %d\n",
		    __func__, val));
		mutex_exit(&ctx->ctx_mutex);
		return ENODEV;
	}

	pool = threadpool_percpu_ref(pcpu);
	KASSERT(pool != NULL);

	threadpool_schedule_job(pool, &ctx->ctx_job);
	TP_LOG(("%s: scheduled job on percpu pool for pri %d\n",
	    __func__, val));
	mutex_exit(&ctx->ctx_mutex);

	return 0;
}

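/*
 * Read or write the test counter that the tester job increments.
 */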
static int
threadpool_tester_test_value(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct sysctlnode node;
	unsigned int val;
	int error;

	node = *rnode;
	ctx = node.sysctl_data;

	mutex_enter(&ctx->ctx_mutex);
	val = ctx->ctx_value;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL) {
		mutex_exit(&ctx->ctx_mutex);
		return error;
	}
	ctx->ctx_value = val;
	mutex_exit(&ctx->ctx_mutex);

	return 0;
}

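/*
 * The job itself: bump the test counter, sleep for roughly a second so
 * the pool state can be observed, then mark the job done.
 */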
static void
threadpool_tester_job(struct threadpool_job *job)
{
	struct tester_context *ctx =
	    container_of(job, struct tester_context, ctx_job);
	unsigned int oval, nval;

	TP_LOG(("%s: job = %p, ctx = %p\n", __func__, job, ctx));

	mutex_enter(&ctx->ctx_mutex);
	oval = ctx->ctx_value;
	nval = oval + 1;	/* always reference oval and nval */
	ctx->ctx_value = nval;
	mutex_exit(&ctx->ctx_mutex);

	TP_LOG(("%s: %u -> %u\n", __func__, oval, nval));
	(void) kpause("tptestjob", false, hz, NULL);

	mutex_enter(&ctx->ctx_mutex);
	threadpool_job_done(job);
	mutex_exit(&ctx->ctx_mutex);
}

#define	RETURN_ERROR	if (error) goto return_error

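/*
 * Module initialization: set up the job and its lock, then create the
 * kern.threadpool_tester sysctl tree.  On any sysctl error the partial
 * tree is torn down again.
 */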
static int
threadpool_tester_init(void)
{
	struct sysctllog **log = &tester_ctx.ctx_sysctllog;
	const struct sysctlnode *rnode, *cnode;
	int error;

	mutex_init(&tester_ctx.ctx_mutex, MUTEX_DEFAULT, IPL_NONE);
	threadpool_job_init(&tester_ctx.ctx_job, threadpool_tester_job,
	    &tester_ctx.ctx_mutex, "tptest");

	error = sysctl_createv(log, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "threadpool_tester",
	    SYSCTL_DESCR("threadpool testing interface"),
	    NULL, 0, NULL, 0, CTL_KERN, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "get_unbound",
	    SYSCTL_DESCR("get unbound pool of specified priority"),
	    threadpool_tester_get_unbound, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "put_unbound",
	    SYSCTL_DESCR("put unbound pool of specified priority"),
	    threadpool_tester_put_unbound, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "run_unbound",
	    SYSCTL_DESCR("run on unbound pool of specified priority"),
	    threadpool_tester_run_unbound, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "get_percpu",
	    SYSCTL_DESCR("get percpu pool of specified priority"),
	    threadpool_tester_get_percpu, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "put_percpu",
	    SYSCTL_DESCR("put percpu pool of specified priority"),
	    threadpool_tester_put_percpu, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "run_percpu",
	    SYSCTL_DESCR("run on percpu pool of specified priority"),
	    threadpool_tester_run_percpu, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "test_value",
	    SYSCTL_DESCR("test value that jobs increment"),
	    threadpool_tester_test_value, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	return 0;

 return_error:
	sysctl_teardown(log);
	return error;
}

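/*
 * Module teardown: cancel the tester job on, and release, every pool
 * we still hold, then destroy the job, the mutex, and the sysctl tree.
 */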
static int
threadpool_tester_fini(void)
{
	pri_t pri;

	mutex_enter(&tester_ctx.ctx_mutex);
	for (pri = PRI_NONE/*-1*/; pri < PRI_COUNT; pri++) {
		struct threadpool *pool =
		    tester_ctx.ctx_unbound[pri_to_idx(pri)];
		struct threadpool_percpu *pcpu =
		    tester_ctx.ctx_percpu[pri_to_idx(pri)];

		/*
		 * threadpool_cancel_job() may be called on a pool
		 * other than what the job is scheduled on. This is
		 * safe; see comment in threadpool_cancel_job_async().
		 */

		if (pool != NULL) {
			threadpool_cancel_job(pool, &tester_ctx.ctx_job);
			threadpool_put(pool, pri);
			tester_ctx.ctx_unbound[pri_to_idx(pri)] = NULL;
		}
		if (pcpu != NULL) {
			pool = threadpool_percpu_ref(pcpu);
			threadpool_cancel_job(pool, &tester_ctx.ctx_job);
			threadpool_percpu_put(pcpu, pri);
			tester_ctx.ctx_percpu[pri_to_idx(pri)] = NULL;
		}
	}
	mutex_exit(&tester_ctx.ctx_mutex);
	threadpool_job_destroy(&tester_ctx.ctx_job);
	mutex_destroy(&tester_ctx.ctx_mutex);

	sysctl_teardown(&tester_ctx.ctx_sysctllog);

	return 0;
}

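/*
 * Module control entry point: dispatch init and fini; anything else is
 * unsupported.
 */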
static int
threadpool_tester_modcmd(modcmd_t cmd, void *arg __unused)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = threadpool_tester_init();
		break;

	case MODULE_CMD_FINI:
		error = threadpool_tester_fini();
		break;

	case MODULE_CMD_STAT:
	default:
		error = ENOTTY;
	}

	return error;
}