/*	$NetBSD: threadpool_tester.c,v 1.1 2019/01/25 18:33:59 christos Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threadpool_tester.c,v 1.1 2019/01/25 18:33:59 christos Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/threadpool.h>

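/* Register as a miscellaneous kernel module with no dependencies. */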
MODULE(MODULE_CLASS_MISC, threadpool_tester, NULL);

#ifdef THREADPOOL_VERBOSE
#define	TP_LOG(x)		printf x
#else
#define	TP_LOG(x)		/* nothing */
#endif /* THREADPOOL_VERBOSE */

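/*
 * Shared test state: a mutex guarding everything, the sysctl tree we
 * create at load time, cached references to unbound and per-CPU pools
 * (one slot per priority, plus an extra slot for PRI_NONE), a counter
 * that the test job increments, and the job itself.
 */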
static struct tester_context {
	kmutex_t ctx_mutex;
	struct sysctllog *ctx_sysctllog;
	struct threadpool *ctx_unbound[PRI_COUNT + 1];
	struct threadpool_percpu *ctx_percpu[PRI_COUNT + 1];
	unsigned int ctx_value;
	struct threadpool_job ctx_job;
} tester_ctx;

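/* Map a priority to an array slot; PRI_NONE (-1) gets the extra slot. */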
#define	pri_to_idx(pri)		((pri) == PRI_NONE ? PRI_COUNT : (pri))

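/* Accept PRI_NONE or any priority in [PRI_USER, PRI_COUNT). */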
static bool
pri_is_valid(pri_t pri)
{
	return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

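/*
 * Sysctl handler: writing a priority acquires a reference to the
 * unbound pool of that priority and caches it in tester_ctx.  Returns
 * EEXIST (after dropping the new reference) if we already hold one,
 * which also verifies that threadpool_get() returned the same pool.
 */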
static int
threadpool_tester_get_unbound(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool *pool, *opool = NULL;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	error = threadpool_get(&pool, val);
	if (error) {
		TP_LOG(("%s: threadpool_get(..., %d) failed -> %d\n",
		    __func__, val, error));
		return error;
	}

	mutex_enter(&ctx->ctx_mutex);
	if (ctx->ctx_unbound[pri_to_idx(val)] == NULL)
		ctx->ctx_unbound[pri_to_idx(val)] = pool;
	else
		opool = ctx->ctx_unbound[pri_to_idx(val)];
	mutex_exit(&ctx->ctx_mutex);

	if (opool != NULL) {
		/* Should have gotten reference to existing pool. */
		TP_LOG(("%s: found existing unbound pool for pri %d (%s)\n",
		    __func__, val, opool == pool ? "match" : "NO MATCH"));
		KASSERT(opool == pool);
		threadpool_put(pool, val);
		error = EEXIST;
	} else {
		TP_LOG(("%s: created unbound pool for pri %d\n",
		    __func__, val));
	}

	return error;
}

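/*
 * Sysctl handler: writing a priority releases our cached reference to
 * the unbound pool of that priority; ENODEV if we hold none.
 */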
static int
threadpool_tester_put_unbound(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool *pool;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	/* We only ever maintain a single reference. */
	pool = ctx->ctx_unbound[pri_to_idx(val)];
	ctx->ctx_unbound[pri_to_idx(val)] = NULL;
	mutex_exit(&ctx->ctx_mutex);

	if (pool == NULL) {
		TP_LOG(("%s: no unbound pool for pri %d\n",
		    __func__, val));
		return ENODEV;
	}

	threadpool_put(pool, val);
	TP_LOG(("%s: released unbound pool for pri %d\n",
	    __func__, val));

	return 0;
}

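/*
 * Sysctl handler: writing a priority schedules the test job on the
 * cached unbound pool of that priority; ENODEV if we hold none.
 */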
static int
threadpool_tester_run_unbound(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool *pool;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	pool = ctx->ctx_unbound[pri_to_idx(val)];
	if (pool == NULL) {
		TP_LOG(("%s: no unbound pool for pri %d\n",
		    __func__, val));
		mutex_exit(&ctx->ctx_mutex);
		return ENODEV;
	}

	threadpool_schedule_job(pool, &ctx->ctx_job);
	TP_LOG(("%s: scheduled job on unbound pool for pri %d\n",
	    __func__, val));
	mutex_exit(&ctx->ctx_mutex);

	return 0;
}

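/*
 * Sysctl handler: like get_unbound, but acquires and caches a
 * reference to the per-CPU pool of the given priority.
 */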
static int
threadpool_tester_get_percpu(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool_percpu *pcpu, *opcpu = NULL;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	error = threadpool_percpu_get(&pcpu, val);
	if (error) {
		TP_LOG(("%s: threadpool_percpu_get(..., %d) failed -> %d\n",
		    __func__, val, error));
		return error;
	}

	mutex_enter(&ctx->ctx_mutex);
	if (ctx->ctx_percpu[pri_to_idx(val)] == NULL)
		ctx->ctx_percpu[pri_to_idx(val)] = pcpu;
	else
		opcpu = ctx->ctx_percpu[pri_to_idx(val)];
	mutex_exit(&ctx->ctx_mutex);

	if (opcpu != NULL) {
		/* Should have gotten reference to existing pool. */
		TP_LOG(("%s: found existing percpu pool for pri %d (%s)\n",
		    __func__, val, opcpu == pcpu ? "match" : "NO MATCH"));
		KASSERT(opcpu == pcpu);
		threadpool_percpu_put(pcpu, val);
		error = EEXIST;
	} else {
		TP_LOG(("%s: created percpu pool for pri %d\n",
		    __func__, val));
	}

	return error;
}

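/*
 * Sysctl handler: writing a priority releases our cached reference to
 * the per-CPU pool of that priority; ENODEV if we hold none.
 */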
static int
threadpool_tester_put_percpu(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool_percpu *pcpu;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	/* We only ever maintain a single reference. */
	pcpu = ctx->ctx_percpu[pri_to_idx(val)];
	ctx->ctx_percpu[pri_to_idx(val)] = NULL;
	mutex_exit(&ctx->ctx_mutex);

	if (pcpu == NULL) {
		TP_LOG(("%s: no percpu pool for pri %d\n",
		    __func__, val));
		return ENODEV;
	}

	threadpool_percpu_put(pcpu, val);
	TP_LOG(("%s: released percpu pool for pri %d\n",
	    __func__, val));

	return 0;
}

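/*
 * Sysctl handler: writing a priority schedules the test job on the
 * current CPU's pool within the cached per-CPU pool of that priority.
 */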
static int
threadpool_tester_run_percpu(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct threadpool_percpu *pcpu;
	struct threadpool *pool;
	struct sysctlnode node;
	int error, val;

	node = *rnode;
	ctx = node.sysctl_data;

	val = -1;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	mutex_enter(&ctx->ctx_mutex);
	pcpu = ctx->ctx_percpu[pri_to_idx(val)];
	if (pcpu == NULL) {
		TP_LOG(("%s: no percpu pool for pri %d\n",
		    __func__, val));
		mutex_exit(&ctx->ctx_mutex);
		return ENODEV;
	}

	pool = threadpool_percpu_ref(pcpu);
	KASSERT(pool != NULL);

	threadpool_schedule_job(pool, &ctx->ctx_job);
	TP_LOG(("%s: scheduled job on percpu pool for pri %d\n",
	    __func__, val));
	mutex_exit(&ctx->ctx_mutex);

	return 0;
}

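/*
 * Sysctl handler: reads or writes the counter that the test job
 * increments, under the context mutex.
 */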
static int
threadpool_tester_test_value(SYSCTLFN_ARGS)
{
	struct tester_context *ctx;
	struct sysctlnode node;
	unsigned int val;
	int error;

	node = *rnode;
	ctx = node.sysctl_data;

	mutex_enter(&ctx->ctx_mutex);
	val = ctx->ctx_value;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL) {
		mutex_exit(&ctx->ctx_mutex);
		return error;
	}
	ctx->ctx_value = val;
	mutex_exit(&ctx->ctx_mutex);

	return 0;
}

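/*
 * The test job: bump the counter, sleep for about a second (presumably
 * to give scheduling and cancellation something to race against), then
 * mark the job done.
 */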
static void
threadpool_tester_job(struct threadpool_job *job)
{
	struct tester_context *ctx =
	    container_of(job, struct tester_context, ctx_job);
	unsigned int oval, nval;

	TP_LOG(("%s: job = %p, ctx = %p\n", __func__, job, ctx));

	mutex_enter(&ctx->ctx_mutex);
	oval = ctx->ctx_value;
	nval = oval + 1;	/* always reference oval and nval */
	ctx->ctx_value = nval;
	mutex_exit(&ctx->ctx_mutex);

	TP_LOG(("%s: %u -> %u\n", __func__, oval, nval));
	(void) kpause("tptestjob", false, hz, NULL);

	mutex_enter(&ctx->ctx_mutex);
	threadpool_job_done(job);
	mutex_exit(&ctx->ctx_mutex);
}

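/* On any sysctl_createv() failure, bail out to the teardown path below. */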
#define	RETURN_ERROR	if (error) goto return_error

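/*
 * Module init: set up the mutex and job, then create the sysctl tree
 * under kern.threadpool_tester.  A minimal smoke test from userland,
 * assuming the module has been loaded (e.g. with modload(8)), might
 * look like this, using PRI_NONE (-1) as the priority:
 *
 *	sysctl -w kern.threadpool_tester.get_unbound=-1
 *	sysctl -w kern.threadpool_tester.run_unbound=-1
 *	sysctl kern.threadpool_tester.test_value
 *	sysctl -w kern.threadpool_tester.put_unbound=-1
 */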
static int
threadpool_tester_init(void)
{
	struct sysctllog **log = &tester_ctx.ctx_sysctllog;
	const struct sysctlnode *rnode, *cnode;
	int error;

	mutex_init(&tester_ctx.ctx_mutex, MUTEX_DEFAULT, IPL_NONE);
	threadpool_job_init(&tester_ctx.ctx_job, threadpool_tester_job,
	    &tester_ctx.ctx_mutex, "tptest");

	error = sysctl_createv(log, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "threadpool_tester",
	    SYSCTL_DESCR("threadpool testing interface"),
	    NULL, 0, NULL, 0, CTL_KERN, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "get_unbound",
	    SYSCTL_DESCR("get unbound pool of specified priority"),
	    threadpool_tester_get_unbound, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "put_unbound",
	    SYSCTL_DESCR("put unbound pool of specified priority"),
	    threadpool_tester_put_unbound, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "run_unbound",
	    SYSCTL_DESCR("run on unbound pool of specified priority"),
	    threadpool_tester_run_unbound, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "get_percpu",
	    SYSCTL_DESCR("get percpu pool of specified priority"),
	    threadpool_tester_get_percpu, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "put_percpu",
	    SYSCTL_DESCR("put percpu pool of specified priority"),
	    threadpool_tester_put_percpu, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "run_percpu",
	    SYSCTL_DESCR("run on percpu pool of specified priority"),
	    threadpool_tester_run_percpu, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "test_value",
	    SYSCTL_DESCR("test value that jobs increment"),
	    threadpool_tester_test_value, 0,
	    (void *)&tester_ctx, 0, CTL_CREATE, CTL_EOL);
	RETURN_ERROR;

	return 0;

 return_error:
	sysctl_teardown(log);
	return error;
}

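/*
 * Module fini: cancel the test job on every pool we still hold,
 * release all cached references, then destroy the job, the mutex,
 * and the sysctl tree.
 */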
static int
threadpool_tester_fini(void)
{
	pri_t pri;

	mutex_enter(&tester_ctx.ctx_mutex);
	for (pri = PRI_NONE/*-1*/; pri < PRI_COUNT; pri++) {
		struct threadpool *pool =
		    tester_ctx.ctx_unbound[pri_to_idx(pri)];
		struct threadpool_percpu *pcpu =
		    tester_ctx.ctx_percpu[pri_to_idx(pri)];

		/*
		 * threadpool_cancel_job() may be called on a pool
		 * other than what the job is scheduled on. This is
		 * safe; see comment in threadpool_cancel_job_async().
		 */

		if (pool != NULL) {
			threadpool_cancel_job(pool, &tester_ctx.ctx_job);
			threadpool_put(pool, pri);
			tester_ctx.ctx_unbound[pri_to_idx(pri)] = NULL;
		}
		if (pcpu != NULL) {
			pool = threadpool_percpu_ref(pcpu);
			threadpool_cancel_job(pool, &tester_ctx.ctx_job);
			threadpool_percpu_put(pcpu, pri);
			tester_ctx.ctx_percpu[pri_to_idx(pri)] = NULL;
		}
	}
	mutex_exit(&tester_ctx.ctx_mutex);
	threadpool_job_destroy(&tester_ctx.ctx_job);
	mutex_destroy(&tester_ctx.ctx_mutex);

	sysctl_teardown(&tester_ctx.ctx_sysctllog);

	return 0;
}

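/* Module control entry point. */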
static int
threadpool_tester_modcmd(modcmd_t cmd, void *arg __unused)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = threadpool_tester_init();
		break;

	case MODULE_CMD_FINI:
		error = threadpool_tester_fini();
		break;

	case MODULE_CMD_STAT:
	default:
		error = ENOTTY;
	}

	return error;
}