/*	$NetBSD: threadpool.c,v 1.6 2024/02/02 21:52:23 andvar Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if !defined(lint)
__RCSID("$NetBSD: threadpool.c,v 1.6 2024/02/02 21:52:23 andvar Exp $");
#endif /* !lint */

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/threadpool.h>

#include "kernspace.h"

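/*
 * Verify the reference-counted lifecycle of unbound thread pools: two
 * gets at the same priority return the same pool, a get at a different
 * priority returns a distinct pool, and each get is balanced by a put.
 */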
void
rumptest_threadpool_unbound_lifecycle(void)
{
	struct threadpool *pool0, *pool1, *pool2;
	int error;

	error = threadpool_get(&pool0, PRI_NONE);
	KASSERT(error == 0);

	error = threadpool_get(&pool1, PRI_NONE);
	KASSERT(error == 0);

	KASSERT(pool0 == pool1);

	error = threadpool_get(&pool2, PRI_KERNEL_RT);
	KASSERT(error == 0);

	KASSERT(pool0 != pool2);

	threadpool_put(pool0, PRI_NONE);
	threadpool_put(pool1, PRI_NONE);
	threadpool_put(pool2, PRI_KERNEL_RT);
}

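/*
 * Same lifecycle checks as above, but for per-CPU thread pools obtained
 * with threadpool_percpu_get() and released with threadpool_percpu_put().
 */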
void
rumptest_threadpool_percpu_lifecycle(void)
{
	struct threadpool_percpu *pcpu0, *pcpu1, *pcpu2;
	int error;

	error = threadpool_percpu_get(&pcpu0, PRI_NONE);
	KASSERT(error == 0);

	error = threadpool_percpu_get(&pcpu1, PRI_NONE);
	KASSERT(error == 0);

	KASSERT(pcpu0 == pcpu1);

	error = threadpool_percpu_get(&pcpu2, PRI_KERNEL_RT);
	KASSERT(error == 0);

	KASSERT(pcpu0 != pcpu2);

	threadpool_percpu_put(pcpu0, PRI_NONE);
	threadpool_percpu_put(pcpu1, PRI_NONE);
	threadpool_percpu_put(pcpu2, PRI_KERNEL_RT);
}

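/*
 * Shared state for the job tests: a mutex/condvar pair, a counter
 * advanced by the job functions, and the job itself.
 */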
struct test_job_data {
	kmutex_t mutex;
	kcondvar_t cond;
	unsigned int count;
	struct threadpool_job job;
};

#define	FINAL_COUNT	12345

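/*
 * Job body for the scheduling tests: bump the counter once per run and
 * wake the waiter, which keeps rescheduling the job until the counter
 * reaches FINAL_COUNT.
 */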
static void
test_job_func_schedule(struct threadpool_job *job)
{
	struct test_job_data *data =
	    container_of(job, struct test_job_data, job);

	mutex_enter(&data->mutex);
	KASSERT(data->count != FINAL_COUNT);
	data->count++;
	cv_broadcast(&data->cond);
	threadpool_job_done(job);
	mutex_exit(&data->mutex);
}

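/*
 * Job body for the cancellation tests: announce that the job has
 * started (count = 1), then block until the test driver advances the
 * counter to FINAL_COUNT - 1 before finishing.
 */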
static void
test_job_func_cancel(struct threadpool_job *job)
{
	struct test_job_data *data =
	    container_of(job, struct test_job_data, job);

	mutex_enter(&data->mutex);
	if (data->count == 0) {
		data->count = 1;
		cv_broadcast(&data->cond);
	}
	while (data->count != FINAL_COUNT - 1)
		cv_wait(&data->cond, &data->mutex);
	data->count = FINAL_COUNT;
	cv_broadcast(&data->cond);
	threadpool_job_done(job);
	mutex_exit(&data->mutex);
}

static void
init_test_job_data(struct test_job_data *data, threadpool_job_fn_t fn)
{
	mutex_init(&data->mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&data->cond, "testjob");
	threadpool_job_init(&data->job, fn, &data->mutex, "testjob");
	data->count = 0;
}

static void
fini_test_job_data(struct test_job_data *data)
{
	threadpool_job_destroy(&data->job);
	cv_destroy(&data->cond);
	mutex_destroy(&data->mutex);
}

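/*
 * Repeatedly schedule a job on an unbound pool until it has run
 * FINAL_COUNT times, bounding each wait so that a stalled job trips
 * the assertion instead of hanging the test.
 */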
void
rumptest_threadpool_unbound_schedule(void)
{
	struct test_job_data data;
	struct threadpool *pool;
	int error;

	error = threadpool_get(&pool, PRI_NONE);
	KASSERT(error == 0);

	init_test_job_data(&data, test_job_func_schedule);

	mutex_enter(&data.mutex);
	while (data.count != FINAL_COUNT) {
		threadpool_schedule_job(pool, &data.job);
		error = cv_timedwait(&data.cond, &data.mutex, hz * 2);
		KASSERT(error != EWOULDBLOCK);
	}
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_put(pool, PRI_NONE);
}

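/*
 * As above, but schedule the job on the current CPU's pool taken from
 * a per-CPU thread pool via threadpool_percpu_ref().
 */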
void
rumptest_threadpool_percpu_schedule(void)
{
	struct test_job_data data;
	struct threadpool_percpu *pcpu;
	struct threadpool *pool;
	int error;

	error = threadpool_percpu_get(&pcpu, PRI_NONE);
	KASSERT(error == 0);

	pool = threadpool_percpu_ref(pcpu);

	init_test_job_data(&data, test_job_func_schedule);

	mutex_enter(&data.mutex);
	while (data.count != FINAL_COUNT) {
		threadpool_schedule_job(pool, &data.job);
		error = cv_timedwait(&data.cond, &data.mutex, hz * 2);
		KASSERT(error != EWOULDBLOCK);
	}
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_percpu_put(pcpu, PRI_NONE);
}

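/*
 * Exercise both cancellation paths: asynchronous cancellation must fail
 * while the job is running, and threadpool_cancel_job() must wait for
 * the job to finish.
 */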
void
rumptest_threadpool_job_cancel(void)
{
	struct test_job_data data;
	struct threadpool *pool;
	int error;
	bool rv;

	error = threadpool_get(&pool, PRI_NONE);
	KASSERT(error == 0);

	init_test_job_data(&data, test_job_func_cancel);

	mutex_enter(&data.mutex);
	threadpool_schedule_job(pool, &data.job);
	while (data.count == 0)
		cv_wait(&data.cond, &data.mutex);
	KASSERT(data.count == 1);

	/* Job is already running (and is not finished); this should fail. */
	rv = threadpool_cancel_job_async(pool, &data.job);
	KASSERT(rv == false);

	data.count = FINAL_COUNT - 1;
	cv_broadcast(&data.cond);

	/* Now wait for the job to finish. */
	threadpool_cancel_job(pool, &data.job);
	KASSERT(data.count == FINAL_COUNT);
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_put(pool, PRI_NONE);
}

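/*
 * Stress the schedule/cancel path: repeatedly schedule and immediately
 * cancel the job, occasionally dropping the lock to give the job a
 * chance to start.
 */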
void
rumptest_threadpool_job_cancelthrash(void)
{
	struct test_job_data data;
	struct threadpool *pool;
	int i, error;

	error = threadpool_get(&pool, PRI_NONE);
	KASSERT(error == 0);

	init_test_job_data(&data, test_job_func_cancel);

	mutex_enter(&data.mutex);
	for (i = 0; i < 10000; i++) {
		threadpool_schedule_job(pool, &data.job);
		if ((i % 3) == 0) {
			mutex_exit(&data.mutex);
			mutex_enter(&data.mutex);
		}
		/*
		 * If the job managed to start, ensure that its exit
		 * condition is met so that we don't wait forever
		 * for the job to finish.
		 */
		data.count = FINAL_COUNT - 1;
		cv_broadcast(&data.cond);

		threadpool_cancel_job(pool, &data.job);

		/*
		 * After cancellation, either the job didn't start
		 * (data.count == FINAL_COUNT - 1, per above) or
		 * it finished (data.count == FINAL_COUNT).
		 */
		KASSERT(data.count == (FINAL_COUNT - 1) ||
		    data.count == FINAL_COUNT);

		/* Reset for the loop. */
		data.count = 0;
	}
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_put(pool, PRI_NONE);
}
289