/*	$NetBSD: threadpool.c,v 1.3 2018/12/26 18:54:20 thorpej Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if !defined(lint)
__RCSID("$NetBSD: threadpool.c,v 1.3 2018/12/26 18:54:20 thorpej Exp $");
#endif /* !lint */

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/threadpool.h>

#include "kernspace.h"

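/*
 * Verify that unbound threadpool references are shared and counted:
 * two gets at the same priority must return the same pool, while a
 * get at a different priority must return a distinct pool.
 */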
void
rumptest_threadpool_unbound_lifecycle(void)
{
	struct threadpool *pool0, *pool1, *pool2;
	int error;

	error = threadpool_get(&pool0, PRI_NONE);
	KASSERT(error == 0);

	error = threadpool_get(&pool1, PRI_NONE);
	KASSERT(error == 0);

	KASSERT(pool0 == pool1);

	error = threadpool_get(&pool2, PRI_KERNEL_RT);
	KASSERT(error == 0);

	KASSERT(pool0 != pool2);

	threadpool_put(pool0, PRI_NONE);
	threadpool_put(pool1, PRI_NONE);
	threadpool_put(pool2, PRI_KERNEL_RT);
}

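/*
 * Same lifecycle checks as above, but for the per-CPU pool variant.
 */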
void
rumptest_threadpool_percpu_lifecycle(void)
{
	struct threadpool_percpu *pcpu0, *pcpu1, *pcpu2;
	int error;

	error = threadpool_percpu_get(&pcpu0, PRI_NONE);
	KASSERT(error == 0);

	error = threadpool_percpu_get(&pcpu1, PRI_NONE);
	KASSERT(error == 0);

	KASSERT(pcpu0 == pcpu1);

	error = threadpool_percpu_get(&pcpu2, PRI_KERNEL_RT);
	KASSERT(error == 0);

	KASSERT(pcpu0 != pcpu2);

	threadpool_percpu_put(pcpu0, PRI_NONE);
	threadpool_percpu_put(pcpu1, PRI_NONE);
	threadpool_percpu_put(pcpu2, PRI_KERNEL_RT);
}

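/*
 * Shared state for the job tests: the mutex and condvar synchronize
 * the test thread with the job function, and the counter tracks how
 * far the test has progressed.
 */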
struct test_job_data {
	kmutex_t mutex;
	kcondvar_t cond;
	unsigned int count;
	struct threadpool_job job;
};

#define	FINAL_COUNT	12345

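/*
 * Job function for the scheduling tests: each run bumps the counter
 * once, wakes the waiting test thread, and marks the job done.
 */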
static void
test_job_func_schedule(struct threadpool_job *job)
{
	struct test_job_data *data =
	    container_of(job, struct test_job_data, job);

	mutex_enter(&data->mutex);
	KASSERT(data->count != FINAL_COUNT);
	data->count++;
	cv_broadcast(&data->cond);
	threadpool_job_done(job);
	mutex_exit(&data->mutex);
}

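/*
 * Job function for the cancellation test: it advertises that it is
 * running (count = 1), then blocks until the test thread advances the
 * counter, so the test can observe a job that is in progress.
 */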
static void
test_job_func_cancel(struct threadpool_job *job)
{
	struct test_job_data *data =
	    container_of(job, struct test_job_data, job);

	mutex_enter(&data->mutex);
	data->count = 1;
	cv_broadcast(&data->cond);
	while (data->count != FINAL_COUNT - 1)
		cv_wait(&data->cond, &data->mutex);
	data->count = FINAL_COUNT;
	cv_broadcast(&data->cond);
	threadpool_job_done(job);
	mutex_exit(&data->mutex);
}

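/* Set up the lock, condvar, job, and counter used by a test. */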
static void
init_test_job_data(struct test_job_data *data, threadpool_job_fn_t fn)
{
	mutex_init(&data->mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&data->cond, "testjob");
	threadpool_job_init(&data->job, fn, &data->mutex, "testjob");
	data->count = 0;
}

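/* Tear down everything init_test_job_data() created. */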
static void
fini_test_job_data(struct test_job_data *data)
{
	threadpool_job_destroy(&data->job);
	cv_destroy(&data->cond);
	mutex_destroy(&data->mutex);
}

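/*
 * Repeatedly schedule a job on an unbound pool until the counter
 * reaches FINAL_COUNT; a two-second timeout (hz * 2 ticks) on any
 * iteration fails the test.
 */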
void
rumptest_threadpool_unbound_schedule(void)
{
	struct test_job_data data;
	struct threadpool *pool;
	int error;

	error = threadpool_get(&pool, PRI_NONE);
	KASSERT(error == 0);

	init_test_job_data(&data, test_job_func_schedule);

	mutex_enter(&data.mutex);
	while (data.count != FINAL_COUNT) {
		threadpool_schedule_job(pool, &data.job);
		error = cv_timedwait(&data.cond, &data.mutex, hz * 2);
		KASSERT(error != EWOULDBLOCK);
	}
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_put(pool, PRI_NONE);
}

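/*
 * Same scheduling loop as above, but against the calling CPU's pool
 * obtained from a per-CPU pool reference.
 */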
void
rumptest_threadpool_percpu_schedule(void)
{
	struct test_job_data data;
	struct threadpool_percpu *pcpu;
	struct threadpool *pool;
	int error;

	error = threadpool_percpu_get(&pcpu, PRI_NONE);
	KASSERT(error == 0);

	pool = threadpool_percpu_ref(pcpu);

	init_test_job_data(&data, test_job_func_schedule);

	mutex_enter(&data.mutex);
	while (data.count != FINAL_COUNT) {
		threadpool_schedule_job(pool, &data.job);
		error = cv_timedwait(&data.cond, &data.mutex, hz * 2);
		KASSERT(error != EWOULDBLOCK);
	}
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_percpu_put(pcpu, PRI_NONE);
}

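/*
 * Exercise job cancellation: asynchronous cancellation must fail
 * while the job is running, and synchronous cancellation must wait
 * for the job to finish.
 */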
void
rumptest_threadpool_job_cancel(void)
{
	struct test_job_data data;
	struct threadpool *pool;
	int error;
	bool rv;

	error = threadpool_get(&pool, PRI_NONE);
	KASSERT(error == 0);

	init_test_job_data(&data, test_job_func_cancel);

	mutex_enter(&data.mutex);
	threadpool_schedule_job(pool, &data.job);
	while (data.count == 0)
		cv_wait(&data.cond, &data.mutex);
	KASSERT(data.count == 1);

	/* Job is already running (and is not finished); this should fail. */
	rv = threadpool_cancel_job_async(pool, &data.job);
	KASSERT(rv == false);

	data.count = FINAL_COUNT - 1;
	cv_broadcast(&data.cond);

	/* Now wait for the job to finish. */
	threadpool_cancel_job(pool, &data.job);
	KASSERT(data.count == FINAL_COUNT);
	mutex_exit(&data.mutex);

	fini_test_job_data(&data);

	threadpool_put(pool, PRI_NONE);
}