/*	$NetBSD: sysmon_taskq.c,v 1.23 2021/12/31 14:29:14 riastradh Exp $	*/

/*
 * Copyright (c) 2001, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose task queue for sysmon back-ends.  This can be
 * used to run callbacks that require thread context.
 */
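
/*
 * Example usage (a minimal sketch; mydev_softc, mydev_refresh_task and
 * mydev_intr are hypothetical names, not part of this facility): a
 * back-end whose interrupt handler needs to do work in thread context
 * can defer that work through the task queue.
 *
 *	static void
 *	mydev_refresh_task(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		... sleepable work, e.g. slow bus transactions ...
 *	}
 *
 *	int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		(void)sysmon_task_queue_sched(0, mydev_refresh_task, sc);
 *		return 1;
 *	}
 */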

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysmon_taskq.c,v 1.23 2021/12/31 14:29:14 riastradh Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/once.h>

#include <dev/sysmon/sysmon_taskq.h>

struct sysmon_task {
	TAILQ_ENTRY(sysmon_task) st_list;
	void (*st_func)(void *);
	void *st_arg;
	u_int st_pri;
};

static TAILQ_HEAD(, sysmon_task) sysmon_task_queue =
    TAILQ_HEAD_INITIALIZER(sysmon_task_queue);

static kmutex_t sysmon_task_queue_mtx;
static kmutex_t sysmon_task_queue_init_mtx;
static kcondvar_t sysmon_task_queue_cv;

static int sysmon_task_queue_initialized;
static int sysmon_task_queue_cleanup_sem;
static struct lwp *sysmon_task_queue_lwp;
static void sysmon_task_queue_thread(void *);

MODULE(MODULE_CLASS_MISC, sysmon_taskq, NULL);

/*
 * XXX	Normally, all initialization would be handled as part of
 *	the module(9) framework.  However, a number of users of the
 *	sysmon_taskq facility are not modular, and these call
 *	sysmon_task_queue_init() directly.  To accommodate these
 *	non-standard users, we make sure that sysmon_task_queue_init()
 *	handles multiple invocations.  We also ensure that, if any
 *	non-module user exists, the module cannot be unloaded.
 *	(We can't use module_hold() for this, since the module(9)
 *	framework itself isn't necessarily initialized yet.)
 */
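
/*
 * For example (hypothetical, non-modular caller; mydrv_attach is not a
 * real function): such a user just calls the init routine from its own
 * attach path before scheduling any tasks; repeated calls are harmless.
 *
 *	void
 *	mydrv_attach(device_t parent, device_t self, void *aux)
 *	{
 *		...
 *		sysmon_task_queue_init();
 *		...
 *	}
 */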

/*
 * tq_preinit:
 *
 *	Early one-time initialization of the task queue.
 */

ONCE_DECL(once_tq);

static int
tq_preinit(void)
{

	mutex_init(&sysmon_task_queue_mtx, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sysmon_task_queue_init_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sysmon_task_queue_cv, "smtaskq");
	sysmon_task_queue_initialized = 0;

	return 0;
}

/*
 * sysmon_task_queue_init:
 *
 *	Initialize the sysmon task queue.
 */
void
sysmon_task_queue_init(void)
{
	int error;

	(void)RUN_ONCE(&once_tq, tq_preinit);

	mutex_enter(&sysmon_task_queue_init_mtx);
	if (sysmon_task_queue_initialized++) {
		mutex_exit(&sysmon_task_queue_init_mtx);
		return;
	}

	mutex_exit(&sysmon_task_queue_init_mtx);

	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    sysmon_task_queue_thread, NULL, &sysmon_task_queue_lwp, "sysmon");
	if (error) {
		printf("Unable to create sysmon task queue thread, "
		    "error = %d\n", error);
		panic("sysmon_task_queue_init");
	}
}

/*
 * sysmon_task_queue_fini:
 *
 *	Tear down the sysmon task queue.
 */
int
sysmon_task_queue_fini(void)
{

	if (sysmon_task_queue_initialized > 1)
		return EBUSY;

	mutex_enter(&sysmon_task_queue_mtx);

	sysmon_task_queue_cleanup_sem = 1;
	cv_signal(&sysmon_task_queue_cv);

	while (sysmon_task_queue_cleanup_sem != 0)
		cv_wait(&sysmon_task_queue_cv,
			&sysmon_task_queue_mtx);

	mutex_exit(&sysmon_task_queue_mtx);

	return 0;
}

/*
 * sysmon_task_queue_thread:
 *
 *	The sysmon task queue execution thread.  We execute callbacks that
 *	have been queued for us.
 */
static void
sysmon_task_queue_thread(void *arg)
{
	struct sysmon_task *st;

	/*
	 * Run through all the tasks before we check for the exit
	 * condition; it's probably more important to actually run
	 * all the tasks before we exit.
	 */
	mutex_enter(&sysmon_task_queue_mtx);
	for (;;) {
		st = TAILQ_FIRST(&sysmon_task_queue);
		if (st != NULL) {
			TAILQ_REMOVE(&sysmon_task_queue, st, st_list);
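			/*
			 * Drop the queue lock while the callback runs;
			 * the callback may sleep or schedule further
			 * tasks.
			 */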
			mutex_exit(&sysmon_task_queue_mtx);
			(*st->st_func)(st->st_arg);
			free(st, M_TEMP);
			mutex_enter(&sysmon_task_queue_mtx);
		} else {
			/* Check for the exit condition. */
			if (sysmon_task_queue_cleanup_sem != 0)
				break;
			cv_wait(&sysmon_task_queue_cv, &sysmon_task_queue_mtx);
		}
	}
	/* Time to die. */
	sysmon_task_queue_cleanup_sem = 0;
	cv_broadcast(&sysmon_task_queue_cv);
	mutex_exit(&sysmon_task_queue_mtx);
	kthread_exit(0);
}

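/*
 * sysmon_task_queue_sched_task:
 *
 *	Enqueue a task and wake the queue thread.  The queue is kept
 *	sorted by descending st_pri, so tasks with a numerically higher
 *	priority are executed first.
 */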
static void
sysmon_task_queue_sched_task(struct sysmon_task *st)
{
	struct sysmon_task *lst;

	mutex_enter(&sysmon_task_queue_mtx);
	TAILQ_FOREACH(lst, &sysmon_task_queue, st_list) {
		if (st->st_pri > lst->st_pri) {
			TAILQ_INSERT_BEFORE(lst, st, st_list);
			break;
		}
	}

	if (lst == NULL)
		TAILQ_INSERT_TAIL(&sysmon_task_queue, st, st_list);

	cv_broadcast(&sysmon_task_queue_cv);
	mutex_exit(&sysmon_task_queue_mtx);
}

/*
 * sysmon_task_queue_sched:
 *
 *	Schedule a task for deferred execution.
 */
int
sysmon_task_queue_sched(u_int pri, void (*func)(void *), void *arg)
{
	struct sysmon_task *st;

	(void)RUN_ONCE(&once_tq, tq_preinit);

	if (sysmon_task_queue_lwp == NULL)
		aprint_debug("WARNING: Callback scheduled before sysmon "
		    "task queue thread present\n");

	if (func == NULL)
		return EINVAL;

	st = malloc(sizeof(*st), M_TEMP, M_NOWAIT);
	if (st == NULL)
		return ENOMEM;

	st->st_func = func;
	st->st_arg = arg;
	st->st_pri = pri;

	sysmon_task_queue_sched_task(st);

	return 0;
}
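
/*
 * Priority is a plain unsigned value and numerically higher values run
 * first.  As a hypothetical sketch (task_a and task_b are not real
 * symbols), if both of the following are queued while the thread is
 * busy, task_b is executed before task_a:
 *
 *	(void)sysmon_task_queue_sched(0, task_a, sc);
 *	(void)sysmon_task_queue_sched(10, task_b, sc);
 */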

struct tqbarrier {
	kmutex_t	lock;
	kcondvar_t	cv;
	bool		done;
};

static void
tqbarrier_task(void *cookie)
{
	struct tqbarrier *bar = cookie;

	mutex_enter(&bar->lock);
	bar->done = true;
	cv_broadcast(&bar->cv);
	mutex_exit(&bar->lock);
}

/*
 * sysmon_task_queue_barrier:
 *
 *	Wait for the completion of all tasks at priority pri or higher
 *	that were queued at the time of the call.
 */
void
sysmon_task_queue_barrier(u_int pri)
{
	struct sysmon_task *st;
	struct tqbarrier bar;

	(void)RUN_ONCE(&once_tq, tq_preinit);

	KASSERT(sysmon_task_queue_lwp);
	KASSERT(curlwp != sysmon_task_queue_lwp);

	mutex_init(&bar.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&bar.cv, "sysmontq");
	bar.done = false;

	st = malloc(sizeof(*st), M_TEMP, M_WAITOK);
	st->st_func = &tqbarrier_task;
	st->st_arg = &bar;
	st->st_pri = pri;

	sysmon_task_queue_sched_task(st);

	mutex_enter(&bar.lock);
	while (!bar.done)
		cv_wait(&bar.cv, &bar.lock);
	mutex_exit(&bar.lock);

	cv_destroy(&bar.cv);
	mutex_destroy(&bar.lock);
}
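
/*
 * Example (hypothetical caller, reusing the mydev_* names from the
 * sketch at the top of this file): a back-end about to detach can wait
 * for its previously scheduled callbacks to finish before freeing the
 * state they use.  With pri 0 the barrier task is queued behind every
 * task already present, so it waits for all of them.
 *
 *	(void)sysmon_task_queue_sched(0, mydev_refresh_task, sc);
 *	...
 *	sysmon_task_queue_barrier(0);
 *	free(sc, M_DEVBUF);
 */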

static int
sysmon_taskq_modcmd(modcmd_t cmd, void *arg)
{
	int ret;

	switch (cmd) {
	case MODULE_CMD_INIT:
		sysmon_task_queue_init();
		ret = 0;
		break;
	case MODULE_CMD_FINI:
		ret = sysmon_task_queue_fini();
		break;
	case MODULE_CMD_STAT:
	default:
		ret = ENOTTY;
	}

	return ret;
}