/*	$NetBSD: subr_workqueue.c,v 1.15 2007/07/13 07:21:31 rmind Exp $	*/

/*-
 * Copyright (c)2002, 2005 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.15 2007/07/13 07:21:31 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

SIMPLEQ_HEAD(workqhead, work);

struct workqueue_queue {
	kmutex_t q_mutex;
	kcondvar_t q_cv;
	struct workqhead q_queue;
	struct lwp *q_worker;
	struct cpu_info *q_ci;
	SLIST_ENTRY(workqueue_queue) q_list;
};

struct workqueue {
	SLIST_HEAD(, workqueue_queue) wq_queue;
	void (*wq_func)(struct work *, void *);
	void *wq_arg;
	const char *wq_name;
	pri_t wq_prio;
	ipl_cookie_t wq_ipl;
};

#define	POISON	0xaabbccdd

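/*
 * workqueue_queue_lookup:
 *	find the per-CPU queue bound to "ci".  if there is none (or
 *	"ci" is NULL), fall back to the first queue on the list.
 */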
static struct workqueue_queue *
workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci)
{
	struct workqueue_queue *q;

	SLIST_FOREACH(q, &wq->wq_queue, q_list)
		if (q->q_ci == ci)
			return q;

	return SLIST_FIRST(&wq->wq_queue);
}

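/*
 * workqueue_runlist:
 *	run the callback on each work item on "list".  called in the
 *	worker thread with no locks held, after the items have been
 *	detached from the queue by workqueue_run.
 */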
static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	struct work *wk;
	struct work *next;

	/*
	 * note that "list" is not a complete SIMPLEQ: its sqh_last is
	 * not valid (see workqueue_run).  we only use SIMPLEQ_FIRST and
	 * SIMPLEQ_NEXT, and fetch the next pointer before invoking the
	 * callback, since the callback is free to reuse or free "wk".
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
		next = SIMPLEQ_NEXT(wk, wk_entry);
		(*wq->wq_func)(wk, wq->wq_arg);
	}
}

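/*
 * workqueue_run:
 *	the main loop of a worker thread: sleep until work is enqueued
 *	on this thread's queue, then grab the whole pending list in one
 *	step and process it with the queue lock dropped.
 */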
static void
workqueue_run(struct workqueue *wq)
{
	struct workqueue_queue *q;

	/* find the workqueue of this kthread */
	q = workqueue_queue_lookup(wq, curlwp->l_cpu);
	KASSERT(q != NULL);

	for (;;) {
		struct workqhead tmp;

		/*
		 * we violate the abstraction of SIMPLEQ: we steal
		 * sqh_first under the lock and reinitialize the queue
		 * head, leaving tmp.sqh_last unset (poisoned under
		 * DIAGNOSTIC so that any use of it is caught).
		 */

#if defined(DIAGNOSTIC)
		tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

		mutex_enter(&q->q_mutex);
		while (SIMPLEQ_EMPTY(&q->q_queue))
			cv_wait(&q->q_cv, &q->q_mutex);
		tmp.sqh_first = q->q_queue.sqh_first; /* XXX */
		SIMPLEQ_INIT(&q->q_queue);
		mutex_exit(&q->q_mutex);

		workqueue_runlist(wq, &tmp);
	}
}

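/*
 * workqueue_worker:
 *	entry point of a worker kthread.  set the thread priority
 *	requested at workqueue_create time and enter the main loop.
 */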
static void
workqueue_worker(void *arg)
{
	struct workqueue *wq = arg;
	struct lwp *l;

	l = curlwp;
	lwp_lock(l);
	l->l_priority = wq->wq_prio;
	l->l_usrpri = wq->wq_prio;
	lwp_unlock(l);

	workqueue_run(wq);
}

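/*
 * workqueue_init:
 *	fill in the workqueue itself.  the per-CPU queues and their
 *	worker threads are set up separately by workqueue_initqueue.
 */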
static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl)
{

	wq->wq_ipl = makeiplcookie(ipl);
	wq->wq_prio = prio;
	wq->wq_name = name;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
	SLIST_INIT(&wq->wq_queue);
}

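/*
 * workqueue_initqueue:
 *	allocate one workqueue_queue, bind it to "ci" and create its
 *	worker kthread.  on failure the queue is torn down again.
 */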
static int
workqueue_initqueue(struct workqueue *wq, int ipl,
    int flags, struct cpu_info *ci)
{
	struct workqueue_queue *q;
	int error, ktf;
	cpuid_t cpuid;

#ifdef MULTIPROCESSOR
	cpuid = ci->ci_cpuid;
#else
	cpuid = 0;
#endif

	q = kmem_alloc(sizeof(struct workqueue_queue), KM_SLEEP);
	SLIST_INSERT_HEAD(&wq->wq_queue, q, q_list);
	q->q_ci = ci;

	mutex_init(&q->q_mutex, MUTEX_DRIVER, ipl);
	cv_init(&q->q_cv, wq->wq_name);
	SIMPLEQ_INIT(&q->q_queue);
	ktf = ((flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0);
	error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
	    wq, &q->q_worker, "%s/%d", wq->wq_name, (int)cpuid);
	if (error != 0) {
		/*
		 * undo the partial setup; otherwise workqueue_destroy
		 * would find a queue with no worker and wait forever
		 * in workqueue_finiqueue.
		 */
		SLIST_REMOVE_HEAD(&wq->wq_queue, q_list);
		cv_destroy(&q->q_cv);
		mutex_destroy(&q->q_mutex);
		kmem_free(q, sizeof(struct workqueue_queue));
	}

	return error;
}

struct workqueue_exitargs {
	struct work wqe_wk;
	struct workqueue_queue *wqe_q;
};

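/*
 * workqueue_exit:
 *	the last work item a dying worker processes.  clear q_worker
 *	to tell workqueue_finiqueue we are gone, then exit the kthread.
 */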
static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * only competition at this point is workqueue_finiqueue.
	 */

	KASSERT(q->q_worker == curlwp);
	mutex_enter(&q->q_mutex);
	q->q_worker = NULL;
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
	kthread_exit(0);
}

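/*
 * workqueue_finiqueue:
 *	enqueue the special exit work, wait for the worker thread to
 *	terminate, then destroy the locks and free the queue.
 */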
static void
workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q)
{
	struct workqueue_exitargs wqe;

	wq->wq_func = workqueue_exit;

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	KASSERT(q->q_worker != NULL);
	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
	cv_signal(&q->q_cv);
	while (q->q_worker != NULL) {
		cv_wait(&q->q_cv, &q->q_mutex);
	}
	mutex_exit(&q->q_mutex);
	mutex_destroy(&q->q_mutex);
	cv_destroy(&q->q_cv);
	kmem_free(q, sizeof(struct workqueue_queue));
}

/* --- */

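/*
 * example usage (an illustrative sketch, not part of this file;
 * "my_softc", "my_work_cb" and "my_intr" are hypothetical client
 * names, and the priority/IPL values are only examples):
 *
 *	struct my_softc {
 *		struct work sc_wk;	(the client embeds struct work)
 *		struct workqueue *sc_wq;
 *	};
 *
 *	error = workqueue_create(&sc->sc_wq, "mywq", my_work_cb, sc,
 *	    PRI_NONE, IPL_BIO, 0);
 *
 *	then, e.g. from my_intr():
 *
 *	workqueue_enqueue(sc->sc_wq, &sc->sc_wk, NULL);
 *
 *	my_work_cb(wk, arg) later runs in thread context with "wk"
 *	pointing at sc->sc_wk and "arg" being "sc".
 */

/*
 * workqueue_create:
 *	create a workqueue.  with WQ_PERCPU on a MULTIPROCESSOR kernel,
 *	one queue and one worker thread are created per CPU; otherwise
 *	a single queue is bound to the current CPU.
 */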
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error = 0;

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

#ifdef MULTIPROCESSOR
	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create the work-queue for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			error = workqueue_initqueue(wq, ipl, flags, ci);
			if (error)
				break;
		}
		if (error) {
			/*
			 * tear down the queues created so far; don't
			 * fall through and return the freed pointer.
			 */
			workqueue_destroy(wq);
			return error;
		}
	} else {
		error = workqueue_initqueue(wq, ipl, flags, curcpu());
		if (error) {
			kmem_free(wq, sizeof(*wq));
			return error;
		}
	}
#else
	error = workqueue_initqueue(wq, ipl, flags, curcpu());
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return error;
	}
#endif

	*wqp = wq;
	return 0;
}

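/*
 * workqueue_destroy:
 *	tear down each queue and its worker thread, then free the
 *	workqueue itself.  the caller must ensure that no further
 *	workqueue_enqueue calls can race with the teardown.
 */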
void
workqueue_destroy(struct workqueue *wq)
{
	struct workqueue_queue *q;

	while ((q = SLIST_FIRST(&wq->wq_queue)) != NULL) {
		/* unlink first: workqueue_finiqueue frees "q" */
		SLIST_REMOVE_HEAD(&wq->wq_queue, q_list);
		workqueue_finiqueue(wq, q);
	}
	kmem_free(wq, sizeof(*wq));
}

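/*
 * workqueue_enqueue:
 *	hand "wk" to the queue selected for "ci" (any queue if "ci" is
 *	NULL) and wake its worker.  "wk" must stay valid until the
 *	callback has been invoked.
 */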
void
workqueue_enqueue(struct workqueue *wq, struct work *wk, struct cpu_info *ci)
{
	struct workqueue_queue *q;

	q = workqueue_queue_lookup(wq, ci);
	KASSERT(q != NULL);

	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, wk, wk_entry);
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
}
296