/*	$NetBSD: subr_workqueue.c,v 1.25 2008/07/02 14:47:34 matt Exp $	*/

/*-
 * Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.25 2008/07/02 14:47:34 matt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/queue.h>

typedef struct work_impl {
	SIMPLEQ_ENTRY(work_impl) wk_entry;
} work_impl_t;

SIMPLEQ_HEAD(workqhead, work_impl);

struct workqueue_queue {
	kmutex_t q_mutex;
	kcondvar_t q_cv;
	struct workqhead q_queue;
	struct lwp *q_worker;
};

struct workqueue {
	void (*wq_func)(struct work *, void *);
	void *wq_arg;
	int wq_flags;

	const char *wq_name;
	pri_t wq_prio;
	void *wq_ptr;
};

#define	WQ_SIZE		(roundup2(sizeof(struct workqueue), coherency_unit))
#define	WQ_QUEUE_SIZE	(roundup2(sizeof(struct workqueue_queue), coherency_unit))

#define	POISON	0xaabbccdd

static size_t
workqueue_size(int flags)
{

	return WQ_SIZE
	    + ((flags & WQ_PERCPU) != 0 ? ncpu : 1) * WQ_QUEUE_SIZE
	    + coherency_unit;
}

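/*
 * Layout sketch (illustrative only): workqueue_create() allocates one
 * block of workqueue_size() bytes and aligns the header within it, so a
 * WQ_PERCPU workqueue on an ncpu-CPU machine looks like
 *
 *	[struct workqueue][queue 0][queue 1]...[queue ncpu-1]
 *
 * with each piece rounded up to coherency_unit to avoid false sharing.
 * workqueue_queue_lookup() below indexes into that array; for a
 * non-percpu workqueue every CPU maps to queue 0.
 */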
static struct workqueue_queue *
workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci)
{
	u_int idx = 0;

	if (wq->wq_flags & WQ_PERCPU) {
		idx = ci ? cpu_index(ci) : cpu_index(curcpu());
	}

	return (void *)((intptr_t)(wq) + WQ_SIZE + (idx * WQ_QUEUE_SIZE));
}

static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	work_impl_t *wk;
	work_impl_t *next;

	/*
	 * Note that "list" is not a complete SIMPLEQ: only sqh_first is
	 * valid.  The worker hands us the head pointer without fixing up
	 * sqh_last, so nothing here may touch the tail.
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
		/* fetch the next pointer first; the callback owns wk after this */
		next = SIMPLEQ_NEXT(wk, wk_entry);
		(*wq->wq_func)((void *)wk, wq->wq_arg);
	}
}

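/*
 * Worker kthread: sleep until work is enqueued, then steal the whole
 * pending list in one shot so that q_mutex is not held while the
 * (possibly slow) user callbacks run.
 */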
static void
workqueue_worker(void *cookie)
{
	struct workqueue *wq = cookie;
	struct workqueue_queue *q;

	/* find the queue served by this kthread */
	q = workqueue_queue_lookup(wq, curlwp->l_cpu);

	for (;;) {
		struct workqhead tmp;

		/*
		 * We violate the SIMPLEQ abstraction here: the pending
		 * list is stolen wholesale by copying sqh_first, and
		 * tmp.sqh_last is deliberately left invalid (poisoned
		 * under DIAGNOSTIC) since workqueue_runlist() never
		 * looks at the tail.
		 */

#if defined(DIAGNOSTIC)
		tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

		mutex_enter(&q->q_mutex);
		while (SIMPLEQ_EMPTY(&q->q_queue))
			cv_wait(&q->q_cv, &q->q_mutex);
		tmp.sqh_first = q->q_queue.sqh_first; /* XXX */
		SIMPLEQ_INIT(&q->q_queue);
		mutex_exit(&q->q_mutex);

		workqueue_runlist(wq, &tmp);
	}
}

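/*
 * Fill in the workqueue header.  Note that the ipl argument is unused
 * here; the per-queue locks are initialized at that ipl in
 * workqueue_initqueue().
 */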
static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl)
{

	wq->wq_prio = prio;
	wq->wq_name = name;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
}

static int
workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q,
    int ipl, struct cpu_info *ci)
{
	int error, ktf;

	KASSERT(q->q_worker == NULL);

	mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl);
	cv_init(&q->q_cv, wq->wq_name);
	SIMPLEQ_INIT(&q->q_queue);
	ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0);
	if (ci) {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index);
	} else {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s", wq->wq_name);
	}
	if (error != 0) {
		mutex_destroy(&q->q_mutex);
		cv_destroy(&q->q_cv);
		KASSERT(q->q_worker == NULL);
	}
	return error;
}

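/*
 * Teardown machinery: workqueue_destroy() points wq_func at
 * workqueue_exit(), and workqueue_finiqueue() then feeds each worker a
 * struct workqueue_exitargs as its final work item.  The worker runs
 * workqueue_exit(), which clears q_worker and terminates the kthread,
 * while workqueue_finiqueue() waits for q_worker to become NULL.
 */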
struct workqueue_exitargs {
	work_impl_t wqe_wk;
	struct workqueue_queue *wqe_q;
};

static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * The only competition at this point is workqueue_finiqueue(),
	 * which is waiting on q_cv for q_worker to be cleared.
	 */

	KASSERT(q->q_worker == curlwp);
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	mutex_enter(&q->q_mutex);
	q->q_worker = NULL;
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
	kthread_exit(0);
}

static void
workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q)
{
	struct workqueue_exitargs wqe;

	KASSERT(wq->wq_func == workqueue_exit);

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	KASSERT(q->q_worker != NULL);
	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
	cv_signal(&q->q_cv);
	while (q->q_worker != NULL) {
		cv_wait(&q->q_cv, &q->q_mutex);
	}
	mutex_exit(&q->q_mutex);
	mutex_destroy(&q->q_mutex);
	cv_destroy(&q->q_cv);
}

/* --- */

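/*
 * Example usage (an illustrative sketch, not part of this file; the
 * names "my_softc", "sc" and "myintr" are hypothetical):
 *
 *	static void
 *	myintr(struct work *wk, void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		... process one deferred request ...
 *	}
 *
 *	error = workqueue_create(&sc->sc_wq, "mywq", myintr, sc,
 *	    PRI_NONE, IPL_NET, WQ_MPSAFE);
 *	...
 *	workqueue_enqueue(sc->sc_wq, &sc->sc_work, NULL);
 *	...
 *	workqueue_destroy(sc->sc_wq);
 *
 * The caller owns the struct work embedded in its own data structure
 * and must not touch it again until the callback has been invoked.
 */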
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	struct workqueue_queue *q;
	void *ptr;
	int error = 0;

	CTASSERT(sizeof(work_impl_t) <= sizeof(struct work));

	ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP);
	wq = (void *)roundup2((intptr_t)ptr, coherency_unit);
	wq->wq_ptr = ptr;
	wq->wq_flags = flags;

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create a queue and worker thread for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			error = workqueue_initqueue(wq, q, ipl, ci);
			if (error) {
				break;
			}
		}
	} else {
		/* initialize the single queue and its worker thread */
		q = workqueue_queue_lookup(wq, NULL);
		error = workqueue_initqueue(wq, q, ipl, NULL);
	}

	if (error != 0) {
		workqueue_destroy(wq);
	} else {
		*wqp = wq;
	}

	return error;
}

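/*
 * Tear down the workqueue.  The caller must guarantee that no further
 * work will be enqueued.  For a non-percpu workqueue the CPU loop below
 * resolves every CPU to queue 0; the q_worker check keeps the repeated
 * visits harmless.
 */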
void
workqueue_destroy(struct workqueue *wq)
{
	struct workqueue_queue *q;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	wq->wq_func = workqueue_exit;
	for (CPU_INFO_FOREACH(cii, ci)) {
		q = workqueue_queue_lookup(wq, ci);
		if (q->q_worker != NULL) {
			workqueue_finiqueue(wq, q);
		}
	}
	kmem_free(wq->wq_ptr, workqueue_size(wq->wq_flags));
}

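/*
 * Enqueue one work item.  For a WQ_PERCPU workqueue, ci selects the
 * per-CPU queue (NULL means the current CPU); otherwise ci must be
 * NULL, as the KASSERT below enforces.  This may be called from the
 * ipl the workqueue was created at, since q_mutex was initialized at
 * that ipl.
 */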
void
workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci)
{
	struct workqueue_queue *q;
	work_impl_t *wk = (void *)wk0;

	KASSERT(wq->wq_flags & WQ_PERCPU || ci == NULL);
	q = workqueue_queue_lookup(wq, ci);

	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, wk, wk_entry);
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
}