/*	$NetBSD: subr_workqueue.c,v 1.7 2006/11/01 10:17:59 yamt Exp $	*/

/*-
 * Copyright (c)2002, 2005 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.7 2006/11/01 10:17:59 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>

SIMPLEQ_HEAD(workqhead, work);

struct workqueue_queue {
	struct simplelock q_lock;	/* protects q_queue and q_worker */
	int q_savedipl;			/* spl saved by workqueue_lock() */
	struct workqhead q_queue;	/* pending work items */
	struct proc *q_worker;		/* worker thread; NULL once it has exited */
};

struct workqueue {
	struct workqueue_queue wq_queue; /* todo: make this per-cpu */

	void (*wq_func)(struct work *, void *);	/* caller's callback */
	void *wq_arg;		/* opaque argument passed to wq_func */
	const char *wq_name;	/* worker thread name and sleep message */
	int wq_prio;		/* sleep priority of the worker thread */
	int wq_ipl;		/* ipl to block when locking the queue (not yet used) */
};

#define	POISON	0xaabbccdd

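/*
 * The queue is protected by raising the spl and taking q_lock:
 * workqueue_lock() currently uses splhigh() unconditionally as a
 * placeholder for splraiseipl(wq->wq_ipl) and records the previous
 * level in q_savedipl, which workqueue_unlock() restores.  The queue
 * pointer itself serves as the wakeup()/ltsleep() wait channel.
 */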
static void
workqueue_lock(struct workqueue *wq, struct workqueue_queue *q)
{
	int s;

#if 0 /* notyet */
	s = splraiseipl(wq->wq_ipl);
#else
	s = splhigh(); /* XXX */
#endif
	simple_lock(&q->q_lock);
	q->q_savedipl = s;
}

static void
workqueue_unlock(struct workqueue *wq, struct workqueue_queue *q)
{
	int s = q->q_savedipl;

	simple_unlock(&q->q_lock);
	splx(s);
}

static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	struct work *wk;
	struct work *next;

	/*
	 * note that "list" is not a complete SIMPLEQ: only sqh_first was
	 * copied from the queue head (see workqueue_run()), so walk the
	 * entries by hand and never touch sqh_last.
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
		next = SIMPLEQ_NEXT(wk, wk_entry);	/* wq_func may reuse or free wk */
		(*wq->wq_func)(wk, wq->wq_arg);
	}
}
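
/*
 * workqueue_run: the body of the worker thread.  Sleep until work is
 * queued, then steal the entire batch under the lock and run the
 * callbacks with the lock dropped, so they are free to sleep or to
 * enqueue more work.
 */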
static void
workqueue_run(struct workqueue *wq)
{
	struct workqueue_queue *q = &wq->wq_queue;

	for (;;) {
		struct workqhead tmp;
		int error;

		/*
		 * we violate the SIMPLEQ abstraction here: the whole list is
		 * stolen by copying sqh_first into a local head and
		 * re-initializing the queue, which leaves tmp.sqh_last
		 * invalid (poisoned under DIAGNOSTIC to catch misuse).
		 */

#if defined(DIAGNOSTIC)
		tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

		workqueue_lock(wq, q);
		while (SIMPLEQ_EMPTY(&q->q_queue)) {
			error = ltsleep(q, wq->wq_prio, wq->wq_name, 0,
			    &q->q_lock);
			if (error) {
				panic("%s: %s error=%d",
				    __func__, wq->wq_name, error);
			}
		}
		tmp.sqh_first = q->q_queue.sqh_first; /* XXX */
		SIMPLEQ_INIT(&q->q_queue);
		workqueue_unlock(wq, q);

		workqueue_runlist(wq, &tmp);
	}
}

static void
workqueue_worker(void *arg)
{
	struct workqueue *wq = arg;

	workqueue_run(wq);
}

static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    int prio, int ipl)
{

	wq->wq_ipl = ipl;
	wq->wq_prio = prio;
	wq->wq_name = name;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
}

static int
workqueue_initqueue(struct workqueue *wq)
{
	struct workqueue_queue *q = &wq->wq_queue;
	int error;

	simple_lock_init(&q->q_lock);
	SIMPLEQ_INIT(&q->q_queue);
	error = kthread_create1(workqueue_worker, wq, &q->q_worker,
	    wq->wq_name);

	return error;
}

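/*
 * Teardown is requested with a sentinel work item.  struct
 * workqueue_exitargs embeds the struct work as its first member so
 * that workqueue_exit() can cast the work pointer back to the
 * containing structure.
 */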
struct workqueue_exitargs {
	struct work wqe_wk;
	struct workqueue_queue *wqe_q;
};

static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * no need to raise the ipl here because the only competition
	 * at this point is workqueue_finiqueue.
	 */

	KASSERT(q->q_worker == curproc);
	simple_lock(&q->q_lock);
	q->q_worker = NULL;
	simple_unlock(&q->q_lock);
	wakeup(q);
	kthread_exit(0);
}

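/*
 * workqueue_finiqueue: shut the queue down.  Repoint wq_func at
 * workqueue_exit(), enqueue the sentinel work item and sleep until the
 * worker has run it, cleared q_worker and exited.
 */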
static void
workqueue_finiqueue(struct workqueue *wq)
{
	struct workqueue_queue *q = &wq->wq_queue;
	struct workqueue_exitargs wqe;

	wq->wq_func = workqueue_exit;

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	KASSERT(q->q_worker != NULL);
	workqueue_lock(wq, q);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
	wakeup(q);
	while (q->q_worker != NULL) {
		int error;

		error = ltsleep(q, wq->wq_prio, "wqfini", 0, &q->q_lock);
		if (error) {
			panic("%s: %s error=%d",
			    __func__, wq->wq_name, error);
		}
	}
	workqueue_unlock(wq, q);
}

/* --- */

int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    int prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error;

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);
	if (wq == NULL) {
		return ENOMEM;
	}

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	error = workqueue_initqueue(wq);
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return error;
	}

	*wqp = wq;
	return 0;
}

void
workqueue_destroy(struct workqueue *wq)
{

	workqueue_finiqueue(wq);
	kmem_free(wq, sizeof(*wq));
}

void
workqueue_enqueue(struct workqueue *wq, struct work *wk)
{
	struct workqueue_queue *q = &wq->wq_queue;
	boolean_t wasempty;

	workqueue_lock(wq, q);
	wasempty = SIMPLEQ_EMPTY(&q->q_queue);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, wk, wk_entry);
	workqueue_unlock(wq, q);

	if (wasempty) {
		/* the worker may be asleep waiting for work; wake it up */
		wakeup(q);
	}
}
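
/*
 * Illustrative usage sketch, not part of this file: a caller embeds a
 * struct work in its own data structure, creates a queue with
 * workqueue_create(), hands items to the worker thread with
 * workqueue_enqueue(), and tears everything down with
 * workqueue_destroy().  The names example_item, example_work_cb,
 * example_init and example_fini are hypothetical, and the prio/ipl
 * values are merely plausible choices.
 */
#if 0	/* example only, not compiled */
struct example_item {
	struct work ei_work;	/* first member, so the callback can cast back */
	int ei_value;
};

static struct workqueue *example_wq;
static struct example_item example_ei;

static void
example_work_cb(struct work *wk, void *arg)
{
	struct example_item *ei = (void *)wk;

	/* runs in the worker kthread created by workqueue_create() */
	printf("example: processed item, value=%d\n", ei->ei_value);
}

static int
example_init(void)
{
	int error;

	error = workqueue_create(&example_wq, "examplewq",
	    example_work_cb, NULL, PWAIT, IPL_NONE, 0);
	if (error) {
		return error;
	}

	example_ei.ei_value = 42;
	/* hand the item to the worker; wakes the thread if it was idle */
	workqueue_enqueue(example_wq, &example_ei.ei_work);
	return 0;
}

static void
example_fini(void)
{

	/* all queued work must have completed before destroying the queue */
	workqueue_destroy(example_wq);
}
#endif	/* example only */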