/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>

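/*
 * When taskq_now is nonzero, taskq_dispatch() runs each function
 * synchronously in the calling thread instead of queueing it.
 * system_taskq is the shared queue created by system_taskq_init().
 */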
int taskq_now;
taskq_t *system_taskq;

typedef struct task {
	struct task	*task_next;
	struct task	*task_prev;
	task_func_t	*task_func;
	void		*task_arg;
} task_t;

#define	TASKQ_ACTIVE	0x00010000

struct taskq {
	kmutex_t	tq_lock;
	krwlock_t	tq_threadlock;
	kcondvar_t	tq_dispatch_cv;
	kcondvar_t	tq_wait_cv;
	thread_t	*tq_threadlist;
	int		tq_flags;
	int		tq_active;
	int		tq_nthreads;
	int		tq_nalloc;
	int		tq_minalloc;
	int		tq_maxalloc;
	kcondvar_t	tq_maxalloc_cv;
	int		tq_maxalloc_wait;
	task_t		*tq_freelist;
	task_t		tq_task;
};

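/*
 * Allocate a task_t, taking it from the per-queue free list when one is
 * available (and at least tq_minalloc structures exist), otherwise from
 * kmem_alloc().  Called with tq_lock held; the lock is dropped and
 * reacquired around the kmem_alloc() call.  KM_SLEEP callers that would
 * exceed tq_maxalloc are throttled rather than failed.
 */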
static task_t *
task_alloc(taskq_t *tq, int tqflags)
{
	task_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		tq->tq_freelist = t->task_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So, we just delay for one second
			 * to throttle the allocation rate.  If tasks complete
			 * before the one-second timeout expires, task_free()
			 * will signal us and we will immediately retry the
			 * allocation.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;		/* signaled */
		}
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (task_t), tqflags);

		mutex_enter(&tq->tq_lock);
		if (t != NULL)
			tq->tq_nalloc++;
	}
	return (t);
}

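/*
 * Return a task_t either to the free list (while no more than tq_minalloc
 * structures are outstanding) or to kmem_free(), and wake any dispatcher
 * throttled in task_alloc().  Called with tq_lock held.
 */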
static void
task_free(taskq_t *tq, task_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->task_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_free(t, sizeof (task_t));
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}

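/*
 * Queue func(arg) for asynchronous execution by tq's worker threads.
 * TQ_FRONT places the task at the head of the list; otherwise it goes at
 * the tail.  When taskq_now is set, the function is run immediately in
 * the caller.  Returns 0 if a task_t could not be allocated (only
 * possible for non-sleeping tqflags), nonzero otherwise.
 */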
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
{
	task_t *t;

	if (taskq_now) {
		func(arg);
		return (1);
	}

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return (0);
	}
	if (tqflags & TQ_FRONT) {
		t->task_next = tq->tq_task.task_next;
		t->task_prev = &tq->tq_task;
	} else {
		t->task_next = &tq->tq_task;
		t->task_prev = tq->tq_task.task_prev;
	}
	t->task_next->task_prev = t;
	t->task_prev->task_next = t;
	t->task_func = func;
	t->task_arg = arg;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
	return (1);
}

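/*
 * Block until the task list is empty and no worker thread is actively
 * running a task.
 */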
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.task_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}

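/*
 * Worker thread loop: remove tasks from the head of the list and execute
 * them until TASKQ_ACTIVE is cleared.  tq_threadlock is held as reader
 * while a task runs, and tq_wait_cv is broadcast whenever the queue
 * drains or a thread exits.
 */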
static void *
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	task_t *t;

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.task_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->task_prev->task_next = t->task_next;
		t->task_next->task_prev = t->task_prev;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		t->task_func(t->task_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		task_free(tq, t);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
	return (NULL);
}

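/*
 * Create a task queue backed by nthreads bound threads.  With
 * TASKQ_THREADS_CPU_PCT, nthreads is instead a percentage (0-100) of the
 * online CPUs.  minalloc and maxalloc bound the number of cached and
 * outstanding task_t structures; TASKQ_PREPOPULATE fills the free list
 * with minalloc entries up front.
 */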
/*ARGSUSED*/
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
	int t;

	if (flags & TASKQ_THREADS_CPU_PCT) {
		int pct;
		ASSERT3S(nthreads, >=, 0);
		ASSERT3S(nthreads, <=, 100);
		pct = MIN(nthreads, 100);
		pct = MAX(pct, 0);

		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
	} else {
		ASSERT3S(nthreads, >=, 1);
	}

	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_task.task_next = &tq->tq_task;
	tq->tq_task.task_prev = &tq->tq_task;
	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			task_free(tq, task_alloc(tq, KM_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	for (t = 0; t < nthreads; t++)
		(void) thr_create(0, 0, taskq_thread,
		    tq, THR_BOUND, &tq->tq_threadlist[t]);

	return (tq);
}

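/*
 * Tear down a task queue: wait for all pending tasks to finish, clear
 * TASKQ_ACTIVE so the workers exit, join them, and release the task_t
 * cache and synchronization objects.
 */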
void
taskq_destroy(taskq_t *tq)
{
	int t;
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, KM_SLEEP));
	}

	mutex_exit(&tq->tq_lock);

	for (t = 0; t < nthreads; t++)
		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));

	rw_destroy(&tq->tq_threadlock);
	mutex_destroy(&tq->tq_lock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);

	kmem_free(tq, sizeof (taskq_t));
}

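/*
 * Return nonzero if t names one of tq's worker threads (t is a thread_t
 * passed as a pointer-sized value).  Always true when taskq_now forces
 * synchronous dispatch.
 */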
int
taskq_member(taskq_t *tq, void *t)
{
	int i;

	if (taskq_now)
		return (1);

	for (i = 0; i < tq->tq_nthreads; i++)
		if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
			return (1);

	return (0);
}

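/*
 * Create and tear down the global system_taskq.
 */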
void
system_taskq_init(void)
{
	system_taskq = taskq_create("system_taskq", 64, minclsyspri, 4, 512,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}

void
system_taskq_fini(void)
{
	taskq_destroy(system_taskq);
	system_taskq = NULL; /* defensive */
}