/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

static uint_t spl_taskq_thread_timeout_ms = 10000;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_timeout_ms, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_timeout_ms,
	"Time to require a dynamic thread be idle before it gets cleaned up");

static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

static uint_t spl_taskq_thread_sequential = 4;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_sequential, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");
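/*
 * Illustrative usage note: because these tunables are registered with
 * module_param(), they may be set at module load time or, when registered
 * writable (mode 0644), adjusted at runtime through sysfs, e.g.:
 *
 *	echo 8 > /sys/module/spl/parameters/spl_taskq_thread_sequential
 *
 * spl_taskq_thread_dynamic is registered read-only (0444) and therefore
 * can only be set when the module is loaded.
 */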
/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql = NULL;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}
	return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}
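/*
 * Illustrative consequence of the allocation policy above (sketch only;
 * tq, func, and arg are hypothetical): a dispatch issued with TQ_NOSLEEP,
 * or with TQ_NOALLOC when the free list is exhausted, may fail and must
 * be checked, while a TQ_SLEEP dispatch retries until it succeeds:
 *
 *	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) == TASKQID_INVALID)
 *		... handle the failed dispatch ...
 */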
/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
	taskq_ent_t *w;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l = NULL;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}

static void
task_expire(spl_timer_list_t tl)
{
	struct timer_list *tmr = (struct timer_list *)tl;
	taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
	task_expire_impl(t);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}
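/*
 * Worked example of the invariant above (illustrative): if tasks 7 and 9
 * sit on the pending list, task 8 is on a worker's active list, and
 * tq_next_id is 10, taskq_lowest_id() returns 7. Once task 7 completes,
 * the lowest outstanding id advances to 8 and any waiter in
 * taskq_wait_outstanding(tq, 7) is released.
 */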
/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l = NULL;

	ASSERT(tq);
	ASSERT(tqt);

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l = NULL;
	taskq_ent_t *t;

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
	taskq_thread_t *tqt;
	struct list_head *l = NULL;
	taskq_ent_t *t;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			/*
			 * Instead of returning tqt_task, we just return a non
			 * NULL value to prevent misuse, since tqt_task only
			 * has two valid fields.
			 */
			return (ERR_PTR(-EBUSY));
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * taskqid equal to or lower than the passed 'id' have been completed. Note
 * that all task id's are assigned monotonically at dispatch time. Zero may
 * be passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

int
taskq_member(taskq_t *tq, kthread_t *t)
{
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);
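/*
 * Example usage of the wait interfaces above (illustrative sketch; tq,
 * my_func, and my_arg are hypothetical):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != TASKQID_INVALID)
 *		taskq_wait_id(tq, id);		(just this one task)
 *	taskq_wait_outstanding(tq, 0);		(everything dispatched so far)
 *	taskq_wait(tq);				(block until the taskq is empty)
 *
 * Within a task function, taskq_of_curthread() returns the executing
 * taskq and taskq_member(tq, curthread) is non-zero.
 */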
/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
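/*
 * Illustrative summary of the taskq_cancel_id() return values above:
 * 0 means the task was removed before it ran (and, if preallocated, the
 * caller is now responsible for freeing the entry), ENOENT means the id
 * was never dispatched or has already completed, and EBUSY means the
 * task was running; in the EBUSY case the call has already blocked in
 * taskq_wait_id(), so the task is finished by the time it returns.
 */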
static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)
			goto out;
	}

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
	if (flags & TQ_NOQUEUE)
		list_add(&t->tqent_list, &tq->tq_prio_list);
	/* Queue to the priority list instead of the pending list */
	else if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
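/*
 * Dispatch flag semantics in brief (illustrative sketch; tq, my_func,
 * and my_arg are hypothetical): TQ_FRONT appends to the priority list so
 * the task runs ahead of normal pending work, while TQ_NOQUEUE inserts
 * at the head of the priority list but only when an idle thread exists
 * or a new one can be spawned:
 *
 *	(void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP | TQ_FRONT);
 *	if (taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP | TQ_NOQUEUE) ==
 *	    TASKQID_INVALID)
 *		... all threads were busy and none could be spawned ...
 */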
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = TASKQID_INVALID;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
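/*
 * Example delayed dispatch (illustrative sketch; tq, my_func, and my_arg
 * are hypothetical): expire_time is an absolute expiration time in
 * jiffies, so a relative delay is expressed against the current jiffies
 * value, e.g. to run my_func roughly five seconds from now:
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *	    jiffies + msecs_to_jiffies(5000));
 */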
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;
		goto out;
	}

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)
			goto out2;
		flags |= TQ_FRONT;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Make sure the entry is not on some other taskq; it is important to
	 * ASSERT() under lock
	 */
	ASSERT(taskq_empty_ent(t));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out2:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	timer_setup(&t->tqent_timer, NULL, 0);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
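/*
 * Example of the preallocated-entry interface above (illustrative
 * sketch; tq, my_func, and my_arg are hypothetical): callers typically
 * embed a taskq_ent_t in a longer-lived structure so no allocation is
 * needed at dispatch time:
 *
 *	taskq_ent_t ent;
 *	taskq_init_ent(&ent);
 *	taskq_dispatch_ent(tq, my_func, my_arg, 0, &ent);
 *	if (ent.tqent_id == TASKQID_INVALID)
 *		... the taskq was inactive and the task was not queued ...
 */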
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	int no_work =
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */

	/*
	 * If we would have said stop before, let's instead wait a bit, maybe
	 * we'll see more work come our way soon...
	 */
	if (no_work) {
		/* if it's 0, we want the old behavior. */
		/* if the taskq is being torn down, we also want to go away. */
		if (spl_taskq_thread_timeout_ms == 0 ||
		    !(tq->tq_flags & TASKQ_ACTIVE))
			return (1);
		unsigned long lasttime = tq->lastshouldstop;
		if (lasttime > 0) {
			if (time_after(jiffies, lasttime +
			    msecs_to_jiffies(spl_taskq_thread_timeout_ms)))
				return (1);
			else
				return (0);
		} else {
			tq->lastshouldstop = jiffies;
		}
	} else {
		tq->lastshouldstop = 0;
	}
	return (0);
}
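/*
 * Illustrative timeline for the exit policy above, using the default
 * spl_taskq_thread_timeout_ms of 10000: a surplus dynamic thread that
 * first observes "no work" records the current jiffies value in
 * tq->lastshouldstop and keeps polling; it only reports that it should
 * stop if it is still idle ten seconds later. Any intervening work
 * resets lastshouldstop to 0 and the countdown starts over.
 */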
static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;
	taskq_ent_t dup_task = {};

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags here.
			 *
			 * Also use an on stack taskq_ent_t for tqt_task
			 * assignment in this case; we want to make sure
			 * to duplicate all fields, so the values are
			 * correct when it's accessed via DTRACE_PROBE*.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
				dup_task = *t;
				t = &dup_task;
			}
			tqt->tqt_task = t;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = TASKQID_INVALID;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);
	thread_exit();

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;
	int nthreads = threads_arg;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(threads_arg, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
	if (flags & TASKQ_THREADS_CPU_PCT) {
		tq->tq_hp_support = B_TRUE;
		if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
		    &tq->tq_hp_cb_node) != 0) {
			kmem_free(tq, sizeof (*tq));
			return (NULL);
		}
	}
#endif

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = kmem_strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_cpu_pct = threads_arg;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	tq->lastshouldstop = 0;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;
	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * taskq_thread may have touched tq_nspawn, but these threads are
	 * not dynamically spawned, so we reset it to 0 here.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
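/*
 * Example taskq lifecycle (illustrative sketch; the name and sizes are
 * hypothetical):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 100, maxclsyspri,
 *	    8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
 *	...dispatch tasks and wait for them...
 *	taskq_destroy(tq);
 *
 * With TASKQ_THREADS_CPU_PCT the second argument is a percentage, so 100
 * requests one thread per online CPU; without it the argument is an
 * absolute thread count.
 */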

/*
 * Create a taskq with a specified number of pool threads.  Allocate
 * and return an array of nthreads kthread_t pointers, one for each
 * thread in the pool.  The array is not ordered and must be freed
 * by the caller.
 */
taskq_t *
taskq_create_synced(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int i = 0;
	kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads,
	    KM_SLEEP);

	flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH);

	/* taskq_create spawns all the threads before returning */
	tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
	    flags | TASKQ_PREPOPULATE);
	VERIFY(tq != NULL);
	VERIFY(tq->tq_nthreads == nthreads);

	list_for_each_entry(tqt, &tq->tq_thread_list, tqt_thread_list) {
		kthreads[i] = tqt->tqt_thread;
		i++;
	}

	ASSERT3S(i, ==, nthreads);
	*ktpp = kthreads;

	return (tq);
}
EXPORT_SYMBOL(taskq_create_synced);
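/*
 * Illustrative only: a caller that needs direct handles to the pool
 * threads might use taskq_create_synced() as sketched below; the names
 * are hypothetical.
 *
 *	kthread_t **threads;
 *	taskq_t *tq = taskq_create_synced("my_sync_tq", 4, minclsyspri,
 *	    4, INT_MAX, 0, &threads);
 *	...	// e.g. bind or account each threads[i] individually
 *	kmem_free(threads, sizeof (*threads) * 4);
 *	taskq_destroy(tq);
 *
 * Note the caller frees the array, as the block comment above states.
 */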

static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API Change
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete.  Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq = NULL;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* reset value */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending task is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);
	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
    "Write nonzero to kick stuck taskqs to spawn more threads");
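/*
 * Illustrative only: because the parameter is registered with mode
 * 0644, an administrator can trigger the kick from user space, e.g.
 * (sysfs path assumed, depending on how the module is packaged):
 *
 *	echo 1 > /sys/module/spl/parameters/spl_taskq_kick
 *
 * Writing a nonzero value runs param_set_taskq_kick() above, which
 * scans tq_list and spawns a thread for any taskq whose oldest pending
 * task has been waiting for more than 5 seconds.
 */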

#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq.  We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set.  We need to redo the percentage calculation
 * every time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (err);
	}

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	/*
	 * The "+ 1" allows for the incoming CPU, which may not yet be
	 * reflected in num_online_cpus() when this callback runs.
	 */
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
		return (err);
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}
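/*
 * Worked example (illustrative, assuming the incoming CPU is not yet
 * counted by num_online_cpus()): a taskq created with
 * TASKQ_THREADS_CPU_PCT and a 100% request on an 8-CPU system starts
 * with (8 * 100) / 100 = 8 threads.  When a 9th CPU comes online,
 * spl_taskq_expand() recomputes tq_maxthreads as
 * ((8 + 1) * 100) / 100 = 9 and creates one more pool thread.
 */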

/*
 * While we don't support offlining CPUs, it is possible that a CPU will
 * fail to come online successfully.  We do need to be able to handle
 * this case gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif
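/*
 * Worked example (illustrative, continuing the scenario above): if
 * onlining the 9th CPU fails after spl_taskq_expand() has already
 * raised a 100% taskq from 8 to 9 threads, the teardown callback
 * recomputes tq_maxthreads as (8 * 100) / 100 = 8 and stops exactly
 * one pool thread, matching its ASSERT3U().
 */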

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (-ENOMEM);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (-ENOMEM);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (-ENOMEM);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *	taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}