/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");


static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
        "Allow non-default priority for taskq threads");

static uint_t spl_taskq_thread_sequential = 4;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_sequential, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_sequential,
        "Create new taskq threads after N sequential tasks");

/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);
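
/*
 * Illustrative sketch only (not part of this file's code): a typical
 * consumer dispatches a task_func_t callback to system_taskq and may wait
 * on the returned id. The callback and argument below are hypothetical.
 *
 *      static void my_func(void *arg) { ... }
 *
 *      taskqid_t id = taskq_dispatch(system_taskq, my_func, arg, TQ_SLEEP);
 *      if (id != TASKQID_INVALID)
 *              taskq_wait_id(system_taskq, id);
 */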

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
        if (flags & TQ_NOSLEEP)
                return (KM_NOSLEEP);

        if (flags & TQ_PUSHPAGE)
                return (KM_PUSHPAGE);

        return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
        struct list_head *tql = NULL;
        taskq_t *tq;

        list_for_each_prev(tql, &tq_list) {
                tq = list_entry(tql, taskq_t, tq_taskqs);
                if (strcmp(name, tq->tq_name) == 0)
                        return (tq->tq_instance);
        }
        return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
        taskq_ent_t *t;
        int count = 0;

        ASSERT(tq);
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
                ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
                ASSERT(!timer_pending(&t->tqent_timer));

                list_del_init(&t->tqent_list);
                return (t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                return (NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        return (NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for a taskq_ent_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
                    tq->tq_lock_class);
                if (count < 100) {
                        count++;
                        goto retry;
                }
        }

        spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
        spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

        if (t) {
                taskq_init_ent(t);
                tq->tq_nalloc++;
        }

        return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(list_empty(&t->tqent_list));
        ASSERT(!timer_pending(&t->tqent_timer));

        kmem_free(t, sizeof (taskq_ent_t));
        tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);

        /* Wake tasks blocked in taskq_wait_id() */
        wake_up_all(&t->tqent_waitq);

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = TASKQID_INVALID;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                t->tqent_flags = 0;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
        taskq_ent_t *w;
        taskq_t *tq = t->tqent_taskq;
        struct list_head *l = NULL;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                return;
        }

        t->tqent_birth = jiffies;
        DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

        /*
         * The priority list must be maintained in strict task id order
         * from lowest to highest for lowest_id to be easily calculable.
         */
        list_del(&t->tqent_list);
        list_for_each_prev(l, &tq->tq_prio_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);
                        break;
                }
        }
        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        wake_up(&tq->tq_work_waitq);
}

static void
task_expire(spl_timer_list_t tl)
{
        struct timer_list *tmr = (struct timer_list *)tl;
        taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
        task_expire_impl(t);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_delay_list)) {
                t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != TASKQID_INVALID);
                lowest_id = MIN(lowest_id, tqt->tqt_id);
        }

        return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
        taskq_thread_t *w;
        struct list_head *l = NULL;

        ASSERT(tq);
        ASSERT(tqt);

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
        struct list_head *l = NULL;
        taskq_ent_t *t;

        list_for_each(l, lh) {
                t = list_entry(l, taskq_ent_t, tqent_list);

                if (t->tqent_id == id)
                        return (t);

                if (t->tqent_id > id)
                        break;
        }

        return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
        taskq_thread_t *tqt;
        struct list_head *l = NULL;
        taskq_ent_t *t;

        t = taskq_find_list(tq, &tq->tq_delay_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_prio_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_pend_list, id);
        if (t)
                return (t);

        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {
                        /*
                         * Instead of returning tqt_task, we just return a non
                         * NULL value to prevent misuse, since tqt_task only
                         * has two valid fields.
                         */
                        return (ERR_PTR(-EBUSY));
                }
        }

        return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (taskq_find(tq, id) == NULL);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task id's are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
        id = id ? id : tq->tq_next_id - 1;
        wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
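
/*
 * Illustrative sketch of how the wait primitives above differ (hypothetical
 * taskq 'tq', callbacks, and arguments; error handling omitted):
 *
 *      taskqid_t a = taskq_dispatch(tq, func_a, arg_a, TQ_SLEEP);
 *      taskqid_t b = taskq_dispatch(tq, func_b, arg_b, TQ_SLEEP);
 *
 *      taskq_wait_id(tq, b);           // 'b' is done; 'a' may still be running
 *      taskq_wait_outstanding(tq, 0);  // everything dispatched so far is done
 *      taskq_wait(tq);                 // the taskq is completely drained
 */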

int
taskq_member(taskq_t *tq, kthread_t *t)
{
        return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

taskq_t *
taskq_of_curthread(void)
{
        return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
        taskq_ent_t *t;
        int rc = ENOENT;
        unsigned long flags;

        ASSERT(tq);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        t = taskq_find(tq, id);
        if (t && t != ERR_PTR(-EBUSY)) {
                list_del_init(&t->tqent_list);
                t->tqent_flags |= TQENT_FLAG_CANCEL;

                /*
                 * When canceling the lowest outstanding task id we
                 * must recalculate the new lowest outstanding id.
                 */
                if (tq->tq_lowest_id == t->tqent_id) {
                        tq->tq_lowest_id = taskq_lowest_id(tq);
                        ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
                }

                /*
                 * The task_expire() function takes the tq->tq_lock so drop
                 * the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
                        spin_unlock_irqrestore(&tq->tq_lock, flags);
                        del_timer_sync(&t->tqent_timer);
                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                }

                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
                        task_done(tq, t);

                rc = 0;
        }
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        if (t == ERR_PTR(-EBUSY)) {
                taskq_wait_id(tq, id);
                rc = EBUSY;
        }

        return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
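
/*
 * Illustrative sketch of the taskq_cancel_id() return convention described
 * above (hypothetical caller and helper): 0 means the task was cancelled
 * before it ran, EBUSY means it was already executing and has now finished,
 * and ENOENT means it had already completed or was never dispatched.
 *
 *      int error = taskq_cancel_id(tq, id);
 *      if (error == 0)
 *              cleanup_unran_task(arg);        // hypothetical helper
 */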

static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = TASKQID_INVALID;
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
                /* Dynamic taskq may be able to spawn another thread */
                if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
                    taskq_thread_spawn(tq) == 0)
                        goto out;
        }

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
        if (flags & TQ_NOQUEUE)
                list_add(&t->tqent_list, &tq->tq_prio_list);
        /* Queue to the priority list instead of the pending list */
        else if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_timer.function = NULL;
        t->tqent_timer.expires = 0;

        t->tqent_birth = jiffies;
        DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        /* Spawn additional taskq threads if required. */
        if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
        taskqid_t rc = TASKQID_INVALID;
        taskq_ent_t *t;
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the delay list for subsequent execution */
        list_add_tail(&t->tqent_list, &tq->tq_delay_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_timer.function = task_expire;
        t->tqent_timer.expires = (unsigned long)expire_time;
        add_timer(&t->tqent_timer);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
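
/*
 * Illustrative sketch only (hypothetical caller): expire_time is an absolute
 * expiration time in clock ticks, since it is copied directly into
 * tqent_timer.expires above, so callers normally pass "now + delay".
 * This assumes the usual SPL time helpers ddi_get_lbolt() and SEC_TO_TICK()
 * are available to the caller.
 *
 *      taskqid_t id = taskq_dispatch_delay(tq, my_func, arg, TQ_SLEEP,
 *          ddi_get_lbolt() + SEC_TO_TICK(5));
 */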

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
        unsigned long irqflags;
        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
            tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {
                t->tqent_id = TASKQID_INVALID;
                goto out;
        }

        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
                /* Dynamic taskq may be able to spawn another thread */
                if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
                    taskq_thread_spawn(tq) == 0)
                        goto out2;
                flags |= TQ_FRONT;
        }

        spin_lock(&t->tqent_lock);

        /*
         * Make sure the entry is not on some other taskq; it is important to
         * ASSERT() under lock
         */
        ASSERT(taskq_empty_ent(t));

        /*
         * Mark it as a prealloc'd task. This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;

        t->tqent_birth = jiffies;
        DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
out2:
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
        return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
        spin_lock_init(&t->tqent_lock);
        init_waitqueue_head(&t->tqent_waitq);
        timer_setup(&t->tqent_timer, NULL, 0);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_id = 0;
        t->tqent_func = NULL;
        t->tqent_arg = NULL;
        t->tqent_flags = 0;
        t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
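
/*
 * Illustrative sketch of the preallocated-entry path (hypothetical caller
 * and structure): the caller owns the taskq_ent_t, initializes it once with
 * taskq_init_ent(), and hands it to taskq_dispatch_ent(), which marks it
 * TQENT_FLAG_PREALLOC so the taskq never frees it.
 *
 *      taskq_ent_t *ent = &my_struct->work_ent;        // caller-owned storage
 *      taskq_init_ent(ent);
 *      taskq_dispatch_ent(tq, my_func, my_struct, 0, ent);
 */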

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
        struct list_head *list;

        if (!list_empty(&tq->tq_prio_list))
                list = &tq->tq_prio_list;
        else if (!list_empty(&tq->tq_pend_list))
                list = &tq->tq_pend_list;
        else
                return (NULL);

        return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
        taskq_t *tq = (taskq_t *)arg;
        unsigned long flags;

        if (taskq_thread_create(tq) == NULL) {
                /* restore spawning count if failed */
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
                tq->tq_nspawn--;
                spin_unlock_irqrestore(&tq->tq_lock, flags);
        }
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to avoid
 * deadlocks between thread creation and memory reclaim. The system_taskq
 * which is also a dynamic taskq cannot be safely used for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
        int spawning = 0;

        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
                    tq, TQ_NOSLEEP);
        }

        return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
            tqt_thread_list) == tqt)
                return (0);

        return
            ((tq->tq_nspawn == 0) &&    /* No threads are being spawned */
            (tq->tq_nactive == 0) &&    /* No threads are handling tasks */
            (tq->tq_nthreads > 1) &&    /* More than 1 thread is running */
            (!taskq_next_ent(tq)) &&    /* There are no pending tasks */
            (spl_taskq_thread_dynamic));        /* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskq_thread_t *tqt = args;
        taskq_t *tq;
        taskq_ent_t *t;
        int seq_tasks = 0;
        unsigned long flags;
        taskq_ent_t dup_task = {};

        ASSERT(tqt);
        ASSERT(tqt->tqt_tq);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;

        (void) spl_fstrans_mark();

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        tsd_set(taskq_tsd, tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        /*
         * If we are dynamically spawned, decrease spawning count. Note that
         * we could be created during taskq_create, in which case we shouldn't
         * do the decrement. But it's fine because taskq_create will reset
         * tq_nspawn later.
         */
        if (tq->tq_flags & TASKQ_DYNAMIC)
                tq->tq_nspawn--;

        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)
                goto error;

        tq->tq_nthreads++;
        list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {

                        if (taskq_thread_should_stop(tq, tqt)) {
                                wake_up_all(&tq->tq_wait_waitq);
                                break;
                        }

                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        schedule();
                        seq_tasks = 0;

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                if ((t = taskq_next_ent(tq)) != NULL) {
                        list_del_init(&t->tqent_list);

                        /*
                         * A TQENT_FLAG_PREALLOC task may be reused or freed
                         * during the task function call. Store tqent_id and
                         * tqent_flags here.
                         *
                         * Also use an on stack taskq_ent_t for tqt_task
                         * assignment in this case; we want to make sure
                         * to duplicate all fields, so the values are
                         * correct when it's accessed via DTRACE_PROBE*.
                         */
                        tqt->tqt_id = t->tqent_id;
                        tqt->tqt_flags = t->tqent_flags;

                        if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
                                dup_task = *t;
                                t = &dup_task;
                        }
                        tqt->tqt_task = t;

                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;

                        /* For prealloc'd tasks, we don't free anything. */
                        if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                                task_done(tq, t);

                        /*
                         * When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id
                         */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
                        }

                        /* Spawn additional taskq threads if required. */
                        if ((++seq_tasks) > spl_taskq_thread_sequential &&
                            taskq_thread_spawn(tq))
                                seq_tasks = 0;

                        tqt->tqt_id = TASKQID_INVALID;
                        tqt->tqt_flags = 0;
                        wake_up_all(&tq->tq_wait_waitq);
                } else {
                        if (taskq_thread_should_stop(tq, tqt))
                                break;
                }

                set_current_state(TASK_INTERRUPTIBLE);

        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        list_del_init(&tqt->tqt_thread_list);
error:
        kmem_free(tqt, sizeof (taskq_thread_t));
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        tsd_set(taskq_tsd, NULL);
        thread_exit();

        return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
        static int last_used_cpu = 0;
        taskq_thread_t *tqt;

        tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
        INIT_LIST_HEAD(&tqt->tqt_thread_list);
        INIT_LIST_HEAD(&tqt->tqt_active_list);
        tqt->tqt_tq = tq;
        tqt->tqt_id = TASKQID_INVALID;

        tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
            "%s", tq->tq_name);
        if (tqt->tqt_thread == NULL) {
                kmem_free(tqt, sizeof (taskq_thread_t));
                return (NULL);
        }

        if (spl_taskq_thread_bind) {
                last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
                kthread_bind(tqt->tqt_thread, last_used_cpu);
        }

        if (spl_taskq_thread_priority)
                set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

        wake_up_process(tqt->tqt_thread);

        return (tqt);
}

taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        taskq_thread_t *tqt;
        int count = 0, rc = 0, i;
        unsigned long irqflags;
        int nthreads = threads_arg;

        ASSERT(name != NULL);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(threads_arg, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
        if (tq == NULL)
                return (NULL);

        tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
        if (flags & TASKQ_THREADS_CPU_PCT) {
                tq->tq_hp_support = B_TRUE;
                if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
                    &tq->tq_hp_cb_node) != 0) {
                        kmem_free(tq, sizeof (*tq));
                        return (NULL);
                }
        }
#endif

        spin_lock_init(&tq->tq_lock);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = kmem_strdup(name);
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_nspawn = 0;
        tq->tq_maxthreads = nthreads;
        tq->tq_cpu_pct = threads_arg;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TASKQ_ACTIVE);
        tq->tq_next_id = TASKQID_INITIAL;
        tq->tq_lowest_id = TASKQID_INITIAL;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);
        tq->tq_lock_class = TQ_LOCK_GENERAL;
        INIT_LIST_HEAD(&tq->tq_taskqs);

        if (flags & TASKQ_PREPOPULATE) {
                spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
                    tq->tq_lock_class);

                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
                            &irqflags));

                spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        }

        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
                nthreads = 1;

        for (i = 0; i < nthreads; i++) {
                tqt = taskq_thread_create(tq);
                if (tqt == NULL)
                        rc = 1;
                else
                        count++;
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
        /*
         * taskq_thread might have touched nspawn, but we don't want it to,
         * because these threads are not dynamically spawned. So we reset it
         * to 0.
         */
        tq->tq_nspawn = 0;

        if (rc) {
                taskq_destroy(tq);
                tq = NULL;
        } else {
                down_write(&tq_list_sem);
                tq->tq_instance = taskq_find_by_name(name) + 1;
                list_add_tail(&tq->tq_taskqs, &tq_list);
                up_write(&tq_list_sem);
        }

        return (tq);
}
EXPORT_SYMBOL(taskq_create);
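
/*
 * Illustrative sketch of creating and tearing down a taskq (hypothetical
 * name, sizes, and priority; maxclsyspri is assumed to be available to the
 * caller): 8 threads, a prepopulated entry cache, and a thread count that
 * may grow and shrink dynamically.
 *
 *      taskq_t *tq = taskq_create("my_taskq", 8, maxclsyspri,
 *          8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
 *      ...
 *      taskq_destroy(tq);
 */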

void
taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
        unsigned long flags;

        ASSERT(tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_flags &= ~TASKQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, flags);

#ifdef HAVE_CPU_HOTPLUG
        if (tq->tq_hp_support) {
                VERIFY0(cpuhp_state_remove_instance_nocalls(
                    spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
        }
#endif
        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added nor may
         * new worker threads be spawned for dynamic taskq.
         */
        if (dynamic_taskq != NULL)
                taskq_wait_outstanding(dynamic_taskq, 0);

        taskq_wait(tq);

        /* remove taskq from global list used by the kstats */
        down_write(&tq_list_sem);
        list_del(&tq->tq_taskqs);
        up_write(&tq_list_sem);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        /* wait for spawning threads to insert themselves to the list */
        while (tq->tq_nspawn) {
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                schedule_timeout_interruptible(1);
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
        }

        /*
         * Signal each thread to exit and block until it does. Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t. This allows for idle threads to opt to remove
         * themselves from the taskq. They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, flags);

                kthread_stop(thread);

                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT0(tq->tq_nthreads);
        ASSERT0(tq->tq_nalloc);
        ASSERT0(tq->tq_nspawn);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        kmem_strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API Change
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
        int ret;
        taskq_t *tq = NULL;
        taskq_ent_t *t;
        unsigned long flags;

        ret = param_set_uint(val, kp);
        if (ret < 0 || !spl_taskq_kick)
                return (ret);
        /* reset value */
        spl_taskq_kick = 0;

        down_read(&tq_list_sem);
        list_for_each_entry(tq, &tq_list, tq_taskqs) {
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
                /* Check if the first pending is older than 5 seconds */
                t = taskq_next_ent(tq);
                if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
                        (void) taskq_thread_spawn(tq);
                        printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
                            tq->tq_name, tq->tq_instance);
                }
                spin_unlock_irqrestore(&tq->tq_lock, flags);
        }
        up_read(&tq_list_sem);
        return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
        .set = param_set_taskq_kick,
        .get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
        "Write nonzero to kick stuck taskqs to spawn more threads");
1234eda14cbcSMatt Macy static unsigned int spl_taskq_kick = 0;
1235eda14cbcSMatt Macy 
1236eda14cbcSMatt Macy /*
1237eda14cbcSMatt Macy  * 2.6.36 API Change
1238eda14cbcSMatt Macy  * module_param_cb is introduced to take kernel_param_ops and
1239eda14cbcSMatt Macy  * module_param_call is marked as obsolete. Also set and get operations
1240eda14cbcSMatt Macy  * were changed to take a 'const struct kernel_param *'.
1241eda14cbcSMatt Macy  */
1242eda14cbcSMatt Macy static int
1243eda14cbcSMatt Macy #ifdef module_param_cb
1244eda14cbcSMatt Macy param_set_taskq_kick(const char *val, const struct kernel_param *kp)
1245eda14cbcSMatt Macy #else
1246eda14cbcSMatt Macy param_set_taskq_kick(const char *val, struct kernel_param *kp)
1247eda14cbcSMatt Macy #endif
1248eda14cbcSMatt Macy {
1249eda14cbcSMatt Macy int ret;
1250eda14cbcSMatt Macy taskq_t *tq = NULL;
1251eda14cbcSMatt Macy taskq_ent_t *t;
1252eda14cbcSMatt Macy unsigned long flags;
1253eda14cbcSMatt Macy 
1254eda14cbcSMatt Macy ret = param_set_uint(val, kp);
1255eda14cbcSMatt Macy if (ret < 0 || !spl_taskq_kick)
1256eda14cbcSMatt Macy return (ret);
1257eda14cbcSMatt Macy /* reset value */
1258eda14cbcSMatt Macy spl_taskq_kick = 0;
1259eda14cbcSMatt Macy 
1260eda14cbcSMatt Macy down_read(&tq_list_sem);
1261eda14cbcSMatt Macy list_for_each_entry(tq, &tq_list, tq_taskqs) {
1262eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags,
1263eda14cbcSMatt Macy tq->tq_lock_class);
1264eda14cbcSMatt Macy /* Check if the first pending is older than 5 seconds */
1265eda14cbcSMatt Macy t = taskq_next_ent(tq);
1266eda14cbcSMatt Macy if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
1267eda14cbcSMatt Macy (void) taskq_thread_spawn(tq);
1268eda14cbcSMatt Macy printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
1269eda14cbcSMatt Macy tq->tq_name, tq->tq_instance);
1270eda14cbcSMatt Macy }
1271eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags);
1272eda14cbcSMatt Macy }
1273eda14cbcSMatt Macy up_read(&tq_list_sem);
1274eda14cbcSMatt Macy return (ret);
1275eda14cbcSMatt Macy }
1276eda14cbcSMatt Macy 
1277eda14cbcSMatt Macy #ifdef module_param_cb
1278eda14cbcSMatt Macy static const struct kernel_param_ops param_ops_taskq_kick = {
1279eda14cbcSMatt Macy .set = param_set_taskq_kick,
1280eda14cbcSMatt Macy .get = param_get_uint,
1281eda14cbcSMatt Macy };
1282eda14cbcSMatt Macy module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
1283eda14cbcSMatt Macy #else
1284eda14cbcSMatt Macy module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
1285eda14cbcSMatt Macy &spl_taskq_kick, 0644);
1286eda14cbcSMatt Macy #endif
1287eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_kick,
1288eda14cbcSMatt Macy "Write nonzero to kick stuck taskqs to spawn more threads");
1289eda14cbcSMatt Macy 
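/*
 * Illustrative note (not part of the original listing): once the module
 * is loaded, the handler above can be triggered from user space through
 * the usual module parameter interface, assumed here to be the standard
 * sysfs location for spl parameters:
 *
 *	echo 1 > /sys/module/spl/parameters/spl_taskq_kick
 *
 * Every taskq whose oldest pending entry is more than 5 seconds old then
 * has one extra worker thread spawned, and a message is logged.
 */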
12907877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG
12917877fdebSMatt Macy /*
12927877fdebSMatt Macy  * This callback will be called exactly once for each core that comes online,
12937877fdebSMatt Macy  * for each dynamic taskq. We attempt to expand taskqs that have
12947877fdebSMatt Macy  * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
12957877fdebSMatt Macy  * time, to correctly determine whether or not to add a thread.
12967877fdebSMatt Macy  */
12977877fdebSMatt Macy static int
12987877fdebSMatt Macy spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
12997877fdebSMatt Macy {
13007877fdebSMatt Macy taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
13017877fdebSMatt Macy unsigned long flags;
13027877fdebSMatt Macy int err = 0;
13037877fdebSMatt Macy 
13047877fdebSMatt Macy ASSERT(tq);
13057877fdebSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
13067877fdebSMatt Macy 
130781b22a98SMartin Matuska if (!(tq->tq_flags & TASKQ_ACTIVE)) {
130881b22a98SMartin Matuska spin_unlock_irqrestore(&tq->tq_lock, flags);
130981b22a98SMartin Matuska return (err);
131081b22a98SMartin Matuska }
13117877fdebSMatt Macy 
13127877fdebSMatt Macy ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
13137877fdebSMatt Macy int nthreads = MIN(tq->tq_cpu_pct, 100);
13147877fdebSMatt Macy nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
13157877fdebSMatt Macy tq->tq_maxthreads = nthreads;
13167877fdebSMatt Macy 
13177877fdebSMatt Macy if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
13187877fdebSMatt Macy tq->tq_maxthreads > tq->tq_nthreads) {
131981b22a98SMartin Matuska spin_unlock_irqrestore(&tq->tq_lock, flags);
13207877fdebSMatt Macy taskq_thread_t *tqt = taskq_thread_create(tq);
13217877fdebSMatt Macy if (tqt == NULL)
13227877fdebSMatt Macy err = -1;
132381b22a98SMartin Matuska return (err);
13247877fdebSMatt Macy }
13257877fdebSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags);
13267877fdebSMatt Macy return (err);
13277877fdebSMatt Macy }
13287877fdebSMatt Macy 
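/*
 * Worked example for the recalculation in spl_taskq_expand() above
 * (the numbers are illustrative): if tq_cpu_pct is 75 and
 * num_online_cpus() + 1 evaluates to 9, then
 *
 *	nthreads = MAX((9 * MIN(75, 100)) / 100, 1) = MAX(6, 1) = 6
 *
 * so tq_maxthreads becomes 6, and a new thread is created only when the
 * taskq is not handled by dynamic spawning and tq_nthreads is still
 * below 6.
 */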
13297877fdebSMatt Macy /*
13307877fdebSMatt Macy  * While we don't support offlining CPUs, it is possible that CPUs will fail
13317877fdebSMatt Macy  * to online successfully. We do need to be able to handle this case
13327877fdebSMatt Macy  * gracefully.
13337877fdebSMatt Macy  */
13347877fdebSMatt Macy static int
13357877fdebSMatt Macy spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
13367877fdebSMatt Macy {
13377877fdebSMatt Macy taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
13387877fdebSMatt Macy unsigned long flags;
13397877fdebSMatt Macy 
13407877fdebSMatt Macy ASSERT(tq);
13417877fdebSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
13427877fdebSMatt Macy 
13437877fdebSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE))
13447877fdebSMatt Macy goto out;
13457877fdebSMatt Macy 
13467877fdebSMatt Macy ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
13477877fdebSMatt Macy int nthreads = MIN(tq->tq_cpu_pct, 100);
13487877fdebSMatt Macy nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
13497877fdebSMatt Macy tq->tq_maxthreads = nthreads;
13507877fdebSMatt Macy 
13517877fdebSMatt Macy if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
13527877fdebSMatt Macy tq->tq_maxthreads < tq->tq_nthreads) {
13537877fdebSMatt Macy ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
13547877fdebSMatt Macy taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
13557877fdebSMatt Macy taskq_thread_t, tqt_thread_list);
13567877fdebSMatt Macy struct task_struct *thread = tqt->tqt_thread;
13577877fdebSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags);
13587877fdebSMatt Macy 
13597877fdebSMatt Macy kthread_stop(thread);
13607877fdebSMatt Macy 
13617877fdebSMatt Macy return (0);
13627877fdebSMatt Macy }
13637877fdebSMatt Macy 
13647877fdebSMatt Macy out:
13657877fdebSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags);
13667877fdebSMatt Macy return (0);
13677877fdebSMatt Macy }
13687877fdebSMatt Macy #endif
13697877fdebSMatt Macy 
1370eda14cbcSMatt Macy int
1371eda14cbcSMatt Macy spl_taskq_init(void)
1372eda14cbcSMatt Macy {
1373eda14cbcSMatt Macy init_rwsem(&tq_list_sem);
1374eda14cbcSMatt Macy tsd_create(&taskq_tsd, NULL);
1375eda14cbcSMatt Macy 
13767877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG
13777877fdebSMatt Macy spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
13787877fdebSMatt Macy "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
13797877fdebSMatt Macy #endif
13807877fdebSMatt Macy 
1381eda14cbcSMatt Macy system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
1382eda14cbcSMatt Macy maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1383eda14cbcSMatt Macy if (system_taskq == NULL)
1384c7046f76SMartin Matuska return (-ENOMEM);
1385eda14cbcSMatt Macy 
1386eda14cbcSMatt Macy system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
1387eda14cbcSMatt Macy maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1388eda14cbcSMatt Macy if (system_delay_taskq == NULL) {
13897877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG
13907877fdebSMatt Macy cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
13917877fdebSMatt Macy #endif
1392eda14cbcSMatt Macy taskq_destroy(system_taskq);
1393c7046f76SMartin Matuska return (-ENOMEM);
1394eda14cbcSMatt Macy }
1395eda14cbcSMatt Macy 
1396eda14cbcSMatt Macy dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
1397eda14cbcSMatt Macy maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
1398eda14cbcSMatt Macy if (dynamic_taskq == NULL) {
13997877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG
14007877fdebSMatt Macy cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
14017877fdebSMatt Macy #endif
1402eda14cbcSMatt Macy taskq_destroy(system_taskq);
1403eda14cbcSMatt Macy taskq_destroy(system_delay_taskq);
1404c7046f76SMartin Matuska return (-ENOMEM);
1405eda14cbcSMatt Macy }
1406eda14cbcSMatt Macy 
1407eda14cbcSMatt Macy /*
1408eda14cbcSMatt Macy  * This is used to annotate tq_lock, so
1409eda14cbcSMatt Macy  *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
1410eda14cbcSMatt Macy  * does not trigger a lockdep warning re: possible recursive locking
1411eda14cbcSMatt Macy  */
1412eda14cbcSMatt Macy dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
1413eda14cbcSMatt Macy 
1414eda14cbcSMatt Macy return (0);
1415eda14cbcSMatt Macy }
1416eda14cbcSMatt Macy 
1417eda14cbcSMatt Macy void
1418eda14cbcSMatt Macy spl_taskq_fini(void)
1419eda14cbcSMatt Macy {
1420eda14cbcSMatt Macy taskq_destroy(dynamic_taskq);
1421eda14cbcSMatt Macy dynamic_taskq = NULL;
1422eda14cbcSMatt Macy 
1423eda14cbcSMatt Macy taskq_destroy(system_delay_taskq);
1424eda14cbcSMatt Macy system_delay_taskq = NULL;
1425eda14cbcSMatt Macy 
1426eda14cbcSMatt Macy taskq_destroy(system_taskq);
1427eda14cbcSMatt Macy system_taskq = NULL;
1428eda14cbcSMatt Macy 
1429eda14cbcSMatt Macy tsd_destroy(&taskq_tsd);
14307877fdebSMatt Macy 
14317877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG
14327877fdebSMatt Macy cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
14337877fdebSMatt Macy spl_taskq_cpuhp_state = 0;
14347877fdebSMatt Macy #endif
1435eda14cbcSMatt Macy }
1436
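/*
 * Illustrative sketch (not part of the original listing): how the init
 * and fini hooks above are expected to be wired into the module's
 * overall setup and teardown. The spl_init()/spl_fini() wrappers shown
 * here are assumptions about the surrounding module code, not defined
 * in this file.
 *
 *	static int __init spl_init(void)
 *	{
 *		int rc;
 *
 *		if ((rc = spl_taskq_init()))
 *			return (rc);
 *		// ... initialize the remaining SPL subsystems ...
 *		return (0);
 *	}
 *
 *	static void __exit spl_fini(void)
 *	{
 *		// ... tear down the other subsystems first ...
 *		spl_taskq_fini();
 *	}
 */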