1eda14cbcSMatt Macy /* 2eda14cbcSMatt Macy * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. 3eda14cbcSMatt Macy * Copyright (C) 2007 The Regents of the University of California. 4eda14cbcSMatt Macy * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 5eda14cbcSMatt Macy * Written by Brian Behlendorf <behlendorf1@llnl.gov>. 6eda14cbcSMatt Macy * UCRL-CODE-235197 7eda14cbcSMatt Macy * 8eda14cbcSMatt Macy * This file is part of the SPL, Solaris Porting Layer. 9eda14cbcSMatt Macy * 10eda14cbcSMatt Macy * The SPL is free software; you can redistribute it and/or modify it 11eda14cbcSMatt Macy * under the terms of the GNU General Public License as published by the 12eda14cbcSMatt Macy * Free Software Foundation; either version 2 of the License, or (at your 13eda14cbcSMatt Macy * option) any later version. 14eda14cbcSMatt Macy * 15eda14cbcSMatt Macy * The SPL is distributed in the hope that it will be useful, but WITHOUT 16eda14cbcSMatt Macy * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17eda14cbcSMatt Macy * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18eda14cbcSMatt Macy * for more details. 19eda14cbcSMatt Macy * 20eda14cbcSMatt Macy * You should have received a copy of the GNU General Public License along 21eda14cbcSMatt Macy * with the SPL. If not, see <http://www.gnu.org/licenses/>. 22eda14cbcSMatt Macy * 23eda14cbcSMatt Macy * Solaris Porting Layer (SPL) Task Queue Implementation. 24eda14cbcSMatt Macy */ 25eda14cbcSMatt Macy 26eda14cbcSMatt Macy #include <sys/timer.h> 27eda14cbcSMatt Macy #include <sys/taskq.h> 28eda14cbcSMatt Macy #include <sys/kmem.h> 29eda14cbcSMatt Macy #include <sys/tsd.h> 30eda14cbcSMatt Macy #include <sys/trace_spl.h> 317877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG 327877fdebSMatt Macy #include <linux/cpuhotplug.h> 337877fdebSMatt Macy #endif 34eda14cbcSMatt Macy 35eda14cbcSMatt Macy int spl_taskq_thread_bind = 0; 36eda14cbcSMatt Macy module_param(spl_taskq_thread_bind, int, 0644); 37eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default"); 38eda14cbcSMatt Macy 39eda14cbcSMatt Macy 40eda14cbcSMatt Macy int spl_taskq_thread_dynamic = 1; 417877fdebSMatt Macy module_param(spl_taskq_thread_dynamic, int, 0444); 42eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads"); 43eda14cbcSMatt Macy 44eda14cbcSMatt Macy int spl_taskq_thread_priority = 1; 45eda14cbcSMatt Macy module_param(spl_taskq_thread_priority, int, 0644); 46eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_priority, 47eda14cbcSMatt Macy "Allow non-default priority for taskq threads"); 48eda14cbcSMatt Macy 49eda14cbcSMatt Macy int spl_taskq_thread_sequential = 4; 50eda14cbcSMatt Macy module_param(spl_taskq_thread_sequential, int, 0644); 51eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_sequential, 52eda14cbcSMatt Macy "Create new taskq threads after N sequential tasks"); 53eda14cbcSMatt Macy 54*681ce946SMartin Matuska /* 55*681ce946SMartin Matuska * Global system-wide dynamic task queue available for all consumers. This 56*681ce946SMartin Matuska * taskq is not intended for long-running tasks; instead, a dedicated taskq 57*681ce946SMartin Matuska * should be created. 
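 *
 * Illustrative sketch only (not part of this file; my_func, my_arg and the
 * taskq name are hypothetical, and the priority/flag values shown are merely
 * typical): short-lived work can be handed to the shared queue, while a
 * long-running consumer creates its own dedicated taskq:
 *
 *	(void) taskq_dispatch(system_taskq, my_func, my_arg, TQ_SLEEP);
 *
 *	taskq_t *mytq = taskq_create("my_consumer", 4, maxclsyspri,
 *	    0, INT_MAX, TASKQ_PREPOPULATE);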
58*681ce946SMartin Matuska  */
59eda14cbcSMatt Macy taskq_t *system_taskq;
60eda14cbcSMatt Macy EXPORT_SYMBOL(system_taskq);
61eda14cbcSMatt Macy /* Global dynamic task queue for long delay */
62eda14cbcSMatt Macy taskq_t *system_delay_taskq;
63eda14cbcSMatt Macy EXPORT_SYMBOL(system_delay_taskq);
64eda14cbcSMatt Macy 
65eda14cbcSMatt Macy /* Private dedicated taskq for creating new taskq threads on demand. */
66eda14cbcSMatt Macy static taskq_t *dynamic_taskq;
67eda14cbcSMatt Macy static taskq_thread_t *taskq_thread_create(taskq_t *);
68eda14cbcSMatt Macy 
697877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG
707877fdebSMatt Macy /* Multi-callback id for cpu hotplugging. */
717877fdebSMatt Macy static int spl_taskq_cpuhp_state;
727877fdebSMatt Macy #endif
737877fdebSMatt Macy 
74eda14cbcSMatt Macy /* List of all taskqs */
75eda14cbcSMatt Macy LIST_HEAD(tq_list);
76eda14cbcSMatt Macy struct rw_semaphore tq_list_sem;
77eda14cbcSMatt Macy static uint_t taskq_tsd;
78eda14cbcSMatt Macy 
79eda14cbcSMatt Macy static int
80eda14cbcSMatt Macy task_km_flags(uint_t flags)
81eda14cbcSMatt Macy {
82eda14cbcSMatt Macy 	if (flags & TQ_NOSLEEP)
83eda14cbcSMatt Macy 		return (KM_NOSLEEP);
84eda14cbcSMatt Macy 
85eda14cbcSMatt Macy 	if (flags & TQ_PUSHPAGE)
86eda14cbcSMatt Macy 		return (KM_PUSHPAGE);
87eda14cbcSMatt Macy 
88eda14cbcSMatt Macy 	return (KM_SLEEP);
89eda14cbcSMatt Macy }
90eda14cbcSMatt Macy 
91eda14cbcSMatt Macy /*
92eda14cbcSMatt Macy  * taskq_find_by_name - Find the largest instance number of a named taskq.
93eda14cbcSMatt Macy  */
94eda14cbcSMatt Macy static int
95eda14cbcSMatt Macy taskq_find_by_name(const char *name)
96eda14cbcSMatt Macy {
97eda14cbcSMatt Macy 	struct list_head *tql = NULL;
98eda14cbcSMatt Macy 	taskq_t *tq;
99eda14cbcSMatt Macy 
100eda14cbcSMatt Macy 	list_for_each_prev(tql, &tq_list) {
101eda14cbcSMatt Macy 		tq = list_entry(tql, taskq_t, tq_taskqs);
102eda14cbcSMatt Macy 		if (strcmp(name, tq->tq_name) == 0)
103eda14cbcSMatt Macy 			return (tq->tq_instance);
104eda14cbcSMatt Macy 	}
105eda14cbcSMatt Macy 	return (-1);
106eda14cbcSMatt Macy }
107eda14cbcSMatt Macy 
108eda14cbcSMatt Macy /*
109eda14cbcSMatt Macy  * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
110eda14cbcSMatt Macy  * which is not attached to the free, work, or pending taskq lists.
111eda14cbcSMatt Macy */ 112eda14cbcSMatt Macy static taskq_ent_t * 113eda14cbcSMatt Macy task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags) 114eda14cbcSMatt Macy { 115eda14cbcSMatt Macy taskq_ent_t *t; 116eda14cbcSMatt Macy int count = 0; 117eda14cbcSMatt Macy 118eda14cbcSMatt Macy ASSERT(tq); 119eda14cbcSMatt Macy retry: 120eda14cbcSMatt Macy /* Acquire taskq_ent_t's from free list if available */ 121eda14cbcSMatt Macy if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) { 122eda14cbcSMatt Macy t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list); 123eda14cbcSMatt Macy 124eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 125eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL)); 126eda14cbcSMatt Macy ASSERT(!timer_pending(&t->tqent_timer)); 127eda14cbcSMatt Macy 128eda14cbcSMatt Macy list_del_init(&t->tqent_list); 129eda14cbcSMatt Macy return (t); 130eda14cbcSMatt Macy } 131eda14cbcSMatt Macy 132eda14cbcSMatt Macy /* Free list is empty and memory allocations are prohibited */ 133eda14cbcSMatt Macy if (flags & TQ_NOALLOC) 134eda14cbcSMatt Macy return (NULL); 135eda14cbcSMatt Macy 136eda14cbcSMatt Macy /* Hit maximum taskq_ent_t pool size */ 137eda14cbcSMatt Macy if (tq->tq_nalloc >= tq->tq_maxalloc) { 138eda14cbcSMatt Macy if (flags & TQ_NOSLEEP) 139eda14cbcSMatt Macy return (NULL); 140eda14cbcSMatt Macy 141eda14cbcSMatt Macy /* 142eda14cbcSMatt Macy * Sleep periodically polling the free list for an available 143eda14cbcSMatt Macy * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed 144eda14cbcSMatt Macy * but we cannot block forever waiting for an taskq_ent_t to 145eda14cbcSMatt Macy * show up in the free list, otherwise a deadlock can happen. 146eda14cbcSMatt Macy * 147eda14cbcSMatt Macy * Therefore, we need to allocate a new task even if the number 148eda14cbcSMatt Macy * of allocated tasks is above tq->tq_maxalloc, but we still 149eda14cbcSMatt Macy * end up delaying the task allocation by one second, thereby 150eda14cbcSMatt Macy * throttling the task dispatch rate. 151eda14cbcSMatt Macy */ 152eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, *irqflags); 153eda14cbcSMatt Macy schedule_timeout(HZ / 100); 154eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, 155eda14cbcSMatt Macy tq->tq_lock_class); 156eda14cbcSMatt Macy if (count < 100) { 157eda14cbcSMatt Macy count++; 158eda14cbcSMatt Macy goto retry; 159eda14cbcSMatt Macy } 160eda14cbcSMatt Macy } 161eda14cbcSMatt Macy 162eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, *irqflags); 163eda14cbcSMatt Macy t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags)); 164eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class); 165eda14cbcSMatt Macy 166eda14cbcSMatt Macy if (t) { 167eda14cbcSMatt Macy taskq_init_ent(t); 168eda14cbcSMatt Macy tq->tq_nalloc++; 169eda14cbcSMatt Macy } 170eda14cbcSMatt Macy 171eda14cbcSMatt Macy return (t); 172eda14cbcSMatt Macy } 173eda14cbcSMatt Macy 174eda14cbcSMatt Macy /* 175eda14cbcSMatt Macy * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t 176eda14cbcSMatt Macy * to already be removed from the free, work, or pending taskq lists. 
177eda14cbcSMatt Macy */ 178eda14cbcSMatt Macy static void 179eda14cbcSMatt Macy task_free(taskq_t *tq, taskq_ent_t *t) 180eda14cbcSMatt Macy { 181eda14cbcSMatt Macy ASSERT(tq); 182eda14cbcSMatt Macy ASSERT(t); 183eda14cbcSMatt Macy ASSERT(list_empty(&t->tqent_list)); 184eda14cbcSMatt Macy ASSERT(!timer_pending(&t->tqent_timer)); 185eda14cbcSMatt Macy 186eda14cbcSMatt Macy kmem_free(t, sizeof (taskq_ent_t)); 187eda14cbcSMatt Macy tq->tq_nalloc--; 188eda14cbcSMatt Macy } 189eda14cbcSMatt Macy 190eda14cbcSMatt Macy /* 191eda14cbcSMatt Macy * NOTE: Must be called with tq->tq_lock held, either destroys the 192eda14cbcSMatt Macy * taskq_ent_t if too many exist or moves it to the free list for later use. 193eda14cbcSMatt Macy */ 194eda14cbcSMatt Macy static void 195eda14cbcSMatt Macy task_done(taskq_t *tq, taskq_ent_t *t) 196eda14cbcSMatt Macy { 197eda14cbcSMatt Macy ASSERT(tq); 198eda14cbcSMatt Macy ASSERT(t); 199eda14cbcSMatt Macy 200eda14cbcSMatt Macy /* Wake tasks blocked in taskq_wait_id() */ 201eda14cbcSMatt Macy wake_up_all(&t->tqent_waitq); 202eda14cbcSMatt Macy 203eda14cbcSMatt Macy list_del_init(&t->tqent_list); 204eda14cbcSMatt Macy 205eda14cbcSMatt Macy if (tq->tq_nalloc <= tq->tq_minalloc) { 206eda14cbcSMatt Macy t->tqent_id = TASKQID_INVALID; 207eda14cbcSMatt Macy t->tqent_func = NULL; 208eda14cbcSMatt Macy t->tqent_arg = NULL; 209eda14cbcSMatt Macy t->tqent_flags = 0; 210eda14cbcSMatt Macy 211eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_free_list); 212eda14cbcSMatt Macy } else { 213eda14cbcSMatt Macy task_free(tq, t); 214eda14cbcSMatt Macy } 215eda14cbcSMatt Macy } 216eda14cbcSMatt Macy 217eda14cbcSMatt Macy /* 218eda14cbcSMatt Macy * When a delayed task timer expires remove it from the delay list and 219eda14cbcSMatt Macy * add it to the priority list in order for immediate processing. 220eda14cbcSMatt Macy */ 221eda14cbcSMatt Macy static void 222eda14cbcSMatt Macy task_expire_impl(taskq_ent_t *t) 223eda14cbcSMatt Macy { 224eda14cbcSMatt Macy taskq_ent_t *w; 225eda14cbcSMatt Macy taskq_t *tq = t->tqent_taskq; 226eda14cbcSMatt Macy struct list_head *l = NULL; 227eda14cbcSMatt Macy unsigned long flags; 228eda14cbcSMatt Macy 229eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 230eda14cbcSMatt Macy 231eda14cbcSMatt Macy if (t->tqent_flags & TQENT_FLAG_CANCEL) { 232eda14cbcSMatt Macy ASSERT(list_empty(&t->tqent_list)); 233eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 234eda14cbcSMatt Macy return; 235eda14cbcSMatt Macy } 236eda14cbcSMatt Macy 237eda14cbcSMatt Macy t->tqent_birth = jiffies; 238eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); 239eda14cbcSMatt Macy 240eda14cbcSMatt Macy /* 241eda14cbcSMatt Macy * The priority list must be maintained in strict task id order 242eda14cbcSMatt Macy * from lowest to highest for lowest_id to be easily calculable. 
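 *
 * For example (illustration only): if the priority list already holds
 * ids {3, 5, 9} and a delayed task with id 7 expires, the walk below
 * inserts it between 5 and 9, so the head of the list still carries the
 * lowest outstanding id.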
243eda14cbcSMatt Macy */ 244eda14cbcSMatt Macy list_del(&t->tqent_list); 245eda14cbcSMatt Macy list_for_each_prev(l, &tq->tq_prio_list) { 246eda14cbcSMatt Macy w = list_entry(l, taskq_ent_t, tqent_list); 247eda14cbcSMatt Macy if (w->tqent_id < t->tqent_id) { 248eda14cbcSMatt Macy list_add(&t->tqent_list, l); 249eda14cbcSMatt Macy break; 250eda14cbcSMatt Macy } 251eda14cbcSMatt Macy } 252eda14cbcSMatt Macy if (l == &tq->tq_prio_list) 253eda14cbcSMatt Macy list_add(&t->tqent_list, &tq->tq_prio_list); 254eda14cbcSMatt Macy 255eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 256eda14cbcSMatt Macy 257eda14cbcSMatt Macy wake_up(&tq->tq_work_waitq); 258eda14cbcSMatt Macy } 259eda14cbcSMatt Macy 260eda14cbcSMatt Macy static void 261eda14cbcSMatt Macy task_expire(spl_timer_list_t tl) 262eda14cbcSMatt Macy { 263eda14cbcSMatt Macy struct timer_list *tmr = (struct timer_list *)tl; 264eda14cbcSMatt Macy taskq_ent_t *t = from_timer(t, tmr, tqent_timer); 265eda14cbcSMatt Macy task_expire_impl(t); 266eda14cbcSMatt Macy } 267eda14cbcSMatt Macy 268eda14cbcSMatt Macy /* 269eda14cbcSMatt Macy * Returns the lowest incomplete taskqid_t. The taskqid_t may 270eda14cbcSMatt Macy * be queued on the pending list, on the priority list, on the 271eda14cbcSMatt Macy * delay list, or on the work list currently being handled, but 272eda14cbcSMatt Macy * it is not 100% complete yet. 273eda14cbcSMatt Macy */ 274eda14cbcSMatt Macy static taskqid_t 275eda14cbcSMatt Macy taskq_lowest_id(taskq_t *tq) 276eda14cbcSMatt Macy { 277eda14cbcSMatt Macy taskqid_t lowest_id = tq->tq_next_id; 278eda14cbcSMatt Macy taskq_ent_t *t; 279eda14cbcSMatt Macy taskq_thread_t *tqt; 280eda14cbcSMatt Macy 281eda14cbcSMatt Macy if (!list_empty(&tq->tq_pend_list)) { 282eda14cbcSMatt Macy t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list); 283eda14cbcSMatt Macy lowest_id = MIN(lowest_id, t->tqent_id); 284eda14cbcSMatt Macy } 285eda14cbcSMatt Macy 286eda14cbcSMatt Macy if (!list_empty(&tq->tq_prio_list)) { 287eda14cbcSMatt Macy t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list); 288eda14cbcSMatt Macy lowest_id = MIN(lowest_id, t->tqent_id); 289eda14cbcSMatt Macy } 290eda14cbcSMatt Macy 291eda14cbcSMatt Macy if (!list_empty(&tq->tq_delay_list)) { 292eda14cbcSMatt Macy t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list); 293eda14cbcSMatt Macy lowest_id = MIN(lowest_id, t->tqent_id); 294eda14cbcSMatt Macy } 295eda14cbcSMatt Macy 296eda14cbcSMatt Macy if (!list_empty(&tq->tq_active_list)) { 297eda14cbcSMatt Macy tqt = list_entry(tq->tq_active_list.next, taskq_thread_t, 298eda14cbcSMatt Macy tqt_active_list); 299eda14cbcSMatt Macy ASSERT(tqt->tqt_id != TASKQID_INVALID); 300eda14cbcSMatt Macy lowest_id = MIN(lowest_id, tqt->tqt_id); 301eda14cbcSMatt Macy } 302eda14cbcSMatt Macy 303eda14cbcSMatt Macy return (lowest_id); 304eda14cbcSMatt Macy } 305eda14cbcSMatt Macy 306eda14cbcSMatt Macy /* 307eda14cbcSMatt Macy * Insert a task into a list keeping the list sorted by increasing taskqid. 
308eda14cbcSMatt Macy */ 309eda14cbcSMatt Macy static void 310eda14cbcSMatt Macy taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt) 311eda14cbcSMatt Macy { 312eda14cbcSMatt Macy taskq_thread_t *w; 313eda14cbcSMatt Macy struct list_head *l = NULL; 314eda14cbcSMatt Macy 315eda14cbcSMatt Macy ASSERT(tq); 316eda14cbcSMatt Macy ASSERT(tqt); 317eda14cbcSMatt Macy 318eda14cbcSMatt Macy list_for_each_prev(l, &tq->tq_active_list) { 319eda14cbcSMatt Macy w = list_entry(l, taskq_thread_t, tqt_active_list); 320eda14cbcSMatt Macy if (w->tqt_id < tqt->tqt_id) { 321eda14cbcSMatt Macy list_add(&tqt->tqt_active_list, l); 322eda14cbcSMatt Macy break; 323eda14cbcSMatt Macy } 324eda14cbcSMatt Macy } 325eda14cbcSMatt Macy if (l == &tq->tq_active_list) 326eda14cbcSMatt Macy list_add(&tqt->tqt_active_list, &tq->tq_active_list); 327eda14cbcSMatt Macy } 328eda14cbcSMatt Macy 329eda14cbcSMatt Macy /* 330eda14cbcSMatt Macy * Find and return a task from the given list if it exists. The list 331eda14cbcSMatt Macy * must be in lowest to highest task id order. 332eda14cbcSMatt Macy */ 333eda14cbcSMatt Macy static taskq_ent_t * 334eda14cbcSMatt Macy taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id) 335eda14cbcSMatt Macy { 336eda14cbcSMatt Macy struct list_head *l = NULL; 337eda14cbcSMatt Macy taskq_ent_t *t; 338eda14cbcSMatt Macy 339eda14cbcSMatt Macy list_for_each(l, lh) { 340eda14cbcSMatt Macy t = list_entry(l, taskq_ent_t, tqent_list); 341eda14cbcSMatt Macy 342eda14cbcSMatt Macy if (t->tqent_id == id) 343eda14cbcSMatt Macy return (t); 344eda14cbcSMatt Macy 345eda14cbcSMatt Macy if (t->tqent_id > id) 346eda14cbcSMatt Macy break; 347eda14cbcSMatt Macy } 348eda14cbcSMatt Macy 349eda14cbcSMatt Macy return (NULL); 350eda14cbcSMatt Macy } 351eda14cbcSMatt Macy 352eda14cbcSMatt Macy /* 353eda14cbcSMatt Macy * Find an already dispatched task given the task id regardless of what 354eda14cbcSMatt Macy * state it is in. If a task is still pending it will be returned. 355eda14cbcSMatt Macy * If a task is executing, then -EBUSY will be returned instead. 356eda14cbcSMatt Macy * If the task has already been run then NULL is returned. 357eda14cbcSMatt Macy */ 358eda14cbcSMatt Macy static taskq_ent_t * 359eda14cbcSMatt Macy taskq_find(taskq_t *tq, taskqid_t id) 360eda14cbcSMatt Macy { 361eda14cbcSMatt Macy taskq_thread_t *tqt; 362eda14cbcSMatt Macy struct list_head *l = NULL; 363eda14cbcSMatt Macy taskq_ent_t *t; 364eda14cbcSMatt Macy 365eda14cbcSMatt Macy t = taskq_find_list(tq, &tq->tq_delay_list, id); 366eda14cbcSMatt Macy if (t) 367eda14cbcSMatt Macy return (t); 368eda14cbcSMatt Macy 369eda14cbcSMatt Macy t = taskq_find_list(tq, &tq->tq_prio_list, id); 370eda14cbcSMatt Macy if (t) 371eda14cbcSMatt Macy return (t); 372eda14cbcSMatt Macy 373eda14cbcSMatt Macy t = taskq_find_list(tq, &tq->tq_pend_list, id); 374eda14cbcSMatt Macy if (t) 375eda14cbcSMatt Macy return (t); 376eda14cbcSMatt Macy 377eda14cbcSMatt Macy list_for_each(l, &tq->tq_active_list) { 378eda14cbcSMatt Macy tqt = list_entry(l, taskq_thread_t, tqt_active_list); 379eda14cbcSMatt Macy if (tqt->tqt_id == id) { 380eda14cbcSMatt Macy /* 381eda14cbcSMatt Macy * Instead of returning tqt_task, we just return a non 382eda14cbcSMatt Macy * NULL value to prevent misuse, since tqt_task only 383eda14cbcSMatt Macy * has two valid fields. 
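 *
 * Callers therefore only compare the returned pointer, as
 * taskq_cancel_id() does below, e.g. (sketch):
 *
 *	t = taskq_find(tq, id);
 *	if (t == ERR_PTR(-EBUSY))
 *		return (EBUSY);		(the task is currently executing)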
384eda14cbcSMatt Macy 			 */
385eda14cbcSMatt Macy 			return (ERR_PTR(-EBUSY));
386eda14cbcSMatt Macy 		}
387eda14cbcSMatt Macy 	}
388eda14cbcSMatt Macy 
389eda14cbcSMatt Macy 	return (NULL);
390eda14cbcSMatt Macy }
391eda14cbcSMatt Macy 
392eda14cbcSMatt Macy /*
393eda14cbcSMatt Macy  * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
394eda14cbcSMatt Macy  * taskq_wait() functions below.
395eda14cbcSMatt Macy  *
396eda14cbcSMatt Macy  * Taskq waiting is accomplished by tracking the lowest outstanding task
397eda14cbcSMatt Macy  * id and the next available task id. As tasks are dispatched they are
398eda14cbcSMatt Macy  * added to the tail of the pending, priority, or delay lists. As worker
399eda14cbcSMatt Macy  * threads become available the tasks are removed from the heads of these
400eda14cbcSMatt Macy  * lists and linked to the worker threads. This ensures the lists are
401eda14cbcSMatt Macy  * kept sorted by lowest to highest task id.
402eda14cbcSMatt Macy  *
403eda14cbcSMatt Macy  * Therefore the lowest outstanding task id can be quickly determined by
404eda14cbcSMatt Macy  * checking the head item from all of these lists. This value is stored
405eda14cbcSMatt Macy  * with the taskq as the lowest id. It only needs to be recalculated when
406eda14cbcSMatt Macy  * either the task with the current lowest id completes or is canceled.
407eda14cbcSMatt Macy  *
408eda14cbcSMatt Macy  * By blocking until the lowest task id exceeds the passed task id the
409eda14cbcSMatt Macy  * taskq_wait_outstanding() function can be easily implemented. Similarly,
410eda14cbcSMatt Macy  * by blocking until the lowest task id matches the next task id taskq_wait()
411eda14cbcSMatt Macy  * can be implemented.
412eda14cbcSMatt Macy  *
413eda14cbcSMatt Macy  * Callers should be aware that when there are multiple worker threads it
414eda14cbcSMatt Macy  * is possible for larger task ids to complete before smaller ones. Also
415eda14cbcSMatt Macy  * when the taskq contains delay tasks with small task ids callers may
416eda14cbcSMatt Macy  * block for a considerable length of time waiting for them to expire and
417eda14cbcSMatt Macy  * execute.
418eda14cbcSMatt Macy  */
419eda14cbcSMatt Macy static int
420eda14cbcSMatt Macy taskq_wait_id_check(taskq_t *tq, taskqid_t id)
421eda14cbcSMatt Macy {
422eda14cbcSMatt Macy 	int rc;
423eda14cbcSMatt Macy 	unsigned long flags;
424eda14cbcSMatt Macy 
425eda14cbcSMatt Macy 	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
426eda14cbcSMatt Macy 	rc = (taskq_find(tq, id) == NULL);
427eda14cbcSMatt Macy 	spin_unlock_irqrestore(&tq->tq_lock, flags);
428eda14cbcSMatt Macy 
429eda14cbcSMatt Macy 	return (rc);
430eda14cbcSMatt Macy }
431eda14cbcSMatt Macy 
432eda14cbcSMatt Macy /*
433eda14cbcSMatt Macy  * The taskq_wait_id() function blocks until the passed task id completes.
434eda14cbcSMatt Macy  * This does not guarantee that all lower task ids have completed.
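 *
 * Usage sketch (my_func and my_arg are hypothetical):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != TASKQID_INVALID)
 *		taskq_wait_id(tq, id);
 *
 * The wait returns once that single task has run, even if earlier ids are
 * still outstanding.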
435eda14cbcSMatt Macy */ 436eda14cbcSMatt Macy void 437eda14cbcSMatt Macy taskq_wait_id(taskq_t *tq, taskqid_t id) 438eda14cbcSMatt Macy { 439eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id)); 440eda14cbcSMatt Macy } 441eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_wait_id); 442eda14cbcSMatt Macy 443eda14cbcSMatt Macy static int 444eda14cbcSMatt Macy taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id) 445eda14cbcSMatt Macy { 446eda14cbcSMatt Macy int rc; 447eda14cbcSMatt Macy unsigned long flags; 448eda14cbcSMatt Macy 449eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 450eda14cbcSMatt Macy rc = (id < tq->tq_lowest_id); 451eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 452eda14cbcSMatt Macy 453eda14cbcSMatt Macy return (rc); 454eda14cbcSMatt Macy } 455eda14cbcSMatt Macy 456eda14cbcSMatt Macy /* 457eda14cbcSMatt Macy * The taskq_wait_outstanding() function will block until all tasks with a 458eda14cbcSMatt Macy * lower taskqid than the passed 'id' have been completed. Note that all 459eda14cbcSMatt Macy * task id's are assigned monotonically at dispatch time. Zero may be 460eda14cbcSMatt Macy * passed for the id to indicate all tasks dispatch up to this point, 461eda14cbcSMatt Macy * but not after, should be waited for. 462eda14cbcSMatt Macy */ 463eda14cbcSMatt Macy void 464eda14cbcSMatt Macy taskq_wait_outstanding(taskq_t *tq, taskqid_t id) 465eda14cbcSMatt Macy { 466eda14cbcSMatt Macy id = id ? id : tq->tq_next_id - 1; 467eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id)); 468eda14cbcSMatt Macy } 469eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_wait_outstanding); 470eda14cbcSMatt Macy 471eda14cbcSMatt Macy static int 472eda14cbcSMatt Macy taskq_wait_check(taskq_t *tq) 473eda14cbcSMatt Macy { 474eda14cbcSMatt Macy int rc; 475eda14cbcSMatt Macy unsigned long flags; 476eda14cbcSMatt Macy 477eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 478eda14cbcSMatt Macy rc = (tq->tq_lowest_id == tq->tq_next_id); 479eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 480eda14cbcSMatt Macy 481eda14cbcSMatt Macy return (rc); 482eda14cbcSMatt Macy } 483eda14cbcSMatt Macy 484eda14cbcSMatt Macy /* 485eda14cbcSMatt Macy * The taskq_wait() function will block until the taskq is empty. 486eda14cbcSMatt Macy * This means that if a taskq re-dispatches work to itself taskq_wait() 487eda14cbcSMatt Macy * callers will block indefinitely. 488eda14cbcSMatt Macy */ 489eda14cbcSMatt Macy void 490eda14cbcSMatt Macy taskq_wait(taskq_t *tq) 491eda14cbcSMatt Macy { 492eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, taskq_wait_check(tq)); 493eda14cbcSMatt Macy } 494eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_wait); 495eda14cbcSMatt Macy 496eda14cbcSMatt Macy int 497eda14cbcSMatt Macy taskq_member(taskq_t *tq, kthread_t *t) 498eda14cbcSMatt Macy { 499eda14cbcSMatt Macy return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t)); 500eda14cbcSMatt Macy } 501eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_member); 502eda14cbcSMatt Macy 503eda14cbcSMatt Macy taskq_t * 504eda14cbcSMatt Macy taskq_of_curthread(void) 505eda14cbcSMatt Macy { 506eda14cbcSMatt Macy return (tsd_get(taskq_tsd)); 507eda14cbcSMatt Macy } 508eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_of_curthread); 509eda14cbcSMatt Macy 510eda14cbcSMatt Macy /* 511eda14cbcSMatt Macy * Cancel an already dispatched task given the task id. 
Still pending tasks 512eda14cbcSMatt Macy * will be immediately canceled, and if the task is active the function will 513eda14cbcSMatt Macy * block until it completes. Preallocated tasks which are canceled must be 514eda14cbcSMatt Macy * freed by the caller. 515eda14cbcSMatt Macy */ 516eda14cbcSMatt Macy int 517eda14cbcSMatt Macy taskq_cancel_id(taskq_t *tq, taskqid_t id) 518eda14cbcSMatt Macy { 519eda14cbcSMatt Macy taskq_ent_t *t; 520eda14cbcSMatt Macy int rc = ENOENT; 521eda14cbcSMatt Macy unsigned long flags; 522eda14cbcSMatt Macy 523eda14cbcSMatt Macy ASSERT(tq); 524eda14cbcSMatt Macy 525eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 526eda14cbcSMatt Macy t = taskq_find(tq, id); 527eda14cbcSMatt Macy if (t && t != ERR_PTR(-EBUSY)) { 528eda14cbcSMatt Macy list_del_init(&t->tqent_list); 529eda14cbcSMatt Macy t->tqent_flags |= TQENT_FLAG_CANCEL; 530eda14cbcSMatt Macy 531eda14cbcSMatt Macy /* 532eda14cbcSMatt Macy * When canceling the lowest outstanding task id we 533eda14cbcSMatt Macy * must recalculate the new lowest outstanding id. 534eda14cbcSMatt Macy */ 535eda14cbcSMatt Macy if (tq->tq_lowest_id == t->tqent_id) { 536eda14cbcSMatt Macy tq->tq_lowest_id = taskq_lowest_id(tq); 537eda14cbcSMatt Macy ASSERT3S(tq->tq_lowest_id, >, t->tqent_id); 538eda14cbcSMatt Macy } 539eda14cbcSMatt Macy 540eda14cbcSMatt Macy /* 541eda14cbcSMatt Macy * The task_expire() function takes the tq->tq_lock so drop 542eda14cbcSMatt Macy * drop the lock before synchronously cancelling the timer. 543eda14cbcSMatt Macy */ 544eda14cbcSMatt Macy if (timer_pending(&t->tqent_timer)) { 545eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 546eda14cbcSMatt Macy del_timer_sync(&t->tqent_timer); 547eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 548eda14cbcSMatt Macy tq->tq_lock_class); 549eda14cbcSMatt Macy } 550eda14cbcSMatt Macy 551eda14cbcSMatt Macy if (!(t->tqent_flags & TQENT_FLAG_PREALLOC)) 552eda14cbcSMatt Macy task_done(tq, t); 553eda14cbcSMatt Macy 554eda14cbcSMatt Macy rc = 0; 555eda14cbcSMatt Macy } 556eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 557eda14cbcSMatt Macy 558eda14cbcSMatt Macy if (t == ERR_PTR(-EBUSY)) { 559eda14cbcSMatt Macy taskq_wait_id(tq, id); 560eda14cbcSMatt Macy rc = EBUSY; 561eda14cbcSMatt Macy } 562eda14cbcSMatt Macy 563eda14cbcSMatt Macy return (rc); 564eda14cbcSMatt Macy } 565eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_cancel_id); 566eda14cbcSMatt Macy 567eda14cbcSMatt Macy static int taskq_thread_spawn(taskq_t *tq); 568eda14cbcSMatt Macy 569eda14cbcSMatt Macy taskqid_t 570eda14cbcSMatt Macy taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) 571eda14cbcSMatt Macy { 572eda14cbcSMatt Macy taskq_ent_t *t; 573eda14cbcSMatt Macy taskqid_t rc = TASKQID_INVALID; 574eda14cbcSMatt Macy unsigned long irqflags; 575eda14cbcSMatt Macy 576eda14cbcSMatt Macy ASSERT(tq); 577eda14cbcSMatt Macy ASSERT(func); 578eda14cbcSMatt Macy 579eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); 580eda14cbcSMatt Macy 581eda14cbcSMatt Macy /* Taskq being destroyed and all tasks drained */ 582eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE)) 583eda14cbcSMatt Macy goto out; 584eda14cbcSMatt Macy 585eda14cbcSMatt Macy /* Do not queue the task unless there is idle thread for it */ 586eda14cbcSMatt Macy ASSERT(tq->tq_nactive <= tq->tq_nthreads); 587eda14cbcSMatt Macy if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) { 588eda14cbcSMatt Macy /* Dynamic taskq may be 
able to spawn another thread */ 589eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_DYNAMIC) || 590eda14cbcSMatt Macy taskq_thread_spawn(tq) == 0) 591eda14cbcSMatt Macy goto out; 592eda14cbcSMatt Macy } 593eda14cbcSMatt Macy 594eda14cbcSMatt Macy if ((t = task_alloc(tq, flags, &irqflags)) == NULL) 595eda14cbcSMatt Macy goto out; 596eda14cbcSMatt Macy 597eda14cbcSMatt Macy spin_lock(&t->tqent_lock); 598eda14cbcSMatt Macy 599eda14cbcSMatt Macy /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */ 600eda14cbcSMatt Macy if (flags & TQ_NOQUEUE) 601eda14cbcSMatt Macy list_add(&t->tqent_list, &tq->tq_prio_list); 602eda14cbcSMatt Macy /* Queue to the priority list instead of the pending list */ 603eda14cbcSMatt Macy else if (flags & TQ_FRONT) 604eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_prio_list); 605eda14cbcSMatt Macy else 606eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_pend_list); 607eda14cbcSMatt Macy 608eda14cbcSMatt Macy t->tqent_id = rc = tq->tq_next_id; 609eda14cbcSMatt Macy tq->tq_next_id++; 610eda14cbcSMatt Macy t->tqent_func = func; 611eda14cbcSMatt Macy t->tqent_arg = arg; 612eda14cbcSMatt Macy t->tqent_taskq = tq; 613eda14cbcSMatt Macy t->tqent_timer.function = NULL; 614eda14cbcSMatt Macy t->tqent_timer.expires = 0; 615eda14cbcSMatt Macy 616eda14cbcSMatt Macy t->tqent_birth = jiffies; 617eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); 618eda14cbcSMatt Macy 619eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 620eda14cbcSMatt Macy 621eda14cbcSMatt Macy spin_unlock(&t->tqent_lock); 622eda14cbcSMatt Macy 623eda14cbcSMatt Macy wake_up(&tq->tq_work_waitq); 624eda14cbcSMatt Macy out: 625eda14cbcSMatt Macy /* Spawn additional taskq threads if required. */ 626eda14cbcSMatt Macy if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads) 627eda14cbcSMatt Macy (void) taskq_thread_spawn(tq); 628eda14cbcSMatt Macy 629eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 630eda14cbcSMatt Macy return (rc); 631eda14cbcSMatt Macy } 632eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_dispatch); 633eda14cbcSMatt Macy 634eda14cbcSMatt Macy taskqid_t 635eda14cbcSMatt Macy taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, 636eda14cbcSMatt Macy uint_t flags, clock_t expire_time) 637eda14cbcSMatt Macy { 638eda14cbcSMatt Macy taskqid_t rc = TASKQID_INVALID; 639eda14cbcSMatt Macy taskq_ent_t *t; 640eda14cbcSMatt Macy unsigned long irqflags; 641eda14cbcSMatt Macy 642eda14cbcSMatt Macy ASSERT(tq); 643eda14cbcSMatt Macy ASSERT(func); 644eda14cbcSMatt Macy 645eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); 646eda14cbcSMatt Macy 647eda14cbcSMatt Macy /* Taskq being destroyed and all tasks drained */ 648eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE)) 649eda14cbcSMatt Macy goto out; 650eda14cbcSMatt Macy 651eda14cbcSMatt Macy if ((t = task_alloc(tq, flags, &irqflags)) == NULL) 652eda14cbcSMatt Macy goto out; 653eda14cbcSMatt Macy 654eda14cbcSMatt Macy spin_lock(&t->tqent_lock); 655eda14cbcSMatt Macy 656eda14cbcSMatt Macy /* Queue to the delay list for subsequent execution */ 657eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_delay_list); 658eda14cbcSMatt Macy 659eda14cbcSMatt Macy t->tqent_id = rc = tq->tq_next_id; 660eda14cbcSMatt Macy tq->tq_next_id++; 661eda14cbcSMatt Macy t->tqent_func = func; 662eda14cbcSMatt Macy t->tqent_arg = arg; 663eda14cbcSMatt Macy t->tqent_taskq = tq; 664eda14cbcSMatt Macy t->tqent_timer.function = task_expire; 665eda14cbcSMatt Macy 
t->tqent_timer.expires = (unsigned long)expire_time; 666eda14cbcSMatt Macy add_timer(&t->tqent_timer); 667eda14cbcSMatt Macy 668eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 669eda14cbcSMatt Macy 670eda14cbcSMatt Macy spin_unlock(&t->tqent_lock); 671eda14cbcSMatt Macy out: 672eda14cbcSMatt Macy /* Spawn additional taskq threads if required. */ 673eda14cbcSMatt Macy if (tq->tq_nactive == tq->tq_nthreads) 674eda14cbcSMatt Macy (void) taskq_thread_spawn(tq); 675eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 676eda14cbcSMatt Macy return (rc); 677eda14cbcSMatt Macy } 678eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_dispatch_delay); 679eda14cbcSMatt Macy 680eda14cbcSMatt Macy void 681eda14cbcSMatt Macy taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags, 682eda14cbcSMatt Macy taskq_ent_t *t) 683eda14cbcSMatt Macy { 684eda14cbcSMatt Macy unsigned long irqflags; 685eda14cbcSMatt Macy ASSERT(tq); 686eda14cbcSMatt Macy ASSERT(func); 687eda14cbcSMatt Macy 688eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, 689eda14cbcSMatt Macy tq->tq_lock_class); 690eda14cbcSMatt Macy 691eda14cbcSMatt Macy /* Taskq being destroyed and all tasks drained */ 692eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE)) { 693eda14cbcSMatt Macy t->tqent_id = TASKQID_INVALID; 694eda14cbcSMatt Macy goto out; 695eda14cbcSMatt Macy } 696eda14cbcSMatt Macy 697eda14cbcSMatt Macy if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) { 698eda14cbcSMatt Macy /* Dynamic taskq may be able to spawn another thread */ 699eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_DYNAMIC) || 700eda14cbcSMatt Macy taskq_thread_spawn(tq) == 0) 701eda14cbcSMatt Macy goto out2; 702eda14cbcSMatt Macy flags |= TQ_FRONT; 703eda14cbcSMatt Macy } 704eda14cbcSMatt Macy 705eda14cbcSMatt Macy spin_lock(&t->tqent_lock); 706eda14cbcSMatt Macy 707eda14cbcSMatt Macy /* 708eda14cbcSMatt Macy * Make sure the entry is not on some other taskq; it is important to 709eda14cbcSMatt Macy * ASSERT() under lock 710eda14cbcSMatt Macy */ 711eda14cbcSMatt Macy ASSERT(taskq_empty_ent(t)); 712eda14cbcSMatt Macy 713eda14cbcSMatt Macy /* 714eda14cbcSMatt Macy * Mark it as a prealloc'd task. This is important 715eda14cbcSMatt Macy * to ensure that we don't free it later. 716eda14cbcSMatt Macy */ 717eda14cbcSMatt Macy t->tqent_flags |= TQENT_FLAG_PREALLOC; 718eda14cbcSMatt Macy 719eda14cbcSMatt Macy /* Queue to the priority list instead of the pending list */ 720eda14cbcSMatt Macy if (flags & TQ_FRONT) 721eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_prio_list); 722eda14cbcSMatt Macy else 723eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_pend_list); 724eda14cbcSMatt Macy 725eda14cbcSMatt Macy t->tqent_id = tq->tq_next_id; 726eda14cbcSMatt Macy tq->tq_next_id++; 727eda14cbcSMatt Macy t->tqent_func = func; 728eda14cbcSMatt Macy t->tqent_arg = arg; 729eda14cbcSMatt Macy t->tqent_taskq = tq; 730eda14cbcSMatt Macy 731eda14cbcSMatt Macy t->tqent_birth = jiffies; 732eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); 733eda14cbcSMatt Macy 734eda14cbcSMatt Macy spin_unlock(&t->tqent_lock); 735eda14cbcSMatt Macy 736eda14cbcSMatt Macy wake_up(&tq->tq_work_waitq); 737eda14cbcSMatt Macy out: 738eda14cbcSMatt Macy /* Spawn additional taskq threads if required. 
*/ 739eda14cbcSMatt Macy if (tq->tq_nactive == tq->tq_nthreads) 740eda14cbcSMatt Macy (void) taskq_thread_spawn(tq); 741eda14cbcSMatt Macy out2: 742eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 743eda14cbcSMatt Macy } 744eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_dispatch_ent); 745eda14cbcSMatt Macy 746eda14cbcSMatt Macy int 747eda14cbcSMatt Macy taskq_empty_ent(taskq_ent_t *t) 748eda14cbcSMatt Macy { 749eda14cbcSMatt Macy return (list_empty(&t->tqent_list)); 750eda14cbcSMatt Macy } 751eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_empty_ent); 752eda14cbcSMatt Macy 753eda14cbcSMatt Macy void 754eda14cbcSMatt Macy taskq_init_ent(taskq_ent_t *t) 755eda14cbcSMatt Macy { 756eda14cbcSMatt Macy spin_lock_init(&t->tqent_lock); 757eda14cbcSMatt Macy init_waitqueue_head(&t->tqent_waitq); 758eda14cbcSMatt Macy timer_setup(&t->tqent_timer, NULL, 0); 759eda14cbcSMatt Macy INIT_LIST_HEAD(&t->tqent_list); 760eda14cbcSMatt Macy t->tqent_id = 0; 761eda14cbcSMatt Macy t->tqent_func = NULL; 762eda14cbcSMatt Macy t->tqent_arg = NULL; 763eda14cbcSMatt Macy t->tqent_flags = 0; 764eda14cbcSMatt Macy t->tqent_taskq = NULL; 765eda14cbcSMatt Macy } 766eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_init_ent); 767eda14cbcSMatt Macy 768eda14cbcSMatt Macy /* 769eda14cbcSMatt Macy * Return the next pending task, preference is given to tasks on the 770eda14cbcSMatt Macy * priority list which were dispatched with TQ_FRONT. 771eda14cbcSMatt Macy */ 772eda14cbcSMatt Macy static taskq_ent_t * 773eda14cbcSMatt Macy taskq_next_ent(taskq_t *tq) 774eda14cbcSMatt Macy { 775eda14cbcSMatt Macy struct list_head *list; 776eda14cbcSMatt Macy 777eda14cbcSMatt Macy if (!list_empty(&tq->tq_prio_list)) 778eda14cbcSMatt Macy list = &tq->tq_prio_list; 779eda14cbcSMatt Macy else if (!list_empty(&tq->tq_pend_list)) 780eda14cbcSMatt Macy list = &tq->tq_pend_list; 781eda14cbcSMatt Macy else 782eda14cbcSMatt Macy return (NULL); 783eda14cbcSMatt Macy 784eda14cbcSMatt Macy return (list_entry(list->next, taskq_ent_t, tqent_list)); 785eda14cbcSMatt Macy } 786eda14cbcSMatt Macy 787eda14cbcSMatt Macy /* 788eda14cbcSMatt Macy * Spawns a new thread for the specified taskq. 789eda14cbcSMatt Macy */ 790eda14cbcSMatt Macy static void 791eda14cbcSMatt Macy taskq_thread_spawn_task(void *arg) 792eda14cbcSMatt Macy { 793eda14cbcSMatt Macy taskq_t *tq = (taskq_t *)arg; 794eda14cbcSMatt Macy unsigned long flags; 795eda14cbcSMatt Macy 796eda14cbcSMatt Macy if (taskq_thread_create(tq) == NULL) { 797eda14cbcSMatt Macy /* restore spawning count if failed */ 798eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 799eda14cbcSMatt Macy tq->tq_lock_class); 800eda14cbcSMatt Macy tq->tq_nspawn--; 801eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 802eda14cbcSMatt Macy } 803eda14cbcSMatt Macy } 804eda14cbcSMatt Macy 805eda14cbcSMatt Macy /* 806eda14cbcSMatt Macy * Spawn addition threads for dynamic taskqs (TASKQ_DYNAMIC) the current 807eda14cbcSMatt Macy * number of threads is insufficient to handle the pending tasks. These 808eda14cbcSMatt Macy * new threads must be created by the dedicated dynamic_taskq to avoid 809eda14cbcSMatt Macy * deadlocks between thread creation and memory reclaim. The system_taskq 810eda14cbcSMatt Macy * which is also a dynamic taskq cannot be safely used for this. 
811eda14cbcSMatt Macy */ 812eda14cbcSMatt Macy static int 813eda14cbcSMatt Macy taskq_thread_spawn(taskq_t *tq) 814eda14cbcSMatt Macy { 815eda14cbcSMatt Macy int spawning = 0; 816eda14cbcSMatt Macy 817eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_DYNAMIC)) 818eda14cbcSMatt Macy return (0); 819eda14cbcSMatt Macy 820eda14cbcSMatt Macy if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) && 821eda14cbcSMatt Macy (tq->tq_flags & TASKQ_ACTIVE)) { 822eda14cbcSMatt Macy spawning = (++tq->tq_nspawn); 823eda14cbcSMatt Macy taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task, 824eda14cbcSMatt Macy tq, TQ_NOSLEEP); 825eda14cbcSMatt Macy } 826eda14cbcSMatt Macy 827eda14cbcSMatt Macy return (spawning); 828eda14cbcSMatt Macy } 829eda14cbcSMatt Macy 830eda14cbcSMatt Macy /* 831eda14cbcSMatt Macy * Threads in a dynamic taskq should only exit once it has been completely 832eda14cbcSMatt Macy * drained and no other threads are actively servicing tasks. This prevents 833eda14cbcSMatt Macy * threads from being created and destroyed more than is required. 834eda14cbcSMatt Macy * 835eda14cbcSMatt Macy * The first thread is the thread list is treated as the primary thread. 836eda14cbcSMatt Macy * There is nothing special about the primary thread but in order to avoid 837eda14cbcSMatt Macy * all the taskq pids from changing we opt to make it long running. 838eda14cbcSMatt Macy */ 839eda14cbcSMatt Macy static int 840eda14cbcSMatt Macy taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt) 841eda14cbcSMatt Macy { 842eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_DYNAMIC)) 843eda14cbcSMatt Macy return (0); 844eda14cbcSMatt Macy 845eda14cbcSMatt Macy if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t, 846eda14cbcSMatt Macy tqt_thread_list) == tqt) 847eda14cbcSMatt Macy return (0); 848eda14cbcSMatt Macy 849eda14cbcSMatt Macy return 850eda14cbcSMatt Macy ((tq->tq_nspawn == 0) && /* No threads are being spawned */ 851eda14cbcSMatt Macy (tq->tq_nactive == 0) && /* No threads are handling tasks */ 852eda14cbcSMatt Macy (tq->tq_nthreads > 1) && /* More than 1 thread is running */ 853eda14cbcSMatt Macy (!taskq_next_ent(tq)) && /* There are no pending tasks */ 854eda14cbcSMatt Macy (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */ 855eda14cbcSMatt Macy } 856eda14cbcSMatt Macy 857eda14cbcSMatt Macy static int 858eda14cbcSMatt Macy taskq_thread(void *args) 859eda14cbcSMatt Macy { 860eda14cbcSMatt Macy DECLARE_WAITQUEUE(wait, current); 861eda14cbcSMatt Macy sigset_t blocked; 862eda14cbcSMatt Macy taskq_thread_t *tqt = args; 863eda14cbcSMatt Macy taskq_t *tq; 864eda14cbcSMatt Macy taskq_ent_t *t; 865eda14cbcSMatt Macy int seq_tasks = 0; 866eda14cbcSMatt Macy unsigned long flags; 867eda14cbcSMatt Macy taskq_ent_t dup_task = {}; 868eda14cbcSMatt Macy 869eda14cbcSMatt Macy ASSERT(tqt); 870eda14cbcSMatt Macy ASSERT(tqt->tqt_tq); 871eda14cbcSMatt Macy tq = tqt->tqt_tq; 872eda14cbcSMatt Macy current->flags |= PF_NOFREEZE; 873eda14cbcSMatt Macy 874eda14cbcSMatt Macy (void) spl_fstrans_mark(); 875eda14cbcSMatt Macy 876eda14cbcSMatt Macy sigfillset(&blocked); 877eda14cbcSMatt Macy sigprocmask(SIG_BLOCK, &blocked, NULL); 878eda14cbcSMatt Macy flush_signals(current); 879eda14cbcSMatt Macy 880eda14cbcSMatt Macy tsd_set(taskq_tsd, tq); 881eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 882eda14cbcSMatt Macy /* 883eda14cbcSMatt Macy * If we are dynamically spawned, decrease spawning count. 
Note that 884eda14cbcSMatt Macy * we could be created during taskq_create, in which case we shouldn't 885eda14cbcSMatt Macy * do the decrement. But it's fine because taskq_create will reset 886eda14cbcSMatt Macy * tq_nspawn later. 887eda14cbcSMatt Macy */ 888eda14cbcSMatt Macy if (tq->tq_flags & TASKQ_DYNAMIC) 889eda14cbcSMatt Macy tq->tq_nspawn--; 890eda14cbcSMatt Macy 891eda14cbcSMatt Macy /* Immediately exit if more threads than allowed were created. */ 892eda14cbcSMatt Macy if (tq->tq_nthreads >= tq->tq_maxthreads) 893eda14cbcSMatt Macy goto error; 894eda14cbcSMatt Macy 895eda14cbcSMatt Macy tq->tq_nthreads++; 896eda14cbcSMatt Macy list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list); 897eda14cbcSMatt Macy wake_up(&tq->tq_wait_waitq); 898eda14cbcSMatt Macy set_current_state(TASK_INTERRUPTIBLE); 899eda14cbcSMatt Macy 900eda14cbcSMatt Macy while (!kthread_should_stop()) { 901eda14cbcSMatt Macy 902eda14cbcSMatt Macy if (list_empty(&tq->tq_pend_list) && 903eda14cbcSMatt Macy list_empty(&tq->tq_prio_list)) { 904eda14cbcSMatt Macy 905eda14cbcSMatt Macy if (taskq_thread_should_stop(tq, tqt)) { 906eda14cbcSMatt Macy wake_up_all(&tq->tq_wait_waitq); 907eda14cbcSMatt Macy break; 908eda14cbcSMatt Macy } 909eda14cbcSMatt Macy 910eda14cbcSMatt Macy add_wait_queue_exclusive(&tq->tq_work_waitq, &wait); 911eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 912eda14cbcSMatt Macy 913eda14cbcSMatt Macy schedule(); 914eda14cbcSMatt Macy seq_tasks = 0; 915eda14cbcSMatt Macy 916eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 917eda14cbcSMatt Macy tq->tq_lock_class); 918eda14cbcSMatt Macy remove_wait_queue(&tq->tq_work_waitq, &wait); 919eda14cbcSMatt Macy } else { 920eda14cbcSMatt Macy __set_current_state(TASK_RUNNING); 921eda14cbcSMatt Macy } 922eda14cbcSMatt Macy 923eda14cbcSMatt Macy if ((t = taskq_next_ent(tq)) != NULL) { 924eda14cbcSMatt Macy list_del_init(&t->tqent_list); 925eda14cbcSMatt Macy 926eda14cbcSMatt Macy /* 927eda14cbcSMatt Macy * A TQENT_FLAG_PREALLOC task may be reused or freed 928eda14cbcSMatt Macy * during the task function call. Store tqent_id and 929eda14cbcSMatt Macy * tqent_flags here. 930eda14cbcSMatt Macy * 931eda14cbcSMatt Macy * Also use an on stack taskq_ent_t for tqt_task 932eda14cbcSMatt Macy * assignment in this case; we want to make sure 933eda14cbcSMatt Macy * to duplicate all fields, so the values are 934eda14cbcSMatt Macy * correct when it's accessed via DTRACE_PROBE*. 
935eda14cbcSMatt Macy */ 936eda14cbcSMatt Macy tqt->tqt_id = t->tqent_id; 937eda14cbcSMatt Macy tqt->tqt_flags = t->tqent_flags; 938eda14cbcSMatt Macy 939eda14cbcSMatt Macy if (t->tqent_flags & TQENT_FLAG_PREALLOC) { 940eda14cbcSMatt Macy dup_task = *t; 941eda14cbcSMatt Macy t = &dup_task; 942eda14cbcSMatt Macy } 943eda14cbcSMatt Macy tqt->tqt_task = t; 944eda14cbcSMatt Macy 945eda14cbcSMatt Macy taskq_insert_in_order(tq, tqt); 946eda14cbcSMatt Macy tq->tq_nactive++; 947eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 948eda14cbcSMatt Macy 949eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t); 950eda14cbcSMatt Macy 951eda14cbcSMatt Macy /* Perform the requested task */ 952eda14cbcSMatt Macy t->tqent_func(t->tqent_arg); 953eda14cbcSMatt Macy 954eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t); 955eda14cbcSMatt Macy 956eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 957eda14cbcSMatt Macy tq->tq_lock_class); 958eda14cbcSMatt Macy tq->tq_nactive--; 959eda14cbcSMatt Macy list_del_init(&tqt->tqt_active_list); 960eda14cbcSMatt Macy tqt->tqt_task = NULL; 961eda14cbcSMatt Macy 962eda14cbcSMatt Macy /* For prealloc'd tasks, we don't free anything. */ 963eda14cbcSMatt Macy if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC)) 964eda14cbcSMatt Macy task_done(tq, t); 965eda14cbcSMatt Macy 966eda14cbcSMatt Macy /* 967eda14cbcSMatt Macy * When the current lowest outstanding taskqid is 968eda14cbcSMatt Macy * done calculate the new lowest outstanding id 969eda14cbcSMatt Macy */ 970eda14cbcSMatt Macy if (tq->tq_lowest_id == tqt->tqt_id) { 971eda14cbcSMatt Macy tq->tq_lowest_id = taskq_lowest_id(tq); 972eda14cbcSMatt Macy ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id); 973eda14cbcSMatt Macy } 974eda14cbcSMatt Macy 975eda14cbcSMatt Macy /* Spawn additional taskq threads if required. 
*/ 976eda14cbcSMatt Macy if ((++seq_tasks) > spl_taskq_thread_sequential && 977eda14cbcSMatt Macy taskq_thread_spawn(tq)) 978eda14cbcSMatt Macy seq_tasks = 0; 979eda14cbcSMatt Macy 980eda14cbcSMatt Macy tqt->tqt_id = TASKQID_INVALID; 981eda14cbcSMatt Macy tqt->tqt_flags = 0; 982eda14cbcSMatt Macy wake_up_all(&tq->tq_wait_waitq); 983eda14cbcSMatt Macy } else { 984eda14cbcSMatt Macy if (taskq_thread_should_stop(tq, tqt)) 985eda14cbcSMatt Macy break; 986eda14cbcSMatt Macy } 987eda14cbcSMatt Macy 988eda14cbcSMatt Macy set_current_state(TASK_INTERRUPTIBLE); 989eda14cbcSMatt Macy 990eda14cbcSMatt Macy } 991eda14cbcSMatt Macy 992eda14cbcSMatt Macy __set_current_state(TASK_RUNNING); 993eda14cbcSMatt Macy tq->tq_nthreads--; 994eda14cbcSMatt Macy list_del_init(&tqt->tqt_thread_list); 995eda14cbcSMatt Macy error: 996eda14cbcSMatt Macy kmem_free(tqt, sizeof (taskq_thread_t)); 997eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 998eda14cbcSMatt Macy 999eda14cbcSMatt Macy tsd_set(taskq_tsd, NULL); 1000184c1b94SMartin Matuska thread_exit(); 1001eda14cbcSMatt Macy 1002eda14cbcSMatt Macy return (0); 1003eda14cbcSMatt Macy } 1004eda14cbcSMatt Macy 1005eda14cbcSMatt Macy static taskq_thread_t * 1006eda14cbcSMatt Macy taskq_thread_create(taskq_t *tq) 1007eda14cbcSMatt Macy { 1008eda14cbcSMatt Macy static int last_used_cpu = 0; 1009eda14cbcSMatt Macy taskq_thread_t *tqt; 1010eda14cbcSMatt Macy 1011eda14cbcSMatt Macy tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE); 1012eda14cbcSMatt Macy INIT_LIST_HEAD(&tqt->tqt_thread_list); 1013eda14cbcSMatt Macy INIT_LIST_HEAD(&tqt->tqt_active_list); 1014eda14cbcSMatt Macy tqt->tqt_tq = tq; 1015eda14cbcSMatt Macy tqt->tqt_id = TASKQID_INVALID; 1016eda14cbcSMatt Macy 1017eda14cbcSMatt Macy tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt, 1018eda14cbcSMatt Macy "%s", tq->tq_name); 1019eda14cbcSMatt Macy if (tqt->tqt_thread == NULL) { 1020eda14cbcSMatt Macy kmem_free(tqt, sizeof (taskq_thread_t)); 1021eda14cbcSMatt Macy return (NULL); 1022eda14cbcSMatt Macy } 1023eda14cbcSMatt Macy 1024eda14cbcSMatt Macy if (spl_taskq_thread_bind) { 1025eda14cbcSMatt Macy last_used_cpu = (last_used_cpu + 1) % num_online_cpus(); 1026eda14cbcSMatt Macy kthread_bind(tqt->tqt_thread, last_used_cpu); 1027eda14cbcSMatt Macy } 1028eda14cbcSMatt Macy 1029eda14cbcSMatt Macy if (spl_taskq_thread_priority) 1030eda14cbcSMatt Macy set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri)); 1031eda14cbcSMatt Macy 1032eda14cbcSMatt Macy wake_up_process(tqt->tqt_thread); 1033eda14cbcSMatt Macy 1034eda14cbcSMatt Macy return (tqt); 1035eda14cbcSMatt Macy } 1036eda14cbcSMatt Macy 1037eda14cbcSMatt Macy taskq_t * 10387877fdebSMatt Macy taskq_create(const char *name, int threads_arg, pri_t pri, 1039eda14cbcSMatt Macy int minalloc, int maxalloc, uint_t flags) 1040eda14cbcSMatt Macy { 1041eda14cbcSMatt Macy taskq_t *tq; 1042eda14cbcSMatt Macy taskq_thread_t *tqt; 1043eda14cbcSMatt Macy int count = 0, rc = 0, i; 1044eda14cbcSMatt Macy unsigned long irqflags; 10457877fdebSMatt Macy int nthreads = threads_arg; 1046eda14cbcSMatt Macy 1047eda14cbcSMatt Macy ASSERT(name != NULL); 1048eda14cbcSMatt Macy ASSERT(minalloc >= 0); 1049eda14cbcSMatt Macy ASSERT(maxalloc <= INT_MAX); 1050eda14cbcSMatt Macy ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */ 1051eda14cbcSMatt Macy 1052eda14cbcSMatt Macy /* Scale the number of threads using nthreads as a percentage */ 1053eda14cbcSMatt Macy if (flags & TASKQ_THREADS_CPU_PCT) { 1054eda14cbcSMatt Macy ASSERT(nthreads <= 100); 1055eda14cbcSMatt Macy ASSERT(nthreads 
>= 0); 10567877fdebSMatt Macy nthreads = MIN(threads_arg, 100); 1057eda14cbcSMatt Macy nthreads = MAX(nthreads, 0); 1058eda14cbcSMatt Macy nthreads = MAX((num_online_cpus() * nthreads) /100, 1); 1059eda14cbcSMatt Macy } 1060eda14cbcSMatt Macy 1061eda14cbcSMatt Macy tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE); 1062eda14cbcSMatt Macy if (tq == NULL) 1063eda14cbcSMatt Macy return (NULL); 1064eda14cbcSMatt Macy 10657877fdebSMatt Macy tq->tq_hp_support = B_FALSE; 10667877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG 10677877fdebSMatt Macy if (flags & TASKQ_THREADS_CPU_PCT) { 10687877fdebSMatt Macy tq->tq_hp_support = B_TRUE; 10697877fdebSMatt Macy if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state, 10707877fdebSMatt Macy &tq->tq_hp_cb_node) != 0) { 10717877fdebSMatt Macy kmem_free(tq, sizeof (*tq)); 10727877fdebSMatt Macy return (NULL); 10737877fdebSMatt Macy } 10747877fdebSMatt Macy } 10757877fdebSMatt Macy #endif 10767877fdebSMatt Macy 1077eda14cbcSMatt Macy spin_lock_init(&tq->tq_lock); 1078eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_thread_list); 1079eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_active_list); 1080eda14cbcSMatt Macy tq->tq_name = kmem_strdup(name); 1081eda14cbcSMatt Macy tq->tq_nactive = 0; 1082eda14cbcSMatt Macy tq->tq_nthreads = 0; 1083eda14cbcSMatt Macy tq->tq_nspawn = 0; 1084eda14cbcSMatt Macy tq->tq_maxthreads = nthreads; 10857877fdebSMatt Macy tq->tq_cpu_pct = threads_arg; 1086eda14cbcSMatt Macy tq->tq_pri = pri; 1087eda14cbcSMatt Macy tq->tq_minalloc = minalloc; 1088eda14cbcSMatt Macy tq->tq_maxalloc = maxalloc; 1089eda14cbcSMatt Macy tq->tq_nalloc = 0; 1090eda14cbcSMatt Macy tq->tq_flags = (flags | TASKQ_ACTIVE); 1091eda14cbcSMatt Macy tq->tq_next_id = TASKQID_INITIAL; 1092eda14cbcSMatt Macy tq->tq_lowest_id = TASKQID_INITIAL; 1093eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_free_list); 1094eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_pend_list); 1095eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_prio_list); 1096eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_delay_list); 1097eda14cbcSMatt Macy init_waitqueue_head(&tq->tq_work_waitq); 1098eda14cbcSMatt Macy init_waitqueue_head(&tq->tq_wait_waitq); 1099eda14cbcSMatt Macy tq->tq_lock_class = TQ_LOCK_GENERAL; 1100eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_taskqs); 1101eda14cbcSMatt Macy 1102eda14cbcSMatt Macy if (flags & TASKQ_PREPOPULATE) { 1103eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, 1104eda14cbcSMatt Macy tq->tq_lock_class); 1105eda14cbcSMatt Macy 1106eda14cbcSMatt Macy for (i = 0; i < minalloc; i++) 1107eda14cbcSMatt Macy task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW, 1108eda14cbcSMatt Macy &irqflags)); 1109eda14cbcSMatt Macy 1110eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 1111eda14cbcSMatt Macy } 1112eda14cbcSMatt Macy 1113eda14cbcSMatt Macy if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) 1114eda14cbcSMatt Macy nthreads = 1; 1115eda14cbcSMatt Macy 1116eda14cbcSMatt Macy for (i = 0; i < nthreads; i++) { 1117eda14cbcSMatt Macy tqt = taskq_thread_create(tq); 1118eda14cbcSMatt Macy if (tqt == NULL) 1119eda14cbcSMatt Macy rc = 1; 1120eda14cbcSMatt Macy else 1121eda14cbcSMatt Macy count++; 1122eda14cbcSMatt Macy } 1123eda14cbcSMatt Macy 1124eda14cbcSMatt Macy /* Wait for all threads to be started before potential destroy */ 1125eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count); 1126eda14cbcSMatt Macy /* 1127eda14cbcSMatt Macy * taskq_thread might have touched nspawn, but we don't want them to 1128eda14cbcSMatt Macy * because they're not dynamically spawned. 
So we reset it to 0 1129eda14cbcSMatt Macy */ 1130eda14cbcSMatt Macy tq->tq_nspawn = 0; 1131eda14cbcSMatt Macy 1132eda14cbcSMatt Macy if (rc) { 1133eda14cbcSMatt Macy taskq_destroy(tq); 1134eda14cbcSMatt Macy tq = NULL; 1135eda14cbcSMatt Macy } else { 1136eda14cbcSMatt Macy down_write(&tq_list_sem); 1137eda14cbcSMatt Macy tq->tq_instance = taskq_find_by_name(name) + 1; 1138eda14cbcSMatt Macy list_add_tail(&tq->tq_taskqs, &tq_list); 1139eda14cbcSMatt Macy up_write(&tq_list_sem); 1140eda14cbcSMatt Macy } 1141eda14cbcSMatt Macy 1142eda14cbcSMatt Macy return (tq); 1143eda14cbcSMatt Macy } 1144eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_create); 1145eda14cbcSMatt Macy 1146eda14cbcSMatt Macy void 1147eda14cbcSMatt Macy taskq_destroy(taskq_t *tq) 1148eda14cbcSMatt Macy { 1149eda14cbcSMatt Macy struct task_struct *thread; 1150eda14cbcSMatt Macy taskq_thread_t *tqt; 1151eda14cbcSMatt Macy taskq_ent_t *t; 1152eda14cbcSMatt Macy unsigned long flags; 1153eda14cbcSMatt Macy 1154eda14cbcSMatt Macy ASSERT(tq); 1155eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 1156eda14cbcSMatt Macy tq->tq_flags &= ~TASKQ_ACTIVE; 1157eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1158eda14cbcSMatt Macy 11597877fdebSMatt Macy #ifdef HAVE_CPU_HOTPLUG 11607877fdebSMatt Macy if (tq->tq_hp_support) { 11617877fdebSMatt Macy VERIFY0(cpuhp_state_remove_instance_nocalls( 11627877fdebSMatt Macy spl_taskq_cpuhp_state, &tq->tq_hp_cb_node)); 11637877fdebSMatt Macy } 11647877fdebSMatt Macy #endif 1165eda14cbcSMatt Macy /* 1166eda14cbcSMatt Macy * When TASKQ_ACTIVE is clear new tasks may not be added nor may 1167eda14cbcSMatt Macy * new worker threads be spawned for dynamic taskq. 1168eda14cbcSMatt Macy */ 1169eda14cbcSMatt Macy if (dynamic_taskq != NULL) 1170eda14cbcSMatt Macy taskq_wait_outstanding(dynamic_taskq, 0); 1171eda14cbcSMatt Macy 1172eda14cbcSMatt Macy taskq_wait(tq); 1173eda14cbcSMatt Macy 1174eda14cbcSMatt Macy /* remove taskq from global list used by the kstats */ 1175eda14cbcSMatt Macy down_write(&tq_list_sem); 1176eda14cbcSMatt Macy list_del(&tq->tq_taskqs); 1177eda14cbcSMatt Macy up_write(&tq_list_sem); 1178eda14cbcSMatt Macy 1179eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 1180eda14cbcSMatt Macy /* wait for spawning threads to insert themselves to the list */ 1181eda14cbcSMatt Macy while (tq->tq_nspawn) { 1182eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1183eda14cbcSMatt Macy schedule_timeout_interruptible(1); 1184eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 1185eda14cbcSMatt Macy tq->tq_lock_class); 1186eda14cbcSMatt Macy } 1187eda14cbcSMatt Macy 1188eda14cbcSMatt Macy /* 1189eda14cbcSMatt Macy * Signal each thread to exit and block until it does. Each thread 1190eda14cbcSMatt Macy * is responsible for removing itself from the list and freeing its 1191eda14cbcSMatt Macy * taskq_thread_t. This allows for idle threads to opt to remove 1192eda14cbcSMatt Macy * themselves from the taskq. They can be recreated as needed. 
1193eda14cbcSMatt Macy */ 1194eda14cbcSMatt Macy while (!list_empty(&tq->tq_thread_list)) { 1195eda14cbcSMatt Macy tqt = list_entry(tq->tq_thread_list.next, 1196eda14cbcSMatt Macy taskq_thread_t, tqt_thread_list); 1197eda14cbcSMatt Macy thread = tqt->tqt_thread; 1198eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1199eda14cbcSMatt Macy 1200eda14cbcSMatt Macy kthread_stop(thread); 1201eda14cbcSMatt Macy 1202eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 1203eda14cbcSMatt Macy tq->tq_lock_class); 1204eda14cbcSMatt Macy } 1205eda14cbcSMatt Macy 1206eda14cbcSMatt Macy while (!list_empty(&tq->tq_free_list)) { 1207eda14cbcSMatt Macy t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list); 1208eda14cbcSMatt Macy 1209eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 1210eda14cbcSMatt Macy 1211eda14cbcSMatt Macy list_del_init(&t->tqent_list); 1212eda14cbcSMatt Macy task_free(tq, t); 1213eda14cbcSMatt Macy } 1214eda14cbcSMatt Macy 1215eda14cbcSMatt Macy ASSERT0(tq->tq_nthreads); 1216eda14cbcSMatt Macy ASSERT0(tq->tq_nalloc); 1217eda14cbcSMatt Macy ASSERT0(tq->tq_nspawn); 1218eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_thread_list)); 1219eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_active_list)); 1220eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_free_list)); 1221eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_pend_list)); 1222eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_prio_list)); 1223eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_delay_list)); 1224eda14cbcSMatt Macy 1225eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1226eda14cbcSMatt Macy 1227eda14cbcSMatt Macy kmem_strfree(tq->tq_name); 1228eda14cbcSMatt Macy kmem_free(tq, sizeof (taskq_t)); 1229eda14cbcSMatt Macy } 1230eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_destroy); 1231eda14cbcSMatt Macy 1232eda14cbcSMatt Macy static unsigned int spl_taskq_kick = 0; 1233eda14cbcSMatt Macy 1234eda14cbcSMatt Macy /* 1235eda14cbcSMatt Macy * 2.6.36 API Change 1236eda14cbcSMatt Macy * module_param_cb is introduced to take kernel_param_ops and 1237eda14cbcSMatt Macy * module_param_call is marked as obsolete. Also set and get operations 1238eda14cbcSMatt Macy * were changed to take a 'const struct kernel_param *'. 
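 *
 * For reference (illustrative, not taken from this file): the
 * spl_taskq_kick parameter handled by the setter below is normally
 * reachable through the standard module parameter path, e.g.
 *	echo 1 > /sys/module/spl/parameters/spl_taskq_kick
 * assuming the SPL is packaged as the "spl" module.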

static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API change:
 * module_param_cb() was introduced to take a kernel_param_ops struct and
 * module_param_call() was marked obsolete. The set and get operations were
 * also changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq = NULL;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* Reset the trigger value. */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending task is older than 5 seconds. */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);
	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
	&spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
	"Write nonzero to kick stuck taskqs to spawn more threads");
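
/*
 * Usage note (an assumption based on standard module parameter handling, not
 * taken from this file): the parameter is normally exposed through sysfs, so
 * a stuck taskq can typically be kicked from userspace with
 *
 *	echo 1 > /sys/module/spl/parameters/spl_taskq_kick
 *
 * Every taskq whose oldest pending entry has been waiting for more than five
 * seconds then gets one additional worker thread spawned, and a message is
 * logged for it.
 */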

#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq. We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
 * time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (err);
	}

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
		return (err);
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}
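
/*
 * Worked example of the recalculation above (the values are hypothetical):
 * for a taskq created with tq_cpu_pct = 75 on a machine with 7 CPUs online
 * while an 8th is being brought up, num_online_cpus() still returns 7, so
 *
 *	nthreads = MAX(((7 + 1) * 75) / 100, 1) = MAX(6, 1) = 6
 *
 * and tq_maxthreads becomes 6. The "+ 1" accounts for the CPU that is coming
 * online but is not yet counted by num_online_cpus().
 */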

/*
 * While we don't support offlining CPUs, it is possible that a CPU will fail
 * to come online successfully. We need to be able to handle this case
 * gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif
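
/*
 * Sketch of the registration counterpart (an assumption about taskq_create(),
 * which is not shown in this excerpt): a percentage-based taskq opts into the
 * hotplug callbacks above by adding itself to the multi-instance state that
 * spl_taskq_init() installs below, mirroring the remove call performed in
 * taskq_destroy().
 *
 *	tq->tq_hp_support = B_TRUE;
 *	VERIFY0(cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
 *	    &tq->tq_hp_cb_node));
 */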

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (1);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (1);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking.
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}