1eda14cbcSMatt Macy /* 2eda14cbcSMatt Macy * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. 3eda14cbcSMatt Macy * Copyright (C) 2007 The Regents of the University of California. 4eda14cbcSMatt Macy * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 5eda14cbcSMatt Macy * Written by Brian Behlendorf <behlendorf1@llnl.gov>. 6eda14cbcSMatt Macy * UCRL-CODE-235197 7eda14cbcSMatt Macy * 8eda14cbcSMatt Macy * This file is part of the SPL, Solaris Porting Layer. 9eda14cbcSMatt Macy * 10eda14cbcSMatt Macy * The SPL is free software; you can redistribute it and/or modify it 11eda14cbcSMatt Macy * under the terms of the GNU General Public License as published by the 12eda14cbcSMatt Macy * Free Software Foundation; either version 2 of the License, or (at your 13eda14cbcSMatt Macy * option) any later version. 14eda14cbcSMatt Macy * 15eda14cbcSMatt Macy * The SPL is distributed in the hope that it will be useful, but WITHOUT 16eda14cbcSMatt Macy * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17eda14cbcSMatt Macy * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18eda14cbcSMatt Macy * for more details. 19eda14cbcSMatt Macy * 20eda14cbcSMatt Macy * You should have received a copy of the GNU General Public License along 21eda14cbcSMatt Macy * with the SPL. If not, see <http://www.gnu.org/licenses/>. 22eda14cbcSMatt Macy * 23eda14cbcSMatt Macy * Solaris Porting Layer (SPL) Task Queue Implementation. 24eda14cbcSMatt Macy */ 25e2df9bb4SMartin Matuska /* 26e2df9bb4SMartin Matuska * Copyright (c) 2024, Klara Inc. 27e2df9bb4SMartin Matuska * Copyright (c) 2024, Syneto 28e2df9bb4SMartin Matuska */ 29eda14cbcSMatt Macy 30eda14cbcSMatt Macy #include <sys/timer.h> 31eda14cbcSMatt Macy #include <sys/taskq.h> 32eda14cbcSMatt Macy #include <sys/kmem.h> 33eda14cbcSMatt Macy #include <sys/tsd.h> 34eda14cbcSMatt Macy #include <sys/trace_spl.h> 35e2df9bb4SMartin Matuska #include <sys/time.h> 36e2df9bb4SMartin Matuska #include <sys/atomic.h> 37e2df9bb4SMartin Matuska #include <sys/kstat.h> 387877fdebSMatt Macy #include <linux/cpuhotplug.h> 39eda14cbcSMatt Macy 40e2df9bb4SMartin Matuska typedef struct taskq_kstats { 41e2df9bb4SMartin Matuska /* static values, for completeness */ 42e2df9bb4SMartin Matuska kstat_named_t tqks_threads_max; 43e2df9bb4SMartin Matuska kstat_named_t tqks_entry_pool_min; 44e2df9bb4SMartin Matuska kstat_named_t tqks_entry_pool_max; 45e2df9bb4SMartin Matuska 46e2df9bb4SMartin Matuska /* gauges (inc/dec counters, current value) */ 47e2df9bb4SMartin Matuska kstat_named_t tqks_threads_active; 48e2df9bb4SMartin Matuska kstat_named_t tqks_threads_idle; 49e2df9bb4SMartin Matuska kstat_named_t tqks_threads_total; 50e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_pending; 51e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_priority; 52e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_total; 53e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_delayed; 54e2df9bb4SMartin Matuska kstat_named_t tqks_entries_free; 55e2df9bb4SMartin Matuska 56e2df9bb4SMartin Matuska /* counters (inc only, since taskq creation) */ 57e2df9bb4SMartin Matuska kstat_named_t tqks_threads_created; 58e2df9bb4SMartin Matuska kstat_named_t tqks_threads_destroyed; 59e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_dispatched; 60e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_dispatched_delayed; 61e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_executed_normal; 62e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_executed_priority; 63e2df9bb4SMartin Matuska 
kstat_named_t tqks_tasks_executed; 64e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_delayed_requeued; 65e2df9bb4SMartin Matuska kstat_named_t tqks_tasks_cancelled; 66e2df9bb4SMartin Matuska kstat_named_t tqks_thread_wakeups; 67e2df9bb4SMartin Matuska kstat_named_t tqks_thread_wakeups_nowork; 68e2df9bb4SMartin Matuska kstat_named_t tqks_thread_sleeps; 69e2df9bb4SMartin Matuska } taskq_kstats_t; 70e2df9bb4SMartin Matuska 71e2df9bb4SMartin Matuska static taskq_kstats_t taskq_kstats_template = { 72e2df9bb4SMartin Matuska { "threads_max", KSTAT_DATA_UINT64 }, 73e2df9bb4SMartin Matuska { "entry_pool_min", KSTAT_DATA_UINT64 }, 74e2df9bb4SMartin Matuska { "entry_pool_max", KSTAT_DATA_UINT64 }, 75e2df9bb4SMartin Matuska { "threads_active", KSTAT_DATA_UINT64 }, 76e2df9bb4SMartin Matuska { "threads_idle", KSTAT_DATA_UINT64 }, 77e2df9bb4SMartin Matuska { "threads_total", KSTAT_DATA_UINT64 }, 78e2df9bb4SMartin Matuska { "tasks_pending", KSTAT_DATA_UINT64 }, 79e2df9bb4SMartin Matuska { "tasks_priority", KSTAT_DATA_UINT64 }, 80e2df9bb4SMartin Matuska { "tasks_total", KSTAT_DATA_UINT64 }, 81e2df9bb4SMartin Matuska { "tasks_delayed", KSTAT_DATA_UINT64 }, 82e2df9bb4SMartin Matuska { "entries_free", KSTAT_DATA_UINT64 }, 83e2df9bb4SMartin Matuska 84e2df9bb4SMartin Matuska { "threads_created", KSTAT_DATA_UINT64 }, 85e2df9bb4SMartin Matuska { "threads_destroyed", KSTAT_DATA_UINT64 }, 86e2df9bb4SMartin Matuska { "tasks_dispatched", KSTAT_DATA_UINT64 }, 87e2df9bb4SMartin Matuska { "tasks_dispatched_delayed", KSTAT_DATA_UINT64 }, 88e2df9bb4SMartin Matuska { "tasks_executed_normal", KSTAT_DATA_UINT64 }, 89e2df9bb4SMartin Matuska { "tasks_executed_priority", KSTAT_DATA_UINT64 }, 90e2df9bb4SMartin Matuska { "tasks_executed", KSTAT_DATA_UINT64 }, 91e2df9bb4SMartin Matuska { "tasks_delayed_requeued", KSTAT_DATA_UINT64 }, 92e2df9bb4SMartin Matuska { "tasks_cancelled", KSTAT_DATA_UINT64 }, 93e2df9bb4SMartin Matuska { "thread_wakeups", KSTAT_DATA_UINT64 }, 94e2df9bb4SMartin Matuska { "thread_wakeups_nowork", KSTAT_DATA_UINT64 }, 95e2df9bb4SMartin Matuska { "thread_sleeps", KSTAT_DATA_UINT64 }, 96e2df9bb4SMartin Matuska }; 97e2df9bb4SMartin Matuska 98e2df9bb4SMartin Matuska #define TQSTAT_INC(tq, stat) wmsum_add(&tq->tq_sums.tqs_##stat, 1) 99e2df9bb4SMartin Matuska #define TQSTAT_DEC(tq, stat) wmsum_add(&tq->tq_sums.tqs_##stat, -1) 100e2df9bb4SMartin Matuska 101e2df9bb4SMartin Matuska #define _TQSTAT_MOD_LIST(mod, tq, t) do { \ 102e2df9bb4SMartin Matuska switch (t->tqent_flags & TQENT_LIST_MASK) { \ 103e2df9bb4SMartin Matuska case TQENT_LIST_NONE: ASSERT(list_empty(&t->tqent_list)); break;\ 104e2df9bb4SMartin Matuska case TQENT_LIST_PENDING: mod(tq, tasks_pending); break; \ 105e2df9bb4SMartin Matuska case TQENT_LIST_PRIORITY: mod(tq, tasks_priority); break; \ 106e2df9bb4SMartin Matuska case TQENT_LIST_DELAY: mod(tq, tasks_delayed); break; \ 107e2df9bb4SMartin Matuska } \ 108e2df9bb4SMartin Matuska } while (0) 109e2df9bb4SMartin Matuska #define TQSTAT_INC_LIST(tq, t) _TQSTAT_MOD_LIST(TQSTAT_INC, tq, t) 110e2df9bb4SMartin Matuska #define TQSTAT_DEC_LIST(tq, t) _TQSTAT_MOD_LIST(TQSTAT_DEC, tq, t) 111e2df9bb4SMartin Matuska 112e2df9bb4SMartin Matuska #define TQENT_SET_LIST(t, l) \ 113e2df9bb4SMartin Matuska t->tqent_flags = (t->tqent_flags & ~TQENT_LIST_MASK) | l; 114e2df9bb4SMartin Matuska 115e92ffd9bSMartin Matuska static int spl_taskq_thread_bind = 0; 116eda14cbcSMatt Macy module_param(spl_taskq_thread_bind, int, 0644); 117eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default"); 
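/*
 * Illustrative sketch, not part of the original source: TQENT_SET_LIST()
 * records which list a taskq_ent_t currently sits on, and the TQSTAT_*
 * macros above keep the matching per-taskq wmsum counters in tq->tq_sums
 * in step with it.  Queueing an entry on the pending list therefore
 * expands roughly to:
 *
 *    TQENT_SET_LIST(t, TQENT_LIST_PENDING);
 *        => t->tqent_flags = (t->tqent_flags & ~TQENT_LIST_MASK) |
 *           TQENT_LIST_PENDING;
 *    TQSTAT_INC_LIST(tq, t);
 *        => wmsum_add(&tq->tq_sums.tqs_tasks_pending, 1);
 *    TQSTAT_INC(tq, tasks_total);
 *        => wmsum_add(&tq->tq_sums.tqs_tasks_total, 1);
 *
 * The expansions are paraphrased from the macro definitions above; they are
 * documentation only and add no new code paths.
 */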
118eda14cbcSMatt Macy 119e2257b31SMartin Matuska static uint_t spl_taskq_thread_timeout_ms = 5000; 1207b5e6873SMartin Matuska module_param(spl_taskq_thread_timeout_ms, uint, 0644); 1217b5e6873SMartin Matuska MODULE_PARM_DESC(spl_taskq_thread_timeout_ms, 122e2257b31SMartin Matuska "Minimum idle threads exit interval for dynamic taskqs"); 123eda14cbcSMatt Macy 124e92ffd9bSMartin Matuska static int spl_taskq_thread_dynamic = 1; 1257877fdebSMatt Macy module_param(spl_taskq_thread_dynamic, int, 0444); 126eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads"); 127eda14cbcSMatt Macy 128e92ffd9bSMartin Matuska static int spl_taskq_thread_priority = 1; 129eda14cbcSMatt Macy module_param(spl_taskq_thread_priority, int, 0644); 130eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_priority, 131eda14cbcSMatt Macy "Allow non-default priority for taskq threads"); 132eda14cbcSMatt Macy 133be181ee2SMartin Matuska static uint_t spl_taskq_thread_sequential = 4; 134be181ee2SMartin Matuska module_param(spl_taskq_thread_sequential, uint, 0644); 135eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_thread_sequential, 136eda14cbcSMatt Macy "Create new taskq threads after N sequential tasks"); 137eda14cbcSMatt Macy 138681ce946SMartin Matuska /* 139681ce946SMartin Matuska * Global system-wide dynamic task queue available for all consumers. This 140681ce946SMartin Matuska * taskq is not intended for long-running tasks; instead, a dedicated taskq 141681ce946SMartin Matuska * should be created. 142681ce946SMartin Matuska */ 143eda14cbcSMatt Macy taskq_t *system_taskq; 144eda14cbcSMatt Macy EXPORT_SYMBOL(system_taskq); 145eda14cbcSMatt Macy /* Global dynamic task queue for long delay */ 146eda14cbcSMatt Macy taskq_t *system_delay_taskq; 147eda14cbcSMatt Macy EXPORT_SYMBOL(system_delay_taskq); 148eda14cbcSMatt Macy 149eda14cbcSMatt Macy /* Private dedicated taskq for creating new taskq threads on demand. */ 150eda14cbcSMatt Macy static taskq_t *dynamic_taskq; 151eda14cbcSMatt Macy static taskq_thread_t *taskq_thread_create(taskq_t *); 152eda14cbcSMatt Macy 1537877fdebSMatt Macy /* Multi-callback id for cpu hotplugging. */ 1547877fdebSMatt Macy static int spl_taskq_cpuhp_state; 1557877fdebSMatt Macy 156eda14cbcSMatt Macy /* List of all taskqs */ 157eda14cbcSMatt Macy LIST_HEAD(tq_list); 158eda14cbcSMatt Macy struct rw_semaphore tq_list_sem; 159eda14cbcSMatt Macy static uint_t taskq_tsd; 160eda14cbcSMatt Macy 161eda14cbcSMatt Macy static int 162eda14cbcSMatt Macy task_km_flags(uint_t flags) 163eda14cbcSMatt Macy { 164eda14cbcSMatt Macy if (flags & TQ_NOSLEEP) 165eda14cbcSMatt Macy return (KM_NOSLEEP); 166eda14cbcSMatt Macy 167eda14cbcSMatt Macy if (flags & TQ_PUSHPAGE) 168eda14cbcSMatt Macy return (KM_PUSHPAGE); 169eda14cbcSMatt Macy 170eda14cbcSMatt Macy return (KM_SLEEP); 171eda14cbcSMatt Macy } 172eda14cbcSMatt Macy 173eda14cbcSMatt Macy /* 174eda14cbcSMatt Macy * taskq_find_by_name - Find the largest instance number of a named taskq. 
175eda14cbcSMatt Macy */ 176eda14cbcSMatt Macy static int 177eda14cbcSMatt Macy taskq_find_by_name(const char *name) 178eda14cbcSMatt Macy { 179eda14cbcSMatt Macy struct list_head *tql = NULL; 180eda14cbcSMatt Macy taskq_t *tq; 181eda14cbcSMatt Macy 182eda14cbcSMatt Macy list_for_each_prev(tql, &tq_list) { 183eda14cbcSMatt Macy tq = list_entry(tql, taskq_t, tq_taskqs); 184eda14cbcSMatt Macy if (strcmp(name, tq->tq_name) == 0) 185eda14cbcSMatt Macy return (tq->tq_instance); 186eda14cbcSMatt Macy } 187eda14cbcSMatt Macy return (-1); 188eda14cbcSMatt Macy } 189eda14cbcSMatt Macy 190eda14cbcSMatt Macy /* 191eda14cbcSMatt Macy * NOTE: Must be called with tq->tq_lock held, returns a list_t which 192eda14cbcSMatt Macy * is not attached to the free, work, or pending taskq lists. 193eda14cbcSMatt Macy */ 194eda14cbcSMatt Macy static taskq_ent_t * 195eda14cbcSMatt Macy task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags) 196eda14cbcSMatt Macy { 197eda14cbcSMatt Macy taskq_ent_t *t; 198eda14cbcSMatt Macy int count = 0; 199eda14cbcSMatt Macy 200eda14cbcSMatt Macy ASSERT(tq); 201eda14cbcSMatt Macy retry: 202eda14cbcSMatt Macy /* Acquire taskq_ent_t's from free list if available */ 203eda14cbcSMatt Macy if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) { 204eda14cbcSMatt Macy t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list); 205eda14cbcSMatt Macy 206eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 207eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL)); 208eda14cbcSMatt Macy ASSERT(!timer_pending(&t->tqent_timer)); 209eda14cbcSMatt Macy 210eda14cbcSMatt Macy list_del_init(&t->tqent_list); 211e2df9bb4SMartin Matuska TQSTAT_DEC(tq, entries_free); 212eda14cbcSMatt Macy return (t); 213eda14cbcSMatt Macy } 214eda14cbcSMatt Macy 215eda14cbcSMatt Macy /* Free list is empty and memory allocations are prohibited */ 216eda14cbcSMatt Macy if (flags & TQ_NOALLOC) 217eda14cbcSMatt Macy return (NULL); 218eda14cbcSMatt Macy 219eda14cbcSMatt Macy /* Hit maximum taskq_ent_t pool size */ 220eda14cbcSMatt Macy if (tq->tq_nalloc >= tq->tq_maxalloc) { 221eda14cbcSMatt Macy if (flags & TQ_NOSLEEP) 222eda14cbcSMatt Macy return (NULL); 223eda14cbcSMatt Macy 224eda14cbcSMatt Macy /* 225eda14cbcSMatt Macy * Sleep periodically polling the free list for an available 226eda14cbcSMatt Macy * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed 227eda14cbcSMatt Macy * but we cannot block forever waiting for an taskq_ent_t to 228eda14cbcSMatt Macy * show up in the free list, otherwise a deadlock can happen. 229eda14cbcSMatt Macy * 230eda14cbcSMatt Macy * Therefore, we need to allocate a new task even if the number 231eda14cbcSMatt Macy * of allocated tasks is above tq->tq_maxalloc, but we still 232eda14cbcSMatt Macy * end up delaying the task allocation by one second, thereby 233eda14cbcSMatt Macy * throttling the task dispatch rate. 
234eda14cbcSMatt Macy */ 235eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, *irqflags); 236aca928a5SMartin Matuska schedule_timeout_interruptible(HZ / 100); 237eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, 238eda14cbcSMatt Macy tq->tq_lock_class); 239eda14cbcSMatt Macy if (count < 100) { 240eda14cbcSMatt Macy count++; 241eda14cbcSMatt Macy goto retry; 242eda14cbcSMatt Macy } 243eda14cbcSMatt Macy } 244eda14cbcSMatt Macy 245eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, *irqflags); 246eda14cbcSMatt Macy t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags)); 247eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class); 248eda14cbcSMatt Macy 249eda14cbcSMatt Macy if (t) { 250eda14cbcSMatt Macy taskq_init_ent(t); 251eda14cbcSMatt Macy tq->tq_nalloc++; 252eda14cbcSMatt Macy } 253eda14cbcSMatt Macy 254eda14cbcSMatt Macy return (t); 255eda14cbcSMatt Macy } 256eda14cbcSMatt Macy 257eda14cbcSMatt Macy /* 258eda14cbcSMatt Macy * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t 259eda14cbcSMatt Macy * to already be removed from the free, work, or pending taskq lists. 260eda14cbcSMatt Macy */ 261eda14cbcSMatt Macy static void 262eda14cbcSMatt Macy task_free(taskq_t *tq, taskq_ent_t *t) 263eda14cbcSMatt Macy { 264eda14cbcSMatt Macy ASSERT(tq); 265eda14cbcSMatt Macy ASSERT(t); 266eda14cbcSMatt Macy ASSERT(list_empty(&t->tqent_list)); 267eda14cbcSMatt Macy ASSERT(!timer_pending(&t->tqent_timer)); 268eda14cbcSMatt Macy 269eda14cbcSMatt Macy kmem_free(t, sizeof (taskq_ent_t)); 270eda14cbcSMatt Macy tq->tq_nalloc--; 271eda14cbcSMatt Macy } 272eda14cbcSMatt Macy 273eda14cbcSMatt Macy /* 274eda14cbcSMatt Macy * NOTE: Must be called with tq->tq_lock held, either destroys the 275eda14cbcSMatt Macy * taskq_ent_t if too many exist or moves it to the free list for later use. 276eda14cbcSMatt Macy */ 277eda14cbcSMatt Macy static void 278eda14cbcSMatt Macy task_done(taskq_t *tq, taskq_ent_t *t) 279eda14cbcSMatt Macy { 280eda14cbcSMatt Macy ASSERT(tq); 281eda14cbcSMatt Macy ASSERT(t); 282e2df9bb4SMartin Matuska ASSERT(list_empty(&t->tqent_list)); 283eda14cbcSMatt Macy 284eda14cbcSMatt Macy /* Wake tasks blocked in taskq_wait_id() */ 285eda14cbcSMatt Macy wake_up_all(&t->tqent_waitq); 286eda14cbcSMatt Macy 287eda14cbcSMatt Macy if (tq->tq_nalloc <= tq->tq_minalloc) { 288eda14cbcSMatt Macy t->tqent_id = TASKQID_INVALID; 289eda14cbcSMatt Macy t->tqent_func = NULL; 290eda14cbcSMatt Macy t->tqent_arg = NULL; 291eda14cbcSMatt Macy t->tqent_flags = 0; 292eda14cbcSMatt Macy 293eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_free_list); 294e2df9bb4SMartin Matuska TQSTAT_INC(tq, entries_free); 295eda14cbcSMatt Macy } else { 296eda14cbcSMatt Macy task_free(tq, t); 297eda14cbcSMatt Macy } 298eda14cbcSMatt Macy } 299eda14cbcSMatt Macy 300eda14cbcSMatt Macy /* 301eda14cbcSMatt Macy * When a delayed task timer expires remove it from the delay list and 302eda14cbcSMatt Macy * add it to the priority list in order for immediate processing. 
303eda14cbcSMatt Macy */ 304eda14cbcSMatt Macy static void 305eda14cbcSMatt Macy task_expire_impl(taskq_ent_t *t) 306eda14cbcSMatt Macy { 307eda14cbcSMatt Macy taskq_ent_t *w; 308eda14cbcSMatt Macy taskq_t *tq = t->tqent_taskq; 309eda14cbcSMatt Macy struct list_head *l = NULL; 310eda14cbcSMatt Macy unsigned long flags; 311eda14cbcSMatt Macy 312eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 313eda14cbcSMatt Macy 314eda14cbcSMatt Macy if (t->tqent_flags & TQENT_FLAG_CANCEL) { 315eda14cbcSMatt Macy ASSERT(list_empty(&t->tqent_list)); 316eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 317eda14cbcSMatt Macy return; 318eda14cbcSMatt Macy } 319eda14cbcSMatt Macy 320eda14cbcSMatt Macy t->tqent_birth = jiffies; 321eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); 322eda14cbcSMatt Macy 323eda14cbcSMatt Macy /* 324eda14cbcSMatt Macy * The priority list must be maintained in strict task id order 325eda14cbcSMatt Macy * from lowest to highest for lowest_id to be easily calculable. 326eda14cbcSMatt Macy */ 327eda14cbcSMatt Macy list_del(&t->tqent_list); 328eda14cbcSMatt Macy list_for_each_prev(l, &tq->tq_prio_list) { 329eda14cbcSMatt Macy w = list_entry(l, taskq_ent_t, tqent_list); 330eda14cbcSMatt Macy if (w->tqent_id < t->tqent_id) { 331eda14cbcSMatt Macy list_add(&t->tqent_list, l); 332eda14cbcSMatt Macy break; 333eda14cbcSMatt Macy } 334eda14cbcSMatt Macy } 335eda14cbcSMatt Macy if (l == &tq->tq_prio_list) 336eda14cbcSMatt Macy list_add(&t->tqent_list, &tq->tq_prio_list); 337eda14cbcSMatt Macy 338eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 339eda14cbcSMatt Macy 340eda14cbcSMatt Macy wake_up(&tq->tq_work_waitq); 341e2df9bb4SMartin Matuska 342e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_delayed_requeued); 343eda14cbcSMatt Macy } 344eda14cbcSMatt Macy 345eda14cbcSMatt Macy static void 346*7a7741afSMartin Matuska task_expire(struct timer_list *tl) 347eda14cbcSMatt Macy { 348eda14cbcSMatt Macy struct timer_list *tmr = (struct timer_list *)tl; 349eda14cbcSMatt Macy taskq_ent_t *t = from_timer(t, tmr, tqent_timer); 350eda14cbcSMatt Macy task_expire_impl(t); 351eda14cbcSMatt Macy } 352eda14cbcSMatt Macy 353eda14cbcSMatt Macy /* 354eda14cbcSMatt Macy * Returns the lowest incomplete taskqid_t. The taskqid_t may 355eda14cbcSMatt Macy * be queued on the pending list, on the priority list, on the 356eda14cbcSMatt Macy * delay list, or on the work list currently being handled, but 357eda14cbcSMatt Macy * it is not 100% complete yet. 
358eda14cbcSMatt Macy */ 359eda14cbcSMatt Macy static taskqid_t 360eda14cbcSMatt Macy taskq_lowest_id(taskq_t *tq) 361eda14cbcSMatt Macy { 362eda14cbcSMatt Macy taskqid_t lowest_id = tq->tq_next_id; 363eda14cbcSMatt Macy taskq_ent_t *t; 364eda14cbcSMatt Macy taskq_thread_t *tqt; 365eda14cbcSMatt Macy 366eda14cbcSMatt Macy if (!list_empty(&tq->tq_pend_list)) { 367eda14cbcSMatt Macy t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list); 368eda14cbcSMatt Macy lowest_id = MIN(lowest_id, t->tqent_id); 369eda14cbcSMatt Macy } 370eda14cbcSMatt Macy 371eda14cbcSMatt Macy if (!list_empty(&tq->tq_prio_list)) { 372eda14cbcSMatt Macy t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list); 373eda14cbcSMatt Macy lowest_id = MIN(lowest_id, t->tqent_id); 374eda14cbcSMatt Macy } 375eda14cbcSMatt Macy 376eda14cbcSMatt Macy if (!list_empty(&tq->tq_delay_list)) { 377eda14cbcSMatt Macy t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list); 378eda14cbcSMatt Macy lowest_id = MIN(lowest_id, t->tqent_id); 379eda14cbcSMatt Macy } 380eda14cbcSMatt Macy 381eda14cbcSMatt Macy if (!list_empty(&tq->tq_active_list)) { 382eda14cbcSMatt Macy tqt = list_entry(tq->tq_active_list.next, taskq_thread_t, 383eda14cbcSMatt Macy tqt_active_list); 384eda14cbcSMatt Macy ASSERT(tqt->tqt_id != TASKQID_INVALID); 385eda14cbcSMatt Macy lowest_id = MIN(lowest_id, tqt->tqt_id); 386eda14cbcSMatt Macy } 387eda14cbcSMatt Macy 388eda14cbcSMatt Macy return (lowest_id); 389eda14cbcSMatt Macy } 390eda14cbcSMatt Macy 391eda14cbcSMatt Macy /* 392eda14cbcSMatt Macy * Insert a task into a list keeping the list sorted by increasing taskqid. 393eda14cbcSMatt Macy */ 394eda14cbcSMatt Macy static void 395eda14cbcSMatt Macy taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt) 396eda14cbcSMatt Macy { 397eda14cbcSMatt Macy taskq_thread_t *w; 398eda14cbcSMatt Macy struct list_head *l = NULL; 399eda14cbcSMatt Macy 400eda14cbcSMatt Macy ASSERT(tq); 401eda14cbcSMatt Macy ASSERT(tqt); 402eda14cbcSMatt Macy 403eda14cbcSMatt Macy list_for_each_prev(l, &tq->tq_active_list) { 404eda14cbcSMatt Macy w = list_entry(l, taskq_thread_t, tqt_active_list); 405eda14cbcSMatt Macy if (w->tqt_id < tqt->tqt_id) { 406eda14cbcSMatt Macy list_add(&tqt->tqt_active_list, l); 407eda14cbcSMatt Macy break; 408eda14cbcSMatt Macy } 409eda14cbcSMatt Macy } 410eda14cbcSMatt Macy if (l == &tq->tq_active_list) 411eda14cbcSMatt Macy list_add(&tqt->tqt_active_list, &tq->tq_active_list); 412eda14cbcSMatt Macy } 413eda14cbcSMatt Macy 414eda14cbcSMatt Macy /* 415eda14cbcSMatt Macy * Find and return a task from the given list if it exists. The list 416eda14cbcSMatt Macy * must be in lowest to highest task id order. 
417eda14cbcSMatt Macy */ 418eda14cbcSMatt Macy static taskq_ent_t * 419eda14cbcSMatt Macy taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id) 420eda14cbcSMatt Macy { 421eda14cbcSMatt Macy struct list_head *l = NULL; 422eda14cbcSMatt Macy taskq_ent_t *t; 423eda14cbcSMatt Macy 424eda14cbcSMatt Macy list_for_each(l, lh) { 425eda14cbcSMatt Macy t = list_entry(l, taskq_ent_t, tqent_list); 426eda14cbcSMatt Macy 427eda14cbcSMatt Macy if (t->tqent_id == id) 428eda14cbcSMatt Macy return (t); 429eda14cbcSMatt Macy 430eda14cbcSMatt Macy if (t->tqent_id > id) 431eda14cbcSMatt Macy break; 432eda14cbcSMatt Macy } 433eda14cbcSMatt Macy 434eda14cbcSMatt Macy return (NULL); 435eda14cbcSMatt Macy } 436eda14cbcSMatt Macy 437eda14cbcSMatt Macy /* 438eda14cbcSMatt Macy * Find an already dispatched task given the task id regardless of what 439eda14cbcSMatt Macy * state it is in. If a task is still pending it will be returned. 440eda14cbcSMatt Macy * If a task is executing, then -EBUSY will be returned instead. 441eda14cbcSMatt Macy * If the task has already been run then NULL is returned. 442eda14cbcSMatt Macy */ 443eda14cbcSMatt Macy static taskq_ent_t * 444eda14cbcSMatt Macy taskq_find(taskq_t *tq, taskqid_t id) 445eda14cbcSMatt Macy { 446eda14cbcSMatt Macy taskq_thread_t *tqt; 447eda14cbcSMatt Macy struct list_head *l = NULL; 448eda14cbcSMatt Macy taskq_ent_t *t; 449eda14cbcSMatt Macy 450eda14cbcSMatt Macy t = taskq_find_list(tq, &tq->tq_delay_list, id); 451eda14cbcSMatt Macy if (t) 452eda14cbcSMatt Macy return (t); 453eda14cbcSMatt Macy 454eda14cbcSMatt Macy t = taskq_find_list(tq, &tq->tq_prio_list, id); 455eda14cbcSMatt Macy if (t) 456eda14cbcSMatt Macy return (t); 457eda14cbcSMatt Macy 458eda14cbcSMatt Macy t = taskq_find_list(tq, &tq->tq_pend_list, id); 459eda14cbcSMatt Macy if (t) 460eda14cbcSMatt Macy return (t); 461eda14cbcSMatt Macy 462eda14cbcSMatt Macy list_for_each(l, &tq->tq_active_list) { 463eda14cbcSMatt Macy tqt = list_entry(l, taskq_thread_t, tqt_active_list); 464eda14cbcSMatt Macy if (tqt->tqt_id == id) { 465eda14cbcSMatt Macy /* 466eda14cbcSMatt Macy * Instead of returning tqt_task, we just return a non 467eda14cbcSMatt Macy * NULL value to prevent misuse, since tqt_task only 468eda14cbcSMatt Macy * has two valid fields. 469eda14cbcSMatt Macy */ 470eda14cbcSMatt Macy return (ERR_PTR(-EBUSY)); 471eda14cbcSMatt Macy } 472eda14cbcSMatt Macy } 473eda14cbcSMatt Macy 474eda14cbcSMatt Macy return (NULL); 475eda14cbcSMatt Macy } 476eda14cbcSMatt Macy 477eda14cbcSMatt Macy /* 478eda14cbcSMatt Macy * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and 479eda14cbcSMatt Macy * taskq_wait() functions below. 480eda14cbcSMatt Macy * 481eda14cbcSMatt Macy * Taskq waiting is accomplished by tracking the lowest outstanding task 482eda14cbcSMatt Macy * id and the next available task id. As tasks are dispatched they are 483eda14cbcSMatt Macy * added to the tail of the pending, priority, or delay lists. As worker 484eda14cbcSMatt Macy * threads become available the tasks are removed from the heads of these 485eda14cbcSMatt Macy * lists and linked to the worker threads. This ensures the lists are 486eda14cbcSMatt Macy * kept sorted by lowest to highest task id. 487eda14cbcSMatt Macy * 488eda14cbcSMatt Macy * Therefore the lowest outstanding task id can be quickly determined by 489eda14cbcSMatt Macy * checking the head item from all of these lists. This value is stored 490eda14cbcSMatt Macy * with the taskq as the lowest id. 
It only needs to be recalculated when
491eda14cbcSMatt Macy  * either the task with the current lowest id completes or is canceled.
492eda14cbcSMatt Macy  *
493eda14cbcSMatt Macy  * By blocking until the lowest task id exceeds the passed task id the
494eda14cbcSMatt Macy  * taskq_wait_outstanding() function can be easily implemented. Similarly,
495eda14cbcSMatt Macy  * by blocking until the lowest task id matches the next task id taskq_wait()
496eda14cbcSMatt Macy  * can be implemented.
497eda14cbcSMatt Macy  *
498eda14cbcSMatt Macy  * Callers should be aware that when there are multiple worker threads it
499eda14cbcSMatt Macy  * is possible for larger task ids to complete before smaller ones. Also
500eda14cbcSMatt Macy  * when the taskq contains delay tasks with small task ids callers may
501eda14cbcSMatt Macy  * block for a considerable length of time waiting for them to expire and
502eda14cbcSMatt Macy  * execute.
503eda14cbcSMatt Macy  */
504eda14cbcSMatt Macy static int
505eda14cbcSMatt Macy taskq_wait_id_check(taskq_t *tq, taskqid_t id)
506eda14cbcSMatt Macy {
507eda14cbcSMatt Macy     int rc;
508eda14cbcSMatt Macy     unsigned long flags;
509eda14cbcSMatt Macy
510eda14cbcSMatt Macy     spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
511eda14cbcSMatt Macy     rc = (taskq_find(tq, id) == NULL);
512eda14cbcSMatt Macy     spin_unlock_irqrestore(&tq->tq_lock, flags);
513eda14cbcSMatt Macy
514eda14cbcSMatt Macy     return (rc);
515eda14cbcSMatt Macy }
516eda14cbcSMatt Macy
517eda14cbcSMatt Macy /*
518eda14cbcSMatt Macy  * The taskq_wait_id() function blocks until the passed task id completes.
519eda14cbcSMatt Macy  * This does not guarantee that all lower task ids have completed.
520eda14cbcSMatt Macy  */
521eda14cbcSMatt Macy void
522eda14cbcSMatt Macy taskq_wait_id(taskq_t *tq, taskqid_t id)
523eda14cbcSMatt Macy {
524eda14cbcSMatt Macy     wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
525eda14cbcSMatt Macy }
526eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_wait_id);
527eda14cbcSMatt Macy
528eda14cbcSMatt Macy static int
529eda14cbcSMatt Macy taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
530eda14cbcSMatt Macy {
531eda14cbcSMatt Macy     int rc;
532eda14cbcSMatt Macy     unsigned long flags;
533eda14cbcSMatt Macy
534eda14cbcSMatt Macy     spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
535eda14cbcSMatt Macy     rc = (id < tq->tq_lowest_id);
536eda14cbcSMatt Macy     spin_unlock_irqrestore(&tq->tq_lock, flags);
537eda14cbcSMatt Macy
538eda14cbcSMatt Macy     return (rc);
539eda14cbcSMatt Macy }
540eda14cbcSMatt Macy
541eda14cbcSMatt Macy /*
542eda14cbcSMatt Macy  * The taskq_wait_outstanding() function will block until all tasks with a
543eda14cbcSMatt Macy  * lower taskqid than the passed 'id' have been completed. Note that all
544eda14cbcSMatt Macy  * task id's are assigned monotonically at dispatch time. Zero may be
545eda14cbcSMatt Macy  * passed for the id to indicate all tasks dispatched up to this point,
546eda14cbcSMatt Macy  * but not after, should be waited for.
547eda14cbcSMatt Macy  */
548eda14cbcSMatt Macy void
549eda14cbcSMatt Macy taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
550eda14cbcSMatt Macy {
551eda14cbcSMatt Macy     id = id ?
id : tq->tq_next_id - 1; 552eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id)); 553eda14cbcSMatt Macy } 554eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_wait_outstanding); 555eda14cbcSMatt Macy 556eda14cbcSMatt Macy static int 557eda14cbcSMatt Macy taskq_wait_check(taskq_t *tq) 558eda14cbcSMatt Macy { 559eda14cbcSMatt Macy int rc; 560eda14cbcSMatt Macy unsigned long flags; 561eda14cbcSMatt Macy 562eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 563eda14cbcSMatt Macy rc = (tq->tq_lowest_id == tq->tq_next_id); 564eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 565eda14cbcSMatt Macy 566eda14cbcSMatt Macy return (rc); 567eda14cbcSMatt Macy } 568eda14cbcSMatt Macy 569eda14cbcSMatt Macy /* 570eda14cbcSMatt Macy * The taskq_wait() function will block until the taskq is empty. 571eda14cbcSMatt Macy * This means that if a taskq re-dispatches work to itself taskq_wait() 572eda14cbcSMatt Macy * callers will block indefinitely. 573eda14cbcSMatt Macy */ 574eda14cbcSMatt Macy void 575eda14cbcSMatt Macy taskq_wait(taskq_t *tq) 576eda14cbcSMatt Macy { 577eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, taskq_wait_check(tq)); 578eda14cbcSMatt Macy } 579eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_wait); 580eda14cbcSMatt Macy 581eda14cbcSMatt Macy int 582eda14cbcSMatt Macy taskq_member(taskq_t *tq, kthread_t *t) 583eda14cbcSMatt Macy { 584eda14cbcSMatt Macy return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t)); 585eda14cbcSMatt Macy } 586eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_member); 587eda14cbcSMatt Macy 588eda14cbcSMatt Macy taskq_t * 589eda14cbcSMatt Macy taskq_of_curthread(void) 590eda14cbcSMatt Macy { 591eda14cbcSMatt Macy return (tsd_get(taskq_tsd)); 592eda14cbcSMatt Macy } 593eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_of_curthread); 594eda14cbcSMatt Macy 595eda14cbcSMatt Macy /* 596eda14cbcSMatt Macy * Cancel an already dispatched task given the task id. Still pending tasks 597eda14cbcSMatt Macy * will be immediately canceled, and if the task is active the function will 598eda14cbcSMatt Macy * block until it completes. Preallocated tasks which are canceled must be 599eda14cbcSMatt Macy * freed by the caller. 600eda14cbcSMatt Macy */ 601eda14cbcSMatt Macy int 602eda14cbcSMatt Macy taskq_cancel_id(taskq_t *tq, taskqid_t id) 603eda14cbcSMatt Macy { 604eda14cbcSMatt Macy taskq_ent_t *t; 605eda14cbcSMatt Macy int rc = ENOENT; 606eda14cbcSMatt Macy unsigned long flags; 607eda14cbcSMatt Macy 608eda14cbcSMatt Macy ASSERT(tq); 609eda14cbcSMatt Macy 610eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 611eda14cbcSMatt Macy t = taskq_find(tq, id); 612eda14cbcSMatt Macy if (t && t != ERR_PTR(-EBUSY)) { 613eda14cbcSMatt Macy list_del_init(&t->tqent_list); 614e2df9bb4SMartin Matuska TQSTAT_DEC_LIST(tq, t); 615e2df9bb4SMartin Matuska TQSTAT_DEC(tq, tasks_total); 616e2df9bb4SMartin Matuska 617eda14cbcSMatt Macy t->tqent_flags |= TQENT_FLAG_CANCEL; 618e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_cancelled); 619eda14cbcSMatt Macy 620eda14cbcSMatt Macy /* 621eda14cbcSMatt Macy * When canceling the lowest outstanding task id we 622eda14cbcSMatt Macy * must recalculate the new lowest outstanding id. 
623eda14cbcSMatt Macy          */
624eda14cbcSMatt Macy         if (tq->tq_lowest_id == t->tqent_id) {
625eda14cbcSMatt Macy             tq->tq_lowest_id = taskq_lowest_id(tq);
626eda14cbcSMatt Macy             ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
627eda14cbcSMatt Macy         }
628eda14cbcSMatt Macy
629eda14cbcSMatt Macy         /*
630eda14cbcSMatt Macy          * The task_expire() function takes the tq->tq_lock so drop
631eda14cbcSMatt Macy          * the lock before synchronously cancelling the timer.
632eda14cbcSMatt Macy          */
633eda14cbcSMatt Macy         if (timer_pending(&t->tqent_timer)) {
634eda14cbcSMatt Macy             spin_unlock_irqrestore(&tq->tq_lock, flags);
635eda14cbcSMatt Macy             del_timer_sync(&t->tqent_timer);
636eda14cbcSMatt Macy             spin_lock_irqsave_nested(&tq->tq_lock, flags,
637eda14cbcSMatt Macy                 tq->tq_lock_class);
638eda14cbcSMatt Macy         }
639eda14cbcSMatt Macy
640eda14cbcSMatt Macy         if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
641eda14cbcSMatt Macy             task_done(tq, t);
642eda14cbcSMatt Macy
643eda14cbcSMatt Macy         rc = 0;
644eda14cbcSMatt Macy     }
645eda14cbcSMatt Macy     spin_unlock_irqrestore(&tq->tq_lock, flags);
646eda14cbcSMatt Macy
647eda14cbcSMatt Macy     if (t == ERR_PTR(-EBUSY)) {
648eda14cbcSMatt Macy         taskq_wait_id(tq, id);
649eda14cbcSMatt Macy         rc = EBUSY;
650eda14cbcSMatt Macy     }
651eda14cbcSMatt Macy
652eda14cbcSMatt Macy     return (rc);
653eda14cbcSMatt Macy }
654eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_cancel_id);
655eda14cbcSMatt Macy
656eda14cbcSMatt Macy static int taskq_thread_spawn(taskq_t *tq);
657eda14cbcSMatt Macy
658eda14cbcSMatt Macy taskqid_t
659eda14cbcSMatt Macy taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
660eda14cbcSMatt Macy {
661eda14cbcSMatt Macy     taskq_ent_t *t;
662eda14cbcSMatt Macy     taskqid_t rc = TASKQID_INVALID;
663eda14cbcSMatt Macy     unsigned long irqflags;
664eda14cbcSMatt Macy
665eda14cbcSMatt Macy     ASSERT(tq);
666eda14cbcSMatt Macy     ASSERT(func);
667eda14cbcSMatt Macy
668eda14cbcSMatt Macy     spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
669eda14cbcSMatt Macy
670eda14cbcSMatt Macy     /* Taskq being destroyed and all tasks drained */
671eda14cbcSMatt Macy     if (!(tq->tq_flags & TASKQ_ACTIVE))
672eda14cbcSMatt Macy         goto out;
673eda14cbcSMatt Macy
674eda14cbcSMatt Macy     /* Do not queue the task unless there is an idle thread for it */
675eda14cbcSMatt Macy     ASSERT(tq->tq_nactive <= tq->tq_nthreads);
676eda14cbcSMatt Macy     if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
677eda14cbcSMatt Macy         /* Dynamic taskq may be able to spawn another thread */
678e2257b31SMartin Matuska         if (taskq_thread_spawn(tq) == 0)
679eda14cbcSMatt Macy             goto out;
680eda14cbcSMatt Macy     }
681eda14cbcSMatt Macy
682eda14cbcSMatt Macy     if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
683eda14cbcSMatt Macy         goto out;
684eda14cbcSMatt Macy
685eda14cbcSMatt Macy     spin_lock(&t->tqent_lock);
686eda14cbcSMatt Macy
687eda14cbcSMatt Macy     /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
688e2df9bb4SMartin Matuska     if (flags & TQ_NOQUEUE) {
689e2df9bb4SMartin Matuska         TQENT_SET_LIST(t, TQENT_LIST_PRIORITY);
690eda14cbcSMatt Macy         list_add(&t->tqent_list, &tq->tq_prio_list);
691eda14cbcSMatt Macy     /* Queue to the priority list instead of the pending list */
692e2df9bb4SMartin Matuska     } else if (flags & TQ_FRONT) {
693e2df9bb4SMartin Matuska         TQENT_SET_LIST(t, TQENT_LIST_PRIORITY);
694eda14cbcSMatt Macy         list_add_tail(&t->tqent_list, &tq->tq_prio_list);
695e2df9bb4SMartin Matuska     } else {
696e2df9bb4SMartin Matuska         TQENT_SET_LIST(t, TQENT_LIST_PENDING);
697eda14cbcSMatt Macy         list_add_tail(&t->tqent_list,
&tq->tq_pend_list); 698e2df9bb4SMartin Matuska } 699e2df9bb4SMartin Matuska TQSTAT_INC_LIST(tq, t); 700e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_total); 701eda14cbcSMatt Macy 702eda14cbcSMatt Macy t->tqent_id = rc = tq->tq_next_id; 703eda14cbcSMatt Macy tq->tq_next_id++; 704eda14cbcSMatt Macy t->tqent_func = func; 705eda14cbcSMatt Macy t->tqent_arg = arg; 706eda14cbcSMatt Macy t->tqent_taskq = tq; 707eda14cbcSMatt Macy t->tqent_timer.function = NULL; 708eda14cbcSMatt Macy t->tqent_timer.expires = 0; 709eda14cbcSMatt Macy 710eda14cbcSMatt Macy t->tqent_birth = jiffies; 711eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); 712eda14cbcSMatt Macy 713eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 714eda14cbcSMatt Macy 715eda14cbcSMatt Macy spin_unlock(&t->tqent_lock); 716eda14cbcSMatt Macy 717eda14cbcSMatt Macy wake_up(&tq->tq_work_waitq); 718e2257b31SMartin Matuska 719e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_dispatched); 720e2df9bb4SMartin Matuska 721eda14cbcSMatt Macy /* Spawn additional taskq threads if required. */ 722eda14cbcSMatt Macy if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads) 723eda14cbcSMatt Macy (void) taskq_thread_spawn(tq); 724e2257b31SMartin Matuska out: 725eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 726eda14cbcSMatt Macy return (rc); 727eda14cbcSMatt Macy } 728eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_dispatch); 729eda14cbcSMatt Macy 730eda14cbcSMatt Macy taskqid_t 731eda14cbcSMatt Macy taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, 732eda14cbcSMatt Macy uint_t flags, clock_t expire_time) 733eda14cbcSMatt Macy { 734eda14cbcSMatt Macy taskqid_t rc = TASKQID_INVALID; 735eda14cbcSMatt Macy taskq_ent_t *t; 736eda14cbcSMatt Macy unsigned long irqflags; 737eda14cbcSMatt Macy 738eda14cbcSMatt Macy ASSERT(tq); 739eda14cbcSMatt Macy ASSERT(func); 740eda14cbcSMatt Macy 741eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); 742eda14cbcSMatt Macy 743eda14cbcSMatt Macy /* Taskq being destroyed and all tasks drained */ 744eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE)) 745eda14cbcSMatt Macy goto out; 746eda14cbcSMatt Macy 747eda14cbcSMatt Macy if ((t = task_alloc(tq, flags, &irqflags)) == NULL) 748eda14cbcSMatt Macy goto out; 749eda14cbcSMatt Macy 750eda14cbcSMatt Macy spin_lock(&t->tqent_lock); 751eda14cbcSMatt Macy 752eda14cbcSMatt Macy /* Queue to the delay list for subsequent execution */ 753eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_delay_list); 754e2df9bb4SMartin Matuska TQENT_SET_LIST(t, TQENT_LIST_DELAY); 755e2df9bb4SMartin Matuska TQSTAT_INC_LIST(tq, t); 756e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_total); 757eda14cbcSMatt Macy 758eda14cbcSMatt Macy t->tqent_id = rc = tq->tq_next_id; 759eda14cbcSMatt Macy tq->tq_next_id++; 760eda14cbcSMatt Macy t->tqent_func = func; 761eda14cbcSMatt Macy t->tqent_arg = arg; 762eda14cbcSMatt Macy t->tqent_taskq = tq; 763eda14cbcSMatt Macy t->tqent_timer.function = task_expire; 764eda14cbcSMatt Macy t->tqent_timer.expires = (unsigned long)expire_time; 765eda14cbcSMatt Macy add_timer(&t->tqent_timer); 766eda14cbcSMatt Macy 767eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 768eda14cbcSMatt Macy 769eda14cbcSMatt Macy spin_unlock(&t->tqent_lock); 770e2257b31SMartin Matuska 771e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_dispatched_delayed); 772e2df9bb4SMartin Matuska 773eda14cbcSMatt Macy /* Spawn additional taskq threads if required. 
*/ 774eda14cbcSMatt Macy if (tq->tq_nactive == tq->tq_nthreads) 775eda14cbcSMatt Macy (void) taskq_thread_spawn(tq); 776e2257b31SMartin Matuska out: 777eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 778eda14cbcSMatt Macy return (rc); 779eda14cbcSMatt Macy } 780eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_dispatch_delay); 781eda14cbcSMatt Macy 782eda14cbcSMatt Macy void 783eda14cbcSMatt Macy taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags, 784eda14cbcSMatt Macy taskq_ent_t *t) 785eda14cbcSMatt Macy { 786eda14cbcSMatt Macy unsigned long irqflags; 787eda14cbcSMatt Macy ASSERT(tq); 788eda14cbcSMatt Macy ASSERT(func); 789eda14cbcSMatt Macy 790eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, 791eda14cbcSMatt Macy tq->tq_lock_class); 792eda14cbcSMatt Macy 793eda14cbcSMatt Macy /* Taskq being destroyed and all tasks drained */ 794eda14cbcSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE)) { 795eda14cbcSMatt Macy t->tqent_id = TASKQID_INVALID; 796eda14cbcSMatt Macy goto out; 797eda14cbcSMatt Macy } 798eda14cbcSMatt Macy 799eda14cbcSMatt Macy if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) { 800eda14cbcSMatt Macy /* Dynamic taskq may be able to spawn another thread */ 801e2257b31SMartin Matuska if (taskq_thread_spawn(tq) == 0) 802e2257b31SMartin Matuska goto out; 803eda14cbcSMatt Macy flags |= TQ_FRONT; 804eda14cbcSMatt Macy } 805eda14cbcSMatt Macy 806eda14cbcSMatt Macy spin_lock(&t->tqent_lock); 807eda14cbcSMatt Macy 808eda14cbcSMatt Macy /* 809eda14cbcSMatt Macy * Make sure the entry is not on some other taskq; it is important to 810eda14cbcSMatt Macy * ASSERT() under lock 811eda14cbcSMatt Macy */ 812eda14cbcSMatt Macy ASSERT(taskq_empty_ent(t)); 813eda14cbcSMatt Macy 814eda14cbcSMatt Macy /* 815eda14cbcSMatt Macy * Mark it as a prealloc'd task. This is important 816eda14cbcSMatt Macy * to ensure that we don't free it later. 817eda14cbcSMatt Macy */ 818eda14cbcSMatt Macy t->tqent_flags |= TQENT_FLAG_PREALLOC; 819eda14cbcSMatt Macy 820eda14cbcSMatt Macy /* Queue to the priority list instead of the pending list */ 821e2df9bb4SMartin Matuska if (flags & TQ_FRONT) { 822e2df9bb4SMartin Matuska TQENT_SET_LIST(t, TQENT_LIST_PRIORITY); 823eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_prio_list); 824e2df9bb4SMartin Matuska } else { 825e2df9bb4SMartin Matuska TQENT_SET_LIST(t, TQENT_LIST_PENDING); 826eda14cbcSMatt Macy list_add_tail(&t->tqent_list, &tq->tq_pend_list); 827e2df9bb4SMartin Matuska } 828e2df9bb4SMartin Matuska TQSTAT_INC_LIST(tq, t); 829e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_total); 830eda14cbcSMatt Macy 831eda14cbcSMatt Macy t->tqent_id = tq->tq_next_id; 832eda14cbcSMatt Macy tq->tq_next_id++; 833eda14cbcSMatt Macy t->tqent_func = func; 834eda14cbcSMatt Macy t->tqent_arg = arg; 835eda14cbcSMatt Macy t->tqent_taskq = tq; 836eda14cbcSMatt Macy 837eda14cbcSMatt Macy t->tqent_birth = jiffies; 838eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); 839eda14cbcSMatt Macy 840eda14cbcSMatt Macy spin_unlock(&t->tqent_lock); 841eda14cbcSMatt Macy 842eda14cbcSMatt Macy wake_up(&tq->tq_work_waitq); 843e2257b31SMartin Matuska 844e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_dispatched); 845e2df9bb4SMartin Matuska 846eda14cbcSMatt Macy /* Spawn additional taskq threads if required. 
*/
847eda14cbcSMatt Macy     if (tq->tq_nactive == tq->tq_nthreads)
848eda14cbcSMatt Macy         (void) taskq_thread_spawn(tq);
849e2257b31SMartin Matuska out:
850eda14cbcSMatt Macy     spin_unlock_irqrestore(&tq->tq_lock, irqflags);
851eda14cbcSMatt Macy }
852eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_dispatch_ent);
853eda14cbcSMatt Macy
854eda14cbcSMatt Macy int
855eda14cbcSMatt Macy taskq_empty_ent(taskq_ent_t *t)
856eda14cbcSMatt Macy {
857eda14cbcSMatt Macy     return (list_empty(&t->tqent_list));
858eda14cbcSMatt Macy }
859eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_empty_ent);
860eda14cbcSMatt Macy
861eda14cbcSMatt Macy void
862eda14cbcSMatt Macy taskq_init_ent(taskq_ent_t *t)
863eda14cbcSMatt Macy {
864eda14cbcSMatt Macy     spin_lock_init(&t->tqent_lock);
865eda14cbcSMatt Macy     init_waitqueue_head(&t->tqent_waitq);
866eda14cbcSMatt Macy     timer_setup(&t->tqent_timer, NULL, 0);
867eda14cbcSMatt Macy     INIT_LIST_HEAD(&t->tqent_list);
868eda14cbcSMatt Macy     t->tqent_id = 0;
869eda14cbcSMatt Macy     t->tqent_func = NULL;
870eda14cbcSMatt Macy     t->tqent_arg = NULL;
871eda14cbcSMatt Macy     t->tqent_flags = 0;
872eda14cbcSMatt Macy     t->tqent_taskq = NULL;
873eda14cbcSMatt Macy }
874eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_init_ent);
875eda14cbcSMatt Macy
876eda14cbcSMatt Macy /*
877eda14cbcSMatt Macy  * Return the next pending task, preference is given to tasks on the
878eda14cbcSMatt Macy  * priority list which were dispatched with TQ_FRONT.
879eda14cbcSMatt Macy  */
880eda14cbcSMatt Macy static taskq_ent_t *
881eda14cbcSMatt Macy taskq_next_ent(taskq_t *tq)
882eda14cbcSMatt Macy {
883eda14cbcSMatt Macy     struct list_head *list;
884eda14cbcSMatt Macy
885eda14cbcSMatt Macy     if (!list_empty(&tq->tq_prio_list))
886eda14cbcSMatt Macy         list = &tq->tq_prio_list;
887eda14cbcSMatt Macy     else if (!list_empty(&tq->tq_pend_list))
888eda14cbcSMatt Macy         list = &tq->tq_pend_list;
889eda14cbcSMatt Macy     else
890eda14cbcSMatt Macy         return (NULL);
891eda14cbcSMatt Macy
892eda14cbcSMatt Macy     return (list_entry(list->next, taskq_ent_t, tqent_list));
893eda14cbcSMatt Macy }
894eda14cbcSMatt Macy
895eda14cbcSMatt Macy /*
896eda14cbcSMatt Macy  * Spawns a new thread for the specified taskq.
897eda14cbcSMatt Macy  */
898eda14cbcSMatt Macy static void
899eda14cbcSMatt Macy taskq_thread_spawn_task(void *arg)
900eda14cbcSMatt Macy {
901eda14cbcSMatt Macy     taskq_t *tq = (taskq_t *)arg;
902eda14cbcSMatt Macy     unsigned long flags;
903eda14cbcSMatt Macy
904eda14cbcSMatt Macy     if (taskq_thread_create(tq) == NULL) {
905eda14cbcSMatt Macy         /* restore spawning count if failed */
906eda14cbcSMatt Macy         spin_lock_irqsave_nested(&tq->tq_lock, flags,
907eda14cbcSMatt Macy             tq->tq_lock_class);
908eda14cbcSMatt Macy         tq->tq_nspawn--;
909eda14cbcSMatt Macy         spin_unlock_irqrestore(&tq->tq_lock, flags);
910eda14cbcSMatt Macy     }
911eda14cbcSMatt Macy }
912eda14cbcSMatt Macy
913eda14cbcSMatt Macy /*
914eda14cbcSMatt Macy  * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
915eda14cbcSMatt Macy  * current number of threads is insufficient to handle the pending tasks.
916eda14cbcSMatt Macy  * These new threads must be created by the dedicated dynamic_taskq to avoid
917eda14cbcSMatt Macy  * deadlocks between thread creation and memory reclaim. The system_taskq,
918eda14cbcSMatt Macy  * which is also a dynamic taskq, cannot be safely used for this.
919eda14cbcSMatt Macy  */
920eda14cbcSMatt Macy static int
921eda14cbcSMatt Macy taskq_thread_spawn(taskq_t *tq)
922eda14cbcSMatt Macy {
923eda14cbcSMatt Macy     int spawning = 0;
924eda14cbcSMatt Macy
925eda14cbcSMatt Macy     if (!(tq->tq_flags & TASKQ_DYNAMIC))
926eda14cbcSMatt Macy         return (0);
927eda14cbcSMatt Macy
928e2257b31SMartin Matuska     tq->lastspawnstop = jiffies;
929eda14cbcSMatt Macy     if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
930eda14cbcSMatt Macy         (tq->tq_flags & TASKQ_ACTIVE)) {
931eda14cbcSMatt Macy         spawning = (++tq->tq_nspawn);
932eda14cbcSMatt Macy         taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
933eda14cbcSMatt Macy             tq, TQ_NOSLEEP);
934eda14cbcSMatt Macy     }
935eda14cbcSMatt Macy
936eda14cbcSMatt Macy     return (spawning);
937eda14cbcSMatt Macy }
938eda14cbcSMatt Macy
939eda14cbcSMatt Macy /*
940e2257b31SMartin Matuska  * Threads in a dynamic taskq may exit once there is no more work to do.
941e2257b31SMartin Matuska  * To prevent threads from being created and destroyed too often, limit
942e2257b31SMartin Matuska  * the exit rate to one per spl_taskq_thread_timeout_ms.
943eda14cbcSMatt Macy  *
944eda14cbcSMatt Macy  * The first thread in the thread list is treated as the primary thread.
945eda14cbcSMatt Macy  * There is nothing special about the primary thread, but to keep the taskq
946eda14cbcSMatt Macy  * pids from changing we opt to make it long running.
947eda14cbcSMatt Macy  */
948eda14cbcSMatt Macy static int
949eda14cbcSMatt Macy taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
950eda14cbcSMatt Macy {
951e2257b31SMartin Matuska     ASSERT(!taskq_next_ent(tq));
952e2257b31SMartin Matuska     if (!(tq->tq_flags & TASKQ_DYNAMIC) || !spl_taskq_thread_dynamic)
953eda14cbcSMatt Macy         return (0);
954e2257b31SMartin Matuska     if (!(tq->tq_flags & TASKQ_ACTIVE))
955e2257b31SMartin Matuska         return (1);
956eda14cbcSMatt Macy     if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
957eda14cbcSMatt Macy         tqt_thread_list) == tqt)
958eda14cbcSMatt Macy         return (0);
959e2257b31SMartin Matuska     ASSERT3U(tq->tq_nthreads, >, 1);
960e2257b31SMartin Matuska     if (tq->tq_nspawn != 0)
961e2257b31SMartin Matuska         return (0);
962e2257b31SMartin Matuska     if (time_before(jiffies, tq->lastspawnstop +
9637b5e6873SMartin Matuska         msecs_to_jiffies(spl_taskq_thread_timeout_ms)))
964e2257b31SMartin Matuska         return (0);
965e2257b31SMartin Matuska     tq->lastspawnstop = jiffies;
9667b5e6873SMartin Matuska     return (1);
967eda14cbcSMatt Macy }
968eda14cbcSMatt Macy
969eda14cbcSMatt Macy static int
970eda14cbcSMatt Macy taskq_thread(void *args)
971eda14cbcSMatt Macy {
972eda14cbcSMatt Macy     DECLARE_WAITQUEUE(wait, current);
973eda14cbcSMatt Macy     sigset_t blocked;
974eda14cbcSMatt Macy     taskq_thread_t *tqt = args;
975eda14cbcSMatt Macy     taskq_t *tq;
976eda14cbcSMatt Macy     taskq_ent_t *t;
977eda14cbcSMatt Macy     int seq_tasks = 0;
978eda14cbcSMatt Macy     unsigned long flags;
979eda14cbcSMatt Macy     taskq_ent_t dup_task = {};
980eda14cbcSMatt Macy
981eda14cbcSMatt Macy     ASSERT(tqt);
982eda14cbcSMatt Macy     ASSERT(tqt->tqt_tq);
983eda14cbcSMatt Macy     tq = tqt->tqt_tq;
984eda14cbcSMatt Macy     current->flags |= PF_NOFREEZE;
985eda14cbcSMatt Macy
986eda14cbcSMatt Macy     (void) spl_fstrans_mark();
987eda14cbcSMatt Macy
988eda14cbcSMatt Macy     sigfillset(&blocked);
989eda14cbcSMatt Macy     sigprocmask(SIG_BLOCK, &blocked, NULL);
990eda14cbcSMatt Macy     flush_signals(current);
991eda14cbcSMatt Macy
992eda14cbcSMatt Macy     tsd_set(taskq_tsd, tq);
993eda14cbcSMatt Macy     spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
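/*
 * Illustrative sketch (editorial addition, not used by this file): the
 * tsd_set() call above is what allows code running on this worker thread
 * to identify its own taskq via taskq_of_curthread() and taskq_member().
 * A consumer's task function could, for example, assert that it really is
 * running in taskq context:
 *
 *    static void
 *    my_task_func(void *arg)        // hypothetical consumer callback
 *    {
 *        taskq_t *tq = taskq_of_curthread();
 *        ASSERT(taskq_member(tq, curthread));
 *        // ... do the work described by arg ...
 *    }
 *
 * where the entry was queued with something like
 * taskq_dispatch(tq, my_task_func, arg, TQ_SLEEP).
 */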
994eda14cbcSMatt Macy /* 995eda14cbcSMatt Macy * If we are dynamically spawned, decrease spawning count. Note that 996eda14cbcSMatt Macy * we could be created during taskq_create, in which case we shouldn't 997eda14cbcSMatt Macy * do the decrement. But it's fine because taskq_create will reset 998eda14cbcSMatt Macy * tq_nspawn later. 999eda14cbcSMatt Macy */ 1000eda14cbcSMatt Macy if (tq->tq_flags & TASKQ_DYNAMIC) 1001eda14cbcSMatt Macy tq->tq_nspawn--; 1002eda14cbcSMatt Macy 1003eda14cbcSMatt Macy /* Immediately exit if more threads than allowed were created. */ 1004eda14cbcSMatt Macy if (tq->tq_nthreads >= tq->tq_maxthreads) 1005eda14cbcSMatt Macy goto error; 1006eda14cbcSMatt Macy 1007eda14cbcSMatt Macy tq->tq_nthreads++; 1008eda14cbcSMatt Macy list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list); 1009eda14cbcSMatt Macy wake_up(&tq->tq_wait_waitq); 1010eda14cbcSMatt Macy set_current_state(TASK_INTERRUPTIBLE); 1011eda14cbcSMatt Macy 1012e2df9bb4SMartin Matuska TQSTAT_INC(tq, threads_total); 1013e2df9bb4SMartin Matuska 1014eda14cbcSMatt Macy while (!kthread_should_stop()) { 1015eda14cbcSMatt Macy 1016eda14cbcSMatt Macy if (list_empty(&tq->tq_pend_list) && 1017eda14cbcSMatt Macy list_empty(&tq->tq_prio_list)) { 1018eda14cbcSMatt Macy 1019e2257b31SMartin Matuska if (taskq_thread_should_stop(tq, tqt)) 1020eda14cbcSMatt Macy break; 1021eda14cbcSMatt Macy 1022eda14cbcSMatt Macy add_wait_queue_exclusive(&tq->tq_work_waitq, &wait); 1023eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1024eda14cbcSMatt Macy 1025e2df9bb4SMartin Matuska TQSTAT_INC(tq, thread_sleeps); 1026e2df9bb4SMartin Matuska TQSTAT_INC(tq, threads_idle); 1027e2df9bb4SMartin Matuska 1028eda14cbcSMatt Macy schedule(); 1029eda14cbcSMatt Macy seq_tasks = 0; 1030eda14cbcSMatt Macy 1031e2df9bb4SMartin Matuska TQSTAT_DEC(tq, threads_idle); 1032e2df9bb4SMartin Matuska TQSTAT_INC(tq, thread_wakeups); 1033e2df9bb4SMartin Matuska 1034eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 1035eda14cbcSMatt Macy tq->tq_lock_class); 1036eda14cbcSMatt Macy remove_wait_queue(&tq->tq_work_waitq, &wait); 1037eda14cbcSMatt Macy } else { 1038eda14cbcSMatt Macy __set_current_state(TASK_RUNNING); 1039eda14cbcSMatt Macy } 1040eda14cbcSMatt Macy 1041eda14cbcSMatt Macy if ((t = taskq_next_ent(tq)) != NULL) { 1042eda14cbcSMatt Macy list_del_init(&t->tqent_list); 1043e2df9bb4SMartin Matuska TQSTAT_DEC_LIST(tq, t); 1044e2df9bb4SMartin Matuska TQSTAT_DEC(tq, tasks_total); 1045eda14cbcSMatt Macy 1046eda14cbcSMatt Macy /* 1047eda14cbcSMatt Macy * A TQENT_FLAG_PREALLOC task may be reused or freed 1048eda14cbcSMatt Macy * during the task function call. Store tqent_id and 1049eda14cbcSMatt Macy * tqent_flags here. 1050eda14cbcSMatt Macy * 1051eda14cbcSMatt Macy * Also use an on stack taskq_ent_t for tqt_task 1052eda14cbcSMatt Macy * assignment in this case; we want to make sure 1053eda14cbcSMatt Macy * to duplicate all fields, so the values are 1054eda14cbcSMatt Macy * correct when it's accessed via DTRACE_PROBE*. 
1055eda14cbcSMatt Macy */ 1056eda14cbcSMatt Macy tqt->tqt_id = t->tqent_id; 1057eda14cbcSMatt Macy tqt->tqt_flags = t->tqent_flags; 1058eda14cbcSMatt Macy 1059eda14cbcSMatt Macy if (t->tqent_flags & TQENT_FLAG_PREALLOC) { 1060eda14cbcSMatt Macy dup_task = *t; 1061eda14cbcSMatt Macy t = &dup_task; 1062eda14cbcSMatt Macy } 1063eda14cbcSMatt Macy tqt->tqt_task = t; 1064eda14cbcSMatt Macy 1065eda14cbcSMatt Macy taskq_insert_in_order(tq, tqt); 1066eda14cbcSMatt Macy tq->tq_nactive++; 1067eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1068eda14cbcSMatt Macy 1069e2df9bb4SMartin Matuska TQSTAT_INC(tq, threads_active); 1070eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t); 1071eda14cbcSMatt Macy 1072eda14cbcSMatt Macy /* Perform the requested task */ 1073eda14cbcSMatt Macy t->tqent_func(t->tqent_arg); 1074eda14cbcSMatt Macy 1075eda14cbcSMatt Macy DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t); 1076eda14cbcSMatt Macy 1077e2df9bb4SMartin Matuska TQSTAT_DEC(tq, threads_active); 1078e2df9bb4SMartin Matuska if ((t->tqent_flags & TQENT_LIST_MASK) == 1079e2df9bb4SMartin Matuska TQENT_LIST_PENDING) 1080e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_executed_normal); 1081e2df9bb4SMartin Matuska else 1082e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_executed_priority); 1083e2df9bb4SMartin Matuska TQSTAT_INC(tq, tasks_executed); 1084e2df9bb4SMartin Matuska 1085eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 1086eda14cbcSMatt Macy tq->tq_lock_class); 1087e2df9bb4SMartin Matuska 1088eda14cbcSMatt Macy tq->tq_nactive--; 1089eda14cbcSMatt Macy list_del_init(&tqt->tqt_active_list); 1090eda14cbcSMatt Macy tqt->tqt_task = NULL; 1091eda14cbcSMatt Macy 1092eda14cbcSMatt Macy /* For prealloc'd tasks, we don't free anything. */ 1093eda14cbcSMatt Macy if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC)) 1094eda14cbcSMatt Macy task_done(tq, t); 1095eda14cbcSMatt Macy 1096eda14cbcSMatt Macy /* 1097eda14cbcSMatt Macy * When the current lowest outstanding taskqid is 1098eda14cbcSMatt Macy * done calculate the new lowest outstanding id 1099eda14cbcSMatt Macy */ 1100eda14cbcSMatt Macy if (tq->tq_lowest_id == tqt->tqt_id) { 1101eda14cbcSMatt Macy tq->tq_lowest_id = taskq_lowest_id(tq); 1102eda14cbcSMatt Macy ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id); 1103eda14cbcSMatt Macy } 1104eda14cbcSMatt Macy 1105eda14cbcSMatt Macy /* Spawn additional taskq threads if required. 
*/ 1106eda14cbcSMatt Macy if ((++seq_tasks) > spl_taskq_thread_sequential && 1107eda14cbcSMatt Macy taskq_thread_spawn(tq)) 1108eda14cbcSMatt Macy seq_tasks = 0; 1109eda14cbcSMatt Macy 1110eda14cbcSMatt Macy tqt->tqt_id = TASKQID_INVALID; 1111eda14cbcSMatt Macy tqt->tqt_flags = 0; 1112eda14cbcSMatt Macy wake_up_all(&tq->tq_wait_waitq); 1113e2df9bb4SMartin Matuska } else 1114e2df9bb4SMartin Matuska TQSTAT_INC(tq, thread_wakeups_nowork); 1115eda14cbcSMatt Macy 1116eda14cbcSMatt Macy set_current_state(TASK_INTERRUPTIBLE); 1117eda14cbcSMatt Macy 1118eda14cbcSMatt Macy } 1119eda14cbcSMatt Macy 1120eda14cbcSMatt Macy __set_current_state(TASK_RUNNING); 1121eda14cbcSMatt Macy tq->tq_nthreads--; 1122eda14cbcSMatt Macy list_del_init(&tqt->tqt_thread_list); 1123e2df9bb4SMartin Matuska 1124e2df9bb4SMartin Matuska TQSTAT_DEC(tq, threads_total); 1125e2df9bb4SMartin Matuska TQSTAT_INC(tq, threads_destroyed); 1126e2df9bb4SMartin Matuska 1127eda14cbcSMatt Macy error: 1128eda14cbcSMatt Macy kmem_free(tqt, sizeof (taskq_thread_t)); 1129eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1130eda14cbcSMatt Macy 1131eda14cbcSMatt Macy tsd_set(taskq_tsd, NULL); 1132184c1b94SMartin Matuska thread_exit(); 1133eda14cbcSMatt Macy 1134eda14cbcSMatt Macy return (0); 1135eda14cbcSMatt Macy } 1136eda14cbcSMatt Macy 1137eda14cbcSMatt Macy static taskq_thread_t * 1138eda14cbcSMatt Macy taskq_thread_create(taskq_t *tq) 1139eda14cbcSMatt Macy { 1140eda14cbcSMatt Macy static int last_used_cpu = 0; 1141eda14cbcSMatt Macy taskq_thread_t *tqt; 1142eda14cbcSMatt Macy 1143eda14cbcSMatt Macy tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE); 1144eda14cbcSMatt Macy INIT_LIST_HEAD(&tqt->tqt_thread_list); 1145eda14cbcSMatt Macy INIT_LIST_HEAD(&tqt->tqt_active_list); 1146eda14cbcSMatt Macy tqt->tqt_tq = tq; 1147eda14cbcSMatt Macy tqt->tqt_id = TASKQID_INVALID; 1148eda14cbcSMatt Macy 1149eda14cbcSMatt Macy tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt, 1150eda14cbcSMatt Macy "%s", tq->tq_name); 1151eda14cbcSMatt Macy if (tqt->tqt_thread == NULL) { 1152eda14cbcSMatt Macy kmem_free(tqt, sizeof (taskq_thread_t)); 1153eda14cbcSMatt Macy return (NULL); 1154eda14cbcSMatt Macy } 1155eda14cbcSMatt Macy 1156eda14cbcSMatt Macy if (spl_taskq_thread_bind) { 1157eda14cbcSMatt Macy last_used_cpu = (last_used_cpu + 1) % num_online_cpus(); 1158eda14cbcSMatt Macy kthread_bind(tqt->tqt_thread, last_used_cpu); 1159eda14cbcSMatt Macy } 1160eda14cbcSMatt Macy 1161eda14cbcSMatt Macy if (spl_taskq_thread_priority) 1162eda14cbcSMatt Macy set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri)); 1163eda14cbcSMatt Macy 1164eda14cbcSMatt Macy wake_up_process(tqt->tqt_thread); 1165eda14cbcSMatt Macy 1166e2df9bb4SMartin Matuska TQSTAT_INC(tq, threads_created); 1167e2df9bb4SMartin Matuska 1168eda14cbcSMatt Macy return (tqt); 1169eda14cbcSMatt Macy } 1170eda14cbcSMatt Macy 1171e2df9bb4SMartin Matuska static void 1172e2df9bb4SMartin Matuska taskq_stats_init(taskq_t *tq) 1173e2df9bb4SMartin Matuska { 1174e2df9bb4SMartin Matuska taskq_sums_t *tqs = &tq->tq_sums; 1175e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_threads_active, 0); 1176e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_threads_idle, 0); 1177e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_threads_total, 0); 1178e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_pending, 0); 1179e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_priority, 0); 1180e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_total, 0); 1181e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_delayed, 0); 1182e2df9bb4SMartin 
Matuska wmsum_init(&tqs->tqs_entries_free, 0); 1183e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_threads_created, 0); 1184e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_threads_destroyed, 0); 1185e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_dispatched, 0); 1186e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_dispatched_delayed, 0); 1187e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_executed_normal, 0); 1188e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_executed_priority, 0); 1189e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_executed, 0); 1190e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_delayed_requeued, 0); 1191e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_tasks_cancelled, 0); 1192e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_thread_wakeups, 0); 1193e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_thread_wakeups_nowork, 0); 1194e2df9bb4SMartin Matuska wmsum_init(&tqs->tqs_thread_sleeps, 0); 1195e2df9bb4SMartin Matuska } 1196e2df9bb4SMartin Matuska 1197e2df9bb4SMartin Matuska static void 1198e2df9bb4SMartin Matuska taskq_stats_fini(taskq_t *tq) 1199e2df9bb4SMartin Matuska { 1200e2df9bb4SMartin Matuska taskq_sums_t *tqs = &tq->tq_sums; 1201e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_threads_active); 1202e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_threads_idle); 1203e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_threads_total); 1204e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_pending); 1205e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_priority); 1206e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_total); 1207e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_delayed); 1208e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_entries_free); 1209e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_threads_created); 1210e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_threads_destroyed); 1211e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_dispatched); 1212e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_dispatched_delayed); 1213e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_executed_normal); 1214e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_executed_priority); 1215e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_executed); 1216e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_delayed_requeued); 1217e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_tasks_cancelled); 1218e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_thread_wakeups); 1219e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_thread_wakeups_nowork); 1220e2df9bb4SMartin Matuska wmsum_fini(&tqs->tqs_thread_sleeps); 1221e2df9bb4SMartin Matuska } 1222e2df9bb4SMartin Matuska 1223e2df9bb4SMartin Matuska static int 1224e2df9bb4SMartin Matuska taskq_kstats_update(kstat_t *ksp, int rw) 1225e2df9bb4SMartin Matuska { 1226e2df9bb4SMartin Matuska if (rw == KSTAT_WRITE) 1227e2df9bb4SMartin Matuska return (EACCES); 1228e2df9bb4SMartin Matuska 1229e2df9bb4SMartin Matuska taskq_t *tq = ksp->ks_private; 1230e2df9bb4SMartin Matuska taskq_kstats_t *tqks = ksp->ks_data; 1231e2df9bb4SMartin Matuska 1232e2df9bb4SMartin Matuska tqks->tqks_threads_max.value.ui64 = tq->tq_maxthreads; 1233e2df9bb4SMartin Matuska tqks->tqks_entry_pool_min.value.ui64 = tq->tq_minalloc; 1234e2df9bb4SMartin Matuska tqks->tqks_entry_pool_max.value.ui64 = tq->tq_maxalloc; 1235e2df9bb4SMartin Matuska 1236e2df9bb4SMartin Matuska taskq_sums_t *tqs = &tq->tq_sums; 1237e2df9bb4SMartin Matuska 1238e2df9bb4SMartin Matuska tqks->tqks_threads_active.value.ui64 = 1239e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_threads_active); 1240e2df9bb4SMartin Matuska 
tqks->tqks_threads_idle.value.ui64 = 1241e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_threads_idle); 1242e2df9bb4SMartin Matuska tqks->tqks_threads_total.value.ui64 = 1243e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_threads_total); 1244e2df9bb4SMartin Matuska tqks->tqks_tasks_pending.value.ui64 = 1245e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_pending); 1246e2df9bb4SMartin Matuska tqks->tqks_tasks_priority.value.ui64 = 1247e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_priority); 1248e2df9bb4SMartin Matuska tqks->tqks_tasks_total.value.ui64 = 1249e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_total); 1250e2df9bb4SMartin Matuska tqks->tqks_tasks_delayed.value.ui64 = 1251e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_delayed); 1252e2df9bb4SMartin Matuska tqks->tqks_entries_free.value.ui64 = 1253e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_entries_free); 1254e2df9bb4SMartin Matuska tqks->tqks_threads_created.value.ui64 = 1255e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_threads_created); 1256e2df9bb4SMartin Matuska tqks->tqks_threads_destroyed.value.ui64 = 1257e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_threads_destroyed); 1258e2df9bb4SMartin Matuska tqks->tqks_tasks_dispatched.value.ui64 = 1259e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_dispatched); 1260e2df9bb4SMartin Matuska tqks->tqks_tasks_dispatched_delayed.value.ui64 = 1261e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_dispatched_delayed); 1262e2df9bb4SMartin Matuska tqks->tqks_tasks_executed_normal.value.ui64 = 1263e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_executed_normal); 1264e2df9bb4SMartin Matuska tqks->tqks_tasks_executed_priority.value.ui64 = 1265e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_executed_priority); 1266e2df9bb4SMartin Matuska tqks->tqks_tasks_executed.value.ui64 = 1267e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_executed); 1268e2df9bb4SMartin Matuska tqks->tqks_tasks_delayed_requeued.value.ui64 = 1269e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_delayed_requeued); 1270e2df9bb4SMartin Matuska tqks->tqks_tasks_cancelled.value.ui64 = 1271e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_tasks_cancelled); 1272e2df9bb4SMartin Matuska tqks->tqks_thread_wakeups.value.ui64 = 1273e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_thread_wakeups); 1274e2df9bb4SMartin Matuska tqks->tqks_thread_wakeups_nowork.value.ui64 = 1275e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_thread_wakeups_nowork); 1276e2df9bb4SMartin Matuska tqks->tqks_thread_sleeps.value.ui64 = 1277e2df9bb4SMartin Matuska wmsum_value(&tqs->tqs_thread_sleeps); 1278e2df9bb4SMartin Matuska 1279e2df9bb4SMartin Matuska return (0); 1280e2df9bb4SMartin Matuska } 1281e2df9bb4SMartin Matuska 1282e2df9bb4SMartin Matuska static void 1283e2df9bb4SMartin Matuska taskq_kstats_init(taskq_t *tq) 1284e2df9bb4SMartin Matuska { 1285e2df9bb4SMartin Matuska char name[TASKQ_NAMELEN+5]; /* 5 for dot, 3x instance digits, null */ 1286e2df9bb4SMartin Matuska snprintf(name, sizeof (name), "%s.%d", tq->tq_name, tq->tq_instance); 1287e2df9bb4SMartin Matuska 1288e2df9bb4SMartin Matuska kstat_t *ksp = kstat_create("taskq", 0, name, "misc", 1289e2df9bb4SMartin Matuska KSTAT_TYPE_NAMED, sizeof (taskq_kstats_t) / sizeof (kstat_named_t), 1290e2df9bb4SMartin Matuska KSTAT_FLAG_VIRTUAL); 1291e2df9bb4SMartin Matuska 1292e2df9bb4SMartin Matuska if (ksp == NULL) 1293e2df9bb4SMartin Matuska return; 1294e2df9bb4SMartin Matuska 1295e2df9bb4SMartin Matuska ksp->ks_private = tq; 1296e2df9bb4SMartin Matuska ksp->ks_update = taskq_kstats_update; 
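/* KSTAT_FLAG_VIRTUAL: the kstat framework does not allocate ks_data, so provide it here from the template. */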
1297e2df9bb4SMartin Matuska ksp->ks_data = kmem_alloc(sizeof (taskq_kstats_t), KM_SLEEP); 1298e2df9bb4SMartin Matuska memcpy(ksp->ks_data, &taskq_kstats_template, sizeof (taskq_kstats_t)); 1299e2df9bb4SMartin Matuska kstat_install(ksp); 1300e2df9bb4SMartin Matuska 1301e2df9bb4SMartin Matuska tq->tq_ksp = ksp; 1302e2df9bb4SMartin Matuska } 1303e2df9bb4SMartin Matuska 1304e2df9bb4SMartin Matuska static void 1305e2df9bb4SMartin Matuska taskq_kstats_fini(taskq_t *tq) 1306e2df9bb4SMartin Matuska { 1307e2df9bb4SMartin Matuska if (tq->tq_ksp == NULL) 1308e2df9bb4SMartin Matuska return; 1309e2df9bb4SMartin Matuska 1310e2df9bb4SMartin Matuska kmem_free(tq->tq_ksp->ks_data, sizeof (taskq_kstats_t)); 1311e2df9bb4SMartin Matuska kstat_delete(tq->tq_ksp); 1312e2df9bb4SMartin Matuska 1313e2df9bb4SMartin Matuska tq->tq_ksp = NULL; 1314e2df9bb4SMartin Matuska } 1315e2df9bb4SMartin Matuska 1316eda14cbcSMatt Macy taskq_t * 13177877fdebSMatt Macy taskq_create(const char *name, int threads_arg, pri_t pri, 1318eda14cbcSMatt Macy int minalloc, int maxalloc, uint_t flags) 1319eda14cbcSMatt Macy { 1320eda14cbcSMatt Macy taskq_t *tq; 1321eda14cbcSMatt Macy taskq_thread_t *tqt; 1322eda14cbcSMatt Macy int count = 0, rc = 0, i; 1323eda14cbcSMatt Macy unsigned long irqflags; 13247877fdebSMatt Macy int nthreads = threads_arg; 1325eda14cbcSMatt Macy 1326eda14cbcSMatt Macy ASSERT(name != NULL); 1327eda14cbcSMatt Macy ASSERT(minalloc >= 0); 1328eda14cbcSMatt Macy ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */ 1329eda14cbcSMatt Macy 1330eda14cbcSMatt Macy /* Scale the number of threads using nthreads as a percentage */ 1331eda14cbcSMatt Macy if (flags & TASKQ_THREADS_CPU_PCT) { 1332eda14cbcSMatt Macy ASSERT(nthreads <= 100); 1333eda14cbcSMatt Macy ASSERT(nthreads >= 0); 13347877fdebSMatt Macy nthreads = MIN(threads_arg, 100); 1335eda14cbcSMatt Macy nthreads = MAX(nthreads, 0); 1336eda14cbcSMatt Macy nthreads = MAX((num_online_cpus() * nthreads) /100, 1); 1337eda14cbcSMatt Macy } 1338eda14cbcSMatt Macy 1339eda14cbcSMatt Macy tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE); 1340eda14cbcSMatt Macy if (tq == NULL) 1341eda14cbcSMatt Macy return (NULL); 1342eda14cbcSMatt Macy 13437877fdebSMatt Macy tq->tq_hp_support = B_FALSE; 1344*7a7741afSMartin Matuska 13457877fdebSMatt Macy if (flags & TASKQ_THREADS_CPU_PCT) { 13467877fdebSMatt Macy tq->tq_hp_support = B_TRUE; 13477877fdebSMatt Macy if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state, 13487877fdebSMatt Macy &tq->tq_hp_cb_node) != 0) { 13497877fdebSMatt Macy kmem_free(tq, sizeof (*tq)); 13507877fdebSMatt Macy return (NULL); 13517877fdebSMatt Macy } 13527877fdebSMatt Macy } 13537877fdebSMatt Macy 1354eda14cbcSMatt Macy spin_lock_init(&tq->tq_lock); 1355eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_thread_list); 1356eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_active_list); 1357eda14cbcSMatt Macy tq->tq_name = kmem_strdup(name); 1358eda14cbcSMatt Macy tq->tq_nactive = 0; 1359eda14cbcSMatt Macy tq->tq_nthreads = 0; 1360eda14cbcSMatt Macy tq->tq_nspawn = 0; 1361eda14cbcSMatt Macy tq->tq_maxthreads = nthreads; 13627877fdebSMatt Macy tq->tq_cpu_pct = threads_arg; 1363eda14cbcSMatt Macy tq->tq_pri = pri; 1364eda14cbcSMatt Macy tq->tq_minalloc = minalloc; 1365eda14cbcSMatt Macy tq->tq_maxalloc = maxalloc; 1366eda14cbcSMatt Macy tq->tq_nalloc = 0; 1367eda14cbcSMatt Macy tq->tq_flags = (flags | TASKQ_ACTIVE); 1368eda14cbcSMatt Macy tq->tq_next_id = TASKQID_INITIAL; 1369eda14cbcSMatt Macy tq->tq_lowest_id = TASKQID_INITIAL; 1370e2257b31SMartin Matuska tq->lastspawnstop = jiffies; 
1371eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_free_list); 1372eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_pend_list); 1373eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_prio_list); 1374eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_delay_list); 1375eda14cbcSMatt Macy init_waitqueue_head(&tq->tq_work_waitq); 1376eda14cbcSMatt Macy init_waitqueue_head(&tq->tq_wait_waitq); 1377eda14cbcSMatt Macy tq->tq_lock_class = TQ_LOCK_GENERAL; 1378eda14cbcSMatt Macy INIT_LIST_HEAD(&tq->tq_taskqs); 1379e2df9bb4SMartin Matuska taskq_stats_init(tq); 1380eda14cbcSMatt Macy 1381eda14cbcSMatt Macy if (flags & TASKQ_PREPOPULATE) { 1382eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, irqflags, 1383eda14cbcSMatt Macy tq->tq_lock_class); 1384eda14cbcSMatt Macy 1385eda14cbcSMatt Macy for (i = 0; i < minalloc; i++) 1386eda14cbcSMatt Macy task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW, 1387eda14cbcSMatt Macy &irqflags)); 1388eda14cbcSMatt Macy 1389eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, irqflags); 1390eda14cbcSMatt Macy } 1391eda14cbcSMatt Macy 1392eda14cbcSMatt Macy if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) 1393eda14cbcSMatt Macy nthreads = 1; 1394eda14cbcSMatt Macy 1395eda14cbcSMatt Macy for (i = 0; i < nthreads; i++) { 1396eda14cbcSMatt Macy tqt = taskq_thread_create(tq); 1397eda14cbcSMatt Macy if (tqt == NULL) 1398eda14cbcSMatt Macy rc = 1; 1399eda14cbcSMatt Macy else 1400eda14cbcSMatt Macy count++; 1401eda14cbcSMatt Macy } 1402eda14cbcSMatt Macy 1403eda14cbcSMatt Macy /* Wait for all threads to be started before potential destroy */ 1404eda14cbcSMatt Macy wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count); 1405eda14cbcSMatt Macy /* 1406eda14cbcSMatt Macy * taskq_thread might have touched nspawn, but we don't want them to 1407eda14cbcSMatt Macy * because they're not dynamically spawned. 
So we reset it to 0 1408eda14cbcSMatt Macy */ 1409eda14cbcSMatt Macy tq->tq_nspawn = 0; 1410eda14cbcSMatt Macy 1411eda14cbcSMatt Macy if (rc) { 1412eda14cbcSMatt Macy taskq_destroy(tq); 1413e2df9bb4SMartin Matuska return (NULL); 1414e2df9bb4SMartin Matuska } 1415e2df9bb4SMartin Matuska 1416eda14cbcSMatt Macy down_write(&tq_list_sem); 1417eda14cbcSMatt Macy tq->tq_instance = taskq_find_by_name(name) + 1; 1418eda14cbcSMatt Macy list_add_tail(&tq->tq_taskqs, &tq_list); 1419eda14cbcSMatt Macy up_write(&tq_list_sem); 1420e2df9bb4SMartin Matuska 1421e2df9bb4SMartin Matuska /* Install kstats late, because the name includes tq_instance */ 1422e2df9bb4SMartin Matuska taskq_kstats_init(tq); 1423eda14cbcSMatt Macy 1424eda14cbcSMatt Macy return (tq); 1425eda14cbcSMatt Macy } 1426eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_create); 1427eda14cbcSMatt Macy 1428eda14cbcSMatt Macy void 1429eda14cbcSMatt Macy taskq_destroy(taskq_t *tq) 1430eda14cbcSMatt Macy { 1431eda14cbcSMatt Macy struct task_struct *thread; 1432eda14cbcSMatt Macy taskq_thread_t *tqt; 1433eda14cbcSMatt Macy taskq_ent_t *t; 1434eda14cbcSMatt Macy unsigned long flags; 1435eda14cbcSMatt Macy 1436eda14cbcSMatt Macy ASSERT(tq); 1437eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 1438eda14cbcSMatt Macy tq->tq_flags &= ~TASKQ_ACTIVE; 1439eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1440eda14cbcSMatt Macy 14417877fdebSMatt Macy if (tq->tq_hp_support) { 14427877fdebSMatt Macy VERIFY0(cpuhp_state_remove_instance_nocalls( 14437877fdebSMatt Macy spl_taskq_cpuhp_state, &tq->tq_hp_cb_node)); 14447877fdebSMatt Macy } 1445*7a7741afSMartin Matuska 1446eda14cbcSMatt Macy /* 1447eda14cbcSMatt Macy * When TASKQ_ACTIVE is clear new tasks may not be added nor may 1448eda14cbcSMatt Macy * new worker threads be spawned for dynamic taskq. 1449eda14cbcSMatt Macy */ 1450eda14cbcSMatt Macy if (dynamic_taskq != NULL) 1451eda14cbcSMatt Macy taskq_wait_outstanding(dynamic_taskq, 0); 1452eda14cbcSMatt Macy 1453eda14cbcSMatt Macy taskq_wait(tq); 1454eda14cbcSMatt Macy 1455e2df9bb4SMartin Matuska taskq_kstats_fini(tq); 1456e2df9bb4SMartin Matuska 1457eda14cbcSMatt Macy /* remove taskq from global list used by the kstats */ 1458eda14cbcSMatt Macy down_write(&tq_list_sem); 1459eda14cbcSMatt Macy list_del(&tq->tq_taskqs); 1460eda14cbcSMatt Macy up_write(&tq_list_sem); 1461eda14cbcSMatt Macy 1462eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 1463eda14cbcSMatt Macy /* wait for spawning threads to insert themselves to the list */ 1464eda14cbcSMatt Macy while (tq->tq_nspawn) { 1465eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1466eda14cbcSMatt Macy schedule_timeout_interruptible(1); 1467eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 1468eda14cbcSMatt Macy tq->tq_lock_class); 1469eda14cbcSMatt Macy } 1470eda14cbcSMatt Macy 1471eda14cbcSMatt Macy /* 1472eda14cbcSMatt Macy * Signal each thread to exit and block until it does. Each thread 1473eda14cbcSMatt Macy * is responsible for removing itself from the list and freeing its 1474eda14cbcSMatt Macy * taskq_thread_t. This allows for idle threads to opt to remove 1475eda14cbcSMatt Macy * themselves from the taskq. They can be recreated as needed. 
1476eda14cbcSMatt Macy */ 1477eda14cbcSMatt Macy while (!list_empty(&tq->tq_thread_list)) { 1478eda14cbcSMatt Macy tqt = list_entry(tq->tq_thread_list.next, 1479eda14cbcSMatt Macy taskq_thread_t, tqt_thread_list); 1480eda14cbcSMatt Macy thread = tqt->tqt_thread; 1481eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1482eda14cbcSMatt Macy 1483eda14cbcSMatt Macy kthread_stop(thread); 1484eda14cbcSMatt Macy 1485eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, 1486eda14cbcSMatt Macy tq->tq_lock_class); 1487eda14cbcSMatt Macy } 1488eda14cbcSMatt Macy 1489eda14cbcSMatt Macy while (!list_empty(&tq->tq_free_list)) { 1490eda14cbcSMatt Macy t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list); 1491eda14cbcSMatt Macy 1492eda14cbcSMatt Macy ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); 1493eda14cbcSMatt Macy 1494eda14cbcSMatt Macy list_del_init(&t->tqent_list); 1495eda14cbcSMatt Macy task_free(tq, t); 1496eda14cbcSMatt Macy } 1497eda14cbcSMatt Macy 1498eda14cbcSMatt Macy ASSERT0(tq->tq_nthreads); 1499eda14cbcSMatt Macy ASSERT0(tq->tq_nalloc); 1500eda14cbcSMatt Macy ASSERT0(tq->tq_nspawn); 1501eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_thread_list)); 1502eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_active_list)); 1503eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_free_list)); 1504eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_pend_list)); 1505eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_prio_list)); 1506eda14cbcSMatt Macy ASSERT(list_empty(&tq->tq_delay_list)); 1507eda14cbcSMatt Macy 1508eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 1509eda14cbcSMatt Macy 1510e2df9bb4SMartin Matuska taskq_stats_fini(tq); 1511eda14cbcSMatt Macy kmem_strfree(tq->tq_name); 1512eda14cbcSMatt Macy kmem_free(tq, sizeof (taskq_t)); 1513eda14cbcSMatt Macy } 1514eda14cbcSMatt Macy EXPORT_SYMBOL(taskq_destroy); 1515eda14cbcSMatt Macy 151614c2e0a0SMartin Matuska /* 151714c2e0a0SMartin Matuska * Create a taskq with a specified number of pool threads. Allocate 151814c2e0a0SMartin Matuska * and return an array of nthreads kthread_t pointers, one for each 151914c2e0a0SMartin Matuska * thread in the pool. The array is not ordered and must be freed 152014c2e0a0SMartin Matuska * by the caller. 
152114c2e0a0SMartin Matuska */ 152214c2e0a0SMartin Matuska taskq_t * 152314c2e0a0SMartin Matuska taskq_create_synced(const char *name, int nthreads, pri_t pri, 152414c2e0a0SMartin Matuska int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp) 152514c2e0a0SMartin Matuska { 152614c2e0a0SMartin Matuska taskq_t *tq; 152714c2e0a0SMartin Matuska taskq_thread_t *tqt; 152814c2e0a0SMartin Matuska int i = 0; 152914c2e0a0SMartin Matuska kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads, 153014c2e0a0SMartin Matuska KM_SLEEP); 153114c2e0a0SMartin Matuska 153214c2e0a0SMartin Matuska flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH); 153314c2e0a0SMartin Matuska 153414c2e0a0SMartin Matuska /* taskq_create spawns all the threads before returning */ 153514c2e0a0SMartin Matuska tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX, 153614c2e0a0SMartin Matuska flags | TASKQ_PREPOPULATE); 153714c2e0a0SMartin Matuska VERIFY(tq != NULL); 153814c2e0a0SMartin Matuska VERIFY(tq->tq_nthreads == nthreads); 153914c2e0a0SMartin Matuska 154014c2e0a0SMartin Matuska list_for_each_entry(tqt, &tq->tq_thread_list, tqt_thread_list) { 154114c2e0a0SMartin Matuska kthreads[i] = tqt->tqt_thread; 154214c2e0a0SMartin Matuska i++; 154314c2e0a0SMartin Matuska } 154414c2e0a0SMartin Matuska 154514c2e0a0SMartin Matuska ASSERT3S(i, ==, nthreads); 154614c2e0a0SMartin Matuska *ktpp = kthreads; 154714c2e0a0SMartin Matuska 154814c2e0a0SMartin Matuska return (tq); 154914c2e0a0SMartin Matuska } 155014c2e0a0SMartin Matuska EXPORT_SYMBOL(taskq_create_synced); 155114c2e0a0SMartin Matuska 1552e2df9bb4SMartin Matuska static kstat_t *taskq_summary_ksp = NULL; 1553e2df9bb4SMartin Matuska 1554e2df9bb4SMartin Matuska static int 1555e2df9bb4SMartin Matuska spl_taskq_kstat_headers(char *buf, size_t size) 1556e2df9bb4SMartin Matuska { 1557e2df9bb4SMartin Matuska size_t n = snprintf(buf, size, 1558e2df9bb4SMartin Matuska "%-20s | %-17s | %-23s\n" 1559e2df9bb4SMartin Matuska "%-20s | %-17s | %-23s\n" 1560e2df9bb4SMartin Matuska "%-20s | %-17s | %-23s\n", 1561e2df9bb4SMartin Matuska "", "threads", "tasks on queue", 1562e2df9bb4SMartin Matuska "taskq name", "tot [act idl] max", " pend [ norm high] dly", 1563e2df9bb4SMartin Matuska "--------------------", "-----------------", 1564e2df9bb4SMartin Matuska "-----------------------"); 1565e2df9bb4SMartin Matuska return (n >= size ? 
ENOMEM : 0); 1566e2df9bb4SMartin Matuska } 1567e2df9bb4SMartin Matuska 1568e2df9bb4SMartin Matuska static int 1569e2df9bb4SMartin Matuska spl_taskq_kstat_data(char *buf, size_t size, void *data) 1570e2df9bb4SMartin Matuska { 1571e2df9bb4SMartin Matuska struct list_head *tql = NULL; 1572e2df9bb4SMartin Matuska taskq_t *tq; 1573e2df9bb4SMartin Matuska char name[TASKQ_NAMELEN+5]; /* 5 for dot, 3x instance digits, null */ 1574e2df9bb4SMartin Matuska char threads[25]; 1575e2df9bb4SMartin Matuska char tasks[30]; 1576e2df9bb4SMartin Matuska size_t n; 1577e2df9bb4SMartin Matuska int err = 0; 1578e2df9bb4SMartin Matuska 1579e2df9bb4SMartin Matuska down_read(&tq_list_sem); 1580e2df9bb4SMartin Matuska list_for_each_prev(tql, &tq_list) { 1581e2df9bb4SMartin Matuska tq = list_entry(tql, taskq_t, tq_taskqs); 1582e2df9bb4SMartin Matuska 1583e2df9bb4SMartin Matuska mutex_enter(tq->tq_ksp->ks_lock); 1584e2df9bb4SMartin Matuska taskq_kstats_update(tq->tq_ksp, KSTAT_READ); 1585e2df9bb4SMartin Matuska taskq_kstats_t *tqks = tq->tq_ksp->ks_data; 1586e2df9bb4SMartin Matuska 1587e2df9bb4SMartin Matuska snprintf(name, sizeof (name), "%s.%d", tq->tq_name, 1588e2df9bb4SMartin Matuska tq->tq_instance); 1589e2df9bb4SMartin Matuska snprintf(threads, sizeof (threads), "%3llu [%3llu %3llu] %3llu", 1590e2df9bb4SMartin Matuska tqks->tqks_threads_total.value.ui64, 1591e2df9bb4SMartin Matuska tqks->tqks_threads_active.value.ui64, 1592e2df9bb4SMartin Matuska tqks->tqks_threads_idle.value.ui64, 1593e2df9bb4SMartin Matuska tqks->tqks_threads_max.value.ui64); 1594e2df9bb4SMartin Matuska snprintf(tasks, sizeof (tasks), "%5llu [%5llu %5llu] %3llu", 1595e2df9bb4SMartin Matuska tqks->tqks_tasks_total.value.ui64, 1596e2df9bb4SMartin Matuska tqks->tqks_tasks_pending.value.ui64, 1597e2df9bb4SMartin Matuska tqks->tqks_tasks_priority.value.ui64, 1598e2df9bb4SMartin Matuska tqks->tqks_tasks_delayed.value.ui64); 1599e2df9bb4SMartin Matuska 1600e2df9bb4SMartin Matuska mutex_exit(tq->tq_ksp->ks_lock); 1601e2df9bb4SMartin Matuska 1602e2df9bb4SMartin Matuska n = snprintf(buf, size, "%-20s | %-17s | %-23s\n", 1603e2df9bb4SMartin Matuska name, threads, tasks); 1604e2df9bb4SMartin Matuska if (n >= size) { 1605e2df9bb4SMartin Matuska err = ENOMEM; 1606e2df9bb4SMartin Matuska break; 1607e2df9bb4SMartin Matuska } 1608e2df9bb4SMartin Matuska 1609e2df9bb4SMartin Matuska buf = &buf[n]; 1610e2df9bb4SMartin Matuska size -= n; 1611e2df9bb4SMartin Matuska } 1612e2df9bb4SMartin Matuska 1613e2df9bb4SMartin Matuska up_read(&tq_list_sem); 1614e2df9bb4SMartin Matuska 1615e2df9bb4SMartin Matuska return (err); 1616e2df9bb4SMartin Matuska } 1617e2df9bb4SMartin Matuska 1618e2df9bb4SMartin Matuska static void 1619e2df9bb4SMartin Matuska spl_taskq_kstat_init(void) 1620e2df9bb4SMartin Matuska { 1621e2df9bb4SMartin Matuska kstat_t *ksp = kstat_create("taskq", 0, "summary", "misc", 1622e2df9bb4SMartin Matuska KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL); 1623e2df9bb4SMartin Matuska 1624e2df9bb4SMartin Matuska if (ksp == NULL) 1625e2df9bb4SMartin Matuska return; 1626e2df9bb4SMartin Matuska 1627e2df9bb4SMartin Matuska ksp->ks_data = (void *)(uintptr_t)1; 1628e2df9bb4SMartin Matuska ksp->ks_ndata = 1; 1629e2df9bb4SMartin Matuska kstat_set_raw_ops(ksp, spl_taskq_kstat_headers, 1630e2df9bb4SMartin Matuska spl_taskq_kstat_data, NULL); 1631e2df9bb4SMartin Matuska kstat_install(ksp); 1632e2df9bb4SMartin Matuska 1633e2df9bb4SMartin Matuska taskq_summary_ksp = ksp; 1634e2df9bb4SMartin Matuska } 1635e2df9bb4SMartin Matuska 1636e2df9bb4SMartin Matuska static void 1637e2df9bb4SMartin 
Matuska spl_taskq_kstat_fini(void)
1638e2df9bb4SMartin Matuska {
1639e2df9bb4SMartin Matuska if (taskq_summary_ksp == NULL)
1640e2df9bb4SMartin Matuska return;
1641e2df9bb4SMartin Matuska
1642e2df9bb4SMartin Matuska kstat_delete(taskq_summary_ksp);
1643e2df9bb4SMartin Matuska taskq_summary_ksp = NULL;
1644e2df9bb4SMartin Matuska }
1645e2df9bb4SMartin Matuska
1646eda14cbcSMatt Macy static unsigned int spl_taskq_kick = 0;
1647eda14cbcSMatt Macy
1648eda14cbcSMatt Macy /*
1649eda14cbcSMatt Macy * 2.6.36 API Change
1650eda14cbcSMatt Macy * module_param_cb is introduced to take kernel_param_ops and
1651eda14cbcSMatt Macy * module_param_call is marked as obsolete. Also set and get operations
1652eda14cbcSMatt Macy * were changed to take a 'const struct kernel_param *'.
1653eda14cbcSMatt Macy */
1654eda14cbcSMatt Macy static int
1655eda14cbcSMatt Macy #ifdef module_param_cb
1656eda14cbcSMatt Macy param_set_taskq_kick(const char *val, const struct kernel_param *kp)
1657eda14cbcSMatt Macy #else
1658eda14cbcSMatt Macy param_set_taskq_kick(const char *val, struct kernel_param *kp)
1659eda14cbcSMatt Macy #endif
1660eda14cbcSMatt Macy {
1661eda14cbcSMatt Macy int ret;
1662eda14cbcSMatt Macy taskq_t *tq = NULL;
1663eda14cbcSMatt Macy taskq_ent_t *t;
1664eda14cbcSMatt Macy unsigned long flags;
1665eda14cbcSMatt Macy
1666eda14cbcSMatt Macy ret = param_set_uint(val, kp);
1667eda14cbcSMatt Macy if (ret < 0 || !spl_taskq_kick)
1668eda14cbcSMatt Macy return (ret);
1669eda14cbcSMatt Macy /* reset value */
1670eda14cbcSMatt Macy spl_taskq_kick = 0;
1671eda14cbcSMatt Macy
1672eda14cbcSMatt Macy down_read(&tq_list_sem);
1673eda14cbcSMatt Macy list_for_each_entry(tq, &tq_list, tq_taskqs) {
1674eda14cbcSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags,
1675eda14cbcSMatt Macy tq->tq_lock_class);
1676eda14cbcSMatt Macy /* Check if the first pending is older than 5 seconds */
1677eda14cbcSMatt Macy t = taskq_next_ent(tq);
1678eda14cbcSMatt Macy if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
1679eda14cbcSMatt Macy (void) taskq_thread_spawn(tq);
1680eda14cbcSMatt Macy printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
1681eda14cbcSMatt Macy tq->tq_name, tq->tq_instance);
1682eda14cbcSMatt Macy }
1683eda14cbcSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags);
1684eda14cbcSMatt Macy }
1685eda14cbcSMatt Macy up_read(&tq_list_sem);
1686eda14cbcSMatt Macy return (ret);
1687eda14cbcSMatt Macy }
1688eda14cbcSMatt Macy
1689eda14cbcSMatt Macy #ifdef module_param_cb
1690eda14cbcSMatt Macy static const struct kernel_param_ops param_ops_taskq_kick = {
1691eda14cbcSMatt Macy .set = param_set_taskq_kick,
1692eda14cbcSMatt Macy .get = param_get_uint,
1693eda14cbcSMatt Macy };
1694eda14cbcSMatt Macy module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
1695eda14cbcSMatt Macy #else
1696eda14cbcSMatt Macy module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
1697eda14cbcSMatt Macy &spl_taskq_kick, 0644);
1698eda14cbcSMatt Macy #endif
1699eda14cbcSMatt Macy MODULE_PARM_DESC(spl_taskq_kick,
1700eda14cbcSMatt Macy "Write nonzero to kick stuck taskqs to spawn more threads");
1701eda14cbcSMatt Macy
17027877fdebSMatt Macy /*
17037877fdebSMatt Macy * This callback will be called exactly once for each core that comes online,
17047877fdebSMatt Macy * for each dynamic taskq. We attempt to expand taskqs that have
17057877fdebSMatt Macy * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
17067877fdebSMatt Macy * time, to correctly determine whether or not to add a thread.
17077877fdebSMatt Macy */ 17087877fdebSMatt Macy static int 17097877fdebSMatt Macy spl_taskq_expand(unsigned int cpu, struct hlist_node *node) 17107877fdebSMatt Macy { 17117877fdebSMatt Macy taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node); 17127877fdebSMatt Macy unsigned long flags; 17137877fdebSMatt Macy int err = 0; 17147877fdebSMatt Macy 17157877fdebSMatt Macy ASSERT(tq); 17167877fdebSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 17177877fdebSMatt Macy 171881b22a98SMartin Matuska if (!(tq->tq_flags & TASKQ_ACTIVE)) { 171981b22a98SMartin Matuska spin_unlock_irqrestore(&tq->tq_lock, flags); 172081b22a98SMartin Matuska return (err); 172181b22a98SMartin Matuska } 17227877fdebSMatt Macy 17237877fdebSMatt Macy ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT); 17247877fdebSMatt Macy int nthreads = MIN(tq->tq_cpu_pct, 100); 17257877fdebSMatt Macy nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1); 17267877fdebSMatt Macy tq->tq_maxthreads = nthreads; 17277877fdebSMatt Macy 17287877fdebSMatt Macy if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) && 17297877fdebSMatt Macy tq->tq_maxthreads > tq->tq_nthreads) { 173081b22a98SMartin Matuska spin_unlock_irqrestore(&tq->tq_lock, flags); 17317877fdebSMatt Macy taskq_thread_t *tqt = taskq_thread_create(tq); 17327877fdebSMatt Macy if (tqt == NULL) 17337877fdebSMatt Macy err = -1; 173481b22a98SMartin Matuska return (err); 17357877fdebSMatt Macy } 17367877fdebSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 17377877fdebSMatt Macy return (err); 17387877fdebSMatt Macy } 17397877fdebSMatt Macy 17407877fdebSMatt Macy /* 17417877fdebSMatt Macy * While we don't support offlining CPUs, it is possible that CPUs will fail 17427877fdebSMatt Macy * to online successfully. We do need to be able to handle this case 17437877fdebSMatt Macy * gracefully. 
17447877fdebSMatt Macy */ 17457877fdebSMatt Macy static int 17467877fdebSMatt Macy spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node) 17477877fdebSMatt Macy { 17487877fdebSMatt Macy taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node); 17497877fdebSMatt Macy unsigned long flags; 17507877fdebSMatt Macy 17517877fdebSMatt Macy ASSERT(tq); 17527877fdebSMatt Macy spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); 17537877fdebSMatt Macy 17547877fdebSMatt Macy if (!(tq->tq_flags & TASKQ_ACTIVE)) 17557877fdebSMatt Macy goto out; 17567877fdebSMatt Macy 17577877fdebSMatt Macy ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT); 17587877fdebSMatt Macy int nthreads = MIN(tq->tq_cpu_pct, 100); 17597877fdebSMatt Macy nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1); 17607877fdebSMatt Macy tq->tq_maxthreads = nthreads; 17617877fdebSMatt Macy 17627877fdebSMatt Macy if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) && 17637877fdebSMatt Macy tq->tq_maxthreads < tq->tq_nthreads) { 17647877fdebSMatt Macy ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1); 17657877fdebSMatt Macy taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next, 17667877fdebSMatt Macy taskq_thread_t, tqt_thread_list); 17677877fdebSMatt Macy struct task_struct *thread = tqt->tqt_thread; 17687877fdebSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 17697877fdebSMatt Macy 17707877fdebSMatt Macy kthread_stop(thread); 17717877fdebSMatt Macy 17727877fdebSMatt Macy return (0); 17737877fdebSMatt Macy } 17747877fdebSMatt Macy 17757877fdebSMatt Macy out: 17767877fdebSMatt Macy spin_unlock_irqrestore(&tq->tq_lock, flags); 17777877fdebSMatt Macy return (0); 17787877fdebSMatt Macy } 17797877fdebSMatt Macy 1780eda14cbcSMatt Macy int 1781eda14cbcSMatt Macy spl_taskq_init(void) 1782eda14cbcSMatt Macy { 1783eda14cbcSMatt Macy init_rwsem(&tq_list_sem); 1784eda14cbcSMatt Macy tsd_create(&taskq_tsd, NULL); 1785eda14cbcSMatt Macy 17867877fdebSMatt Macy spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 17877877fdebSMatt Macy "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down); 17887877fdebSMatt Macy 1789eda14cbcSMatt Macy system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64), 1790eda14cbcSMatt Macy maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC); 1791eda14cbcSMatt Macy if (system_taskq == NULL) 1792c7046f76SMartin Matuska return (-ENOMEM); 1793eda14cbcSMatt Macy 1794eda14cbcSMatt Macy system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4), 1795eda14cbcSMatt Macy maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC); 1796eda14cbcSMatt Macy if (system_delay_taskq == NULL) { 17977877fdebSMatt Macy cpuhp_remove_multi_state(spl_taskq_cpuhp_state); 1798eda14cbcSMatt Macy taskq_destroy(system_taskq); 1799c7046f76SMartin Matuska return (-ENOMEM); 1800eda14cbcSMatt Macy } 1801eda14cbcSMatt Macy 1802eda14cbcSMatt Macy dynamic_taskq = taskq_create("spl_dynamic_taskq", 1, 1803eda14cbcSMatt Macy maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE); 1804eda14cbcSMatt Macy if (dynamic_taskq == NULL) { 18057877fdebSMatt Macy cpuhp_remove_multi_state(spl_taskq_cpuhp_state); 1806eda14cbcSMatt Macy taskq_destroy(system_taskq); 1807eda14cbcSMatt Macy taskq_destroy(system_delay_taskq); 1808c7046f76SMartin Matuska return (-ENOMEM); 1809eda14cbcSMatt Macy } 1810eda14cbcSMatt Macy 1811eda14cbcSMatt Macy /* 1812eda14cbcSMatt Macy * This is used to annotate tq_lock, so 1813eda14cbcSMatt Macy * taskq_dispatch -> taskq_thread_spawn -> 
taskq_dispatch 1814eda14cbcSMatt Macy * does not trigger a lockdep warning re: possible recursive locking 1815eda14cbcSMatt Macy */ 1816eda14cbcSMatt Macy dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC; 1817eda14cbcSMatt Macy 1818e2df9bb4SMartin Matuska spl_taskq_kstat_init(); 1819e2df9bb4SMartin Matuska 1820eda14cbcSMatt Macy return (0); 1821eda14cbcSMatt Macy } 1822eda14cbcSMatt Macy 1823eda14cbcSMatt Macy void 1824eda14cbcSMatt Macy spl_taskq_fini(void) 1825eda14cbcSMatt Macy { 1826e2df9bb4SMartin Matuska spl_taskq_kstat_fini(); 1827e2df9bb4SMartin Matuska 1828eda14cbcSMatt Macy taskq_destroy(dynamic_taskq); 1829eda14cbcSMatt Macy dynamic_taskq = NULL; 1830eda14cbcSMatt Macy 1831eda14cbcSMatt Macy taskq_destroy(system_delay_taskq); 1832eda14cbcSMatt Macy system_delay_taskq = NULL; 1833eda14cbcSMatt Macy 1834eda14cbcSMatt Macy taskq_destroy(system_taskq); 1835eda14cbcSMatt Macy system_taskq = NULL; 1836eda14cbcSMatt Macy 1837eda14cbcSMatt Macy tsd_destroy(&taskq_tsd); 18387877fdebSMatt Macy 18397877fdebSMatt Macy cpuhp_remove_multi_state(spl_taskq_cpuhp_state); 18407877fdebSMatt Macy spl_taskq_cpuhp_state = 0; 1841eda14cbcSMatt Macy } 1842
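/*
 * Illustrative usage sketch (not part of this file): a typical consumer
 * creates a taskq, dispatches work to it, waits for completion, then tears
 * it down.  The callback my_func() and its argument are hypothetical;
 * taskq_create(), taskq_dispatch(), taskq_wait() and taskq_destroy() are the
 * SPL taskq interfaces provided by this implementation.
 *
 *	static void
 *	my_func(void *arg)
 *	{
 *		(void) arg;	// hypothetical work item
 *	}
 *
 *	taskq_t *tq = taskq_create("example_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *	taskqid_t id = taskq_dispatch(tq, my_func, NULL, TQ_SLEEP);
 *	if (id == TASKQID_INVALID)
 *		return;		// no taskq entry could be allocated
 *	taskq_wait(tq);		// block until all dispatched tasks finish
 *	taskq_destroy(tq);
 */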