/*	$NetBSD: linux_tasklet.c,v 1.9 2021/12/19 12:44:43 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.9 2021/12/19 12:44:43 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu * */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}
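/*
 * Illustrative sketch, compiled out: the usage this file exists to
 * support, once the subsystem above has been initialized.  The mydrv_*
 * names and the softc layout are hypothetical, not part of this API.
 */
#if 0
struct mydrv_softc {
	struct tasklet_struct	sc_tasklet;
	uint64_t		sc_events;
};

static void
mydrv_deferred(unsigned long arg)
{
	struct mydrv_softc *sc = (struct mydrv_softc *)arg;

	/* Runs later in softint context; must not sleep.  */
	sc->sc_events++;
}

static void
mydrv_attach(struct mydrv_softc *sc)
{

	tasklet_init(&sc->sc_tasklet, &mydrv_deferred, (unsigned long)sc);
}

static int
mydrv_intr(void *cookie)
{
	struct mydrv_softc *sc = cookie;

	/* Acknowledge the hardware, then defer the bulk of the work.  */
	tasklet_schedule(&sc->sc_tasklet);
	return 1;
}
#endif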
/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

static void
tasklet_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc;

	*tcp = tc = kmem_zalloc(sizeof(*tc), KM_SLEEP);
	SIMPLEQ_INIT(&tc->tc_head);
}

static void
tasklet_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc = *tcp;

	KASSERT(SIMPLEQ_EMPTY(&tc->tc_head));
	kmem_free(tc, sizeof(*tc));
	*tcp = NULL;		/* paranoia */
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).  Return 0 on success, error code on
 *	failure.
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_create cannot fail.  */
	tq->tq_percpu = percpu_create(sizeof(struct tasklet_cpu),
	    tasklet_cpu_init, tasklet_cpu_fini, NULL);
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}
/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu **tcp, *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}
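/*
 * Sketch, compiled out: because the loop above clears SCHEDULED before
 * calling func, a tasklet function may legitimately reschedule its own
 * tasklet to run once more, e.g. to keep polling hardware.  The
 * mydrv_* names and mydrv_hw_busy are hypothetical.
 */
#if 0
static void
mydrv_poll(unsigned long arg)
{
	struct mydrv_softc *sc = (struct mydrv_softc *)arg;

	if (mydrv_hw_busy(sc))
		tasklet_schedule(&sc->sc_tasklet);	/* run once more */
}
#endif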
/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu **tcp, *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}
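/*
 * Sketch, compiled out: the lock-free test-and-set idiom that
 * tasklet_queue_schedule above (and tasklet_trylock below) uses,
 * isolated for clarity.  Atomically set a bit in a word and report
 * whether this caller set it; retry only when another CPU changed the
 * word between the load and the compare-and-swap.  set_bit_once is a
 * hypothetical name for illustration.
 */
#if 0
static bool
set_bit_once(volatile unsigned *wordp, unsigned bit)
{
	unsigned owords, nwords;

	do {
		owords = atomic_load_relaxed(wordp);
		if (owords & bit)
			return false;	/* somebody else set it */
		nwords = owords | bit;
	} while (atomic_cas_uint(wordp, owords, nwords) != owords);

	return true;		/* we set it */
}
#endif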
/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}
/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count.  */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
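/*
 * Sketch, compiled out: the intended disable/enable bracket.  While
 * the disable count is nonzero the tasklet's function cannot run, so
 * the caller may briefly manipulate state the function also touches.
 * Keep the window short -- a scheduled tasklet busy-waits, requeueing
 * itself, the whole time.  The mydrv_* names and sc_intrmask are
 * hypothetical.
 */
#if 0
static void
mydrv_set_intrmask(struct mydrv_softc *sc, uint32_t mask)
{

	tasklet_disable(&sc->sc_tasklet);	/* waits if running */
	sc->sc_intrmask = mask;			/* tasklet reads this too */
	tasklet_enable(&sc->sc_tasklet);
}
#endif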
/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0 -> 0
	 *	SCHEDULED -> 0
	 *	SCHEDULED -> RUNNING
	 *	RUNNING -> 0
	 *	RUNNING -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
		state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}
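/*
 * Sketch, compiled out: one way a caller could use the lock bit to run
 * a tasklet's function directly, mutually excluded from the softint --
 * roughly the kind of use the i915 hacks below exist to support.
 * Hypothetical illustration only; nothing in this file does this.
 */
#if 0
static void
mydrv_run_now(struct tasklet_struct *t)
{

	if (tasklet_trylock(t)) {
		if (__tasklet_is_enabled(t))
			(*t->func)(t->data);
		tasklet_unlock(t);
	}
}
#endif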
/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver abuses the tasklet abstraction like a cop abuses his
 * wife.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable_nosync.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}
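/*
 * Sketch, compiled out: the intended pairing of the *_sync_once
 * operations.  The disable count makes the calls nest: only the first
 * disable waits for a running tasklet, and only the last enable kills
 * it.  The mydrv_* names are hypothetical.
 */
#if 0
static void
mydrv_suspend(struct mydrv_softc *sc)
{

	/* First disabler waits for a running tasklet to finish.  */
	__tasklet_disable_sync_once(&sc->sc_tasklet);
}

static void
mydrv_resume(struct mydrv_softc *sc)
{

	/* Last enabler kills the tasklet if it was left scheduled.  */
	__tasklet_enable_sync_once(&sc->sc_tasklet);
}
#endif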