/*	$NetBSD: linux_tasklet.c,v 1.12 2023/02/24 11:02:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.12 2023/02/24 11:02:05 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu * */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}
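
/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * would bracket all tasklet use between these two calls, e.g. around
 * module load and unload:
 *
 *	error = linux_tasklets_init();
 *	if (error)
 *		return error;
 *	...initialize, schedule, and eventually kill all tasklets...
 *	linux_tasklets_fini();
 */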

static void
tasklet_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc;

	*tcp = tc = kmem_zalloc(sizeof(*tc), KM_SLEEP);
	SIMPLEQ_INIT(&tc->tc_head);
}

static void
tasklet_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc = *tcp;

	KASSERT(SIMPLEQ_EMPTY(&tc->tc_head));
	kmem_free(tc, sizeof(*tc));
	*tcp = NULL;		/* paranoia */
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_create cannot fail.  */
	tq->tq_percpu = percpu_create(sizeof(struct tasklet_cpu),
	    tasklet_cpu_init, tasklet_cpu_fini, NULL);
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu **tcp, *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_release in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu **tcp, *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
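
/*
 * Illustrative sketch (hypothetical names mydriver_softc, sc, and
 * mydriver_handler): a driver typically embeds a tasklet_struct in its
 * softc and initializes it once at attach time:
 *
 *	static void
 *	mydriver_handler(unsigned long data)
 *	{
 *		struct mydriver_softc *sc = (struct mydriver_softc *)data;
 *
 *		...bottom-half work; runs in softint context, may not
 *		   sleep...
 *	}
 *
 *	tasklet_init(&sc->sc_tasklet, mydriver_handler, (unsigned long)sc);
 */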

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}
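
/*
 * Illustrative sketch (hypothetical mydriver_intr and sc): a hard
 * interrupt handler defers the bulk of its work by scheduling the
 * tasklet; scheduling an already-scheduled tasklet is a cheap no-op
 * because the SCHEDULED bit is already set:
 *
 *	static int
 *	mydriver_intr(void *arg)
 *	{
 *		struct mydriver_softc *sc = arg;
 *
 *		...acknowledge the device interrupt...
 *		tasklet_schedule(&sc->sc_tasklet);
 *		return 1;
 *	}
 */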

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_release in __tasklet_enable.  */
	membar_acquire();
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count.  */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}
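
/*
 * Illustrative sketch (hypothetical sc): briefly fence the handler out
 * while mutating state it reads; keep the window short, because a
 * scheduled tasklet busy-waits in the softint for as long as it is
 * disabled:
 *
 *	tasklet_disable(&sc->sc_tasklet);
 *	...update state read by the handler; it is not running and
 *	   will not start...
 *	tasklet_enable(&sc->sc_tasklet);
 */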

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}
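
/*
 * Illustrative sketch (hypothetical sc): at detach time, first ensure
 * the tasklet cannot be scheduled again, e.g. by disestablishing the
 * device interrupt, then kill it:
 *
 *	...disestablish the interrupt handler...
 *	tasklet_kill(&sc->sc_tasklet);
 *	...now safe to free the memory containing sc->sc_tasklet...
 */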

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}
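
/*
 * Illustrative sketch: since the answer may go stale immediately, the
 * only sound use is a positive assertion from a context that already
 * knows the tasklet is running, such as its own handler:
 *
 *	KASSERT(tasklet_is_locked(tasklet));
 */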

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
		state | TASKLET_RUNNING) != state);

	/* Pairs with membar_release in tasklet_unlock.  */
	membar_acquire();

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_acquire in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
	membar_release();
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_release in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}
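
/*
 * Illustrative sketch: tasklet_trylock/tasklet_unlock form a
 * non-blocking mutual-exclusion pair over the RUNNING bit, as used by
 * tasklet_softintr above to claim a tasklet before invoking it:
 *
 *	if (tasklet_trylock(tasklet)) {
 *		...we have exclusive claim to the tasklet...
 *		tasklet_unlock(tasklet);
 *	} else {
 *		...already running elsewhere; requeue or back off...
 *	}
 */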

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver abuses the tasklet abstraction like a cop abuses his
 * wife.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_release in __tasklet_enable_sync_once.  */
	membar_acquire();

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_acquire in __tasklet_disable_sync_once.  */
	membar_release();

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
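
/*
 * Illustrative sketch (hypothetical sc): these calls must be balanced;
 * only the first disable waits for a running handler, and only the
 * last enable kills the tasklet:
 *
 *	__tasklet_disable_sync_once(&sc->sc_tasklet);
 *	...handler is quiesced and will not run...
 *	__tasklet_enable_sync_once(&sc->sc_tasklet);
 */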

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}
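
/*
 * Illustrative sketch (hypothetical sc): both predicates are racy
 * hints, fit only for opportunistic checks such as skipping work that
 * a pending tasklet will redo anyway:
 *
 *	if (__tasklet_is_scheduled(&sc->sc_tasklet))
 *		return;		...the handler will pick this up...
 */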

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_acquire in tasklet_disable.
	 */
	membar_release();

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}
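
/*
 * Illustrative sketch (hypothetical sc): a caller can use the return
 * value to act only when the final enable brings the disable count to
 * zero:
 *
 *	if (__tasklet_enable(&sc->sc_tasklet))
 *		...count hit zero; the tasklet may now run again...
 */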