/*	$NetBSD: linux_tasklet.c,v 1.7 2021/12/19 11:49:11 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.7 2021/12/19 11:49:11 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>
#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}
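
/*
 * Illustrative sketch, not part of the original file: a consumer
 * module would bracket its lifetime with the init/fini calls above.
 * The handler name example_modcmd is hypothetical; modcmd_t and the
 * MODULE_CMD_* constants are the standard module(9) interface.
 *
 *	static int
 *	example_modcmd(modcmd_t cmd, void *arg)
 *	{
 *
 *		switch (cmd) {
 *		case MODULE_CMD_INIT:
 *			return linux_tasklets_init();
 *		case MODULE_CMD_FINI:
 *			linux_tasklets_fini();
 *			return 0;
 *		default:
 *			return ENOTTY;
 *		}
 *	}
 */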

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_alloc cannot fail.  */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
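
/*
 * Illustrative sketch, not part of the original file: a driver
 * embeds a tasklet in its softc and points it at a handler.  The
 * names example_softc, example_task, and example_attach are
 * hypothetical.
 *
 *	struct example_softc {
 *		struct tasklet_struct	sc_tasklet;
 *		...
 *	};
 *
 *	static void
 *	example_task(unsigned long data)
 *	{
 *		struct example_softc *sc = (void *)(uintptr_t)data;
 *
 *		... deferred interrupt work ...
 *	}
 *
 *	static void
 *	example_attach(struct example_softc *sc)
 *	{
 *
 *		tasklet_init(&sc->sc_tasklet, &example_task,
 *		    (unsigned long)(uintptr_t)sc);
 *	}
 */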

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}
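
/*
 * Illustrative sketch, not part of the original file: a hard
 * interrupt handler (hypothetical example_intr) typically
 * acknowledges the device and defers the rest of the work to the
 * tasklet.  Scheduling is idempotent until the tasklet runs.
 *
 *	static int
 *	example_intr(void *cookie)
 *	{
 *		struct example_softc *sc = cookie;
 *
 *		... acknowledge device interrupt ...
 *		tasklet_schedule(&sc->sc_tasklet);
 *		return 1;
 *	}
 */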

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count.  */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}
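
/*
 * Illustrative sketch, not part of the original file: bracketing a
 * short critical section so the tasklet function cannot run
 * concurrently with it.  While disabled, a scheduled tasklet keeps
 * retriggering the softint, so keep the bracket brief.
 *
 *	tasklet_disable(&sc->sc_tasklet);
 *	... touch state shared with the tasklet function ...
 *	tasklet_enable(&sc->sc_tasklet);
 */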

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}
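
/*
 * Illustrative sketch, not part of the original file: detach order
 * for a hypothetical example_detach.  New scheduling must be cut off
 * (e.g. by disestablishing the hard interrupt) before tasklet_kill,
 * which then waits out any scheduled or running instance.
 *
 *	static void
 *	example_detach(struct example_softc *sc)
 *	{
 *
 *		... disestablish interrupt: no more scheduling ...
 *		tasklet_kill(&sc->sc_tasklet);
 *		... now safe to free sc ...
 *	}
 */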

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
		state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}
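
/*
 * Illustrative sketch, not part of the original file: a caller (in
 * practice, i915) can use the lock operations to exclude the tasklet
 * function by hand; while the lock is held, tasklet_softintr
 * requeues the tasklet instead of running it.
 *
 *	if (tasklet_trylock(&sc->sc_tasklet)) {
 *		... tasklet function is not running and cannot start ...
 *		tasklet_unlock(&sc->sc_tasklet);
 *	}
 */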

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver severely abuses the tasklet abstraction; the
 * following operations exist only to support it.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
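
/*
 * Illustrative sketch, not part of the original file: the sync_once
 * pair brackets a one-shot window in which the caller must know the
 * tasklet is not running; the final enable kills the tasklet, so the
 * caller must prevent rescheduling across the bracket.
 *
 *	__tasklet_disable_sync_once(&sc->sc_tasklet);
 *	... tasklet function is not running and will not run ...
 *	__tasklet_enable_sync_once(&sc->sc_tasklet);
 */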

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable_nosync.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}