/*	$NetBSD: linux_tasklet.c,v 1.3 2021/12/19 01:17:46 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.3 2021/12/19 01:17:46 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

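/*
 * Tasklet state bits, kept in tl_state.  SCHEDULED means the tasklet
 * is on some CPU's queue waiting for a softint pass; RUNNING means its
 * function is executing (or about to).  The softint handler sets
 * RUNNING before it clears SCHEDULED, which is what the wait loops in
 * tasklet_disable and tasklet_kill rely on.
 */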
#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}
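
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the subsystem is brought up once before any tasklet is used and
 * torn down only after all use of tasklets is done, e.g. from a
 * module load/unload path along these lines.
 *
 *	static int
 *	example_load(void)
 *	{
 *		int error;
 *
 *		error = linux_tasklets_init();
 *		if (error)
 *			return error;
 *		return 0;
 *	}
 *
 *	static void
 *	example_unload(void)
 *	{
 *		linux_tasklets_fini();
 *	}
 */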

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_alloc cannot fail.  */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;
		unsigned state;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		do {
			state = tasklet->tl_state;
			__insn_barrier();
			/* It had better be scheduled.  */
			KASSERT(state & TASKLET_SCHEDULED);
			if (state & TASKLET_RUNNING)
				break;
		} while (atomic_cas_uint(&tasklet->tl_state, state,
			state | TASKLET_RUNNING) != state);

		if (state & TASKLET_RUNNING) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Wait for last runner's side effects.  */
		membar_enter();

		/* Check whether it's currently disabled.  */
		if (tasklet->tl_disablecount) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			KASSERT(tasklet->tl_state & TASKLET_RUNNING);
			atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/*
		 * Guarantee all caller-relevant reads or writes in
		 * func have completed before clearing RUNNING bit.
		 */
		membar_exit();

		/* Clear RUNNING to notify tasklet_disable.  */
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = tasklet->tl_state;
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	tasklet->tl_state = 0;
	tasklet->tl_disablecount = 0;
	tasklet->func = func;
	tasklet->data = data;
}
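
/*
 * Illustrative sketch (hypothetical driver names, not part of this
 * API): a user typically embeds the struct tasklet_struct in its own
 * state, passes that state through the unsigned long data argument,
 * and initializes the tasklet before anything can schedule it.
 *
 *	struct example_softc {
 *		struct tasklet_struct	sc_tasklet;
 *		// ... device state ...
 *	};
 *
 *	static void
 *	example_tasklet_func(unsigned long data)
 *	{
 *		struct example_softc *sc = (struct example_softc *)data;
 *
 *		// runs in softint context: do the deferred work for sc
 *	}
 *
 *	static void
 *	example_attach(struct example_softc *sc)
 *	{
 *		tasklet_init(&sc->sc_tasklet, &example_tasklet_func,
 *		    (unsigned long)sc);
 *	}
 */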

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}
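
/*
 * Illustrative sketch (hypothetical names, continuing the example
 * above): a hard interrupt handler acknowledges the device and defers
 * the rest of the work by scheduling the tasklet.  Scheduling a
 * tasklet that is already scheduled but has not yet run is a no-op,
 * so the handler need not track whether one is pending.
 *
 *	static int
 *	example_intr(void *cookie)
 *	{
 *		struct example_softc *sc = cookie;
 *
 *		if (!example_device_interrupted(sc))	// hypothetical helper
 *			return 0;	// not ours
 *		tasklet_schedule(&sc->sc_tasklet);
 *		return 1;	// handled
 *	}
 */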

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Wait for it to finish running, if it was running.  */
	while (tasklet->tl_state & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * Guarantee any side effects of running are visible to us
	 * before we return.
	 *
	 * XXX membar_sync is overkill here.  It is tempting to issue
	 * membar_enter, but it only orders stores | loads, stores;
	 * what we really want here is load_acquire(&tasklet->tl_state)
	 * above, i.e. to witness all side effects preceding the store
	 * whose value we loaded.  Absent that, membar_sync is the best
	 * we can do.
	 */
	membar_sync();
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 */
	membar_exit();

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);
}
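
/*
 * Illustrative sketch (hypothetical names): tasklet_disable and
 * tasklet_enable bracket a short section during which the tasklet
 * function is guaranteed not to run, e.g. while the caller updates
 * state the tasklet also touches.  Because a disabled but scheduled
 * tasklet keeps the softint busy-waiting, keep the bracket brief.
 *
 *	tasklet_disable(&sc->sc_tasklet);
 *	example_update_shared_state(sc);	// hypothetical helper
 *	tasklet_enable(&sc->sc_tasklet);
 */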

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (tasklet->tl_state & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	while (tasklet->tl_state & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * Wait for any side effects of running.  Again, membar_sync is
	 * overkill; we really want load_acquire(&tasklet->tl_state)
	 * here.
	 */
	membar_sync();
}
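
/*
 * Illustrative sketch (hypothetical names): typical teardown first
 * makes sure nothing can schedule the tasklet again (e.g. by
 * disestablishing the interrupt handler that schedules it), then uses
 * tasklet_kill to wait out any pending or still-running instance
 * before freeing the memory containing the tasklet.
 *
 *	static void
 *	example_detach(struct example_softc *sc)
 *	{
 *		example_disestablish_intr(sc);	// hypothetical: stop all scheduling
 *		tasklet_kill(&sc->sc_tasklet);
 *		// now safe to free sc
 *	}
 */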

/*
 * tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1) {
		while (tasklet->tl_state & TASKLET_RUNNING)
			SPINLOCK_BACKOFF_HOOK;
		membar_sync();
	}
}

/*
 * tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
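
/*
 * Illustrative sketch (hypothetical usage): tasklet_disable_sync_once
 * and tasklet_enable_sync_once are meant to be used as a matched
 * pair, e.g. around a suspend/resume-style cycle: only the first
 * disable waits for a running tasklet, and the enable that drops the
 * count back to zero kills it.
 *
 *	static void
 *	example_suspend(struct example_softc *sc)
 *	{
 *		tasklet_disable_sync_once(&sc->sc_tasklet);
 *	}
 *
 *	static void
 *	example_resume(struct example_softc *sc)
 *	{
 *		tasklet_enable_sync_once(&sc->sc_tasklet);
 *	}
 */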

/*
 * tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = tasklet->tl_disablecount;
	__insn_barrier();

	return (disablecount == 0);
}
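
/*
 * Illustrative sketch: because the answer can go stale as soon as it
 * is returned, tasklet_is_enabled is suited to diagnostic assertions
 * and heuristics rather than to synchronization, e.g.:
 *
 *	KASSERT(tasklet_is_enabled(&sc->sc_tasklet));
 */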