/*	$NetBSD: linux_tasklet.c,v 1.6 2021/12/19 11:04:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.6 2021/12/19 11:04:58 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}
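
/*
 * Example (a sketch only, not the actual call site): glue code that
 * uses this subsystem would bring it up and tear it down around
 * everything else, e.g.
 *
 *	error = linux_tasklets_init();
 *	if (error)
 *		goto fail;
 *	...use tasklets...
 *	linux_tasklets_fini();
 */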

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_alloc cannot fail.  */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
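
/*
 * Example usage (a minimal sketch; mydrv_done, mydrv_softc, and sc are
 * hypothetical names, not part of this file):
 *
 *	static void
 *	mydrv_done(unsigned long data)
 *	{
 *		struct mydrv_softc *sc = (struct mydrv_softc *)data;
 *
 *		...softint-level completion work...
 *	}
 *
 *	tasklet_init(&sc->sc_done_tasklet, &mydrv_done, (unsigned long)sc);
 *	...
 *	tasklet_schedule(&sc->sc_done_tasklet);	(e.g. from the hard
 *						 interrupt handler)
 */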

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
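
/*
 * Example (sketch, hypothetical names): briefly prevent the tasklet
 * function from running while updating state shared with it.  Keep the
 * window short, since a scheduled-but-disabled tasklet causes the
 * softint to retrigger in a busy-wait.
 *
 *	tasklet_disable(&sc->sc_done_tasklet);	(waits if already running)
 *	sc->sc_cur_request = NULL;
 *	tasklet_enable(&sc->sc_done_tasklet);
 */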

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}
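
/*
 * Example teardown order (sketch, hypothetical detach path): first
 * quiesce whatever schedules the tasklet, then kill it, and only then
 * free anything the tasklet function touches.
 *
 *	mydrv_disable_interrupts(sc);		(no more tasklet_schedule)
 *	tasklet_kill(&sc->sc_done_tasklet);
 *	...free resources used by the tasklet...
 */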

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
		state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}
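
/*
 * Example of the lock protocol (sketch): a caller that wants to run
 * the tasklet function directly, rather than waiting for the softint,
 * must take RUNNING first and drop it afterward, as tasklet_softintr
 * does above.
 *
 *	if (tasklet_trylock(t)) {
 *		(*t->func)(t->data);
 *		tasklet_unlock(t);
 *	}
 */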

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver severely abuses the tasklet abstraction.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
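
/*
 * Example pairing (sketch, hypothetical names): the _sync_once
 * variants bracket a region, such as a reset path, during which the
 * tasklet must not run; only the outermost disable waits for it.
 *
 *	__tasklet_disable_sync_once(&sc->sc_done_tasklet);
 *	...reset-type work...
 *	__tasklet_enable_sync_once(&sc->sc_done_tasklet);
 */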

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}
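
/*
 * Example (sketch, hypothetical names): a caller that needs to
 * reschedule once the last disable is dropped can use the return
 * value, e.g.
 *
 *	if (__tasklet_enable(&sc->sc_done_tasklet))
 *		tasklet_hi_schedule(&sc->sc_done_tasklet);
 */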