/*	$NetBSD: linux_tasklet.c,v 1.12 2023/02/24 11:02:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.12 2023/02/24 11:02:05 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu * */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem. Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success! */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem. All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

static void
tasklet_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc;

	*tcp = tc = kmem_zalloc(sizeof(*tc), KM_SLEEP);
	SIMPLEQ_INIT(&tc->tc_head);
}

static void
tasklet_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc = *tcp;

	KASSERT(SIMPLEQ_EMPTY(&tc->tc_head));
	kmem_free(tc, sizeof(*tc));
	*tcp = NULL;		/* paranoia */
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory. percpu_create cannot fail. */
	tq->tq_percpu = percpu_create(sizeof(struct tasklet_cpu),
	    tasklet_cpu_init, tasklet_cpu_fini, NULL);
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint. softint_establish may fail. */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success! */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu **tcp, *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed. */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue. */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_release in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled. Clear SCHEDULED and call func. */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable. */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq. If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit. If already set, we're done. */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now. Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run. tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu **tcp, *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it. No
	 * memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
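
/*
 * Example (illustrative sketch, not part of this module): a
 * hypothetical driver would embed a tasklet_struct in its softc,
 * initialize it once at attach time, and schedule it from its hard
 * interrupt handler. The names mydrv_softc, mydrv_softintr, sc, and
 * sc_tasklet are invented for illustration only.
 *
 *	struct mydrv_softc {
 *		struct tasklet_struct	sc_tasklet;
 *		// ... other driver state ...
 *	};
 *
 *	static void
 *	mydrv_softintr(unsigned long data)
 *	{
 *		struct mydrv_softc *sc = (struct mydrv_softc *)data;
 *
 *		// deferred work runs here at softint priority
 *	}
 *
 *	// at attach time
 *	tasklet_init(&sc->sc_tasklet, mydrv_softintr, (unsigned long)sc);
 *
 *	// from the hard interrupt handler
 *	tasklet_schedule(&sc->sc_tasklet);
 */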

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority. If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority. If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count. */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_release in __tasklet_enable. */
	membar_acquire();
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count. */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running. */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count. If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
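
/*
 * Example (illustrative sketch): tasklet_disable/tasklet_enable
 * bracket a short critical section during which the tasklet function
 * is guaranteed not to run, e.g. while the caller updates state the
 * tasklet reads. sc and sc_tasklet are hypothetical names.
 *
 *	tasklet_disable(&sc->sc_tasklet);	// waits if it is running
 *	// ... briefly update state shared with the tasklet ...
 *	tasklet_enable(&sc->sc_tasklet);	// may run again afterward
 *
 * Keep the section short: if the tasklet is scheduled while disabled,
 * the softint busy-waits by rescheduling itself until it is enabled.
 */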

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue. */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running. */
	tasklet_unlock_wait(tasklet);
}
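
/*
 * Example (illustrative sketch): teardown order at detach time. The
 * caller must first make sure nothing can schedule the tasklet again
 * (e.g. by disestablishing its interrupt handler), after which
 * tasklet_kill drains any pending or running invocation before the
 * memory is reused. sc, sc_ih, and sc_tasklet are hypothetical names.
 *
 *	intr_disestablish(sc->sc_ih);	// no new schedules after this
 *	tasklet_kill(&sc->sc_tasklet);	// drain pending/running work
 *	// now it is safe to free the softc containing sc_tasklet
 */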

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked. Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING. Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
	    state | TASKLET_RUNNING) != state);

	/* Pairs with membar_release in tasklet_unlock. */
	membar_acquire();

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_acquire in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
	membar_release();
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_release in tasklet_unlock. */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}
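
/*
 * Example (illustrative, simplified sketch): tasklet_trylock and
 * tasklet_unlock let a caller run the tasklet function directly while
 * excluding the queued softint, which requeues instead of running a
 * tasklet whose RUNNING bit is set. t is a hypothetical tasklet
 * pointer, and this sketch ignores the SCHEDULED bit, unlike the
 * softint path above.
 *
 *	if (tasklet_trylock(t)) {
 *		// RUNNING is now set; the softint will not run t
 *		// concurrently with this call.
 *		(*t->func)(t->data);
 *		tasklet_unlock(t);
 *	}
 */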

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver severely abuses the tasklet abstraction.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count. */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_release in __tasklet_enable_sync_once. */
	membar_acquire();

	/*
	 * If it was zero, wait for it to finish running. If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_acquire in __tasklet_disable_sync_once. */
	membar_release();

	/* Decrement the disable count. */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet. If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
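
/*
 * Example (illustrative sketch): the *_sync_once pair is meant to be
 * used symmetrically, so that only the first disable waits for the
 * tasklet to finish and only the last enable kills it. t is a
 * hypothetical tasklet pointer.
 *
 *	__tasklet_disable_sync_once(t);	// count 0 -> 1: waits for it
 *	__tasklet_disable_sync_once(t);	// count 1 -> 2: no wait
 *	// ...
 *	__tasklet_enable_sync_once(t);	// count 2 -> 1: no kill
 *	__tasklet_enable_sync_once(t);	// count 1 -> 0: kills it
 */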

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled. Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled. Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count. If it was previously
 *	scheduled to run, it may now run. Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_acquire in tasklet_disable.
	 */
	membar_release();

	/* Decrement the disable count. */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}