/*	$NetBSD: linux_tasklet.c,v 1.4 2021/12/19 01:46:01 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.4 2021/12/19 01:46:01 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_alloc cannot fail.  */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;
		unsigned state;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		do {
			state = tasklet->tl_state;
			/* It had better be scheduled.  */
			KASSERT(state & TASKLET_SCHEDULED);
			if (state & TASKLET_RUNNING)
				break;
		} while (atomic_cas_uint(&tasklet->tl_state, state,
			state | TASKLET_RUNNING) != state);

		if (state & TASKLET_RUNNING) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Wait for last runner's side effects.  */
		membar_enter();

		/* Check whether it's currently disabled.  */
		if (tasklet->tl_disablecount) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			KASSERT(tasklet->tl_state & TASKLET_RUNNING);
			atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/*
		 * Guarantee all caller-relevant reads or writes in
		 * func have completed before clearing RUNNING bit.
		 */
		membar_exit();

		/* Clear RUNNING to notify tasklet_disable.  */
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = tasklet->tl_state;
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	tasklet->tl_state = 0;
	tasklet->tl_disablecount = 0;
	tasklet->func = func;
	tasklet->data = data;
}
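
/*
 * Example usage (illustrative sketch only -- the foo_* names are
 * hypothetical and not part of this file): a driver embeds a tasklet
 * in its softc, initializes it once at attach time, and schedules it
 * from its hard interrupt handler to defer work to softint context.
 *
 *	struct foo_softc {
 *		struct tasklet_struct	sc_tasklet;
 *	};
 *
 *	static void
 *	foo_deferred(unsigned long arg)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)arg;
 *
 *		... process completed work at softint priority ...
 *	}
 *
 *	At attach time:
 *		tasklet_init(&sc->sc_tasklet, &foo_deferred,
 *		    (unsigned long)sc);
 *
 *	In the hard interrupt handler:
 *		tasklet_schedule(&sc->sc_tasklet);
 */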

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Wait for it to finish running, if it was running.  */
	while (tasklet->tl_state & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * Guarantee any side effects of running are visible to us
	 * before we return.
	 *
	 * XXX membar_sync is overkill here.  It is tempting to issue
	 * membar_enter, but it only orders stores | loads, stores;
	 * what we really want here is load_acquire(&tasklet->tl_state)
	 * above, i.e. to witness all side effects preceding the store
	 * whose value we loaded.  Absent that, membar_sync is the best
	 * we can do.
	 */
	membar_sync();
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 */
	membar_exit();

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);
}
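
/*
 * Example usage (illustrative sketch; foo_* is hypothetical):
 * temporarily prevent the tasklet's function from running while the
 * driver changes state that the function consults, then allow it to
 * run again.  Keep the window short: a tasklet that is scheduled
 * while disabled keeps the softint busy-waiting until it is
 * re-enabled.
 *
 *	tasklet_disable(&sc->sc_tasklet);
 *	... update state examined by foo_deferred ...
 *	tasklet_enable(&sc->sc_tasklet);
 */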

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (tasklet->tl_state & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	while (tasklet->tl_state & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * Wait for any side effects of the last run to become visible
	 * to us.  Again, membar_sync is overkill; we really want
	 * load_acquire(&tasklet->tl_state) here.
	 */
	membar_sync();
}
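
/*
 * Example usage (illustrative sketch; foo_* is hypothetical): at
 * detach time, first make sure nothing can call tasklet_schedule
 * again, then wait for any pending or running instance to finish
 * before freeing the memory containing the tasklet.
 *
 *	... disestablish the interrupt handler that schedules it ...
 *	tasklet_kill(&sc->sc_tasklet);
 *	kmem_free(sc, sizeof(*sc));
 */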

/*
 * tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1) {
		while (tasklet->tl_state & TASKLET_RUNNING)
			SPINLOCK_BACKOFF_HOOK;
		membar_sync();
	}
}

/*
 * tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
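
/*
 * Example usage (illustrative sketch; foo_* is hypothetical): bracket
 * an operation, such as resetting the hardware that the tasklet
 * drives, during which the tasklet must not run.  The calls must be
 * paired; when the disable count returns to zero, any still-pending
 * instance is drained via tasklet_kill.
 *
 *	tasklet_disable_sync_once(&sc->sc_tasklet);
 *	... reset the hardware driven by foo_deferred ...
 *	tasklet_enable_sync_once(&sc->sc_tasklet);
 */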

/*
 * tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = tasklet->tl_disablecount;

	return (disablecount == 0);
}

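/*
 * Example usage (illustrative sketch; foo_* is hypothetical): because
 * the answer may be stale as soon as it is returned, tasklet_is_enabled
 * is mainly useful for diagnostic assertions in code paths where the
 * caller already knows the disable count cannot change concurrently.
 *
 *	KASSERT(tasklet_is_enabled(&sc->sc_tasklet));
 *	tasklet_schedule(&sc->sc_tasklet);
 */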