1*fc0f4556Sriastradh /* $NetBSD: linux_irq_work.c,v 1.2 2021/12/19 11:50:54 riastradh Exp $ */
2aaec65bfSriastradh
3aaec65bfSriastradh /*-
4aaec65bfSriastradh * Copyright (c) 2021 The NetBSD Foundation, Inc.
5aaec65bfSriastradh * All rights reserved.
6aaec65bfSriastradh *
7aaec65bfSriastradh * This code is derived from software contributed to The NetBSD Foundation
8aaec65bfSriastradh * by Taylor R. Campbell.
9aaec65bfSriastradh *
10aaec65bfSriastradh * Redistribution and use in source and binary forms, with or without
11aaec65bfSriastradh * modification, are permitted provided that the following conditions
12aaec65bfSriastradh * are met:
13aaec65bfSriastradh * 1. Redistributions of source code must retain the above copyright
14aaec65bfSriastradh * notice, this list of conditions and the following disclaimer.
15aaec65bfSriastradh * 2. Redistributions in binary form must reproduce the above copyright
16aaec65bfSriastradh * notice, this list of conditions and the following disclaimer in the
17aaec65bfSriastradh * documentation and/or other materials provided with the distribution.
18aaec65bfSriastradh *
19aaec65bfSriastradh * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20aaec65bfSriastradh * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21aaec65bfSriastradh * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22aaec65bfSriastradh * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23aaec65bfSriastradh * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24aaec65bfSriastradh * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25aaec65bfSriastradh * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26aaec65bfSriastradh * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27aaec65bfSriastradh * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28aaec65bfSriastradh * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29aaec65bfSriastradh * POSSIBILITY OF SUCH DAMAGE.
30aaec65bfSriastradh */
31aaec65bfSriastradh
32aaec65bfSriastradh #include <sys/cdefs.h>
33*fc0f4556Sriastradh __KERNEL_RCSID(0, "$NetBSD: linux_irq_work.c,v 1.2 2021/12/19 11:50:54 riastradh Exp $");
34aaec65bfSriastradh
35aaec65bfSriastradh #include <sys/param.h>
36aaec65bfSriastradh
37aaec65bfSriastradh #include <sys/atomic.h>
38aaec65bfSriastradh #include <sys/intr.h>
39*fc0f4556Sriastradh #include <sys/kmem.h>
40aaec65bfSriastradh #include <sys/mutex.h>
41aaec65bfSriastradh #include <sys/percpu.h>
42aaec65bfSriastradh #include <sys/queue.h>
43aaec65bfSriastradh
44aaec65bfSriastradh #include <linux/irq_work.h>
45aaec65bfSriastradh
/*
 * Per-CPU irq_work state: a spin lock and the queue of work items
 * pending on that CPU.  Allocated separately with kmem and reached
 * through a pointer stored in the percpu(9) slot (see
 * irq_work_cpu_init).
 */
struct irq_work_cpu {
	kmutex_t iwc_lock;			/* IPL_HIGH spin lock guarding iwc_todo */
	SIMPLEQ_HEAD(, irq_work) iwc_todo;	/* pending struct irq_work entries */
};

enum {
	IRQ_WORK_PENDING = 1,	/* iw_flags bit: item is on some CPU's queue */
};

/* percpu(9) collection; each slot holds a struct irq_work_cpu pointer. */
static struct percpu *irq_work_percpu;
/* Soft interrupt that drains the current CPU's queue (irq_work_intr). */
static void *irq_work_sih __read_mostly;
57aaec65bfSriastradh
58aaec65bfSriastradh static void
irq_work_intr(void * cookie)59aaec65bfSriastradh irq_work_intr(void *cookie)
60aaec65bfSriastradh {
61*fc0f4556Sriastradh struct irq_work_cpu *const *iwcp, *iwc;
62aaec65bfSriastradh SIMPLEQ_HEAD(, irq_work) todo = SIMPLEQ_HEAD_INITIALIZER(todo);
63aaec65bfSriastradh struct irq_work *iw, *next;
64aaec65bfSriastradh
65*fc0f4556Sriastradh iwcp = percpu_getref(irq_work_percpu);
66*fc0f4556Sriastradh iwc = *iwcp;
67aaec65bfSriastradh mutex_spin_enter(&iwc->iwc_lock);
68aaec65bfSriastradh SIMPLEQ_CONCAT(&todo, &iwc->iwc_todo);
69aaec65bfSriastradh mutex_spin_exit(&iwc->iwc_lock);
70aaec65bfSriastradh percpu_putref(irq_work_percpu);
71aaec65bfSriastradh
72aaec65bfSriastradh SIMPLEQ_FOREACH_SAFE(iw, &todo, iw_entry, next) {
73aaec65bfSriastradh atomic_store_relaxed(&iw->iw_flags, 0);
74aaec65bfSriastradh (*iw->func)(iw);
75aaec65bfSriastradh }
76aaec65bfSriastradh }
77aaec65bfSriastradh
78aaec65bfSriastradh static void
irq_work_cpu_init(void * ptr,void * cookie,struct cpu_info * ci)79aaec65bfSriastradh irq_work_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
80aaec65bfSriastradh {
81*fc0f4556Sriastradh struct irq_work_cpu **iwcp = ptr, *iwc;
82aaec65bfSriastradh
83*fc0f4556Sriastradh iwc = *iwcp = kmem_zalloc(sizeof(*iwc), KM_SLEEP);
84aaec65bfSriastradh mutex_init(&iwc->iwc_lock, MUTEX_DEFAULT, IPL_HIGH);
85aaec65bfSriastradh SIMPLEQ_INIT(&iwc->iwc_todo);
86aaec65bfSriastradh }
87aaec65bfSriastradh
88aaec65bfSriastradh static void
irq_work_cpu_fini(void * ptr,void * cookie,struct cpu_info * ci)89aaec65bfSriastradh irq_work_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
90aaec65bfSriastradh {
91*fc0f4556Sriastradh struct irq_work_cpu **iwcp = ptr, *iwc = *iwcp;
92aaec65bfSriastradh
93aaec65bfSriastradh KASSERT(SIMPLEQ_EMPTY(&iwc->iwc_todo));
94aaec65bfSriastradh mutex_destroy(&iwc->iwc_lock);
95*fc0f4556Sriastradh kmem_free(iwc, sizeof(*iwc));
96aaec65bfSriastradh }
97aaec65bfSriastradh
98aaec65bfSriastradh void
linux_irq_work_init(void)99aaec65bfSriastradh linux_irq_work_init(void)
100aaec65bfSriastradh {
101aaec65bfSriastradh
102aaec65bfSriastradh irq_work_percpu = percpu_create(sizeof(struct irq_work_cpu),
103aaec65bfSriastradh irq_work_cpu_init, irq_work_cpu_fini, NULL);
104aaec65bfSriastradh irq_work_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
105aaec65bfSriastradh irq_work_intr, NULL);
106aaec65bfSriastradh }
107aaec65bfSriastradh
108aaec65bfSriastradh void
linux_irq_work_fini(void)109aaec65bfSriastradh linux_irq_work_fini(void)
110aaec65bfSriastradh {
111aaec65bfSriastradh
112aaec65bfSriastradh softint_disestablish(irq_work_sih);
113aaec65bfSriastradh percpu_free(irq_work_percpu, sizeof(struct irq_work_cpu));
114aaec65bfSriastradh }
115aaec65bfSriastradh
116aaec65bfSriastradh void
init_irq_work(struct irq_work * iw,void (* func)(struct irq_work *))117aaec65bfSriastradh init_irq_work(struct irq_work *iw, void (*func)(struct irq_work *))
118aaec65bfSriastradh {
119aaec65bfSriastradh
120aaec65bfSriastradh iw->iw_flags = 0;
121aaec65bfSriastradh iw->func = func;
122aaec65bfSriastradh }
123aaec65bfSriastradh
124aaec65bfSriastradh bool
irq_work_queue(struct irq_work * iw)125aaec65bfSriastradh irq_work_queue(struct irq_work *iw)
126aaec65bfSriastradh {
127*fc0f4556Sriastradh struct irq_work_cpu *const *iwcp, *iwc;
128aaec65bfSriastradh
129aaec65bfSriastradh if (atomic_swap_uint(&iw->iw_flags, IRQ_WORK_PENDING)
130aaec65bfSriastradh & IRQ_WORK_PENDING)
131aaec65bfSriastradh return false;
132aaec65bfSriastradh
133*fc0f4556Sriastradh iwcp = percpu_getref(irq_work_percpu);
134*fc0f4556Sriastradh iwc = *iwcp;
135aaec65bfSriastradh mutex_spin_enter(&iwc->iwc_lock);
136aaec65bfSriastradh SIMPLEQ_INSERT_TAIL(&iwc->iwc_todo, iw, iw_entry);
137aaec65bfSriastradh mutex_spin_exit(&iwc->iwc_lock);
138aaec65bfSriastradh softint_schedule(irq_work_sih);
139aaec65bfSriastradh percpu_putref(irq_work_percpu);
140aaec65bfSriastradh
141aaec65bfSriastradh return true;
142aaec65bfSriastradh }
143