/*	$NetBSD: ipifuncs.c,v 1.7 2022/04/09 23:43:12 riastradh Exp $	*/
/*	$OpenBSD: ipi.c,v 1.4 2011/01/14 13:20:06 jsing Exp $	*/

/*
 * Copyright (c) 2010 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/device.h>
#include <sys/atomic.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#if 0
#include <machine/fpu.h>
#endif
#include <machine/iomod.h>
#include <machine/intr.h>
#include <machine/mutex.h>
#include <machine/reg.h>
#include <machine/int_fmtio.h>

#include <hppa/hppa/cpuvar.h>

static void hppa_ipi_nop(void);
static void hppa_ipi_halt(void);

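/*
 * IPI dispatch table, indexed by IPI number (the HPPA_IPI_* constants).
 * ipinames[] supplies the matching event-counter names attached in
 * hppa_ipi_init(); the two arrays must be kept in sync.
 */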
void (*ipifunc[HPPA_NIPI])(void) =
{
	hppa_ipi_nop,
	hppa_ipi_halt,
	xc_ipi_handler,
	ipi_cpu_handler,
};

const char *ipinames[HPPA_NIPI] = {
	"nop ipi",
	"halt ipi",
	"xcall ipi",
	"generic ipi"
};

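/*
 * Attach the per-CPU IPI event counters: one aggregate counter plus
 * one counter per IPI type, named after ipinames[].
 */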
void
hppa_ipi_init(struct cpu_info *ci)
{
	struct cpu_softc *sc = ci->ci_softc;
	int i;

	evcnt_attach_dynamic(&sc->sc_evcnt_ipi, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "ipi");

	for (i = 0; i < HPPA_NIPI; i++) {
		evcnt_attach_dynamic(&sc->sc_evcnt_which_ipi[i],
		    EVCNT_TYPE_INTR, NULL, device_xname(sc->sc_dev),
		    ipinames[i]);
	}
}

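/*
 * Interrupt handler for the IPI interrupt: atomically fetch and clear
 * this CPU's pending-IPI bitmask, then call the handler for every bit
 * that was set.
 */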
int
hppa_ipi_intr(void *arg)
{
	struct cpu_info *ci = curcpu();
	struct cpu_softc *sc = ci->ci_softc;
	u_long ipi_pending;
	int bit = 0;

	/* Handle an IPI. */
	ipi_pending = atomic_swap_ulong(&ci->ci_ipi, 0);
	membar_acquire(); /* matches membar_release in xc_send_ipi, cpu_ipi */

	KASSERT(ipi_pending);

	sc->sc_evcnt_ipi.ev_count++;

	while (ipi_pending) {
		if (ipi_pending & (1L << bit)) {
			sc->sc_evcnt_which_ipi[bit].ev_count++;
			(*ipifunc[bit])();
		}
		ipi_pending &= ~(1L << bit);
		bit++;
	}

	return 1;
}

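/*
 * Post the IPI number `ipi' to the target CPU: set the corresponding
 * bit in ci->ci_ipi, then interrupt the target by writing to its
 * External Interrupt Request register.
 */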
int
hppa_ipi_send(struct cpu_info *ci, u_long ipi)
{
	struct iomod *cpu;

	KASSERT(ci->ci_flags & CPUF_RUNNING);

	atomic_or_ulong(&ci->ci_ipi, (1L << ipi));

	/*
	 * Send an IPI to the specified CPU by triggering EIR{1} (irq 30).
	 *
	 * The `load-acquire operation' matching this store-release is
	 * somewhere inside the silicon or firmware -- the point is
	 * that the store to ci->ci_ipi above must happen before
	 * writing to EIR{1}; there is conceptually some magic inside
	 * the silicon or firmware on the target CPU that effectively
	 * does
	 *
	 *	if (atomic_load_acquire(&cpu->io_eir)) {
	 *		enter_interrupt_vector();
	 *	}
	 */
	cpu = (struct iomod *)(ci->ci_hpa);
	atomic_store_release(&cpu->io_eir, 1);

	return 0;
}

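/*
 * Send the given IPI to every running CPU except the caller.  As
 * written the return value is always 0: count is only bumped when
 * hppa_ipi_send() returns non-zero, and that function always
 * returns 0.
 */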
int
hppa_ipi_broadcast(u_long ipi)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int count = 0;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci != curcpu() && (ci->ci_flags & CPUF_RUNNING))
			if (hppa_ipi_send(ci, ipi))
				count++;
	}

	return count;
}

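/* Handler for the nop IPI: taking the interrupt is the whole effect. */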
static void
hppa_ipi_nop(void)
{
}

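/*
 * Handler for the halt IPI: mark this CPU as no longer running and
 * spin forever.
 */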
static void
hppa_ipi_halt(void)
{
	struct cpu_info *ci = curcpu();

	/* Turn off interrupts and halt CPU. */
// 	hppa_intr_disable();
	ci->ci_flags &= ~CPUF_RUNNING;

	for (;;)
		;
}

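/*
 * MI cross-call backend (sys/xcall.h): raise HPPA_IPI_XCALL on the
 * given CPU, or on all other running CPUs when ci is NULL.
 */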
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	membar_release();	/* matches membar_acquire in hppa_ipi_intr */
	if (ci) {
		/* Unicast: remote CPU. */
		hppa_ipi_send(ci, HPPA_IPI_XCALL);
	} else {
		/* Broadcast: all, but local CPU (caller will handle it). */
		hppa_ipi_broadcast(HPPA_IPI_XCALL);
	}
}

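/*
 * MI generic-IPI backend (sys/ipi.h): raise HPPA_IPI_GENERIC on the
 * given CPU, or on all other running CPUs when ci is NULL.
 */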
void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	membar_release();	/* matches membar_acquire in hppa_ipi_intr */
	if (ci) {
		/* Unicast: remote CPU. */
		hppa_ipi_send(ci, HPPA_IPI_GENERIC);
	} else {
		/* Broadcast: all, but local CPU (caller will handle it). */
		hppa_ipi_broadcast(HPPA_IPI_GENERIC);
	}
}