/*	$OpenBSD: aplintc.c,v 1.4 2021/05/16 15:10:19 deraadt Exp $	*/
/*
 * Copyright (c) 2021 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/intr.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

#include <ddb/db_output.h>

#define CNTV_CTL_IMASK		(1 << 1)

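/*
 * Register layout of the Apple Interrupt Controller (AIC).  Reading
 * AIC_EVENT returns the type and number of a pending event and masks
 * that source; IRQs are re-enabled through AIC_MASK_CLR once they have
 * been serviced.  AIC_SW_SET/AIC_SW_CLR raise and clear software
 * triggered interrupts, which this driver uses to replay deferred IRQs.
 */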
#define AIC_INFO		0x0004
#define  AIC_INFO_NIRQ(val)	((val) & 0xffff)
#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define  AIC_EVENT_TYPE(val)	((val) >> 16)
#define  AIC_EVENT_TYPE_IRQ	1
#define  AIC_EVENT_TYPE_IPI	4
#define  AIC_EVENT_IRQ(val)	((val) & 0xffff)
#define  AIC_EVENT_IPI_OTHER	1
#define  AIC_EVENT_IPI_SELF	2
#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028
#define  AIC_IPI_OTHER		(1U << 0)
#define  AIC_IPI_SELF		(1U << 31)
#define AIC_TARGET_CPU(irq)	(0x3000 + ((irq) << 2))
#define AIC_SW_SET(irq)		(0x4000 + (((irq) >> 5) << 2))
#define AIC_SW_CLR(irq)		(0x4080 + (((irq) >> 5) << 2))
#define  AIC_SW_BIT(irq)	(1U << ((irq) & 0x1f))
#define AIC_MASK_SET(irq)	(0x4100 + (((irq) >> 5) << 2))
#define AIC_MASK_CLR(irq)	(0x4180 + (((irq) >> 5) << 2))
#define  AIC_MASK_BIT(irq)	(1U << ((irq) & 0x1f))

#define AIC_MAXCPUS		32

#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;
	int		(*ih_func)(void *);
	void		*ih_arg;
	int		ih_ipl;
	int		ih_flags;
	int		ih_irq;
	struct evcount	ih_count;
	const char	*ih_name;
	struct cpu_info *ih_ci;
};

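/*
 * Per-driver state.  sc_irq_list holds handlers whose interrupt fired
 * while the CPU was running at an equal or higher IPL; they are
 * replayed from aplintc_splx() when the IPL drops.  sc_cpuremap maps
 * a cpu_info cpuid to the AIC's own CPU numbering for IPI delivery.
 */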
struct aplintc_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;

	struct interrupt_controller sc_ic;

	struct intrhand		*sc_fiq_handler;
	int			sc_fiq_pending[AIC_MAXCPUS];
	struct intrhand		**sc_irq_handler;
	int			sc_nirq;
	TAILQ_HEAD(, intrhand)	sc_irq_list[NIPL];

	uint32_t		sc_cpuremap[AIC_MAXCPUS];
	u_int			sc_ipi_reason[AIC_MAXCPUS];
};

struct aplintc_softc *aplintc_sc;

int	aplintc_match(struct device *, void *, void *);
void	aplintc_attach(struct device *, struct device *, void *);

struct cfattach	aplintc_ca = {
	sizeof (struct aplintc_softc), aplintc_match, aplintc_attach
};

struct cfdriver aplintc_cd = {
	NULL, "aplintc", DV_DULL
};

void	aplintc_cpuinit(void);
void	aplintc_irq_handler(void *);
void	aplintc_fiq_handler(void *);
void	aplintc_intr_barrier(void *);
int	aplintc_splraise(int);
int	aplintc_spllower(int);
void	aplintc_splx(int);
void	aplintc_setipl(int);

void	*aplintc_intr_establish(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	aplintc_intr_disestablish(void *);

void	aplintc_send_ipi(struct cpu_info *, int);
void	aplintc_handle_ipi(struct aplintc_softc *, uint32_t);

int
aplintc_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "apple,aic");
}

void
aplintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct aplintc_softc *sc = (struct aplintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t info;
	int ipl;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	info = HREAD4(sc, AIC_INFO);
	sc->sc_nirq = AIC_INFO_NIRQ(info);
	sc->sc_irq_handler = mallocarray(sc->sc_nirq,
	    sizeof(*sc->sc_irq_handler), M_DEVBUF, M_WAITOK | M_ZERO);
	for (ipl = 0; ipl < NIPL; ipl++)
		TAILQ_INIT(&sc->sc_irq_list[ipl]);

	printf(" nirq %d\n", sc->sc_nirq);

	arm_init_smask();

	aplintc_sc = sc;
	aplintc_cpuinit();

	arm_set_intr_handler(aplintc_splraise, aplintc_spllower, aplintc_splx,
	    aplintc_setipl, aplintc_irq_handler, aplintc_fiq_handler);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = aplintc_intr_establish;
	sc->sc_ic.ic_disestablish = aplintc_intr_disestablish;
	sc->sc_ic.ic_cpu_enable = aplintc_cpuinit;
	sc->sc_ic.ic_barrier = aplintc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_ic);

	intr_send_ipi_func = aplintc_send_ipi;
}

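/*
 * Per-CPU initialization: record how the AIC numbers the calling CPU
 * (from the WHOAMI register) so that IPIs can be targeted at it later.
 */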
void
aplintc_cpuinit(void)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	uint32_t hwid;

	KASSERT(ci->ci_cpuid < AIC_MAXCPUS);

	hwid = HREAD4(sc, AIC_WHOAMI);
	KASSERT(hwid < AIC_MAXCPUS);
	sc->sc_cpuremap[ci->ci_cpuid] = hwid;
}

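/*
 * Main IRQ dispatch.  Reading AIC_EVENT tells us what fired.  IPIs are
 * handled immediately; device interrupts are either run right away or,
 * if the CPU is already at an equal or higher IPL, queued on the
 * per-IPL pending list and replayed from aplintc_splx().
 */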
void
aplintc_irq_handler(void *frame)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint32_t event;
	uint32_t irq, type;
	int handled;
	int s;

	event = HREAD4(sc, AIC_EVENT);
	irq = AIC_EVENT_IRQ(event);
	type = AIC_EVENT_TYPE(event);

	if (type == AIC_EVENT_TYPE_IPI) {
		aplintc_handle_ipi(sc, irq);
		return;
	}

	if (type != AIC_EVENT_TYPE_IRQ) {
		printf("%s: unexpected event type %d\n", __func__, type);
		return;
	}

	if (irq >= sc->sc_nirq)
		panic("%s: unexpected irq %d", __func__, irq);

	if (sc->sc_irq_handler[irq] == NULL)
		return;

	HWRITE4(sc, AIC_SW_CLR(irq), AIC_SW_BIT(irq));

	ih = sc->sc_irq_handler[irq];

	if (ci->ci_cpl >= ih->ih_ipl) {
		/* Queue interrupt as pending. */
		TAILQ_INSERT_TAIL(&sc->sc_irq_list[ih->ih_ipl], ih, ih_list);
	} else {
		s = aplintc_splraise(ih->ih_ipl);
		intr_enable();
		handled = ih->ih_func(ih->ih_arg);
		intr_disable();
		if (handled)
			ih->ih_count.ec_count++;
		aplintc_splx(s);

		HWRITE4(sc, AIC_MASK_CLR(irq), AIC_MASK_BIT(irq));
	}
}

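/*
 * FIQ dispatch.  On this hardware the FIQ line carries the ARM generic
 * timer, which is not routed through the AIC, so it is masked at the
 * timer itself (CNTV_CTL_IMASK) while the CPU runs at IPL_CLOCK or
 * above and unmasked again from aplintc_splx().
 */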
void
aplintc_fiq_handler(void *frame)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	uint64_t reg;
	int s;

	if (ci->ci_cpl >= IPL_CLOCK) {
		/* Mask timer interrupt and mark as pending. */
		reg = READ_SPECIALREG(cntv_ctl_el0);
		WRITE_SPECIALREG(cntv_ctl_el0, reg | CNTV_CTL_IMASK);
		sc->sc_fiq_pending[ci->ci_cpuid] = 1;
		return;
	}

	s = aplintc_splraise(IPL_CLOCK);
	sc->sc_fiq_handler->ih_func(frame);
	sc->sc_fiq_handler->ih_count.ec_count++;
	aplintc_splx(s);
}

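/*
 * Interrupt barrier: synchronize with the CPU this handler is bound
 * to, so that any invocation in progress has finished when we return.
 */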
void
aplintc_intr_barrier(void *cookie)
{
	struct intrhand	*ih = cookie;

	sched_barrier(ih->ih_ci);
}

int
aplintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	if (old > new)
		new = old;

	aplintc_setipl(new);
	return old;
}

int
aplintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	aplintc_splx(new);
	return old;
}

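/*
 * Lower the IPL to the requested level and replay work that was
 * deferred while it was raised: unmask the timer if a FIQ arrived at
 * or above IPL_CLOCK, and re-trigger queued device interrupts through
 * the AIC's software-set registers so they fire again as hardware
 * events.  Device interrupts are routed to the primary CPU, so only
 * that CPU replays them.  Finally run any pending soft interrupts.
 */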
void
aplintc_splx(int new)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint64_t reg;
	u_long daif;
	int ipl;

	daif = intr_disable();

	/* Process pending FIQs. */
	if (sc->sc_fiq_pending[ci->ci_cpuid] && new < IPL_CLOCK) {
		sc->sc_fiq_pending[ci->ci_cpuid] = 0;
		reg = READ_SPECIALREG(cntv_ctl_el0);
		WRITE_SPECIALREG(cntv_ctl_el0, reg & ~CNTV_CTL_IMASK);
	}

	/* Process pending IRQs. */
	if (CPU_IS_PRIMARY(ci)) {
		for (ipl = ci->ci_cpl; ipl > new; ipl--) {
			while (!TAILQ_EMPTY(&sc->sc_irq_list[ipl])) {
				ih = TAILQ_FIRST(&sc->sc_irq_list[ipl]);
				TAILQ_REMOVE(&sc->sc_irq_list[ipl],
				    ih, ih_list);

				HWRITE4(sc, AIC_SW_SET(ih->ih_irq),
				    AIC_SW_BIT(ih->ih_irq));
				HWRITE4(sc, AIC_MASK_CLR(ih->ih_irq),
				    AIC_MASK_BIT(ih->ih_irq));
			}
		}
	}

	aplintc_setipl(new);
	intr_restore(daif);

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);
}

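/*
 * The IPL is tracked purely in software; nothing is masked in the AIC
 * here.  Interrupts that arrive at too high an IPL are deferred in
 * aplintc_irq_handler() instead.
 */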
void
aplintc_setipl(int ipl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_cpl = ipl;
}

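/*
 * Establish a handler from an FDT interrupt specifier: cell[0] selects
 * the type (0 for a regular AIC IRQ, 1 for an FIQ) and cell[1] the
 * interrupt number.  FIQs are expected to be registered at
 * IPL_CLOCK|IPL_MPSAFE (the timer).  Regular IRQs are unmasked and
 * targeted at the primary CPU.
 */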
void *
aplintc_intr_establish(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct aplintc_softc *sc = cookie;
	struct intrhand *ih;
	uint32_t type = cell[0];
	uint32_t irq = cell[1];

	if (type == 0) {
		KASSERT(level != (IPL_CLOCK | IPL_MPSAFE));
		if (irq >= sc->sc_nirq) {
			panic("%s: bogus irq number %d",
			    sc->sc_dev.dv_xname, irq);
		}
	} else if (type == 1) {
		KASSERT(level == (IPL_CLOCK | IPL_MPSAFE));
		if (irq >= 4)
			panic("%s: bogus fiq number %d",
			    sc->sc_dev.dv_xname, irq);
	} else {
		panic("%s: bogus irq type %d",
		    sc->sc_dev.dv_xname, cell[0]);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irq;
	ih->ih_name = name;
	ih->ih_ci = ci;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	if (type == 0) {
		sc->sc_irq_handler[irq] = ih;
		HWRITE4(sc, AIC_TARGET_CPU(irq), 1);
		HWRITE4(sc, AIC_MASK_CLR(irq), AIC_MASK_BIT(irq));
	} else
		sc->sc_fiq_handler = ih;

	return ih;
}

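/*
 * Tear down a handler: mask the interrupt, drop any deferred instance
 * from the pending list and release the intrhand.  The KASSERT keeps
 * this from being used on the FIQ (timer) handler.
 */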
void
aplintc_intr_disestablish(void *cookie)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct intrhand *ih = cookie;
	struct intrhand *tmp;
	u_long daif;

	KASSERT(ih->ih_ipl < IPL_CLOCK);

	daif = intr_disable();

	HWRITE4(sc, AIC_SW_CLR(ih->ih_irq), AIC_SW_BIT(ih->ih_irq));
	HWRITE4(sc, AIC_MASK_SET(ih->ih_irq), AIC_MASK_BIT(ih->ih_irq));

	/* Remove ourselves from the list of pending IRQs. */
	TAILQ_FOREACH(tmp, &sc->sc_irq_list[ih->ih_ipl], ih_list) {
		if (tmp == ih) {
			TAILQ_REMOVE(&sc->sc_irq_list[ih->ih_ipl],
			    ih, ih_list);
			break;
		}
	}

	sc->sc_irq_handler[ih->ih_irq] = NULL;
	if (ih->ih_name)
		evcount_detach(&ih->ih_count);

	intr_restore(daif);

	free(ih, M_DEVBUF, sizeof(*ih));
}

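/*
 * Send an IPI by setting the target CPU's bit in AIC_IPI_SEND, using
 * the AIC numbering recorded in aplintc_cpuinit().  Only the DDB
 * reason is recorded; everything else is treated as a plain wakeup.
 */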
void
aplintc_send_ipi(struct cpu_info *ci, int reason)
{
	struct aplintc_softc *sc = aplintc_sc;
	uint32_t hwid;

	if (ci == curcpu() && reason == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB with IPI_NOP */
	if (reason == ARM_IPI_DDB)
		sc->sc_ipi_reason[ci->ci_cpuid] = reason;
	membar_producer();

	hwid = sc->sc_cpuremap[ci->ci_cpuid];
	HWRITE4(sc, AIC_IPI_SEND, (1U << hwid));
}

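/*
 * Handle an incoming IPI: acknowledge it, enter DDB if that was the
 * recorded reason, and clear the mask bit for "other CPU" IPIs so
 * further IPIs can be delivered.
 */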
void
aplintc_handle_ipi(struct aplintc_softc *sc, uint32_t irq)
{
	struct cpu_info *ci = curcpu();

	if (irq != AIC_EVENT_IPI_OTHER)
		panic("%s: unexpected irq %d", __func__, irq);

	HWRITE4(sc, AIC_IPI_ACK, AIC_IPI_OTHER);

	membar_consumer();
	if (sc->sc_ipi_reason[ci->ci_cpuid] == ARM_IPI_DDB) {
		sc->sc_ipi_reason[ci->ci_cpuid] = ARM_IPI_NOP;
#ifdef DDB
		db_enter();
#endif
	}

	HWRITE4(sc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
467