/*	$OpenBSD: octcit.c,v 1.6 2018/02/24 11:42:31 visa Exp $	*/

/*
 * Copyright (c) 2017 Visa Hankala
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for OCTEON Central Interrupt Unit version 3 (CIU3).
 *
 * CIU3 is present on CN72xx, CN73xx, CN77xx, and CN78xx.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

#define CIU3_IDT(core, ipl)		((core) * 4 + (ipl))
#define CIU3_IDT_CTL(idt)		((idt) *  8 + 0x110000u)
#define CIU3_IDT_PP(idt)		((idt) * 32 + 0x120000u)
#define CIU3_IDT_IO(idt)		((idt) *  8 + 0x130000u)
#define CIU3_DEST_PP_INT(core)		((core) * 8 + 0x200000u)
#define   CIU3_DEST_PP_INT_INTSN		0x000fffff00000000ull
#define   CIU3_DEST_PP_INT_INTSN_SHIFT		32
#define   CIU3_DEST_PP_INT_INTR			0x0000000000000001ull
#define CIU3_ISC_CTL(intsn)		((intsn) * 8 + 0x80000000u)
#define   CIU3_ISC_CTL_IDT			0x0000000000ff0000ull
#define   CIU3_ISC_CTL_IDT_SHIFT		16
#define   CIU3_ISC_CTL_IMP			0x0000000000008000ull
#define   CIU3_ISC_CTL_EN			0x0000000000000002ull
#define   CIU3_ISC_CTL_RAW			0x0000000000000001ull
#define CIU3_ISC_W1C(intsn)		((intsn) * 8 + 0x90000000u)
#define   CIU3_ISC_W1C_EN			0x0000000000000002ull
#define   CIU3_ISC_W1C_RAW			0x0000000000000001ull
#define CIU3_ISC_W1S(intsn)		((intsn) * 8 + 0xa0000000u)
#define   CIU3_ISC_W1S_EN			0x0000000000000002ull
#define   CIU3_ISC_W1S_RAW			0x0000000000000001ull
#define CIU3_NINTSN			(1u << 20)

#define IS_MBOX(intsn)			(((intsn) >> 12) == 4)
#define MBOX_INTSN(core)		((core) + 0x4000u)
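
/*
 * A note on addressing, inferred from the macros above and their use below:
 * CIU3_IDT(core, ipl) names a per-core interrupt delivery (IDT) slot;
 * octcit_init() uses slot 0 of each core for IP2 and slot 1 for IP3, and
 * disables the remaining two slots.  Each interrupt source number (intsn)
 * has a control word (ISC_CTL) plus write-1-to-clear (W1C) and
 * write-1-to-set (W1S) aliases for its enable (EN) and pending (RAW) bits.
 */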

#define CIU3_RD_8(sc, reg) \
	bus_space_read_8((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define CIU3_WR_8(sc, reg, val) \
	bus_space_write_8((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)

#define HASH_SIZE			64

struct octcit_intrhand {
	SLIST_ENTRY(octcit_intrhand)
				 ih_list;
	int			(*ih_func)(void *);
	void			*ih_arg;
	int			 ih_intsn;
	int			 ih_flags;
#define CIH_MPSAFE			0x01
#define CIH_EDGE			0x02	/* edge-triggered */
	int			 ih_level;
	struct evcount		 ih_count;
};

struct octcit_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;

	SLIST_HEAD(, octcit_intrhand)
				 sc_handlers[HASH_SIZE];
	int			 sc_minipl[MAXCPUS];
	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};

int	 octcit_match(struct device *, void *, void *);
void	 octcit_attach(struct device *, struct device *, void *);

void	 octcit_init(void);
uint32_t octcit_intr(uint32_t, struct trapframe *);
void	*octcit_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octcit_intr_establish_intsn(int, int, int, int (*)(void *),
	    void *, const char *);
void	*octcit_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octcit_intr_disestablish(void *);
void	 octcit_splx(int);

uint32_t octcit_ipi_intr(uint32_t, struct trapframe *);
int	 octcit_ipi_establish(int (*)(void *), cpuid_t);
void	 octcit_ipi_set(cpuid_t);
void	 octcit_ipi_clear(cpuid_t);

const struct cfattach octcit_ca = {
	sizeof(struct octcit_softc), octcit_match, octcit_attach
};

struct cfdriver octcit_cd = {
	NULL, "octcit", DV_DULL
};

struct octcit_softc	*octcit_sc;

int
octcit_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "cavium,octeon-7890-ciu3");
}

void
octcit_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octcit_softc *sc = (struct octcit_softc *)self;
	uint64_t val;
	int hash, intsn;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	for (hash = 0; hash < HASH_SIZE; hash++)
		SLIST_INIT(&sc->sc_handlers[hash]);

	/* Disable all interrupts and acknowledge any pending ones. */
	for (intsn = 0; intsn < CIU3_NINTSN; intsn++) {
		val = CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		if (ISSET(val, CIU3_ISC_CTL_IMP)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), 0);
			(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		}
	}

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octcit_init;
	sc->sc_ic.ic_establish = octcit_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octcit_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octcit_intr_disestablish;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octcit_ipi_establish;
	sc->sc_ic.ic_ipi_set = octcit_ipi_set;
	sc->sc_ic.ic_ipi_clear = octcit_ipi_clear;
#endif

	octcit_sc = sc;

	set_intr(INTPRI_CIU_0, CR_INT_0, octcit_intr);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octcit_ipi_intr);
#endif

	octcit_init();

	register_splx_handler(octcit_splx);
	octeon_intr_register(&sc->sc_ic);
}

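/*
 * Map an interrupt source number to one of the HASH_SIZE handler lists
 * using a small multiplicative hash with an xor fold.
 */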
static inline int
intsn_hash(int intsn)
{
	int tmp;

	tmp = intsn * 0xffb;
	return ((tmp >> 14) ^ tmp) & (HASH_SIZE - 1);
}

void
octcit_init(void)
{
	struct cpu_info *ci = curcpu();
	struct octcit_softc *sc = octcit_sc;
	int core = ci->ci_cpuid;

	sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;

	/*
	 * Set up interrupt routing.
	 */

	/* Route IP2. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 0)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 0)), 0);

	/* Route IP3. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 1)), 1);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 1)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 1)), 0);

	/* Disable IP4. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 2)), 0);

	/* Disable IP5. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 3)), 0);
}

void *
octcit_intr_establish(int irq, int level, int (*func)(void *), void *arg,
    const char *name)
{
	return octcit_intr_establish_intsn(irq, level, CIH_EDGE, func, arg,
	    name);
}

void *
octcit_intr_establish_intsn(int intsn, int level, int flags,
    int (*func)(void *), void *arg, const char *name)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih;
	struct octcit_softc *sc = octcit_sc;
	uint64_t val;
	int s;

	if ((unsigned int)intsn > CIU3_NINTSN)
		panic("%s: illegal intsn 0x%x", __func__, intsn);

	if (IS_MBOX(intsn))
		panic("%s: mbox intsn 0x%x not allowed", __func__, intsn);

	if (ISSET(level, IPL_MPSAFE))
		flags |= CIH_MPSAFE;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_intsn = intsn;
	evcount_attach(&ih->ih_count, name, &ih->ih_intsn);

	s = splhigh();

	SLIST_INSERT_HEAD(&sc->sc_handlers[intsn_hash(intsn)], ih, ih_list);
	if (sc->sc_minipl[ci->ci_cpuid] > level)
		sc->sc_minipl[ci->ci_cpuid] = level;

	val = CIU3_ISC_CTL_EN | (CIU3_IDT(ci->ci_cpuid, 0) <<
	    CIU3_ISC_CTL_IDT_SHIFT);
	CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_W1C_EN);
	CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
	(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));

	splx(s);

	return ih;
}

void *
octcit_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
    int (*func)(void *), void *arg, const char *name)
{
	uint32_t *cells;
	int flags = 0;
	int intsn, len, type;

	len = OF_getproplen(node, "interrupts");
	if (len / (sizeof(uint32_t) * 2) <= idx ||
	    len % (sizeof(uint32_t) * 2) != 0)
		return NULL;

	cells = malloc(len, M_TEMP, M_NOWAIT);
	if (cells == NULL)
		return NULL;

	OF_getpropintarray(node, "interrupts", cells, len);
	intsn = cells[idx * 2];
	type = cells[idx * 2 + 1];

	free(cells, M_TEMP, len);

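	/*
	 * The second interrupt cell gives the trigger type.  The value 4
	 * is taken to mean level-triggered (presumably the usual
	 * IRQ_TYPE_LEVEL_HIGH encoding); anything else is treated as
	 * edge-triggered so the dispatcher acknowledges the RAW bit.
	 */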
	if (type != 4)
		flags |= CIH_EDGE;

	return octcit_intr_establish_intsn(intsn, level, flags, func, arg,
	    name);
}

void
octcit_intr_disestablish(void *_ih)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih = _ih;
	struct octcit_intrhand *tmp;
	struct octcit_softc *sc = octcit_sc;
	unsigned int count;
	int found = 0;
	int hash = intsn_hash(ih->ih_intsn);
	int i, s;

	count = 0;
	SLIST_FOREACH(tmp, &sc->sc_handlers[hash], ih_list) {
		if (tmp->ih_intsn == ih->ih_intsn)
			count++;
		if (tmp == ih)
			found = 1;
	}
	if (found == 0)
		panic("%s: intrhand %p not registered", __func__, ih);

	s = splhigh();

	/* If this is the last handler for the source, disable the source. */
	if (count == 1) {
		CIU3_WR_8(sc, CIU3_ISC_W1C(ih->ih_intsn), CIU3_ISC_W1C_EN);
		CIU3_WR_8(sc, CIU3_ISC_CTL(ih->ih_intsn), 0);
		(void)CIU3_RD_8(sc, CIU3_ISC_CTL(ih->ih_intsn));
	}

	SLIST_REMOVE(&sc->sc_handlers[hash], ih, octcit_intrhand, ih_list);

	/* Recompute IPL floor if necessary. */
	if (sc->sc_minipl[ci->ci_cpuid] == ih->ih_level) {
		sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;
		for (i = 0; i < HASH_SIZE; i++) {
			SLIST_FOREACH(tmp, &sc->sc_handlers[i], ih_list) {
				if (sc->sc_minipl[ci->ci_cpuid] >
				    tmp->ih_level)
					sc->sc_minipl[ci->ci_cpuid] =
					    tmp->ih_level;
			}
		}
	}

	splx(s);

	free(ih, M_DEVBUF, sizeof(*ih));
}

uint32_t
octcit_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih;
	struct octcit_softc *sc = octcit_sc;
	uint64_t destpp;
	uint64_t intsn;
	unsigned int core = ci->ci_cpuid;
	int handled = 0;
	int ipl;
	int ret;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

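	/*
	 * If we are already running at or above the level of every
	 * registered handler, nothing can be dispatched now.  Mask IP2
	 * for this core and return; octcit_splx() re-enables it once
	 * the IPL drops below the floor.
	 */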
	if (frame->ipl >= sc->sc_minipl[ci->ci_cpuid]) {
		/* Disable IP2. */
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 0);
		(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
		return hwpend;
	}

	destpp = CIU3_RD_8(sc, CIU3_DEST_PP_INT(core));
	if (!ISSET(destpp, CIU3_DEST_PP_INT_INTR))
		goto spurious;

	ipl = ci->ci_ipl;

	intsn = (destpp & CIU3_DEST_PP_INT_INTSN) >>
	    CIU3_DEST_PP_INT_INTSN_SHIFT;
	SLIST_FOREACH(ih, &sc->sc_handlers[intsn_hash(intsn)], ih_list) {
		if (ih->ih_intsn != intsn)
			continue;

		splraise(ih->ih_level);

		/* Acknowledge the interrupt. */
		if (ISSET(ih->ih_flags, CIH_EDGE)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			(void)CIU3_RD_8(sc, CIU3_ISC_W1C(intsn));
		}

#ifdef MULTIPROCESSOR
		if (ih->ih_level < IPL_IPI) {
			sr = getsr();
			ENABLEIPI();
		}
		if (ISSET(ih->ih_flags, CIH_MPSAFE))
			need_lock = 0;
		else
			need_lock = 1;
		if (need_lock)
			__mp_lock(&kernel_lock);
#endif
		ret = (*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
		if (need_lock)
			__mp_unlock(&kernel_lock);
		if (ih->ih_level < IPL_IPI)
			setsr(sr);
#endif

		if (ret != 0) {
			handled = 1;
			atomic_inc_long(
			    (unsigned long *)&ih->ih_count.ec_count);
		}

		/*
		 * Stop processing when one handler has claimed the interrupt.
		 * This saves cycles because interrupt sharing should not
		 * happen on this hardware.
		 */
		if (ret == 1)
			break;
	}

	ci->ci_ipl = ipl;

spurious:
	if (handled == 0)
		printf("cpu%lu: spurious interrupt: dest 0x%016llx\n",
		    ci->ci_cpuid, destpp);

	return hwpend;
}

void
octcit_splx(int newipl)
{
	struct octcit_softc *sc = octcit_sc;
	struct cpu_info *ci = curcpu();
	unsigned int core = ci->ci_cpuid;

	ci->ci_ipl = newipl;

	if (newipl < sc->sc_minipl[ci->ci_cpuid])
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);

	/* If we still have softints pending, trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}

#ifdef MULTIPROCESSOR
uint32_t
octcit_ipi_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct octcit_softc *sc = octcit_sc;
	u_long cpuid = cpu_number();

	if (sc->sc_ipi_handler != NULL)
		sc->sc_ipi_handler((void *)cpuid);

	return hwpend;
}

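/*
 * Enable the calling core's mailbox interrupt source and route it to that
 * core's IP3 slot.  The handler function is shared by all cores and is
 * recorded only when cpu0 establishes it.
 */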
int
octcit_ipi_establish(int (*func)(void *), cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;
	uint64_t val;
	int intsn;

	if (cpuid == 0)
		sc->sc_ipi_handler = func;

	intsn = MBOX_INTSN(cpuid);
	val = CIU3_ISC_CTL_EN | (CIU3_IDT(cpuid, 1) << CIU3_ISC_CTL_IDT_SHIFT);
	CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_W1C_EN);
	CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
	(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));

	return 0;
}

void
octcit_ipi_set(cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;

	CIU3_WR_8(sc, CIU3_ISC_W1S(MBOX_INTSN(cpuid)), CIU3_ISC_W1S_RAW);
}

void
octcit_ipi_clear(cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;
	uint64_t reg = CIU3_ISC_W1C(MBOX_INTSN(cpuid));

	CIU3_WR_8(sc, reg, CIU3_ISC_W1C_RAW);
	(void)CIU3_RD_8(sc, reg);
}
#endif /* MULTIPROCESSOR */