xref: /openbsd-src/sys/arch/octeon/dev/octeon_intr.c (revision aec33009216a0cbb33e3403afd3dfa92bc7773fd)
1 /*	$OpenBSD: octeon_intr.c,v 1.21 2017/06/18 12:48:13 visa Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2004 Opsycon AB  (www.opsycon.se)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 
29 /*
30  * Interrupt support for Octeon Processor.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/conf.h>
37 #include <sys/malloc.h>
38 #include <sys/device.h>
39 #include <sys/proc.h>
40 #include <sys/atomic.h>
41 
42 #include <dev/ofw/openfirm.h>
43 
44 #include <mips64/mips_cpu.h>
45 
46 #include <machine/autoconf.h>
47 #include <machine/intr.h>
48 #include <machine/octeonreg.h>
49 
50 #include <octeon/dev/iobusvar.h>
51 
/* I/O bus space handle used for all CIU register accesses (iobus.c). */
extern bus_space_handle_t iobus_h;

/* Total number of CIU interrupt sources (2 banks x 64 bits). */
#define OCTEON_NINTS 128

/*
 * Per-bank register addresses.  Each bank covers 64 interrupt sources;
 * `sum' reports pending sources, `en' masks/unmasks them.
 */
struct intrbank {
	uint64_t	en;		/* enable mask register */
	uint64_t	sum;		/* service request register */
	int		id;		/* bank number */
};

#define NBANKS		2
#define BANK_SIZE	64
#define IRQ_TO_BANK(x)	((x) >> 6)	/* flat irq -> bank index */
#define IRQ_TO_BIT(x)	((x) & 0x3f)	/* flat irq -> bit within bank */

void	 octeon_intr_makemasks(void);
void	 octeon_splx(int);
uint32_t octeon_iointr_bank(struct trapframe *, struct intrbank *);
uint32_t octeon_iointr(uint32_t, struct trapframe *);
void	 octeon_setintrmask(int);

/* Handler chains, one singly-linked list per interrupt source. */
struct intrhand *octeon_intrhand[OCTEON_NINTS];

#define	INTPRI_CIU_0	(INTPRI_CLOCK + 1)

/* Per-cpu set of sources with at least one handler established. */
uint64_t octeon_intem[MAXCPUS][NBANKS];
/* Per-cpu, per-IPL set of sources that must stay masked at that level. */
uint64_t octeon_imask[MAXCPUS][NIPLS][NBANKS];
/* Per-cpu CIU bank register descriptions, filled in octeon_intr_init(). */
struct intrbank octeon_ibank[MAXCPUS][NBANKS];

#ifdef MULTIPROCESSOR
uint32_t	ipi_intr(uint32_t, struct trapframe *);

/* Single IPI callback shared by all cores; installed by the boot cpu. */
static int	(*ipi_handler)(void *);
#endif
86 
87 void
88 octeon_intr_init(void)
89 {
90 	int cpuid = cpu_number();
91 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN0(cpuid), 0);
92 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid), 0);
93 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN1(cpuid), 0);
94 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN1(cpuid), 0);
95 
96 	octeon_ibank[cpuid][0].en = CIU_IP2_EN0(cpuid);
97 	octeon_ibank[cpuid][0].sum = CIU_IP2_SUM0(cpuid);
98 	octeon_ibank[cpuid][0].id = 0;
99 	octeon_ibank[cpuid][1].en = CIU_IP2_EN1(cpuid);
100 	octeon_ibank[cpuid][1].sum = CIU_INT32_SUM1;
101 	octeon_ibank[cpuid][1].id = 1;
102 
103 	set_intr(INTPRI_CIU_0, CR_INT_0, octeon_iointr);
104 	register_splx_handler(octeon_splx);
105 
106 #ifdef MULTIPROCESSOR
107 	set_intr(INTPRI_IPI, CR_INT_1, ipi_intr);
108 #endif
109 }
110 
111 /*
112  * Establish an interrupt handler called from the dispatcher.
113  * The interrupt function established should return zero if there was nothing
114  * to serve (no int) and non-zero when an interrupt was serviced.
115  */
116 void *
117 octeon_intr_establish(int irq, int level,
118     int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
119 {
120 	int cpuid = cpu_number();
121 	struct intrhand **p, *q, *ih;
122 	int flags;
123 	int s;
124 
125 #ifdef DIAGNOSTIC
126 	if (irq >= OCTEON_NINTS || irq < 0)
127 		panic("intr_establish: illegal irq %d", irq);
128 #endif
129 
130 	flags = (level & IPL_MPSAFE) ? IH_MPSAFE : 0;
131 	level &= ~IPL_MPSAFE;
132 
133 	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
134 	if (ih == NULL)
135 		return NULL;
136 
137 	ih->ih_next = NULL;
138 	ih->ih_fun = ih_fun;
139 	ih->ih_arg = ih_arg;
140 	ih->ih_level = level;
141 	ih->ih_flags = flags;
142 	ih->ih_irq = irq;
143 	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);
144 
145 	s = splhigh();
146 
147 	/*
148 	 * Figure out where to put the handler.
149 	 * This is O(N^2), but we want to preserve the order, and N is
150 	 * generally small.
151 	 */
152 	for (p = &octeon_intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
153 		continue;
154 	*p = ih;
155 
156 	octeon_intem[cpuid][IRQ_TO_BANK(irq)] |= 1UL << IRQ_TO_BIT(irq);
157 	octeon_intr_makemasks();
158 
159 	splx(s);	/* causes hw mask update */
160 
161 	return (ih);
162 }
163 
/*
 * FDT convenience wrapper: establish a handler for the node's first
 * (index 0) interrupt specifier.
 */
void *
octeon_intr_establish_fdt(int node, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	return octeon_intr_establish_fdt_idx(node, 0, level,
	    ih_fun, ih_arg, ih_what);
}
171 
172 void *
173 octeon_intr_establish_fdt_idx(int node, int idx, int level,
174     int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
175 {
176 	uint32_t *cells;
177 	int irq, len;
178 
179 	/*
180 	 * Assume the interrupt controller is compatible with
181 	 * cavium,octeon-3860-ciu.
182 	 */
183 
184 	len = OF_getproplen(node, "interrupts");
185 	if (len / (sizeof(uint32_t) * 2) <= idx ||
186 	    len % (sizeof(uint32_t) * 2) != 0)
187 		return NULL;
188 
189 	cells = malloc(len, M_TEMP, M_NOWAIT);
190 	if (cells == NULL)
191 		return NULL;
192 
193 	OF_getpropintarray(node, "interrupts", cells, len);
194 	irq = cells[idx * 2] * BANK_SIZE + cells[idx * 2 + 1];
195 
196 	free(cells, M_TEMP, len);
197 
198 	return octeon_intr_establish(irq, level, ih_fun, ih_arg, ih_what);
199 }
200 
201 void
202 octeon_intr_disestablish(void *_ih)
203 {
204 	struct intrhand *ih = _ih;
205 	struct intrhand *p;
206 	unsigned int irq = ih->ih_irq;
207 	int cpuid = cpu_number();
208 	int s;
209 
210 	KASSERT(irq < OCTEON_NINTS);
211 
212 	s = splhigh();
213 
214 	if (ih == octeon_intrhand[irq]) {
215 		octeon_intrhand[irq] = ih->ih_next;
216 
217 		if (octeon_intrhand[irq] == NULL)
218 			octeon_intem[cpuid][IRQ_TO_BANK(irq)] &=
219 			    ~(1UL << IRQ_TO_BIT(irq));
220 	} else {
221 		for (p = octeon_intrhand[irq]; p != NULL; p = p->ih_next) {
222 			if (p->ih_next == ih) {
223 				p->ih_next = ih->ih_next;
224 				break;
225 			}
226 		}
227 		if (p == NULL)
228 			panic("%s: intrhand %p has not been registered",
229 			    __func__, ih);
230 	}
231 	free(ih, M_DEVBUF, sizeof(*ih));
232 
233 	octeon_intr_makemasks();
234 	splx(s);	/* causes hw mask update */
235 }
236 
/* FDT-flavored wrapper; the cookie came from octeon_intr_establish_fdt*(). */
void
octeon_intr_disestablish_fdt(void *ih)
{
	octeon_intr_disestablish(ih);
}
242 
/*
 * splx() back-end: lower the current IPL to newipl and reprogram the
 * hardware interrupt mask to match.
 */
void
octeon_splx(int newipl)
{
	struct cpu_info *ci = curcpu();

	/* Update masks to new ipl. Order highly important! */
	/*
	 * ci_ipl must be globally visible before the hardware mask is
	 * opened up, hence the noreorder/sync bracket.
	 */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	mips_sync();
	__asm__ (".set reorder\n");
	octeon_setintrmask(newipl);

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
259 
260 /*
261  * Recompute interrupt masks.
262  */
263 void
264 octeon_intr_makemasks()
265 {
266 	int cpuid = cpu_number();
267 	int irq, level;
268 	struct intrhand *q;
269 	uint intrlevel[OCTEON_NINTS];
270 
271 	/* First, figure out which levels each IRQ uses. */
272 	for (irq = 0; irq < OCTEON_NINTS; irq++) {
273 		uint levels = 0;
274 		for (q = octeon_intrhand[irq]; q != NULL; q = q->ih_next)
275 			levels |= 1 << q->ih_level;
276 		intrlevel[irq] = levels;
277 	}
278 
279 	/*
280 	 * Then figure out which IRQs use each level.
281 	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
282 	 * case an interrupt occurs during intr_disestablish() and causes
283 	 * an unfortunate splx() while we are here recomputing the masks.
284 	 */
285 	for (level = IPL_NONE; level < NIPLS; level++) {
286 		uint64_t mask[NBANKS] = {};
287 		for (irq = 0; irq < OCTEON_NINTS; irq++)
288 			if (intrlevel[irq] & (1 << level))
289 				mask[IRQ_TO_BANK(irq)] |=
290 				    1UL << IRQ_TO_BIT(irq);
291 		octeon_imask[cpuid][level][0] = mask[0];
292 		octeon_imask[cpuid][level][1] = mask[1];
293 	}
294 	/*
295 	 * There are tty, network and disk drivers that use free() at interrupt
296 	 * time, so vm > (tty | net | bio).
297 	 *
298 	 * Enforce a hierarchy that gives slow devices a better chance at not
299 	 * dropping data.
300 	 */
301 #define ADD_MASK(dst, src) do {	\
302 	dst[0] |= src[0];	\
303 	dst[1] |= src[1];	\
304 } while (0)
305 	ADD_MASK(octeon_imask[cpuid][IPL_NET], octeon_imask[cpuid][IPL_BIO]);
306 	ADD_MASK(octeon_imask[cpuid][IPL_TTY], octeon_imask[cpuid][IPL_NET]);
307 	ADD_MASK(octeon_imask[cpuid][IPL_VM], octeon_imask[cpuid][IPL_TTY]);
308 	ADD_MASK(octeon_imask[cpuid][IPL_CLOCK], octeon_imask[cpuid][IPL_VM]);
309 	ADD_MASK(octeon_imask[cpuid][IPL_HIGH], octeon_imask[cpuid][IPL_CLOCK]);
310 	ADD_MASK(octeon_imask[cpuid][IPL_IPI], octeon_imask[cpuid][IPL_HIGH]);
311 
312 	/*
313 	 * These are pseudo-levels.
314 	 */
315 	octeon_imask[cpuid][IPL_NONE][0] = 0;
316 	octeon_imask[cpuid][IPL_NONE][1] = 0;
317 }
318 
319 static inline int
320 octeon_next_irq(uint64_t *isr)
321 {
322 	uint64_t irq, tmp = *isr;
323 
324 	if (tmp == 0)
325 		return -1;
326 
327 	asm volatile (
328 	"	.set push\n"
329 	"	.set mips64\n"
330 	"	dclz	%0, %0\n"
331 	"	.set pop\n"
332 	: "=r" (tmp) : "0" (tmp));
333 
334 	irq = 63u - tmp;
335 	*isr &= ~(1u << irq);
336 	return irq;
337 }
338 
339 /*
340  * Dispatch interrupts in given bank.
341  */
342 uint32_t
343 octeon_iointr_bank(struct trapframe *frame, struct intrbank *bank)
344 {
345 	struct cpu_info *ci = curcpu();
346 	struct intrhand *ih;
347 	uint64_t imr, isr, mask;
348 	int handled, ipl, irq;
349 #ifdef MULTIPROCESSOR
350 	register_t sr;
351 	int need_lock;
352 #endif
353 
354 	isr = bus_space_read_8(&iobus_tag, iobus_h, bank->sum);
355 	imr = bus_space_read_8(&iobus_tag, iobus_h, bank->en);
356 
357 	isr &= imr;
358 	if (isr == 0)
359 		return 0;	/* not for us */
360 
361 	/*
362 	 * Mask all pending interrupts.
363 	 */
364 	bus_space_write_8(&iobus_tag, iobus_h, bank->en, imr & ~isr);
365 
366 	/*
367 	 * If interrupts are spl-masked, mask them and wait for splx()
368 	 * to reenable them when necessary.
369 	 */
370 	if ((mask = isr & octeon_imask[ci->ci_cpuid][frame->ipl][bank->id])
371 	    != 0) {
372 		isr &= ~mask;
373 		imr &= ~mask;
374 	}
375 	if (isr == 0)
376 		return 1;
377 
378 	/*
379 	 * Now process allowed interrupts.
380 	 */
381 
382 	__asm__ (".set noreorder\n");
383 	ipl = ci->ci_ipl;
384 	mips_sync();
385 	__asm__ (".set reorder\n");
386 
387 	while ((irq = octeon_next_irq(&isr)) >= 0) {
388 		irq += bank->id * BANK_SIZE;
389 		handled = 0;
390 		for (ih = octeon_intrhand[irq]; ih != NULL; ih = ih->ih_next) {
391 			splraise(ih->ih_level);
392 #ifdef MULTIPROCESSOR
393 			if (ih->ih_level < IPL_IPI) {
394 				sr = getsr();
395 				ENABLEIPI();
396 			}
397 			if (ih->ih_flags & IH_MPSAFE)
398 				need_lock = 0;
399 			else
400 				need_lock = ih->ih_level < IPL_CLOCK;
401 			if (need_lock)
402 				__mp_lock(&kernel_lock);
403 #endif
404 			if ((*ih->ih_fun)(ih->ih_arg) != 0) {
405 				handled = 1;
406 				atomic_inc_long(
407 				    (unsigned long *)&ih->ih_count.ec_count);
408 			}
409 #ifdef MULTIPROCESSOR
410 			if (need_lock)
411 				__mp_unlock(&kernel_lock);
412 			if (ih->ih_level < IPL_IPI)
413 				setsr(sr);
414 #endif
415 		}
416 		if (!handled)
417 			printf("spurious interrupt %d\n", irq);
418 	}
419 
420 	__asm__ (".set noreorder\n");
421 	ci->ci_ipl = ipl;
422 	mips_sync();
423 	__asm__ (".set reorder\n");
424 
425 	/*
426 	 * Reenable interrupts which have been serviced.
427 	 */
428 	bus_space_write_8(&iobus_tag, iobus_h, bank->en, imr);
429 
430 	return 1;
431 }
432 
433 /*
434  * Interrupt dispatcher.
435  */
436 uint32_t
437 octeon_iointr(uint32_t hwpend, struct trapframe *frame)
438 {
439 	int cpuid = cpu_number();
440 	int handled;
441 
442 	handled = octeon_iointr_bank(frame, &octeon_ibank[cpuid][0]);
443 	handled |= octeon_iointr_bank(frame, &octeon_ibank[cpuid][1]);
444 	return handled ? hwpend : 0;
445 }
446 
447 void
448 octeon_setintrmask(int level)
449 {
450 	int cpuid = cpu_number();
451 
452 	bus_space_write_8(&iobus_tag, iobus_h, octeon_ibank[cpuid][0].en,
453 	    octeon_intem[cpuid][0] & ~octeon_imask[cpuid][level][0]);
454 	bus_space_write_8(&iobus_tag, iobus_h, octeon_ibank[cpuid][1].en,
455 	    octeon_intem[cpuid][1] & ~octeon_imask[cpuid][level][1]);
456 }
457 
458 #ifdef MULTIPROCESSOR
459 /*
460  * Inter-processor interrupt control logic.
461  */
462 
463 uint32_t
464 ipi_intr(uint32_t hwpend, struct trapframe *frame)
465 {
466 	u_long cpuid = cpu_number();
467 
468 	/*
469 	 * Mask all pending interrupts.
470 	 */
471 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid), 0);
472 
473 	if (ipi_handler == NULL)
474 		return hwpend;
475 
476 	ipi_handler((void *)cpuid);
477 
478 	/*
479 	 * Reenable interrupts which have been serviced.
480 	 */
481 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid),
482 		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));
483 	return hwpend;
484 }
485 
486 int
487 hw_ipi_intr_establish(int (*func)(void *), u_long cpuid)
488 {
489 	if (cpuid == 0)
490 		ipi_handler = func;
491 
492 	bus_space_write_8(&iobus_tag, iobus_h, CIU_MBOX_CLR(cpuid),
493 		0xffffffff);
494 	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid),
495 		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));
496 
497 	return 0;
498 };
499 
/* Raise an IPI by setting a bit in the target core's mailbox register. */
void
hw_ipi_intr_set(u_long cpuid)
{
	bus_space_write_8(&iobus_tag, iobus_h, CIU_MBOX_SET(cpuid), 1);
}
505 
/*
 * Acknowledge a received IPI: read the pending mailbox bits and write
 * them back to the CLR register (apparently write-one-to-clear
 * semantics — the same pattern hw_ipi_intr_establish() relies on).
 */
void
hw_ipi_intr_clear(u_long cpuid)
{
	uint64_t clr =
		bus_space_read_8(&iobus_tag, iobus_h, CIU_MBOX_CLR(cpuid));
	bus_space_write_8(&iobus_tag, iobus_h, CIU_MBOX_CLR(cpuid), clr);
}
513 #endif /* MULTIPROCESSOR */
514