/*	$OpenBSD: octeon_intr.c,v 1.19 2016/12/08 16:27:46 visa Exp $	*/

/*
 * Copyright (c) 2000-2004 Opsycon AB  (www.opsycon.se)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Interrupt support for Octeon Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <dev/ofw/openfirm.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

#include <octeon/dev/iobusvar.h>

extern bus_space_handle_t iobus_h;

#define OCTEON_NINTS 128

struct intrbank {
	uint64_t	en;		/* enable mask register */
	uint64_t	sum;		/* service request register */
	int		id;		/* bank number */
};

#define NBANKS		2
#define BANK_SIZE	64
#define IRQ_TO_BANK(x)	((x) >> 6)
#define IRQ_TO_BIT(x)	((x) & 0x3f)
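
/*
 * Worked example (added for illustration, values not from the source):
 * CIU source 70 maps to IRQ_TO_BANK(70) == 1 and IRQ_TO_BIT(70) == 6,
 * i.e. bit 6 of the bank 1 (EN1/SUM1) registers, while source 3 stays
 * in bank 0 as bit 3 of EN0/SUM0.
 */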

void	 octeon_intr_makemasks(void);
void	 octeon_splx(int);
uint32_t octeon_iointr_bank(struct trapframe *, struct intrbank *);
uint32_t octeon_iointr(uint32_t, struct trapframe *);
void	 octeon_setintrmask(int);

struct intrhand *octeon_intrhand[OCTEON_NINTS];

#define	INTPRI_CIU_0	(INTPRI_CLOCK + 1)

uint64_t octeon_intem[MAXCPUS][NBANKS];
uint64_t octeon_imask[MAXCPUS][NIPLS][NBANKS];
struct intrbank octeon_ibank[MAXCPUS][NBANKS];

void
octeon_intr_init(void)
{
	int cpuid = cpu_number();
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN0(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN1(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN1(cpuid), 0);

	octeon_ibank[cpuid][0].en = CIU_IP2_EN0(cpuid);
	octeon_ibank[cpuid][0].sum = CIU_IP2_SUM0(cpuid);
	octeon_ibank[cpuid][0].id = 0;
	octeon_ibank[cpuid][1].en = CIU_IP2_EN1(cpuid);
	octeon_ibank[cpuid][1].sum = CIU_INT32_SUM1;
	octeon_ibank[cpuid][1].id = 1;

	set_intr(INTPRI_CIU_0, CR_INT_0, octeon_iointr);
	register_splx_handler(octeon_splx);
}

/*
 * Establish an interrupt handler to be called from the dispatcher.
 * The established interrupt function should return zero if there was
 * nothing to serve (the interrupt was not ours) and non-zero when an
 * interrupt was serviced.
 */
void *
octeon_intr_establish(int irq, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	int cpuid = cpu_number();
	struct intrhand **p, *q, *ih;
	int flags;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= OCTEON_NINTS || irq < 0)
		panic("intr_establish: illegal irq %d", irq);
#endif

	flags = (level & IPL_MPSAFE) ? IH_MPSAFE : 0;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_next = NULL;
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);

	s = splhigh();

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &octeon_intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		continue;
	*p = ih;

	octeon_intem[cpuid][IRQ_TO_BANK(irq)] |= 1UL << IRQ_TO_BIT(irq);
	octeon_intr_makemasks();

	splx(s);	/* causes hw mask update */

	return (ih);
}
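
/*
 * Usage sketch (hypothetical; the driver, softc and handler names below
 * are made up and not part of this file).  A driver would typically
 * hook its interrupt at attach time along these lines:
 *
 *	sc->sc_ih = octeon_intr_establish(irq, IPL_NET | IPL_MPSAFE,
 *	    mydrv_intr, sc, sc->sc_dev.dv_xname);
 *	if (sc->sc_ih == NULL)
 *		printf(": could not establish interrupt\n");
 *
 * where mydrv_intr() returns non-zero when it actually serviced an
 * interrupt, so the spurious-interrupt check in the dispatcher works.
 */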

void *
octeon_intr_establish_fdt_idx(int node, int idx, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	uint32_t *cells;
	int irq, len;

	/*
	 * Assume the interrupt controller is compatible with
	 * cavium,octeon-3860-ciu.
	 */

	len = OF_getproplen(node, "interrupts");
	if (len / (sizeof(uint32_t) * 2) <= idx ||
	    len % (sizeof(uint32_t) * 2) != 0)
		return NULL;

	cells = malloc(len, M_TEMP, M_NOWAIT);
	if (cells == NULL)
		return NULL;

	OF_getpropintarray(node, "interrupts", cells, len);
	irq = cells[idx * 2] * BANK_SIZE + cells[idx * 2 + 1];

	free(cells, M_TEMP, len);

	return octeon_intr_establish(irq, level, ih_fun, ih_arg, ih_what);
}
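
/*
 * Illustrative device tree fragment (made-up values, assuming the
 * two-cell <bank bit> encoding used by cavium,octeon-3860-ciu):
 *
 *	interrupts = <0 63>, <1 16>;
 *
 * With idx == 1 the code above computes irq = 1 * BANK_SIZE + 16 = 80,
 * which then lands in bank 1, bit 16 of the CIU enable/sum registers.
 */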

void
octeon_intr_disestablish(void *_ih)
{
	struct intrhand *ih = _ih;
	struct intrhand *p;
	unsigned int irq = ih->ih_irq;
	int cpuid = cpu_number();
	int s;

	KASSERT(irq < OCTEON_NINTS);

	s = splhigh();

	if (ih == octeon_intrhand[irq]) {
		octeon_intrhand[irq] = ih->ih_next;

		if (octeon_intrhand[irq] == NULL)
			octeon_intem[cpuid][IRQ_TO_BANK(irq)] &=
			    ~(1UL << IRQ_TO_BIT(irq));
	} else {
		for (p = octeon_intrhand[irq]; p != NULL; p = p->ih_next) {
			if (p->ih_next == ih) {
				p->ih_next = ih->ih_next;
				break;
			}
		}
		if (p == NULL)
			panic("%s: intrhand %p has not been registered",
			    __func__, ih);
	}
	free(ih, M_DEVBUF, sizeof(*ih));

	octeon_intr_makemasks();
	splx(s);	/* causes hw mask update */
}

void
octeon_splx(int newipl)
{
	struct cpu_info *ci = curcpu();

	/* Update masks to new ipl. Order highly important! */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	mips_sync();
	__asm__ (".set reorder\n");
	octeon_setintrmask(newipl);

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}

/*
 * Recompute interrupt masks.
 */
void
octeon_intr_makemasks()
{
	int cpuid = cpu_number();
	int irq, level;
	struct intrhand *q;
	uint intrlevel[OCTEON_NINTS];

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0; irq < OCTEON_NINTS; irq++) {
		uint levels = 0;
		for (q = octeon_intrhand[irq]; q != NULL; q = q->ih_next)
			levels |= 1 << q->ih_level;
		intrlevel[irq] = levels;
	}

	/*
	 * Then figure out which IRQs use each level.
	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
	 * case an interrupt occurs during intr_disestablish() and causes
	 * an unfortunate splx() while we are here recomputing the masks.
	 */
	for (level = IPL_NONE; level < NIPLS; level++) {
		uint64_t mask[NBANKS] = {};
		for (irq = 0; irq < OCTEON_NINTS; irq++)
			if (intrlevel[irq] & (1 << level))
				mask[IRQ_TO_BANK(irq)] |=
				    1UL << IRQ_TO_BIT(irq);
		octeon_imask[cpuid][level][0] = mask[0];
		octeon_imask[cpuid][level][1] = mask[1];
	}
	/*
	 * There are tty, network and disk drivers that use free() at interrupt
	 * time, so vm > (tty | net | bio).
	 *
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
#define ADD_MASK(dst, src) do {	\
	dst[0] |= src[0];	\
	dst[1] |= src[1];	\
} while (0)
	ADD_MASK(octeon_imask[cpuid][IPL_NET], octeon_imask[cpuid][IPL_BIO]);
	ADD_MASK(octeon_imask[cpuid][IPL_TTY], octeon_imask[cpuid][IPL_NET]);
	ADD_MASK(octeon_imask[cpuid][IPL_VM], octeon_imask[cpuid][IPL_TTY]);
	ADD_MASK(octeon_imask[cpuid][IPL_CLOCK], octeon_imask[cpuid][IPL_VM]);
	ADD_MASK(octeon_imask[cpuid][IPL_HIGH], octeon_imask[cpuid][IPL_CLOCK]);
	ADD_MASK(octeon_imask[cpuid][IPL_IPI], octeon_imask[cpuid][IPL_HIGH]);

	/*
	 * These are pseudo-levels.
	 */
	octeon_imask[cpuid][IPL_NONE][0] = 0;
	octeon_imask[cpuid][IPL_NONE][1] = 0;
}
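
/*
 * Worked example (illustrative IRQ numbers, not from the source): with
 * one handler on IRQ 3 at IPL_BIO and one on IRQ 40 at IPL_TTY, the
 * loop above leaves bit 3 in imask[IPL_BIO] and bit 40 in
 * imask[IPL_TTY]; the ADD_MASK pass then folds the lower levels in, so
 * imask[IPL_NET] contains bit 3 only, imask[IPL_TTY] contains bits 3
 * and 40, and every level from IPL_VM up contains both.  Raising the
 * spl to IPL_TTY therefore masks both sources, while splnet() masks
 * only IRQ 3.
 */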

static inline int
octeon_next_irq(uint64_t *isr)
{
	uint64_t irq, tmp = *isr;

	if (tmp == 0)
		return -1;

	asm volatile (
	"	.set push\n"
	"	.set mips64\n"
	"	dclz	%0, %0\n"
	"	.set pop\n"
	: "=r" (tmp) : "0" (tmp));

	irq = 63u - tmp;
	*isr &= ~(1UL << irq);	/* 64-bit mask; irq can exceed 31 */
	return irq;
}
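
/*
 * Example of the scan above (illustrative value): for *isr ==
 * 0x0000000000000012 (bits 4 and 1 set), dclz counts 59 leading zeros,
 * so the first call returns irq 4 and clears that bit, the next call
 * returns irq 1, and a further call returns -1 once *isr is empty.
 * Pending sources are thus handled from the highest bit downwards.
 */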

/*
 * Dispatch interrupts in given bank.
 */
uint32_t
octeon_iointr_bank(struct trapframe *frame, struct intrbank *bank)
{
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint64_t imr, isr, mask;
	int handled, ipl, irq;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

	isr = bus_space_read_8(&iobus_tag, iobus_h, bank->sum);
	imr = bus_space_read_8(&iobus_tag, iobus_h, bank->en);

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&iobus_tag, iobus_h, bank->en, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & octeon_imask[ci->ci_cpuid][frame->ipl][bank->id])
	    != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}
	if (isr == 0)
		return 1;

	/*
	 * Now process allowed interrupts.
	 */

	__asm__ (".set noreorder\n");
	ipl = ci->ci_ipl;
	mips_sync();
	__asm__ (".set reorder\n");

	while ((irq = octeon_next_irq(&isr)) >= 0) {
		irq += bank->id * BANK_SIZE;
		handled = 0;
		for (ih = octeon_intrhand[irq]; ih != NULL; ih = ih->ih_next) {
			splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
			if (ih->ih_level < IPL_IPI) {
				sr = getsr();
				ENABLEIPI();
			}
			if (ih->ih_flags & IH_MPSAFE)
				need_lock = 0;
			else
				need_lock = ih->ih_level < IPL_CLOCK;
			if (need_lock)
				__mp_lock(&kernel_lock);
#endif
			if ((*ih->ih_fun)(ih->ih_arg) != 0) {
				handled = 1;
				atomic_inc_long(
				    (unsigned long *)&ih->ih_count.ec_count);
			}
#ifdef MULTIPROCESSOR
			if (need_lock)
				__mp_unlock(&kernel_lock);
			if (ih->ih_level < IPL_IPI)
				setsr(sr);
#endif
		}
		if (!handled)
			printf("spurious interrupt %d\n", irq);
	}

	__asm__ (".set noreorder\n");
	ci->ci_ipl = ipl;
	mips_sync();
	__asm__ (".set reorder\n");

	/*
	 * Reenable interrupts which have been serviced.
	 */
	bus_space_write_8(&iobus_tag, iobus_h, bank->en, imr);

	return 1;
}

/*
 * Interrupt dispatcher.
 */
uint32_t
octeon_iointr(uint32_t hwpend, struct trapframe *frame)
{
	int cpuid = cpu_number();
	int handled;

	handled = octeon_iointr_bank(frame, &octeon_ibank[cpuid][0]);
	handled |= octeon_iointr_bank(frame, &octeon_ibank[cpuid][1]);
	return handled ? hwpend : 0;
}

void
octeon_setintrmask(int level)
{
	int cpuid = cpu_number();

	bus_space_write_8(&iobus_tag, iobus_h, octeon_ibank[cpuid][0].en,
	    octeon_intem[cpuid][0] & ~octeon_imask[cpuid][level][0]);
	bus_space_write_8(&iobus_tag, iobus_h, octeon_ibank[cpuid][1].en,
	    octeon_intem[cpuid][1] & ~octeon_imask[cpuid][level][1]);
}
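
/*
 * Example (illustrative values): with octeon_intem[cpuid][0] == 0x18
 * (IRQs 3 and 4 established) and octeon_imask[cpuid][IPL_TTY][0] ==
 * 0x08, octeon_setintrmask(IPL_TTY) writes 0x10 to the bank 0 enable
 * register: IRQ 4 stays enabled while IRQ 3 remains masked until
 * splx() drops the level again.
 */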