/*	$NetBSD: ixp12x0_intr.c,v 1.14 2005/12/24 20:06:52 perry Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.14 2005/12/24 20:06:52 perry Exp $");

/*
 * Interrupt support for the Intel ixp12x0
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>


extern u_int32_t	ixpcom_cr;	/* current cr from *_com.c */
extern u_int32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t intr_enabled;
volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

/*
 * Map each software interrupt queue index to one of the unused bits
 * in the ICU registers -- XXX will need to revisit this if those
 * bits are ever used in future steppings.
 */
static const u_int32_t si_to_irqbit[SI_NQUEUES] = {
	IXP12X0_INTR_bit30,		/* SI_SOFT */
	IXP12X0_INTR_bit29,		/* SI_SOFTCLOCK */
	IXP12X0_INTR_bit28,		/* SI_SOFTNET */
	IXP12X0_INTR_bit27,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << IXP12X0_INTR_bit30) | (1U << IXP12X0_INTR_bit29) |	\
	 (1U << IXP12X0_INTR_bit28) | (1U << IXP12X0_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
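
/*
 * For example, SI_TO_IRQBIT(SI_SOFTNET) expands to
 * (1U << IXP12X0_INTR_bit28), i.e. the same bit SI_SOFTNET
 * occupies in INT_SWMASK above.
 */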

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

void	ixp12x0_intr_dispatch(struct irqframe *frame);

#define IXPREG(reg)	*((volatile u_int32_t*) (reg))

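/*
 * Raw reads of the pending-interrupt status: the on-chip ICU word
 * (masked down to the bits we care about) and the PCI unit's IRQ
 * status register, respectively.
 */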
static inline u_int32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline u_int32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

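/*
 * The on-chip ICU apparently provides no per-source mask for the
 * UART, so UART interrupts are masked at the device itself: we
 * rewrite the ixpcom control register and export the current mask
 * to the com driver through ixpcom_imask.
 */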
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

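/*
 * Apply a mask: block every source set in "irqs"/"pci_irqs" and
 * (re)enable the remaining PCI sources recorded in pci_intr_enabled.
 */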
static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq:bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	imask[IPL_NONE] = 0;
	pci_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];
	pci_imask[IPL_SOFTCLOCK] |= pci_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
	pci_imask[IPL_SOFTNET] |= pci_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices
	 * with limited input buffer space/"real-time" requirements)
	 * a better chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	pci_imask[IPL_BIO] |= pci_imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	pci_imask[IPL_NET] |= pci_imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	pci_imask[IPL_SOFTSERIAL] |= pci_imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];
	pci_imask[IPL_TTY] |= pci_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_VM] |= imask[IPL_TTY];
	pci_imask[IPL_VM] |= pci_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_VM];
	pci_imask[IPL_AUDIO] |= pci_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];
	pci_imask[IPL_CLOCK] |= pci_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IXP12x0.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];
	pci_imask[IPL_STATCLOCK] |= pci_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];
	pci_imask[IPL_SERIAL] |= pci_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

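/*
 * ixp12x0_do_pending:
 *
 *	Run pending soft interrupts that are unmasked at the current
 *	spl, highest-priority queue first.  The simple lock prevents
 *	recursive entry.
 */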
static void
ixp12x0_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int	new;
	u_int	oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	}
	restore_interrupts(oldirqstate);

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~imask[new])
		ixp12x0_do_pending();
}

int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = ipl;
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int	old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}
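
/*
 * Sketch of typical usage of the routines above (illustrative only;
 * callers normally reach them through the spl*() macros provided by
 * <machine/intr.h>):
 *
 *	int s;
 *
 *	s = _splraise(IPL_BIO);
 *	... touch state shared with an IPL_BIO interrupt handler ...
 *	splx(s);
 */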

void
_setsoftintr(int si)
{
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~imask[current_spl_level])
		ixp12x0_do_pending();
}
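
/*
 * _setsoftintr() is normally reached through the machine-independent
 * softintr(9) facility rather than called directly.  A driver would
 * do roughly this (illustrative; "sc" and mydev_soft() are
 * hypothetical):
 *
 *	sc->sc_si = softintr_establish(IPL_SOFTNET, mydev_soft, sc);
 *	...
 *	softintr_schedule(sc->sc_si);
 */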

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "ixpintr", iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;
	hardware_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

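/*
 * ixp12x0_intr_establish:
 *
 *	Register ih_func/arg to run for the given IRQ at the given IPL,
 *	and return an opaque cookie for ixp12x0_intr_disestablish().
 *	Typical call (illustrative only; "sc" and mydev_intr() are
 *	hypothetical):
 *
 *		sc->sc_ih = ixp12x0_intr_establish(IXP12X0_INTR_UART,
 *		    IPL_TTY, mydev_intr, sc);
 */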
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
	       irq, ipl, (u_int32_t) ih_func, (u_int32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

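/*
 * ixp12x0_intr_disestablish:
 *
 *	Remove a previously established handler; the cookie is the
 *	return value of ixp12x0_intr_establish().
 */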
void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

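/*
 * ixp12x0_intr_dispatch:
 *
 *	Main IRQ dispatcher, entered with IRQs disabled.  Mask the
 *	pending sources, run each registered handler at its IPL with
 *	IRQs re-enabled, then restore the interrupted spl and kick
 *	any unmasked soft interrupts.
 */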
void
ixp12x0_intr_dispatch(struct irqframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	u_int32_t		hwpend;
	u_int32_t		pci_hwpend;
	int			irq;
	u_int32_t		ibit;

	pcpl = current_spl_level;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	hardware_spl_level = pcpl;
	ixp12x0_set_intrmask(imask[pcpl] | hwpend,
			     pci_imask[pcpl] | pci_hwpend);

	hwpend &= ~imask[pcpl];
	pci_hwpend &= ~pci_imask[pcpl];

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);
		hwpend &= ~ibit;	/* clear once, even if no handler */

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int	ipl;

			current_spl_level = ipl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);
		pci_hwpend &= ~ibit;	/* clear once, even if no handler */

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int	ipl;

			current_spl_level = ipl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	current_spl_level = pcpl;
	hardware_spl_level = pcpl;
	ixp12x0_set_intrmask(imask[pcpl], pci_imask[pcpl]);

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~imask[pcpl]) {
		oldirqstate = enable_interrupts(I32_bit);
		ixp12x0_do_pending();
		restore_interrupts(oldirqstate);
	}
}