xref: /netbsd-src/sys/arch/arm/footbridge/footbridge_irqhandler.c (revision e5548b402ae4c44fb816de42c7bba9581ce23ef5)
1 /*	$NetBSD: footbridge_irqhandler.c,v 1.13 2005/12/11 12:16:45 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #ifndef ARM_SPL_NOINLINE
39 #define	ARM_SPL_NOINLINE
40 #endif
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0,"$NetBSD: footbridge_irqhandler.c,v 1.13 2005/12/11 12:16:45 christos Exp $");
44 
45 #include "opt_irqstats.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/malloc.h>
50 #include <uvm/uvm_extern.h>
51 
52 #include <machine/intr.h>
53 #include <machine/cpu.h>
54 #include <arm/footbridge/dc21285mem.h>
55 #include <arm/footbridge/dc21285reg.h>
56 
57 #include <dev/pci/pcivar.h>
58 
59 #include "isa.h"
60 #if NISA > 0
61 #include <dev/isa/isavar.h>
62 #endif
63 
/* Interrupt handler queues, one per hardware IRQ line. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each interrupt priority level. */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Current interrupt priority level (mask of blocked IRQ bits). */
__volatile int current_spl_level;

/* Interrupts pending (hardware and software) */
__volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt __P((void *, pci_intr_handle_t));

void footbridge_do_pending(void);

/*
 * Map a software interrupt queue index to the reserved hardware IRQ
 * bit used to mark it pending in footbridge_ipending.
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] =
	{ IRQ_SOFTINT,
	  IRQ_RESERVED0,
	  IRQ_RESERVED1,
	  IRQ_RESERVED2 };

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
102 
103 const struct evcnt *
104 footbridge_pci_intr_evcnt(pcv, ih)
105 	void *pcv;
106 	pci_intr_handle_t ih;
107 {
108 	/* XXX check range is valid */
109 #if NISA > 0
110 	if (ih >= 0x80 && ih <= 0x8f) {
111 		return isa_intr_evcnt(NULL, (ih & 0x0f));
112 	}
113 #endif
114 	return &footbridge_intrq[ih].iq_ev;
115 }
116 
117 static __inline void
118 footbridge_enable_irq(int irq)
119 {
120 	intr_enabled |= (1U << irq);
121 
122 	footbridge_set_intrmask();
123 }
124 
125 static __inline void
126 footbridge_disable_irq(int irq)
127 {
128 	intr_enabled &= ~(1U << irq);
129 	footbridge_set_intrmask();
130 }
131 
/*
 * Recompute iq_levels, footbridge_imask[] and iq_mask from the set of
 * established handlers, then enable exactly those IRQs that have at
 * least one handler attached.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &footbridge_intrq[irq];
		/* All IRQs are disabled here and selectively re-enabled below. */
		footbridge_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (footbridge_intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		footbridge_imask[ipl] = irqs;
	}

	/* IPL_NONE must open up all interrupts */
	footbridge_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	footbridge_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	footbridge_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	footbridge_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	footbridge_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/* Each soft IPL also blocks everything below it. */
	footbridge_imask[IPL_SOFTCLOCK] |= footbridge_imask[IPL_SOFT];
	footbridge_imask[IPL_SOFTNET] |= footbridge_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	footbridge_imask[IPL_BIO] |= footbridge_imask[IPL_SOFTNET];
	footbridge_imask[IPL_NET] |= footbridge_imask[IPL_BIO];
	footbridge_imask[IPL_SOFTSERIAL] |= footbridge_imask[IPL_NET];

	footbridge_imask[IPL_TTY] |= footbridge_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	footbridge_imask[IPL_VM] |= footbridge_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	footbridge_imask[IPL_AUDIO] |= footbridge_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	footbridge_imask[IPL_CLOCK] |= footbridge_imask[IPL_AUDIO];

	/*
	 * footbridge has separate statclock.
	 */
	footbridge_imask[IPL_STATCLOCK] |= footbridge_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	footbridge_imask[IPL_SERIAL] |= footbridge_imask[IPL_HIGH];

	/*
	 * Calculate the ipl level to go to when handling this interrupt
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		/* The IRQ's own bit is always in its dispatch mask. */
		int irqs = (1U << irq);
		iq = &footbridge_intrq[irq];
		/* Re-enable only IRQs that still have at least one handler. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			footbridge_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= footbridge_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
236 
/* Raise the interrupt priority level; returns the previous level. */
int
_splraise(int ipl)
{
	return footbridge_splraise(ipl);
}
242 
/* Unconditionally set the interrupt priority level to `new'. */
void
splx(int new)
{
	footbridge_splx(new);
}
249 
/* Lower the interrupt priority level; returns the previous level. */
int
_spllower(int ipl)
{
	return footbridge_spllower(ipl);
}
255 
/*
 * Run pending soft interrupts that are unmasked at the current spl
 * level, highest priority queue first.  A simple lock prevents
 * re-entry; the soft interrupt handlers themselves run with hardware
 * interrupts enabled.
 */
__inline void
footbridge_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t new, oldirqstate;

	/* Already being processed further up the call stack; bail out. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	/* Snapshot the spl level to restore after each dispatch. */
	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

	/*
	 * If queue `si' is pending and unmasked: clear its pending bit,
	 * raise the spl to its level, and dispatch it with hardware
	 * interrupts re-enabled; then restore the saved spl level.
	 */
#define	DO_SOFTINT(si)							\
	if ((footbridge_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		footbridge_ipending &= ~SI_TO_IRQBIT(si);		\
		current_spl_level |= footbridge_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
287 
288 
/* called from splhigh, so the matching splx will set the interrupt up.*/
void
_setsoftintr(int si)
{
	int oldirqstate;

	/* Mark the soft interrupt pending; done with IRQs disabled so the
	 * read-modify-write of footbridge_ipending is not interrupted. */
	oldirqstate = disable_interrupts(I32_bit);
	footbridge_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level)
		footbridge_do_pending();
}
303 
304 void
305 footbridge_intr_init(void)
306 {
307 	struct intrq *iq;
308 	int i;
309 
310 	intr_enabled = 0;
311 	current_spl_level = 0xffffffff;
312 	footbridge_ipending = 0;
313 	footbridge_set_intrmask();
314 
315 	for (i = 0; i < NIRQ; i++) {
316 		iq = &footbridge_intrq[i];
317 		TAILQ_INIT(&iq->iq_list);
318 
319 		sprintf(iq->iq_name, "irq %d", i);
320 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
321 		    NULL, "footbridge", iq->iq_name);
322 	}
323 
324 	footbridge_intr_calculate_masks();
325 
326 	/* Enable IRQ's, we don't have any FIQ's*/
327 	enable_interrupts(I32_bit);
328 }
329 
330 void *
331 footbridge_intr_claim(int irq, int ipl, const char *name, int (*func)(void *), void *arg)
332 {
333 	struct intrq *iq;
334 	struct intrhand *ih;
335 	u_int oldirqstate;
336 
337 	if (irq < 0 || irq > NIRQ)
338 		panic("footbridge_intr_establish: IRQ %d out of range", irq);
339 
340 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
341 	if (ih == NULL)
342 	{
343 		printf("No memory");
344 		return (NULL);
345 	}
346 
347 	ih->ih_func = func;
348 	ih->ih_arg = arg;
349 	ih->ih_ipl = ipl;
350 	ih->ih_irq = irq;
351 
352 	iq = &footbridge_intrq[irq];
353 
354 	iq->iq_ist = IST_LEVEL;
355 
356 	oldirqstate = disable_interrupts(I32_bit);
357 
358 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
359 
360 	footbridge_intr_calculate_masks();
361 
362 	/* detach the existing event counter and add the new name */
363 	evcnt_detach(&iq->iq_ev);
364 	evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
365 			NULL, "footbridge", name);
366 
367 	restore_interrupts(oldirqstate);
368 
369 	return(ih);
370 }
371 
372 void
373 footbridge_intr_disestablish(void *cookie)
374 {
375 	struct intrhand *ih = cookie;
376 	struct intrq *iq = &footbridge_intrq[ih->ih_irq];
377 	int oldirqstate;
378 
379 	/* XXX need to free ih ? */
380 	oldirqstate = disable_interrupts(I32_bit);
381 
382 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
383 
384 	footbridge_intr_calculate_masks();
385 
386 	restore_interrupts(oldirqstate);
387 }
388 
389 static uint32_t footbridge_intstatus(void);
390 
391 static inline uint32_t footbridge_intstatus()
392 {
393     return ((__volatile uint32_t*)(DC21285_ARMCSR_VBASE))[IRQ_STATUS>>2];
394 }
395 
/* called with external interrupts disabled */
/*
 * Main hardware interrupt dispatcher: read the pending IRQs from the
 * controller, run the handlers for every unmasked one (with the spl
 * raised to that IRQ's mask and CPU interrupts re-enabled), remember
 * masked ones in footbridge_ipending for later, and finally run any
 * unmasked soft interrupts.
 */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* Spl level at entry; restored after each handler run. */
	pcpl = current_spl_level;

	hwpend = footbridge_intstatus();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	footbridge_set_intrmask();

	while (hwpend != 0) {
		int intr_rc = 0;
		/* Take the lowest-numbered pending IRQ first. */
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			footbridge_ipending |= ibit;
			continue;
		}

		footbridge_ipending &= ~ibit;

		iq = &footbridge_intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Block everything this IRQ's handlers require blocked. */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		/* Run handlers in order until one claims the IRQ (returns 1). */
		for (ih = TAILQ_FIRST(&iq->iq_list);
			((ih != NULL) && (intr_rc != 1));
		     ih = TAILQ_NEXT(ih, ih_list)) {
			intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that's it's cleared. */
		intr_enabled |= ibit;
		footbridge_set_intrmask();

		/* also check for any new interrupts that may have occurred,
		 * that we can handle at this spl level */
		hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~pcpl;
	}

	/* Check for pending soft intrs. */
        if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level) {
	    /*
	     * XXX this feels the wrong place to enable irqs, as some
	     * soft ints are higher priority than hardware irqs
	     */
                oldirqstate = enable_interrupts(I32_bit);
                footbridge_do_pending();
                restore_interrupts(oldirqstate);
        }
}
467