xref: /netbsd-src/sys/arch/arm/footbridge/footbridge_irqhandler.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: footbridge_irqhandler.c,v 1.18 2007/12/03 15:33:18 ad Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #ifndef ARM_SPL_NOINLINE
39 #define	ARM_SPL_NOINLINE
40 #endif
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0,"$NetBSD: footbridge_irqhandler.c,v 1.18 2007/12/03 15:33:18 ad Exp $");
44 
45 #include "opt_irqstats.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/malloc.h>
50 #include <uvm/uvm_extern.h>
51 
52 #include <machine/intr.h>
53 #include <machine/cpu.h>
54 #include <arm/footbridge/dc21285mem.h>
55 #include <arm/footbridge/dc21285reg.h>
56 
57 #include <dev/pci/pcivar.h>
58 
59 #include "isa.h"
60 #if NISA > 0
61 #include <dev/isa/isavar.h>
62 #endif
63 
/* Interrupt handler queues, one per footbridge IRQ line. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each level (recomputed by footbridge_intr_calculate_masks). */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Current interrupt priority level */
volatile int current_spl_level;

/* Interrupts pending */
volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt __P((void *, pci_intr_handle_t));

void footbridge_do_pending(void);

/*
 * Map a software-interrupt queue number (SI_*) to the footbridge IRQ
 * bit number reserved for that queue.
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] =
	{ IRQ_SOFTINT,
	  IRQ_RESERVED0,
	  IRQ_RESERVED1,
	  IRQ_RESERVED2 };

/* Turn an SI_* queue number into its IRQ mask bit. */
#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 * NOTE(review): the entries assume the SI_* constants are numbered
 * SOFTCLOCK, SOFTBIO, SOFTNET, SOFTSERIAL in that order -- confirm
 * against <machine/intr.h>.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTBIO,		/* SI_SOFTBIO */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
102 
103 const struct evcnt *
104 footbridge_pci_intr_evcnt(pcv, ih)
105 	void *pcv;
106 	pci_intr_handle_t ih;
107 {
108 	/* XXX check range is valid */
109 #if NISA > 0
110 	if (ih >= 0x80 && ih <= 0x8f) {
111 		return isa_intr_evcnt(NULL, (ih & 0x0f));
112 	}
113 #endif
114 	return &footbridge_intrq[ih].iq_ev;
115 }
116 
117 static inline void
118 footbridge_enable_irq(int irq)
119 {
120 	intr_enabled |= (1U << irq);
121 
122 	footbridge_set_intrmask();
123 }
124 
125 static inline void
126 footbridge_disable_irq(int irq)
127 {
128 	intr_enabled &= ~(1U << irq);
129 	footbridge_set_intrmask();
130 }
131 
/*
 * Recompute the per-IPL masks (footbridge_imask[]) and each queue's
 * dispatch mask (iq_mask) from the set of established handlers, then
 * enable exactly those IRQs that have at least one handler attached.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &footbridge_intrq[irq];
		/* Disable while recomputing; re-enabled below if the queue is in use. */
		footbridge_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (footbridge_intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		footbridge_imask[ipl] = irqs;
	}

	/* IPL_NONE must open up all interrupts */
	footbridge_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	footbridge_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	footbridge_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	footbridge_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	footbridge_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * Enforce a hierarchy that gives "slow" device (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.  Each level masks everything the
	 * level below it masks.
	 */
	footbridge_imask[IPL_SOFTBIO] |= footbridge_imask[IPL_SOFTCLOCK];
	footbridge_imask[IPL_SOFTNET] |= footbridge_imask[IPL_SOFTBIO];
	footbridge_imask[IPL_SOFTSERIAL] |= footbridge_imask[IPL_SOFTNET];
	footbridge_imask[IPL_VM] |= footbridge_imask[IPL_SOFTSERIAL];
	footbridge_imask[IPL_SCHED] |= footbridge_imask[IPL_VM];
	footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_SCHED];

	/*
	 * Calculate the ipl level to go to when handling this interrupt.
	 * iq_mask also contains the IRQ's own bit so dispatch masks it
	 * while its handlers run.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &footbridge_intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			footbridge_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= footbridge_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
200 
/*
 * Raise the spl to at least the given level; returns the previous
 * level.  Thin wrapper around the footbridge spl back-end.
 */
int
_splraise(int ipl)
{
	return footbridge_splraise(ipl);
}
206 
/*
 * Unconditionally set the spl to the level passed in (unlike
 * _splraise, this may lower it), running any newly unmasked
 * pending interrupts via the back-end.
 */
void
splx(int new)
{
	footbridge_splx(new);
}
213 
/*
 * Lower the spl to the given level; returns the previous level.
 * Thin wrapper around the footbridge spl back-end.
 */
int
_spllower(int ipl)
{
	return footbridge_spllower(ipl);
}
219 
/*
 * Run pending, unmasked software interrupts.  Serialized by the
 * "processing" simple lock: a second concurrent caller returns
 * immediately and lets the lock owner finish the work.
 */
void
footbridge_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	/* spl in force on entry; restored after each softint dispatch. */
	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * Dispatch one soft-interrupt queue: clear its pending bit, raise
	 * the spl by that queue's mask, run the handlers with CPU
	 * interrupts re-enabled, then drop back to the entry spl.
	 * NOTE(review): the last invocation below passes SI_SOFT while
	 * si_to_ipl[] documents SI_SOFTBIO at that position -- confirm the
	 * SI_* numbering against <machine/intr.h>.
	 */
#define	DO_SOFTINT(si)							\
	if ((footbridge_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		footbridge_ipending &= ~SI_TO_IRQBIT(si);		\
		current_spl_level |= footbridge_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}
	/* Highest-priority queues are serviced first. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);
#endif
	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
252 
253 
254 /* called from splhigh, so the matching splx will set the interrupt up.*/
255 void
256 _setsoftintr(int si)
257 {
258 	int oldirqstate;
259 
260 	oldirqstate = disable_interrupts(I32_bit);
261 	footbridge_ipending |= SI_TO_IRQBIT(si);
262 	restore_interrupts(oldirqstate);
263 
264 	/* Process unmasked pending soft interrupts. */
265 	if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level)
266 		footbridge_do_pending();
267 }
268 
269 void
270 footbridge_intr_init(void)
271 {
272 	struct intrq *iq;
273 	int i;
274 
275 	intr_enabled = 0;
276 	current_spl_level = 0xffffffff;
277 	footbridge_ipending = 0;
278 	footbridge_set_intrmask();
279 
280 	for (i = 0; i < NIRQ; i++) {
281 		iq = &footbridge_intrq[i];
282 		TAILQ_INIT(&iq->iq_list);
283 
284 		sprintf(iq->iq_name, "irq %d", i);
285 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
286 		    NULL, "footbridge", iq->iq_name);
287 	}
288 
289 	footbridge_intr_calculate_masks();
290 
291 	/* Enable IRQ's, we don't have any FIQ's*/
292 	enable_interrupts(I32_bit);
293 }
294 
295 void *
296 footbridge_intr_claim(int irq, int ipl, const char *name, int (*func)(void *), void *arg)
297 {
298 	struct intrq *iq;
299 	struct intrhand *ih;
300 	u_int oldirqstate;
301 
302 	if (irq < 0 || irq > NIRQ)
303 		panic("footbridge_intr_establish: IRQ %d out of range", irq);
304 
305 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
306 	if (ih == NULL)
307 	{
308 		printf("No memory");
309 		return (NULL);
310 	}
311 
312 	ih->ih_func = func;
313 	ih->ih_arg = arg;
314 	ih->ih_ipl = ipl;
315 	ih->ih_irq = irq;
316 
317 	iq = &footbridge_intrq[irq];
318 
319 	iq->iq_ist = IST_LEVEL;
320 
321 	oldirqstate = disable_interrupts(I32_bit);
322 
323 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
324 
325 	footbridge_intr_calculate_masks();
326 
327 	/* detach the existing event counter and add the new name */
328 	evcnt_detach(&iq->iq_ev);
329 	evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
330 			NULL, "footbridge", name);
331 
332 	restore_interrupts(oldirqstate);
333 
334 	return(ih);
335 }
336 
337 void
338 footbridge_intr_disestablish(void *cookie)
339 {
340 	struct intrhand *ih = cookie;
341 	struct intrq *iq = &footbridge_intrq[ih->ih_irq];
342 	int oldirqstate;
343 
344 	/* XXX need to free ih ? */
345 	oldirqstate = disable_interrupts(I32_bit);
346 
347 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
348 
349 	footbridge_intr_calculate_masks();
350 
351 	restore_interrupts(oldirqstate);
352 }
353 
354 static uint32_t footbridge_intstatus(void);
355 
356 static inline uint32_t footbridge_intstatus()
357 {
358     return ((volatile uint32_t*)(DC21285_ARMCSR_VBASE))[IRQ_STATUS>>2];
359 }
360 
/*
 * Main hardware-interrupt dispatcher; called from the IRQ trampoline
 * with external interrupts disabled in the CPSR.
 */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* spl that was in force when the interrupt was taken. */
	pcpl = current_spl_level;

	/* Snapshot of the hardware IRQ status register. */
	hwpend = footbridge_intstatus();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	footbridge_set_intrmask();

	while (hwpend != 0) {
		int intr_rc = 0;
		/* Service the lowest-numbered pending IRQ first. */
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			footbridge_ipending |= ibit;
			continue;
		}

		footbridge_ipending &= ~ibit;

		iq = &footbridge_intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Raise the spl to this queue's mask while its handlers run. */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		/* Run handlers in order until one claims the interrupt (returns 1). */
		for (ih = TAILQ_FIRST(&iq->iq_list);
			((ih != NULL) && (intr_rc != 1));
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* Handlers with no private arg get the clock frame. */
			intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that's it's cleared. */
		intr_enabled |= ibit;
		footbridge_set_intrmask();

		/* also check for any new interrupts that may have occurred,
		 * that we can handle at this spl level */
		hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~pcpl;
	}

	/* Check for pending soft intrs. */
        if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level) {
	    /*
	     * XXX this feels the wrong place to enable irqs, as some
	     * soft ints are higher priority than hardware irqs
	     */
                oldirqstate = enable_interrupts(I32_bit);
                footbridge_do_pending();
                restore_interrupts(oldirqstate);
        }
}
432