/*	$NetBSD: footbridge_irqhandler.c,v 1.22 2009/06/17 06:27:05 skrll Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ARM_SPL_NOINLINE
#define	ARM_SPL_NOINLINE
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: footbridge_irqhandler.c,v 1.22 2009/06/17 06:27:05 skrll Exp $");

#include "opt_irqstats.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/cpu.h>
#include <arm/footbridge/dc21285mem.h>
#include <arm/footbridge/dc21285reg.h>

#include <dev/pci/pcivar.h>

#include "isa.h"
#if NISA > 0
#include <dev/isa/isavar.h>
#endif

/* Interrupt handler queues. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each level. */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Interrupts pending */
volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt(void *, pci_intr_handle_t);

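/*
 * Return the event counter for a PCI interrupt handle.  ISA interrupts
 * (handles 0x80-0x8f) are handed to the ISA interrupt code.
 */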
const struct evcnt *
footbridge_pci_intr_evcnt(void *pcv, pci_intr_handle_t ih)
{
	/* XXX check range is valid */
#if NISA > 0
	if (ih >= 0x80 && ih <= 0x8f) {
		return isa_intr_evcnt(NULL, (ih & 0x0f));
	}
#endif
	return &footbridge_intrq[ih].iq_ev;
}

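/*
 * Update the software copy of the enabled-IRQ mask and push it out to
 * the interrupt controller.
 */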
static inline void
footbridge_enable_irq(int irq)
{
	intr_enabled |= (1U << irq);
	footbridge_set_intrmask();
}

static inline void
footbridge_disable_irq(int irq)
{
	intr_enabled &= ~(1U << irq);
	footbridge_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &footbridge_intrq[irq];
		footbridge_disable_irq(irq);
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			levels |= (1U << ih->ih_ipl);
		}
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (footbridge_intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		footbridge_imask[ipl] = irqs;
	}

	/* IPL_NONE must open up all interrupts */
	KASSERT(footbridge_imask[IPL_NONE] == 0);
	KASSERT(footbridge_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(footbridge_imask[IPL_SOFTBIO] == 0);
	KASSERT(footbridge_imask[IPL_SOFTNET] == 0);
	KASSERT(footbridge_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	footbridge_imask[IPL_SCHED] |= footbridge_imask[IPL_VM];
	footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_SCHED];

	/*
	 * Calculate the IPL to raise to when handling each interrupt.
	 */
	for (irq = 0, iq = footbridge_intrq; irq < NIRQ; irq++, iq++) {
		int irqs = (1U << irq);
		if (!TAILQ_EMPTY(&iq->iq_list)) {
			footbridge_enable_irq(irq);
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				irqs |= footbridge_imask[ih->ih_ipl];
			}
		}
		iq->iq_mask = irqs;
	}
}

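/*
 * spl(9) entry points, implemented with the footbridge_spl*() primitives.
 */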
int
_splraise(int ipl)
{
	return (footbridge_splraise(ipl));
}

/* this will always take us to the ipl passed in */
void
splx(int new)
{
	footbridge_splx(new);
}

int
_spllower(int ipl)
{
	return (footbridge_spllower(ipl));
}

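/*
 * Initialize the interrupt system: mask all interrupt sources, attach
 * an event counter to each IRQ and then enable IRQs on the CPU.
 */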
void
footbridge_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	set_curcpl(0xffffffff);
	footbridge_ipending = 0;
	footbridge_set_intrmask();

	for (i = 0, iq = footbridge_intrq; i < NIRQ; i++, iq++) {
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "footbridge", iq->iq_name);
	}

	footbridge_intr_calculate_masks();

	/* Enable IRQs; we don't use any FIQs. */
	enable_interrupts(I32_bit);
}

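/*
 * Establish an interrupt handler for the given IRQ at the given IPL and
 * recompute the spl masks.  Returns an opaque cookie for
 * footbridge_intr_disestablish(), or NULL if no memory is available.
 */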
void *
footbridge_intr_claim(int irq, int ipl, const char *name, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("footbridge_intr_claim: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL) {
		printf("footbridge_intr_claim: no memory for handler\n");
		return (NULL);
	}

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &footbridge_intrq[irq];

	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	/* Detach the existing event counter and reattach it with the new name. */
	evcnt_detach(&iq->iq_ev);
	evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
			NULL, "footbridge", name);

	restore_interrupts(oldirqstate);

	return (ih);
}

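/*
 * Remove a previously established interrupt handler and recompute the
 * spl masks.
 */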
void
footbridge_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &footbridge_intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	/* The handler is now unlinked from the queue, so free it. */
	free(ih, M_DEVBUF);
}
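/* Read the raw interrupt status from the 21285 IRQ_STATUS register. */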
static inline uint32_t
footbridge_intstatus(void)
{
	return ((volatile uint32_t*)(DC21285_ARMCSR_VBASE))[IRQ_STATUS>>2];
}

/* called with external interrupts disabled */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const int imask = footbridge_imask[ppl];

	hwpend = footbridge_intstatus();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	footbridge_set_intrmask();

	while (hwpend != 0) {
		int intr_rc = 0;
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			footbridge_ipending |= ibit;
			continue;
		}

		footbridge_ipending &= ~ibit;

		iq = &footbridge_intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
			if (intr_rc != 1)
				break;
		}

		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		footbridge_set_intrmask();

		/*
		 * Also check for any new interrupts that may have occurred
		 * and that we can handle at this spl level.
		 */
		hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~imask;
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif /* __HAVE_FAST_SOFTINTS */
}