xref: /netbsd-src/sys/arch/arm/xscale/i80321_icu.c (revision cd22f25e6f6d1cc1f197fe8c5468a80f51d1c4e1)
1 /*	$NetBSD: i80321_icu.c,v 1.18 2008/04/27 18:58:45 matt Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.18 2008/04/27 18:58:45 matt Exp $");
40 
41 #ifndef EVBARM_SPL_NOINLINE
42 #define	EVBARM_SPL_NOINLINE
43 #endif
44 
45 /*
46  * Interrupt support for the Intel i80321 I/O Processor.
47  */
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
52 
53 #include <uvm/uvm_extern.h>
54 
55 #include <machine/bus.h>
56 #include <machine/intr.h>
57 
58 #include <arm/cpufunc.h>
59 
60 #include <arm/xscale/i80321reg.h>
61 #include <arm/xscale/i80321var.h>
62 
/* Interrupt handler queues, one per hardware IRQ line (NIRQ total). */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each IPL (index by IPL, value is an IRQ bitmask). */
int i80321_imask[NIPL];

/* Interrupts pending (IRQ bitmask of sources seen but deferred by IPL). */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled (mirrors the INTCTL register). */
volatile uint32_t intr_enabled;

/* Mask if interrupts steered to FIQs. */
uint32_t intr_steer;
78 /*
79  * Interrupt bit names.
80  */
/* Index corresponds to the IINTSRC bit number; "irq N" entries are unused sources. */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};
115 
116 void	i80321_intr_dispatch(struct clockframe *frame);
117 
/*
 * Read the pending-interrupt source register (IINTSRC, coprocessor
 * p6 register c8) and return only the bits for sources we have
 * actually enabled.
 */
static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}
133 
/*
 * Push the software FIQ-steering mask (intr_steer) to the INTSTR
 * register (coprocessor p6 register c4).  Only bits corresponding
 * to real hardware sources (ICU_INT_HWMASK) are written.
 */
static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}
142 
143 static inline void
144 i80321_enable_irq(int irq)
145 {
146 
147 	intr_enabled |= (1U << irq);
148 	i80321_set_intrmask();
149 }
150 
151 static inline void
152 i80321_disable_irq(int irq)
153 {
154 
155 	intr_enabled &= ~(1U << irq);
156 	i80321_set_intrmask();
157 }
158 
159 /*
160  * NOTE: This routine must be called with interrupts disabled in the CPSR.
161  */
162 static void
163 i80321_intr_calculate_masks(void)
164 {
165 	struct intrq *iq;
166 	struct intrhand *ih;
167 	int irq, ipl;
168 
169 	/* First, figure out which IPLs each IRQ has. */
170 	for (irq = 0; irq < NIRQ; irq++) {
171 		int levels = 0;
172 		iq = &intrq[irq];
173 		i80321_disable_irq(irq);
174 		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
175 		     ih = TAILQ_NEXT(ih, ih_list))
176 			levels |= (1U << ih->ih_ipl);
177 		iq->iq_levels = levels;
178 	}
179 
180 	/* Next, figure out which IRQs are used by each IPL. */
181 	for (ipl = 0; ipl < NIPL; ipl++) {
182 		int irqs = 0;
183 		for (irq = 0; irq < NIRQ; irq++) {
184 			if (intrq[irq].iq_levels & (1U << ipl))
185 				irqs |= (1U << irq);
186 		}
187 		i80321_imask[ipl] = irqs;
188 	}
189 
190 	i80321_imask[IPL_NONE] = 0;
191 
192 	/*
193 	 * Enforce a hierarchy that gives "slow" device (or devices with
194 	 * limited input buffer space/"real-time" requirements) a better
195 	 * chance at not dropping data.
196 	 */
197 
198 	KASSERT(i80321_imask[IPL_VM] != 0);
199 	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
200 	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];
201 
202 	/*
203 	 * Now compute which IRQs must be blocked when servicing any
204 	 * given IRQ.
205 	 */
206 	for (irq = 0; irq < NIRQ; irq++) {
207 		int irqs = (1U << irq);
208 		iq = &intrq[irq];
209 		if (TAILQ_FIRST(&iq->iq_list) != NULL)
210 			i80321_enable_irq(irq);
211 		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
212 		     ih = TAILQ_NEXT(ih, ih_list))
213 			irqs |= i80321_imask[ih->ih_ipl];
214 		iq->iq_mask = irqs;
215 	}
216 }
217 
/* Lower the system IPL to 'new'; thin wrapper around i80321_splx(). */
void
splx(int new)
{
	i80321_splx(new);
}
223 
/* Lower the system IPL to 'ipl'; returns the previous IPL. */
int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}
229 
/* Raise the system IPL to 'ipl'; returns the previous IPL. */
int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}
235 
236 /*
237  * i80321_icu_init:
238  *
239  *	Initialize the i80321 ICU.  Called early in bootstrap
240  *	to make sure the ICU is in a pristine state.
241  */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}
252 
253 /*
254  * i80321_intr_init:
255  *
256  *	Initialize the rest of the interrupt subsystem, making it
257  *	ready to handle interrupts from devices.
258  */
259 void
260 i80321_intr_init(void)
261 {
262 	struct intrq *iq;
263 	int i;
264 
265 	intr_enabled = 0;
266 
267 	for (i = 0; i < NIRQ; i++) {
268 		iq = &intrq[i];
269 		TAILQ_INIT(&iq->iq_list);
270 
271 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
272 		    NULL, "iop321", i80321_irqnames[i]);
273 	}
274 
275 	i80321_intr_calculate_masks();
276 
277 	/* Enable IRQs (don't yet use FIQs). */
278 	enable_interrupts(I32_bit);
279 }
280 
281 void *
282 i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
283 {
284 	struct intrq *iq;
285 	struct intrhand *ih;
286 	u_int oldirqstate;
287 
288 	if (irq < 0 || irq > NIRQ)
289 		panic("i80321_intr_establish: IRQ %d out of range", irq);
290 
291 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
292 	if (ih == NULL)
293 		return (NULL);
294 
295 	ih->ih_func = func;
296 	ih->ih_arg = arg;
297 	ih->ih_ipl = ipl;
298 	ih->ih_irq = irq;
299 
300 	iq = &intrq[irq];
301 
302 	/* All IOP321 interrupts are level-triggered. */
303 	iq->iq_ist = IST_LEVEL;
304 
305 	oldirqstate = disable_interrupts(I32_bit);
306 
307 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
308 
309 	i80321_intr_calculate_masks();
310 
311 	restore_interrupts(oldirqstate);
312 
313 	return (ih);
314 }
315 
316 void
317 i80321_intr_disestablish(void *cookie)
318 {
319 	struct intrhand *ih = cookie;
320 	struct intrq *iq = &intrq[ih->ih_irq];
321 	int oldirqstate;
322 
323 	oldirqstate = disable_interrupts(I32_bit);
324 
325 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
326 
327 	i80321_intr_calculate_masks();
328 
329 	restore_interrupts(oldirqstate);
330 }
331 
332 /*
333  * Hardware interrupt handler.
334  *
335  * If I80321_HPI_ENABLED is defined, this code attempts to deal with
336  * HPI interrupts as best it can.
337  *
338  * The problem is that HPIs cannot be masked at the interrupt controller;
339  * they can only be masked by disabling IRQs in the XScale core.
340  *
341  * So, if an HPI comes in and we determine that it should be masked at
342  * the current IPL then we mark it pending in the usual way and set
343  * I32_bit in the interrupt frame. This ensures that when we return from
344  * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
345  * ensure IRQs are enabled later, i80321_splx() has been modified to do
346  * just that when a pending HPI interrupt is unmasked.) Additionally,
347  * because HPIs are level-triggered, the registered handler for the HPI
348  * interrupt will also be invoked with IRQs disabled. If a masked HPI
349  * occurs at the same time as another unmasked higher priority interrupt,
350  * the higher priority handler will also be invoked with IRQs disabled.
351  * As a result, the system could end up executing a lot of code with IRQs
352  * completely disabled if the HPI's IPL is relatively low.
353  *
354  * At the present time, the only known use of HPI is for the console UART
355  * on a couple of boards. This is probably the least intrusive use of HPI
356  * as IPL_SERIAL is the highest priority IPL in the system anyway. The
357  * code has not been tested with HPI hooked up to a class of device which
358  * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
359  * perform very poorly if at all, even though the following code has been
360  * designed (hopefully) to cope with it.
361  */
362 
/*
 * Main IRQ dispatch loop, entered from the IRQ vector with IRQs
 * disabled in the CPSR.  Services every pending, unmasked source in
 * ascending bit order (HPI first when enabled), marks masked sources
 * pending for a later splx(), and re-enables each source once its
 * handlers have run.  See the block comment above for the HPI rules.
 */
void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;		/* IPL on entry; restored per iteration */
	const uint32_t imask = i80321_imask[ppl];

	/* Snapshot of enabled sources currently asserting. */
	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;	/* lowest-numbered pending source */
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI. We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx(). Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		/* Remember whether an HPI was pending before we clear this bit. */
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;		/* per-IRQ event counter */
		uvmexp.intrs++;			/* system-wide interrupt count */
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
			/* Run each handler at its own IPL with IRQs enabled. */
			TAILQ_FOREACH (ih, &iq->iq_list, ih_list) {
				ci->ci_cpl = ih->ih_ipl;
				oldirqstate = enable_interrupts(I32_bit);
				(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
				restore_interrupts(oldirqstate);
			}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI. Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;	/* restore the entry IPL */

		/* Re-enable this interrupt now that's it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
471