xref: /netbsd-src/sys/arch/arm/xscale/becc_icu.c (revision d710132b4b8ce7f7cccaaf660cb16aa16b4077a0)
1 /*	$NetBSD: becc_icu.c,v 1.2 2003/06/16 20:00:58 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 2002 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Interrupt support for the ADI Engineering Big Endian Companion Chip.
40  */
41 
42 #ifndef EVBARM_SPL_NOINLINE
43 #define	EVBARM_SPL_NOINLINE
44 #endif
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 
50 #include <uvm/uvm_extern.h>
51 
52 #include <machine/bus.h>
53 #include <machine/intr.h>
54 
55 #include <arm/cpufunc.h>
56 
57 #include <arm/xscale/beccreg.h>
58 #include <arm/xscale/beccvar.h>
59 
60 #include <arm/xscale/i80200reg.h>
61 #include <arm/xscale/i80200var.h>
62 
63 /* Interrupt handler queues. */
64 struct intrq intrq[NIRQ];
65 
66 /* Interrupts to mask at each level. */
67 uint32_t becc_imask[NIPL];
68 
69 /* Current interrupt priority level. */
70 __volatile uint32_t current_spl_level;
71 
72 /* Interrupts pending. */
73 __volatile uint32_t becc_ipending;
74 __volatile uint32_t becc_sipending;
75 
76 /* Software copy of the IRQs we have enabled. */
77 __volatile uint32_t intr_enabled;
78 
79 /* Mask if interrupts steered to FIQs. */
80 uint32_t intr_steer;
81 
82 /*
83  * Interrupt bit names.
84  * XXX Some of these are BRH-centric.
85  */
/*
 * Human-readable names for each BECC interrupt source, indexed by
 * IRQ number (one entry per bit of the 32-bit ICU registers).  Used
 * as the event-counter group names attached in becc_intr_init().
 */
const char *becc_irqnames[] = {
	"soft",
	"timer A",
	"timer B",
	"irq 3",
	"irq 4",
	"irq 5",
	"irq 6",
	"diagerr",
	"DMA EOT",
	"DMA PERR",
	"DMA TABT",
	"DMA MABT",
	"irq 12",
	"irq 13",
	"irq 14",
	"irq 15",
	"PCI PERR",
	"irq 17",
	"irq 18",
	"PCI SERR",
	"PCI OAPE",
	"PCI OATA",
	"PCI OAMA",
	"irq 23",
	"irq 24",
	"irq 25",
	"irq 26",	/* PCI INTA */
	"irq 27",	/* PCI INTB */
	"irq 28",	/* PCI INTC */
	"irq 29",	/* PCI INTD */
	"pushbutton",
	"irq 31",
};
120 
121 void	becc_intr_dispatch(struct clockframe *frame);
122 
123 static __inline uint32_t
124 becc_icsr_read(void)
125 {
126 	uint32_t icsr;
127 
128 	icsr = BECC_CSR_READ(BECC_ICSR);
129 
130 	/*
131 	 * The ICSR register shows bits that are active even if they are
132 	 * masked in ICMR, so we have to mask them off with the interrupts
133 	 * we consider enabled.
134 	 */
135 	return (icsr & intr_enabled);
136 }
137 
/*
 * Push the software steering mask out to the BECC, directing each
 * source to IRQ (bit clear) or FIQ (bit set).  Only bits for valid
 * ICU sources are written.
 */
static __inline void
becc_set_intrsteer(void)
{

	BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
	/* Read back to force the write to post before we return. */
	(void) BECC_CSR_READ(BECC_ICSTR);
}
145 
/*
 * Enable the given IRQ: set its bit in the software copy of the
 * enable mask, then propagate the new mask to the hardware.
 */
static __inline void
becc_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	becc_set_intrmask();
}
153 
/*
 * Disable the given IRQ: clear its bit in the software copy of the
 * enable mask, then propagate the new mask to the hardware.
 */
static __inline void
becc_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	becc_set_intrmask();
}
161 
162 /*
163  * NOTE: This routine must be called with interrupts disabled in the CPSR.
164  */
/*
 * Recompute becc_imask[] (the set of IRQs blocked at each IPL) and
 * each queue's iq_mask (the IRQs blocked while that queue's handlers
 * run) from the currently-registered handlers.  Also disables IRQs
 * with no handlers and enables those with at least one.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
becc_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Mask everything off while the tables are inconsistent. */
		becc_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		becc_imask[ipl] = irqs;
	}

	/* splnone() must block nothing. */
	becc_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 * Note they all come in at the same physical IRQ.
	 */
	becc_imask[IPL_SOFT] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTCLOCK] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTNET] = (1U << ICU_SOFT);
	becc_imask[IPL_SOFTSERIAL] = (1U << ICU_SOFT);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	becc_imask[IPL_SOFTCLOCK] |= becc_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	becc_imask[IPL_SOFTNET] |= becc_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" device (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	becc_imask[IPL_BIO] |= becc_imask[IPL_SOFTNET];
	becc_imask[IPL_NET] |= becc_imask[IPL_BIO];
	becc_imask[IPL_SOFTSERIAL] |= becc_imask[IPL_NET];
	becc_imask[IPL_TTY] |= becc_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	becc_imask[IPL_VM] |= becc_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	becc_imask[IPL_AUDIO] |= becc_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	becc_imask[IPL_CLOCK] |= becc_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IQ80310.
	 * NOTE(review): this comment appears inherited from the IQ80310
	 * port; presumably the same holds for BECC-based boards — verify.
	 */
	becc_imask[IPL_STATCLOCK] |= becc_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	becc_imask[IPL_HIGH] |= becc_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	becc_imask[IPL_SERIAL] |= becc_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		/* Re-enable only IRQs that have at least one handler. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			becc_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= becc_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
277 
/*
 * Restore the interrupt priority level to `new' (the value returned
 * by a previous splraise).  Thin wrapper around becc_splx().
 */
void
splx(int new)
{

	becc_splx(new);
}
284 
/*
 * Lower the interrupt priority level to `ipl', returning the previous
 * level.  Thin wrapper around becc_spllower().
 */
int
_spllower(int ipl)
{

	return (becc_spllower(ipl));
}
291 
/*
 * Raise the interrupt priority level to `ipl', returning the previous
 * level.  Thin wrapper around becc_splraise().
 */
int
_splraise(int ipl)
{

	return (becc_splraise(ipl));
}
298 
/*
 * Schedule soft interrupt queue `si' for processing.  Thin wrapper
 * around becc_setsoftintr().
 */
void
_setsoftintr(int si)
{

	becc_setsoftintr(si);
}
305 
/* Map each software interrupt queue index to the IPL it runs at. */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
312 
/*
 * Handler for the BECC soft interrupt: drain all pending software
 * interrupt queues, highest priority first.  Returns 1 if the queues
 * were processed, 0 if another invocation was already doing so.
 */
int
becc_softint(void *arg)
{
	/* Guards against re-entering the dispatch loop below. */
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	uint32_t	new, oldirqstate;

	/* Clear interrupt */
	BECC_CSR_WRITE(BECC_ICSR, 0);

	/* Someone else is already draining the queues; let them finish. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return 0;

	oldirqstate = disable_interrupts(I32_bit);

	/* Remember the spl level to return to after each queue. */
	new = current_spl_level;

	/*
	 * For one queue: if it is pending, clear its pending bit, raise
	 * the spl mask to that queue's IPL, run its handlers with CPU
	 * IRQs re-enabled, then restore the spl level.
	 */
#define DO_SOFTINT(si)							\
	if (becc_sipending & (1 << (si))) {				\
		becc_sipending &= ~(1 << (si));				\
		current_spl_level |= becc_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);

	return 1;
}
350 
351 /*
352  * becc_icu_init:
353  *
354  *	Initialize the BECC ICU.  Called early in bootstrap
355  *	to make sure the ICU is in a pristine state.
356  */
/*
 * becc_icu_init:
 *
 *	Initialize the BECC ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
becc_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	becc_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	becc_set_intrsteer();

	/* Route external IRQs from the i80200 core through our dispatcher. */
	i80200_extirq_dispatch = becc_intr_dispatch;

	i80200_intr_enable(INTCTL_IM);
}
371 
372 /*
373  * becc_intr_init:
374  *
375  *	Initialize the rest of the interrupt subsystem, making it
376  *	ready to handle interrupts from devices.
377  */
378 void
379 becc_intr_init(void)
380 {
381 	struct intrq *iq;
382 	int i;
383 
384 	intr_enabled = 0;
385 
386 	for (i = 0; i < NIRQ; i++) {
387 		iq = &intrq[i];
388 		TAILQ_INIT(&iq->iq_list);
389 
390 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
391 		    NULL, "becc", becc_irqnames[i]);
392 	}
393 
394 	becc_intr_calculate_masks();
395 
396 	/* Enable IRQs (don't yet use FIQs). */
397 	enable_interrupts(I32_bit);
398 }
399 
400 void *
401 becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
402 {
403 	struct intrq *iq;
404 	struct intrhand *ih;
405 	uint32_t oldirqstate;
406 
407 	if (irq < 0 || irq > NIRQ)
408 		panic("becc_intr_establish: IRQ %d out of range", irq);
409 
410 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
411 	if (ih == NULL)
412 		return (NULL);
413 
414 	ih->ih_func = func;
415 	ih->ih_arg = arg;
416 	ih->ih_ipl = ipl;
417 	ih->ih_irq = irq;
418 
419 	iq = &intrq[irq];
420 
421 	/* All BECC interrupts are level-triggered. */
422 	iq->iq_ist = IST_LEVEL;
423 
424 	oldirqstate = disable_interrupts(I32_bit);
425 
426 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
427 
428 	becc_intr_calculate_masks();
429 
430 	restore_interrupts(oldirqstate);
431 
432 	return (ih);
433 }
434 
435 void
436 becc_intr_disestablish(void *cookie)
437 {
438 	struct intrhand *ih = cookie;
439 	struct intrq *iq = &intrq[ih->ih_irq];
440 	uint32_t oldirqstate;
441 
442 	oldirqstate = disable_interrupts(I32_bit);
443 
444 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
445 
446 	becc_intr_calculate_masks();
447 
448 	restore_interrupts(oldirqstate);
449 }
450 
/*
 * Main IRQ dispatcher, installed as the i80200 external-IRQ hook in
 * becc_icu_init().  Services every enabled pending source that is not
 * blocked at the current spl level; blocked sources are masked off in
 * hardware and recorded in becc_ipending to be re-enabled when the spl
 * level drops.
 */
void
becc_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate, pcpl, irq, ibit, hwpend;

	/* Snapshot the spl mask in force when the interrupt arrived. */
	pcpl = current_spl_level;

	hwpend = becc_icsr_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	becc_set_intrmask();

	/* Service pending sources lowest-numbered first. */
	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			becc_ipending |= ibit;
			continue;
		}

		becc_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Raise the spl mask to this queue's level, then run the
		 * handlers with CPU IRQs re-enabled (nested interrupts OK). */
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* A NULL handler arg means "pass the clockframe". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that's it's cleared. */
		intr_enabled |= ibit;
		becc_set_intrmask();
	}

	/* Re-enable any deferred sources no longer blocked at this spl. */
	if (becc_ipending & ~pcpl) {
		intr_enabled |= (becc_ipending & ~pcpl);
		becc_set_intrmask();
	}
}
509