/*	$NetBSD: ixp12x0_intr.c,v 1.26 2013/12/18 13:03:59 skrll Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.26 2013/12/18 13:03:59 skrll Exp $");
34 
35 /*
36  * Interrupt support for the Intel ixp12x0
37  */
38 
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <arm/locore.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

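/*
 * The ixp12x0 splits its interrupt sources into two spaces: the
 * on-chip "system" IRQs occupy intrq[0] through intrq[SYS_NIRQ - 1],
 * and the PCI unit's IRQs are mapped in behind them at
 * intrq[SYS_NIRQ] through intrq[NIRQ - 1].  Masks for the two spaces
 * are kept in parallel (imask[]/pci_imask[]) throughout this file.
 */
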
extern uint32_t	ixpcom_cr;	/* current cr from *_com.c */
extern uint32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static uint32_t imask[NIPL];
static uint32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;
volatile uint32_t pci_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

void	ixp12x0_intr_dispatch(struct trapframe *);

#define IXPREG(reg)	*((volatile uint32_t*) (reg))

static inline uint32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline uint32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

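/*
 * The UART interrupt is masked at the device rather than in the
 * interrupt controller: the receive and transmit interrupt enables
 * (CR_RIE/CR_XIE) are cleared from, or restored to, the ixpcom
 * control register, using the shadow copy and mask shared with the
 * com driver via ixpcom_cr and ixpcom_imask above.
 */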
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

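/*
 * Apply a mask to both interrupt spaces: "irqs" are system IRQ bits
 * to block and "pci_irqs" are PCI IRQ bits to block.  The UART is the
 * only system source masked here, and it is gated at the device as
 * above; PCI sources are masked and unmasked through the PCI unit's
 * ENABLE_CLEAR/ENABLE_SET registers.
 */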
static void
ixp12x0_set_intrmask(uint32_t irqs, uint32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

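/*
 * Enable delivery of a single IRQ.  System IRQ numbers below SYS_NIRQ
 * are handled specially (only the UART has a device-level enable);
 * numbers at or above SYS_NIRQ are PCI sources and go through the
 * ENABLE_SET register.  ixp12x0_disable_irq() below is the inverse.
 */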
static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq: bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);
	KASSERT(imask[IPL_SOFTCLOCK] == 0);
	KASSERT(pci_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(imask[IPL_SOFTBIO] == 0);
	KASSERT(pci_imask[IPL_SOFTBIO] == 0);
	KASSERT(imask[IPL_SOFTNET] == 0);
	KASSERT(pci_imask[IPL_SOFTNET] == 0);
	KASSERT(imask[IPL_SOFTSERIAL] == 0);
	KASSERT(pci_imask[IPL_SOFTSERIAL] == 0);

	KASSERT(imask[IPL_VM] != 0);
	KASSERT(pci_imask[IPL_VM] != 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	imask[IPL_SCHED] |= imask[IPL_VM];
	pci_imask[IPL_SCHED] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_SCHED];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

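/*
 * spl handling: curcpl() tracks the software interrupt priority
 * level, while hardware_spl_level records the level the hardware
 * masks were last programmed for.  The two are only synchronized
 * when the level actually changes, so raising and re-lowering the
 * level without taking an interrupt costs no register writes.
 */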
inline void
splx(int new)
{
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

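/*
 * Note that _splraise() only raises the software level; the hardware
 * mask is left alone and caught up lazily, either by splx() or by
 * ixp12x0_intr_dispatch() when an interrupt actually arrives.
 */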
int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int	old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "ixpintr", iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	curcpu()->ci_cpl = 0;
	hardware_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

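/*
 * ixp12x0_intr_establish:
 *
 *	Register "ih_func" to run with argument "arg" at IPL "ipl"
 *	whenever "irq" asserts, and recompute the masks.  A typical
 *	(hypothetical) call from a driver attach routine might look
 *	like:
 *
 *		sc->sc_ih = ixp12x0_intr_establish(sc->sc_irq, IPL_NET,
 *		    mydev_intr, sc);
 *		if (sc->sc_ih == NULL)
 *			aprint_error("couldn't establish interrupt\n");
 */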
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
	       irq, ipl, (uint32_t) ih_func, (uint32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
	free(ih, M_DEVBUF);	/* allocated in ixp12x0_intr_establish() */
}

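/*
 * ixp12x0_intr_dispatch:
 *
 *	Main IRQ entry point, called with the interrupt trapframe.
 *	Pending sources are read from both spaces, blocked in hardware
 *	along with everything masked at the current level, and their
 *	handlers are then run at their registered IPLs with CPU
 *	interrupts re-enabled, lowest-numbered IRQ first (ffs order).
 *	The original level and masks are restored before returning.
 */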
void
ixp12x0_intr_dispatch(struct trapframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	struct cpu_info* const	ci = curcpu();
	const int		ppl = ci->ci_cpl;
	u_int			oldirqstate;
	uint32_t		hwpend;
	uint32_t		pci_hwpend;
	int			irq;
	uint32_t		ibit;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	/* Block the pending sources on top of the current level's mask. */
	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);

	/* Anything already blocked at this level is not ours to handle. */
	hwpend &= ~imask[ppl];
	pci_hwpend &= ~pci_imask[ppl];

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);
		hwpend &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);
		pci_hwpend &= ~ibit;

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	ci->ci_cpl = ppl;
	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
443