/* $NetBSD: ixp12x0_intr.c,v 1.29 2014/03/26 08:52:00 christos Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.29 2014/03/26 08:52:00 christos Exp $");

/*
 * Interrupt support for the Intel ixp12x0
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <arm/locore.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>


extern uint32_t	ixpcom_cr;	/* current cr from *_com.c */
extern uint32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static uint32_t imask[NIPL];
static uint32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;
volatile uint32_t pci_intr_enabled;

void	ixp12x0_intr_dispatch(struct trapframe *);

#define IXPREG(reg)	*((volatile uint32_t*) (reg))

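/*
 * Fetch the raw pending-interrupt bits from the system and PCI
 * interrupt status registers.
 */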
static inline uint32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline uint32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

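/*
 * The UART interrupt is not masked in the interrupt controller;
 * it is enabled and disabled at the device itself, by rewriting the
 * interrupt-enable bits of the com control register.  ixpcom_imask
 * exports the current mask to the com driver.
 */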
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

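/*
 * Apply a mask to the current interrupt sources: the UART source is
 * masked at the device, and PCI sources are masked through the PCI
 * IRQ enable set/clear registers.
 */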
static void
ixp12x0_set_intrmask(uint32_t irqs, uint32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

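/*
 * IRQ numbers below SYS_NIRQ refer to on-chip (system) sources;
 * numbers from SYS_NIRQ up are bit positions in the PCI interrupt
 * controller.
 */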
static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq:bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

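	/*
	 * Sanity: no hardware IRQ may map to a software-interrupt
	 * IPL, and something is expected to be registered at IPL_VM
	 * on both the system and PCI sides.
	 */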
	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);
	KASSERT(imask[IPL_SOFTCLOCK] == 0);
	KASSERT(pci_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(imask[IPL_SOFTBIO] == 0);
	KASSERT(pci_imask[IPL_SOFTBIO] == 0);
	KASSERT(imask[IPL_SOFTNET] == 0);
	KASSERT(pci_imask[IPL_SOFTNET] == 0);
	KASSERT(imask[IPL_SOFTSERIAL] == 0);
	KASSERT(pci_imask[IPL_SOFTSERIAL] == 0);

	KASSERT(imask[IPL_VM] != 0);
	KASSERT(pci_imask[IPL_VM] != 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	imask[IPL_SCHED] |= imask[IPL_VM];
	pci_imask[IPL_SCHED] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_SCHED];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

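/*
 * splx:
 *
 *	Set the interrupt priority level, reprogramming the hardware
 *	mask only when the level actually changes, then give pending
 *	soft interrupts a chance to run.
 */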
inline void
splx(int new)
{
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

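/*
 * Raising the IPL only records the new level; the hardware mask is
 * updated lazily by splx() or by the dispatcher.  Lowering always
 * goes through splx() so the mask stays consistent and pending soft
 * interrupts get to run.
 */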
int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int	old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "ixpintr", iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	curcpu()->ci_cpl = 0;
	hardware_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

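/*
 * ixp12x0_intr_establish:
 *
 *	Register ih_func to be called with arg at the given IPL when
 *	irq asserts, then recompute the spl masks.  A minimal usage
 *	sketch (the handler and softc names are illustrative only,
 *	not defined in this file):
 *
 *		sc->sc_ih = ixp12x0_intr_establish(IXP12X0_INTR_UART,
 *		    IPL_VM, mydev_intr, sc);
 */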
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
	       irq, ipl, (uint32_t) ih_func, (uint32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

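/*
 * ixp12x0_intr_disestablish:
 *
 *	Remove a handler previously registered with
 *	ixp12x0_intr_establish() and recompute the spl masks.
 */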
void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

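/*
 * ixp12x0_intr_dispatch:
 *
 *	Called from the IRQ entry path with interrupts disabled.
 *	Read the pending system and PCI sources, mask them along with
 *	everything blocked at the current IPL, run each handler at
 *	its own IPL with interrupts re-enabled, and finally restore
 *	the IPL and hardware mask in effect on entry.
 */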
void
ixp12x0_intr_dispatch(struct trapframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	struct cpu_info* const	ci = curcpu();
	const int		ppl = ci->ci_cpl;
	u_int			oldirqstate;
	uint32_t		hwpend;
	uint32_t		pci_hwpend;
	int			irq;
	uint32_t		ibit;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);

	hwpend &= ~imask[ppl];
	pci_hwpend &= ~pci_imask[ppl];

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		hwpend &= ~ibit;
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
		pci_hwpend &= ~ibit;
	}

	ci->ci_cpl = ppl;
	hardware_spl_level = ppl;
	ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
439