/*	$NetBSD: marvell_intr.h,v 1.5 2003/04/09 15:44:27 matt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

/*
 * Interrupt Priority Levels
 */
#define	IPL_NONE	0	/* nothing */
#define	IPL_SOFTCLOCK	1	/* timeouts */
#define	IPL_SOFTNET	2	/* protocol stacks */
#define	IPL_BIO		3	/* block I/O */
#define	IPL_NET		4	/* network */
#define	IPL_NCP		5	/* network processors */
#define	IPL_SOFTI2C	6	/* i2c */
#define	IPL_SOFTSERIAL	7	/* serial */
#define	IPL_TTY		8	/* terminal */
#define	IPL_AUDIO	9	/* boom box */
#define	IPL_EJECT	10	/* card eject */
#define	IPL_GTERR	10	/* GT-64260 errors */
#define	IPL_I2C		11	/* i2c */
#define	IPL_VM		12	/* memory allocation */
#define	IPL_SERIAL	13	/* serial */
#define	IPL_CLOCK	14	/* clock */
#define	IPL_SCHED	14	/* scheduler */
#define	IPL_LOCK	14	/* same as high for now */
#define	IPL_HIGH	15	/* everything */
#define	NIPL		16
#define	IPL_PRIMASK	0xf
#define	IPL_EE		0x10	/* enable external interrupts on splx */

/* Interrupt sharing types. */
#define	IST_NONE	0	/* none */
#define	IST_PULSE	1	/* pulsed */
#define	IST_EDGE	2	/* edge-triggered */
#define	IST_LEVEL	3	/* level-triggered */
#define	IST_SOFT	4	/* software-triggered */
#define	IST_CLOCK	5	/* exclusive for clock */
#define	NIST		6

#if !defined(_LOCORE) && defined(_KERNEL)

/*
 * we support 128 IRQs:
 *	96 (ICU_LEN) hard interrupt IRQs:
 *		- 64 Main Cause IRQs,
 *		- 32 GPP IRQs,
 *	and 32 softint IRQs
 */
#define	ICU_LEN		96	/* number of HW IRQs */
#define	IRQ_GPP_BASE	64	/* base of GPP IRQs */
#define	IRQ_GPP_SUM	(32+24) /* GPP[7..0] interrupt */	/* XXX */
#define	NIRQ		128	/* total # of IRQs (HW + softint) */

#define	IMASK_ICU_LO	0
#define	IMASK_ICU_HI	1
#define	IMASK_ICU_GPP	2
#define	IMASK_SOFTINT	3
#define	IMASK_WORDSHIFT	5	/* log2(32) */
#define	IMASK_BITMASK	(~((~0) << IMASK_WORDSHIFT))

#define	IRQ_IS_GPP(irq)	(((irq) >= IRQ_GPP_BASE) && ((irq) < ICU_LEN))

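/*
 * Illustrative sketch (not part of the original header): how an IRQ number
 * selects a word and bit within the imask_t defined below.
 */
#if 0	/* example only */
	int irq  = IRQ_GPP_BASE + 6;		/* GPP IRQ 70 */
	int word = irq >> IMASK_WORDSHIFT;	/* 70 / 32 = 2 = IMASK_ICU_GPP */
	int bit  = irq & IMASK_BITMASK;		/* 70 % 32 = 6 */
	/* the pending/mask state for this IRQ lives in bits[word] & (1 << bit) */
#endif
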
/*
 * interrupt mask bit vector
 */
typedef struct {
	u_int32_t bits[4];
} imask_t __attribute__ ((aligned(16)));

static __inline void imask_zero(imask_t *);
static __inline void imask_zero_v(volatile imask_t *);
static __inline void imask_dup_v(imask_t *, const volatile imask_t *);
static __inline void imask_and(imask_t *, const imask_t *);
static __inline void imask_andnot_v(volatile imask_t *, const imask_t *);
static __inline void imask_andnot_icu_vv(volatile imask_t *, const volatile imask_t *);
static __inline int imask_empty(const imask_t *);
static __inline void imask_orbit(imask_t *, int);
static __inline void imask_orbit_v(volatile imask_t *, int);
static __inline void imask_clrbit(imask_t *, int);
static __inline void imask_clrbit_v(volatile imask_t *, int);
static __inline u_int32_t imask_andbit_v(const volatile imask_t *, int);
static __inline int imask_test_v(const volatile imask_t *, const imask_t *);

static __inline void
imask_zero(imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static __inline void
imask_zero_v(volatile imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static __inline void
imask_dup_v(imask_t *idp, const volatile imask_t *isp)
{
	*idp = *isp;
}

static __inline void
imask_and(imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= isp->bits[IMASK_SOFTINT];
}

static __inline void
imask_andnot_v(volatile imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= ~isp->bits[IMASK_SOFTINT];
}

static __inline void
imask_andnot_icu_vv(volatile imask_t *idp, const volatile imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
}

static __inline int
imask_empty(const imask_t *isp)
{
	return (! (isp->bits[IMASK_ICU_LO] | isp->bits[IMASK_ICU_HI] |
		   isp->bits[IMASK_ICU_GPP] | isp->bits[IMASK_SOFTINT]));
}

static __inline void
imask_orbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static __inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static __inline void
imask_clrbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static __inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static __inline u_int32_t
imask_andbit_v(const volatile imask_t *idp, int bitno)
{
	return idp->bits[bitno>>IMASK_WORDSHIFT] & (1 << (bitno&IMASK_BITMASK));
}

static __inline int
imask_test_v(const volatile imask_t *idp, const imask_t *isp)
{
	return ((idp->bits[IMASK_ICU_LO]  & isp->bits[IMASK_ICU_LO]) ||
		(idp->bits[IMASK_ICU_HI]  & isp->bits[IMASK_ICU_HI]) ||
		(idp->bits[IMASK_ICU_GPP] & isp->bits[IMASK_ICU_GPP]) ||
		(idp->bits[IMASK_SOFTINT] & isp->bits[IMASK_SOFTINT]));
}

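/*
 * Illustrative sketch (not from the original header): typical use of the
 * imask helpers, e.g. recording a pending IRQ and checking whether anything
 * is deliverable at the current priority.  "ipending", "imask[]" and
 * intr_dispatch() are declared later in this file; "irq" and "cpl" stand in
 * for the dispatcher's IRQ number and current priority level.
 */
#if 0	/* example only */
	imask_orbit_v(&ipending, irq);			/* mark IRQ pending */
	if (imask_test_v(&ipending, &imask[cpl]))	/* anything deliverable? */
		intr_dispatch();
	imask_clrbit_v(&ipending, irq);			/* handled; clear it */
#endif
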
#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
	u_int64_t tcause;
	u_int64_t tcommit;
	u_int64_t tstart;
	u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
	struct ext_intr_hist *histp;
	unsigned int histix;
	u_int64_t cnt;
	u_int64_t sum;
	u_int64_t min;
	u_int64_t max;
	u_int64_t pnd;
	u_int64_t borrowed;
	struct ext_intr_stat *save;
	unsigned long preempted[NIRQ];	/* XXX */
} ext_intr_stat_t  __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init __P((void));
extern void ext_intr_stats_cause
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_pend
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_commit __P((imask_t *));
extern void ext_intr_stats_commit_m __P((imask_t *));
extern void ext_intr_stats_commit_irq __P((u_int));
extern u_int64_t ext_intr_stats_pre __P((int));
extern void ext_intr_stats_post __P((int, u_int64_t));

#define EXT_INTR_STATS_INIT() ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)  ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m) ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i) ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t) u_int64_t t
#define EXT_INTR_STATS_PRE(i, t) t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t) ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s) ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i) ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
		 intr_depth_max = (intr_depth > intr_depth_max) ? \
			 intr_depth : intr_depth_max

#else /* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(i, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif	/* EXT_INTR_STATS */

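/*
 * Illustrative sketch (not from the original header): how the stats hooks
 * are intended to bracket an interrupt handler.  "irq", "ih_fun" and
 * "ih_arg" are hypothetical names for a dispatcher's handler record.
 */
#if 0	/* example only */
	EXT_INTR_STATS_DECL(tstart);

	EXT_INTR_STATS_PEND_IRQ(irq);		/* count the pending event */
	EXT_INTR_STATS_PRE(irq, tstart);	/* timestamp before the ISR */
	(*ih_fun)(ih_arg);			/* run the handler */
	EXT_INTR_STATS_POST(irq, tstart);	/* accumulate cnt/sum/min/max */
	EXT_INTR_STATS_DEPTH();			/* track worst-case nesting */
#endif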

#ifdef SPL_STATS
typedef struct spl_hist {
	int level;
	void *addr;
	u_int64_t time;
} spl_hist_t;

extern void spl_stats_init();
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()	spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)	spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif	/* SPL_STATS */


void setsoftclock __P((void));
void clearsoftclock __P((void));
int  splsoftclock __P((void));
void setsoftnet   __P((void));
void clearsoftnet __P((void));
int  splsoftnet   __P((void));

void intr_dispatch __P((void));
#ifdef SPL_INLINE
static __inline int splraise __P((int));
static __inline int spllower __P((int));
static __inline void splx __P((int));
#else
extern int splraise __P((int));
extern int spllower __P((int));
extern void splx __P((int));
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static __inline void
extintr_restore(register_t omsr)
{
	__asm __volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static __inline register_t
extintr_enable(void)
{
	register_t omsr;

	__asm __volatile("sync;");
	__asm __volatile("mfmsr %0;" : "=r"(omsr));
	__asm __volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

	return omsr;
}

static __inline register_t
extintr_disable(void)
{
	register_t omsr;

	__asm __volatile("mfmsr %0;" : "=r"(omsr));
	__asm __volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
	__asm __volatile("isync;");

	return omsr;
}

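/*
 * Illustrative sketch (not from the original header): the usual pattern for
 * a short critical section with external interrupts masked in the MSR.
 */
#if 0	/* example only */
	register_t omsr;

	omsr = extintr_disable();	/* clear PSL_EE, remember old MSR */
	/* ... touch ipending/imask state atomically w.r.t. interrupts ... */
	extintr_restore(omsr);		/* put PSL_EE back the way it was */
#endif
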
#ifdef SPL_INLINE
static __inline int
splraise(int ncpl)
{
	int ocpl;
	register_t omsr;

	omsr = extintr_disable();
	ocpl = cpl;
	if (ncpl > cpl) {
		SPL_STATS_LOG(ncpl, 0);
		cpl = ncpl;
		if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
			/* leave external interrupts disabled */
			return (ocpl | IPL_EE);
		}
	}
	extintr_restore(omsr);
	return (ocpl);
}

static __inline void
splx(int xcpl)
{
	imask_t *ncplp;
	register_t omsr;
	int ncpl = xcpl & IPL_PRIMASK;

	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	if (ncpl < cpl) {
		cpl = ncpl;
		SPL_STATS_LOG(ncpl, 0);
		if (imask_test_v(&ipending, ncplp))
			intr_dispatch();
	}
	if (xcpl & IPL_EE)
		omsr |= PSL_EE;
	extintr_restore(omsr);
}

static __inline int
spllower(int ncpl)
{
	int ocpl;
	imask_t *ncplp;
	register_t omsr;

	ncpl &= IPL_PRIMASK;
	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	ocpl = cpl;
	cpl = ncpl;
	SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
	ext_intr_statp = 0;
#endif
	if (imask_test_v(&ipending, ncplp))
		intr_dispatch();

	if (ncpl < IPL_HIGH)
		omsr |= PSL_EE;
	extintr_restore(omsr);

	return (ocpl);
}
#endif	/* SPL_INLINE */

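/*
 * Illustrative sketch (not from the original header): the standard
 * raise/restore protocol.  The value returned by splraise() must be passed
 * back to splx() unchanged, since it may carry IPL_EE on top of the old
 * priority level.
 */
#if 0	/* example only */
	int s;

	s = splraise(IPL_TTY);	/* block tty-level interrupts */
	/* ... touch driver state shared with the interrupt handler ... */
	splx(s);		/* drop back, restoring PSL_EE if needed */
#endif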

/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define	SIR_BASE	(NIRQ-32)
#define	SIR_SOFTCLOCK	(NIRQ-5)
#define	SIR_SOFTNET	(NIRQ-4)
#define	SIR_SOFTI2C	(NIRQ-3)
#define	SIR_SOFTSERIAL	(NIRQ-2)
#define	SIR_HWCLOCK	(NIRQ-1)
#define	SIR_RES		~(SIBIT(SIR_SOFTCLOCK)|\
			  SIBIT(SIR_SOFTNET)|\
			  SIBIT(SIR_SOFTI2C)|\
			  SIBIT(SIR_SOFTSERIAL)|\
			  SIBIT(SIR_HWCLOCK))

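/*
 * Worked example (added for clarity, not in the original header): with
 * NIRQ = 128 the softint IRQs occupy 96..127, so SIR_SOFTCLOCK = 123 and
 * SIBIT(SIR_SOFTCLOCK) = 1 << (123 - 96) = 1 << 27, i.e. bit 27 of
 * bits[IMASK_SOFTINT] in an imask_t.  SIR_RES appears to cover the
 * remaining, unassigned softint bits.
 */
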
/*
 * standard hardware interrupt spl's
 */
#define	splbio()	splraise(IPL_BIO)
#define	splnet()	splraise(IPL_NET)
#define	spltty()	splraise(IPL_TTY)
#define	splaudio()	splraise(IPL_AUDIO)
#define	splsched()	splraise(IPL_SCHED)
#define	splclock()	splraise(IPL_CLOCK)
#define	splstatclock()	splclock()
#define	splserial()	splraise(IPL_SERIAL)

#define	spllpt()	spltty()

/*
 * Software interrupt spl's
 *
 * NOTE: spllowersoftclock() is used by hardclock() to lower the priority
 * from clock to softclock before it calls softclock().
 */
#define	spllowersoftclock()	spllower(IPL_SOFTCLOCK)
#define	splsoftclock()		splraise(IPL_SOFTCLOCK)
#define	splsoftnet()		splraise(IPL_SOFTNET)
#define	splsoftserial()		splraise(IPL_SOFTSERIAL)

#define	__HAVE_GENERIC_SOFT_INTERRUPTS	/* should be in <machine/types.h> */
void *softintr_establish(int level, void (*fun)(void *), void *arg);
void softintr_disestablish(void *cookie);
void softintr_schedule(void *cookie);

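/*
 * Illustrative sketch (not from the original header): establishing and
 * scheduling a generic soft interrupt.  "mydev_softintr" and "sc" are
 * hypothetical driver names.
 */
#if 0	/* example only */
	void *si;

	si = softintr_establish(IPL_SOFTNET, mydev_softintr, sc);
	if (si == NULL)
		panic("mydev: can't establish soft interrupt");
	/* later, typically from the hard interrupt handler: */
	softintr_schedule(si);
#endif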

/*
 * Miscellaneous
 */
#define	splvm()		splraise(IPL_VM)
#define	spllock()	splraise(IPL_LOCK)
#define	splhigh()	splraise(IPL_HIGH)
#define	spl0()		spllower(IPL_NONE)

#define	SIBIT(ipl)	(1 << ((ipl) - SIR_BASE))
#if 0
#define	setsoftclock()	softintr(SIBIT(SIR_SOFTCLOCK))
#define	setsoftnet()	softintr(SIBIT(SIR_SOFTNET))
#define	setsoftserial()	softintr(SIBIT(SIR_SOFTSERIAL))
#define	setsofti2c()	softintr(SIBIT(SIR_SOFTI2C))
#endif

extern void *softnet_si;
void	*intr_establish(int, int, int, int (*)(void *), void *);
void	intr_disestablish(void *);
void	init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void	ext_intr(struct intrframe *);

#if 0
void	softserial(void);
#endif
void	strayintr(int);

#define	schednetisr(isr)  do {			\
	__asm __volatile(			\
		"1:	lwarx	0,0,%1\n"	\
		"	or	0,0,%0\n"	\
		"	stwcx.	0,0,%1\n"	\
		"	bne-	1b"		\
	   :					\
	   : "r"(1 << (isr)), "b"(&netisr)	\
	   : "cr0", "r0");			\
	softintr_schedule(softnet_si);		\
} while (/*CONSTCOND*/ 0)

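/*
 * Illustrative sketch (not from the original header): schednetisr()
 * atomically ORs the protocol's bit into the global "netisr" word with a
 * lwarx/stwcx. loop and then schedules the softnet handler.  NETISR_IP is
 * the usual constant from <net/netisr.h>.
 */
#if 0	/* example only */
	schednetisr(NETISR_IP);		/* request IP input processing */
#endif
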
/*
 * defines for indexing intrcnt
 */
#define	CNT_IRQ0	0
#define	CNT_CLOCK	SIR_HWCLOCK
#define	CNT_SOFTCLOCK	SIR_SOFTCLOCK
#define	CNT_SOFTNET	SIR_SOFTNET
#define	CNT_SOFTSERIAL	SIR_SOFTSERIAL
#define	CNT_SOFTI2C	SIR_SOFTI2C

#endif /* !_LOCORE && _KERNEL */

#endif /* _MVPPPC_INTR_H_ */