/*	$NetBSD: interrupt.c,v 1.30 2023/12/20 15:34:45 thorpej Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.30 2023/12/20 15:34:45 thorpej Exp $");

#include <sys/param.h>
#include <sys/intr.h>
#include <sys/cpu.h>

#include <sh3/exception.h>
#include <sh3/clock.h>
#include <sh3/intcreg.h>
#include <sh3/tmureg.h>

static void intc_intr_priority(int, int);
static struct intc_intrhand *intc_alloc_ih(void);
static void intc_free_ih(struct intc_intrhand *);
static int intc_unknown_intr(void *);

#ifdef SH4
static void intpri_intr_enable(int);
static void intpri_intr_disable(int);
#endif

/*
 * EVTCODE to intc_intrhand mapper.
 * The highest index used, #76, corresponds to SH4_INTEVT_TMU4 (0xb80).
 */
int8_t __intc_evtcode_to_ih[128];

struct intc_intrhand __intc_intrhand[_INTR_N + 1] = {
	/* Placeholder interrupt handler for unregistered interrupts. */
	[0] = { .ih_func = intc_unknown_intr, .ih_level = 0xf0 }
};
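
/*
 * Descriptive note: __intc_evtcode_to_ih[] presumably backs the
 * EVTCODE_TO_IH_INDEX() macro used below, mapping an INTEVT/INTEVT2
 * code to a slot in __intc_intrhand[].  Index 0 means "no handler
 * registered" and resolves to the intc_unknown_intr placeholder above.
 */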

/*
 * SH INTC support.
 */
void
intc_init(void)
{

	switch (cpu_product) {
#ifdef SH3
	case CPU_PRODUCT_7709:
	case CPU_PRODUCT_7709A:
	case CPU_PRODUCT_7706:
		_reg_write_2(SH7709_IPRC, 0);
		_reg_write_2(SH7709_IPRD, 0);
		_reg_write_2(SH7709_IPRE, 0);
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708:
	case CPU_PRODUCT_7708S:
	case CPU_PRODUCT_7708R:
		_reg_write_2(SH3_IPRA, 0);
		_reg_write_2(SH3_IPRB, 0);
		break;
#endif /* SH3 */

#ifdef SH4
	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		_reg_write_4(SH4_INTPRI00, 0);
		_reg_write_4(SH4_INTMSK00, INTMSK00_MASK_ALL);
		/* FALLTHROUGH */
	case CPU_PRODUCT_7750S:
	case CPU_PRODUCT_7750R:
		_reg_write_2(SH4_IPRD, 0);
		/* FALLTHROUGH */
	case CPU_PRODUCT_7750:
		_reg_write_2(SH4_IPRA, 0);
		_reg_write_2(SH4_IPRB, 0);
		_reg_write_2(SH4_IPRC, 0);
		break;
#endif /* SH4 */
	}
}

void *
intc_intr_establish(int evtcode, int trigger, int level,
    int (*ih_func)(void *), void *ih_arg)
{
	struct intc_intrhand *ih;

	KDASSERT(evtcode >= 0x200 && level > 0);

	ih = intc_alloc_ih();
	ih->ih_func	= ih_func;
	ih->ih_arg	= ih_arg;
	ih->ih_level	= level << 4;	/* convert to SR.IMASK format. */
	ih->ih_evtcode	= evtcode;

	/* Map interrupt handler */
	EVTCODE_TO_IH_INDEX(evtcode) = ih->ih_idx;

	/* Priority */
	intc_intr_priority(evtcode, level);

	/* Sense select (SH7709, SH7709A only) XXX notyet */

	return (ih);
}
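
/*
 * Illustrative use only (the event code, trigger, level, and the
 * "mydev_intr"/"sc" names below are hypothetical stand-ins for whatever
 * an attaching driver actually passes):
 *
 *	void *ih = intc_intr_establish(SH_INTEVT_TMU0_TUNI0, IST_LEVEL,
 *	    IPL_CLOCK, mydev_intr, sc);
 *	...
 *	intc_intr_disestablish(ih);
 */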

void
intc_intr_disestablish(void *arg)
{
	struct intc_intrhand *ih = arg;
	int evtcode = ih->ih_evtcode;

	/* Mask via IPR if it can; if not, the cascaded ICU will do it. */
	intc_intr_priority(evtcode, 0);

	/* Unmap interrupt handler */
	EVTCODE_TO_IH_INDEX(evtcode) = 0;

	intc_free_ih(ih);
}

void
intc_intr_disable(int evtcode)
{
	int s;

	s = _cpu_intr_suspend();
	KASSERT(EVTCODE_TO_IH_INDEX(evtcode) != 0); /* there is a handler */
	switch (evtcode) {
	default:
		intc_intr_priority(evtcode, 0);
		break;

#ifdef SH4
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
	case SH4_INTEVT_TMU3:
	case SH4_INTEVT_TMU4:
		intpri_intr_disable(evtcode);
		break;
#endif
	}
	_cpu_intr_resume(s);
}

void
intc_intr_enable(int evtcode)
{
	struct intc_intrhand *ih;
	int s;

	s = _cpu_intr_suspend();
	KASSERT(EVTCODE_TO_IH_INDEX(evtcode) != 0); /* there is a handler */
	switch (evtcode) {
	default:
		ih = EVTCODE_IH(evtcode);
		/* ih_level is in the SR.IMASK format */
		intc_intr_priority(evtcode, (ih->ih_level >> 4));
		break;

#ifdef SH4
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
	case SH4_INTEVT_TMU3:
	case SH4_INTEVT_TMU4:
		intpri_intr_enable(evtcode);
		break;
#endif
	}
	_cpu_intr_resume(s);
}


/*
 * void intc_intr_priority(int evtcode, int level)
 *	Set up the interrupt priority register field for evtcode.
 *	SH7708, SH7708S, SH7708R, SH7750, SH7750S ... evtcode is INTEVT
 *	SH7709, SH7709A, SH7706			  ... evtcode is INTEVT2
 */
static void
intc_intr_priority(int evtcode, int level)
{
	volatile uint16_t *iprreg;
	int pos;
	uint16_t r;

#define	__SH_IPR(_sh, _ipr, _pos)					   \
	do {								   \
		iprreg = (volatile uint16_t *)(SH ## _sh ## _IPR ## _ipr); \
		pos = (_pos);						   \
	} while (/*CONSTCOND*/0)

#define	SH3_IPR(_ipr, _pos)		__SH_IPR(3, _ipr, _pos)
#define	SH4_IPR(_ipr, _pos)		__SH_IPR(4, _ipr, _pos)
#define	SH7709_IPR(_ipr, _pos)		__SH_IPR(7709, _ipr, _pos)

#define	SH_IPR(_ipr, _pos)						\
	do {								\
		if (CPU_IS_SH3)						\
			SH3_IPR(_ipr, _pos);				\
		else							\
			SH4_IPR(_ipr, _pos);				\
	} while (/*CONSTCOND*/0)

	iprreg = 0;
	pos = -1;

	switch (evtcode) {
	case SH_INTEVT_TMU0_TUNI0:
		SH_IPR(A, 12);
		break;
	case SH_INTEVT_TMU1_TUNI1:
		SH_IPR(A, 8);
		break;
	case SH_INTEVT_TMU2_TUNI2:
		SH_IPR(A, 4);
		break;
	case SH_INTEVT_WDT_ITI:
		SH_IPR(B, 12);
		break;
	case SH_INTEVT_SCI_ERI:
	case SH_INTEVT_SCI_RXI:
	case SH_INTEVT_SCI_TXI:
	case SH_INTEVT_SCI_TEI:
		SH_IPR(B, 4);
		break;
	}

#ifdef SH3
	if (CPU_IS_SH3) {
		switch (evtcode) {
		case SH7709_INTEVT2_IRQ3:
			SH7709_IPR(C, 12);
			break;
		case SH7709_INTEVT2_IRQ2:
			SH7709_IPR(C, 8);
			break;
		case SH7709_INTEVT2_IRQ1:
			SH7709_IPR(C, 4);
			break;
		case SH7709_INTEVT2_IRQ0:
			SH7709_IPR(C, 0);
			break;
		case SH7709_INTEVT2_PINT07:
			SH7709_IPR(D, 12);
			break;
		case SH7709_INTEVT2_PINT8F:
			SH7709_IPR(D, 8);
			break;
		case SH7709_INTEVT2_IRQ5:
			SH7709_IPR(D, 4);
			break;
		case SH7709_INTEVT2_IRQ4:
			SH7709_IPR(D, 0);
			break;
		case SH7709_INTEVT2_DEI0:
		case SH7709_INTEVT2_DEI1:
		case SH7709_INTEVT2_DEI2:
		case SH7709_INTEVT2_DEI3:
			SH7709_IPR(E, 12);
			break;
		case SH7709_INTEVT2_IRDA_ERI:
		case SH7709_INTEVT2_IRDA_RXI:
		case SH7709_INTEVT2_IRDA_BRI:
		case SH7709_INTEVT2_IRDA_TXI:
			SH7709_IPR(E, 8);
			break;
		case SH7709_INTEVT2_SCIF_ERI:
		case SH7709_INTEVT2_SCIF_RXI:
		case SH7709_INTEVT2_SCIF_BRI:
		case SH7709_INTEVT2_SCIF_TXI:
			SH7709_IPR(E, 4);
			break;
		case SH7709_INTEVT2_ADC:
			SH7709_IPR(E, 0);
			break;
		}
	}
#endif /* SH3 */

#ifdef SH4
	if (CPU_IS_SH4) {
		switch (evtcode) {
		case SH4_INTEVT_SCIF_ERI:
		case SH4_INTEVT_SCIF_RXI:
		case SH4_INTEVT_SCIF_BRI:
		case SH4_INTEVT_SCIF_TXI:
			SH4_IPR(C, 4);
			break;

#if 0
		case SH4_INTEVT_PCISERR:
		case SH4_INTEVT_PCIDMA3:
		case SH4_INTEVT_PCIDMA2:
		case SH4_INTEVT_PCIDMA1:
		case SH4_INTEVT_PCIDMA0:
		case SH4_INTEVT_PCIPWON:
		case SH4_INTEVT_PCIPWDWN:
		case SH4_INTEVT_PCIERR:
#endif
		case SH4_INTEVT_TMU3:
		case SH4_INTEVT_TMU4:
			intpri_intr_priority(evtcode, level);
			break;
		}
	}
#endif /* SH4 */

	/*
	 * XXX: This function gets called even for interrupts that
	 * don't have their priority defined by IPR registers.
	 */
	if (pos < 0)
		return;

	r = _reg_read_2(iprreg);
	r = (r & ~(0xf << (pos))) | (level << (pos));
	_reg_write_2(iprreg, r);
}
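
/*
 * Worked example (follows directly from the switch above): establishing
 * SH_INTEVT_TMU0_TUNI0 at level 5 selects IPR register A with pos = 12,
 * so the read-modify-write above sets bits 15:12 of IPRA to 5 while
 * leaving the other priority fields untouched.
 */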

/*
 * Interrupt handler slot allocator.
 */
static struct intc_intrhand *
intc_alloc_ih(void)
{
	/* #0 is reserved for unregistered interrupts. */
	struct intc_intrhand *ih = &__intc_intrhand[1];
	int i;

	for (i = 1; i <= _INTR_N; i++, ih++)
		if (ih->ih_idx == 0) {	/* no driver uses this. */
			ih->ih_idx = i;	/* register myself */
			return (ih);
		}

	panic("increase _INTR_N greater than %d", _INTR_N);
	return (NULL);
}

static void
intc_free_ih(struct intc_intrhand *ih)
{

	memset(ih, 0, sizeof(*ih));
}

/* Place-holder for debugging */
static int
intc_unknown_intr(void *arg)
{

	printf("INTEVT=0x%x", _reg_read_4(SH_(INTEVT)));
	if (cpu_product == CPU_PRODUCT_7709 ||
	    cpu_product == CPU_PRODUCT_7709A ||
	    cpu_product == CPU_PRODUCT_7706)
		printf(" INTEVT2=0x%x", _reg_read_4(SH7709_INTEVT2));
	printf("\n");

	panic("unknown interrupt");
	/* NOTREACHED */
	return (0);
}

#ifdef SH4 /* SH7751 support */

/*
 * INTPRIxx
 */
void
intpri_intr_priority(int evtcode, int level)
{
	volatile uint32_t *iprreg;
	uint32_t r;
	int pos;

	if (!CPU_IS_SH4)
		return;

	switch (cpu_product) {
	default:
		return;

	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		break;
	}

	iprreg = (volatile uint32_t *)SH4_INTPRI00;
	pos = -1;

	switch (evtcode) {
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIERR:
		pos = 0;
		break;

	case SH4_INTEVT_PCISERR:
		pos = 4;
		break;

	case SH4_INTEVT_TMU3:
		pos = 8;
		break;

	case SH4_INTEVT_TMU4:
		pos = 12;
		break;
	}

	if (pos < 0) {
		return;
	}

	r = _reg_read_4(iprreg);
	r = (r & ~(0xf << pos)) | (level << pos);
	_reg_write_4(iprreg, r);
}
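
/*
 * As the switch above implies, the SH7751 INTPRI00 register packs four
 * 4-bit priority fields: bits 3:0 for the PCI DMA/power/error group,
 * bits 7:4 for PCISERR, bits 11:8 for TMU3, and bits 15:12 for TMU4.
 * (Descriptive comment only, derived from this file.)
 */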

static void
intpri_intr_enable(int evtcode)
{
	volatile uint32_t *iprreg;
	uint32_t bit;

	if (!CPU_IS_SH4)
		return;

	switch (cpu_product) {
	default:
		return;

	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		break;
	}

	iprreg = (volatile uint32_t *)SH4_INTMSKCLR00;
	bit = 0;

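	/*
	 * The PCI-related INTEVT codes are spaced 0x20 apart, so the
	 * shift below maps each of them to its own bit in INTMSKCLR00
	 * (the register name suggests that writing a 1 clears the
	 * corresponding mask bit, i.e. unmasks the source).
	 * intpri_intr_disable() below uses the same mapping to set the
	 * bit in INTMSK00 instead.
	 */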
	switch (evtcode) {
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
		bit = (1 << ((evtcode - SH4_INTEVT_PCISERR) >> 5));
		break;

	case SH4_INTEVT_TMU3:
		bit = INTREQ00_TUNI3;
		break;

	case SH4_INTEVT_TMU4:
		bit = INTREQ00_TUNI4;
		break;
	}

	if ((bit == 0) || (iprreg == NULL)) {
		return;
	}

	_reg_write_4(iprreg, bit);
}

static void
intpri_intr_disable(int evtcode)
{
	volatile uint32_t *iprreg;
	uint32_t bit;

	if (!CPU_IS_SH4)
		return;

	switch (cpu_product) {
	default:
		return;

	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		break;
	}

	iprreg = (volatile uint32_t *)SH4_INTMSK00;
	bit = 0;

	switch (evtcode) {
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
		bit = (1 << ((evtcode - SH4_INTEVT_PCISERR) >> 5));
		break;

	case SH4_INTEVT_TMU3:
		bit = INTREQ00_TUNI3;
		break;

	case SH4_INTEVT_TMU4:
		bit = INTREQ00_TUNI4;
		break;
	}

	if ((bit == 0) || (iprreg == NULL)) {
		return;
	}

	_reg_write_4(iprreg, bit);
}
#endif /* SH4 */

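/*
 * Descriptive note (assumption): ci_idepth is presumably -1 at base
 * (thread) level and >= 0 while an interrupt handler is running, so
 * this test reports "running in interrupt context".
 */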
bool
cpu_intr_p(void)
{

	return curcpu()->ci_idepth >= 0;
}