1 /*	$NetBSD: rmixl_intr.c,v 1.15 2022/09/29 07:00:47 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or
8  * without modification, are permitted provided that the following
9  * conditions are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above
13  *    copyright notice, this list of conditions and the following
14  *    disclaimer in the documentation and/or other materials provided
15  *    with the distribution.
16  * 3. The names of the authors may not be used to endorse or promote
17  *    products derived from this software without specific prior
18  *    written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
21  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
31  * OF SUCH DAMAGE.
32  */
33 /*-
34  * Copyright (c) 2001 The NetBSD Foundation, Inc.
35  * All rights reserved.
36  *
37  * This code is derived from software contributed to The NetBSD Foundation
38  * by Jason R. Thorpe.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59  * POSSIBILITY OF SUCH DAMAGE.
60  */
61 
62 /*
63  * Platform-specific interrupt support for the RMI XLP, XLR, XLS
64  */
65 
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.15 2022/09/29 07:00:47 skrll Exp $");
68 
69 #include "opt_ddb.h"
70 #include "opt_multiprocessor.h"
71 #define	__INTR_PRIVATE
72 
73 #include <sys/param.h>
74 #include <sys/atomic.h>
75 #include <sys/bus.h>
76 #include <sys/cpu.h>
77 #include <sys/device.h>
78 #include <sys/intr.h>
79 #include <sys/kernel.h>
80 #include <sys/mutex.h>
81 #include <sys/systm.h>
82 
83 #include <mips/locore.h>
84 
85 #include <mips/rmi/rmixlreg.h>
86 #include <mips/rmi/rmixlvar.h>
87 
88 #include <mips/rmi/rmixl_cpuvar.h>
89 #include <mips/rmi/rmixl_intr.h>
90 
91 #include <dev/pci/pcireg.h>
92 #include <dev/pci/pcivar.h>
93 
94 //#define IOINTR_DEBUG	1
95 #ifdef IOINTR_DEBUG
96 int iointr_debug = IOINTR_DEBUG;
97 # define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
98 #else
99 # define DPRINTF(x)
100 #endif
101 
102 #define RMIXL_PICREG_READ(off) \
103 	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
104 #define RMIXL_PICREG_WRITE(off, val) \
105 	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))
106 
107 /*
108  * do not clear these when acking EIRR
109  * (otherwise they get lost)
110  */
111 #define RMIXL_EIRR_PRESERVE_MASK	\
112 		((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)
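
/*
 * (illustrative note) MIPS_INT_MASK_5 is Cause bit 15 and
 * MIPS_SOFT_INT_MASK covers Cause bits 9:8, so shifted right by 8
 * this preserve mask covers EIRR vectors 7 (count/compare clock)
 * and 1:0 (soft ints), which are handled elsewhere
 * (see evbmips_iointr below).
 */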
113 
114 /*
 115  * IRT assignments depend on the RMI chip family
 116  * (XLRxxx vs. XLS1xx vs. XLS2xx vs. XLS4xx/XLS6xx);
117  * use the right display string table for the CPU that's running.
118  */
119 
120 /*
121  * rmixl_irtnames_xlrxxx
122  * - use for XLRxxx
123  */
124 static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
125 	"pic int 0 (watchdog)",		/*  0 */
126 	"pic int 1 (timer0)",		/*  1 */
127 	"pic int 2 (timer1)",		/*  2 */
128 	"pic int 3 (timer2)",		/*  3 */
129 	"pic int 4 (timer3)",		/*  4 */
130 	"pic int 5 (timer4)",		/*  5 */
131 	"pic int 6 (timer5)",		/*  6 */
132 	"pic int 7 (timer6)",		/*  7 */
133 	"pic int 8 (timer7)",		/*  8 */
134 	"pic int 9 (uart0)",		/*  9 */
135 	"pic int 10 (uart1)",		/* 10 */
136 	"pic int 11 (i2c0)",		/* 11 */
137 	"pic int 12 (i2c1)",		/* 12 */
138 	"pic int 13 (pcmcia)",		/* 13 */
139 	"pic int 14 (gpio)",		/* 14 */
140 	"pic int 15 (hyper)",		/* 15 */
141 	"pic int 16 (pcix)",		/* 16 */
142 	"pic int 17 (gmac0)",		/* 17 */
143 	"pic int 18 (gmac1)",		/* 18 */
144 	"pic int 19 (gmac2)",		/* 19 */
145 	"pic int 20 (gmac3)",		/* 20 */
146 	"pic int 21 (xgs0)",		/* 21 */
147 	"pic int 22 (xgs1)",		/* 22 */
148 	"pic int 23 (irq23)",		/* 23 */
149 	"pic int 24 (hyper_fatal)",	/* 24 */
150 	"pic int 25 (bridge_aerr)",	/* 25 */
151 	"pic int 26 (bridge_berr)",	/* 26 */
152 	"pic int 27 (bridge_tb)",	/* 27 */
153 	"pic int 28 (bridge_nmi)",	/* 28 */
154 	"pic int 29 (bridge_sram_derr)",/* 29 */
155 	"pic int 30 (gpio_fatal)",	/* 30 */
156 	"pic int 31 (reserved)",	/* 31 */
157 };
158 
159 /*
160  * rmixl_irtnames_xls2xx
161  * - use for XLS2xx
162  */
163 static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
164 	"pic int 0 (watchdog)",		/*  0 */
165 	"pic int 1 (timer0)",		/*  1 */
166 	"pic int 2 (timer1)",		/*  2 */
167 	"pic int 3 (timer2)",		/*  3 */
168 	"pic int 4 (timer3)",		/*  4 */
169 	"pic int 5 (timer4)",		/*  5 */
170 	"pic int 6 (timer5)",		/*  6 */
171 	"pic int 7 (timer6)",		/*  7 */
172 	"pic int 8 (timer7)",		/*  8 */
173 	"pic int 9 (uart0)",		/*  9 */
174 	"pic int 10 (uart1)",		/* 10 */
175 	"pic int 11 (i2c0)",		/* 11 */
176 	"pic int 12 (i2c1)",		/* 12 */
177 	"pic int 13 (pcmcia)",		/* 13 */
178 	"pic int 14 (gpio_a)",		/* 14 */
179 	"pic int 15 (irq15)",		/* 15 */
180 	"pic int 16 (bridge_tb)",	/* 16 */
181 	"pic int 17 (gmac0)",		/* 17 */
182 	"pic int 18 (gmac1)",		/* 18 */
183 	"pic int 19 (gmac2)",		/* 19 */
184 	"pic int 20 (gmac3)",		/* 20 */
185 	"pic int 21 (irq21)",		/* 21 */
186 	"pic int 22 (irq22)",		/* 22 */
187 	"pic int 23 (pcie_link2)",	/* 23 */
188 	"pic int 24 (pcie_link3)",	/* 24 */
189 	"pic int 25 (bridge_err)",	/* 25 */
190 	"pic int 26 (pcie_link0)",	/* 26 */
191 	"pic int 27 (pcie_link1)",	/* 27 */
192 	"pic int 28 (irq28)",		/* 28 */
193 	"pic int 29 (pcie_err)",	/* 29 */
194 	"pic int 30 (gpio_b)",		/* 30 */
195 	"pic int 31 (usb)",		/* 31 */
196 };
197 
198 /*
199  * rmixl_irtnames_xls1xx
200  * - use for XLS1xx, XLS4xx-Lite
201  */
202 static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
203 	"pic int 0 (watchdog)",		/*  0 */
204 	"pic int 1 (timer0)",		/*  1 */
205 	"pic int 2 (timer1)",		/*  2 */
206 	"pic int 3 (timer2)",		/*  3 */
207 	"pic int 4 (timer3)",		/*  4 */
208 	"pic int 5 (timer4)",		/*  5 */
209 	"pic int 6 (timer5)",		/*  6 */
210 	"pic int 7 (timer6)",		/*  7 */
211 	"pic int 8 (timer7)",		/*  8 */
212 	"pic int 9 (uart0)",		/*  9 */
213 	"pic int 10 (uart1)",		/* 10 */
214 	"pic int 11 (i2c0)",		/* 11 */
215 	"pic int 12 (i2c1)",		/* 12 */
216 	"pic int 13 (pcmcia)",		/* 13 */
217 	"pic int 14 (gpio_a)",		/* 14 */
218 	"pic int 15 (irq15)",		/* 15 */
219 	"pic int 16 (bridge_tb)",	/* 16 */
220 	"pic int 17 (gmac0)",		/* 17 */
221 	"pic int 18 (gmac1)",		/* 18 */
222 	"pic int 19 (gmac2)",		/* 19 */
223 	"pic int 20 (gmac3)",		/* 20 */
224 	"pic int 21 (irq21)",		/* 21 */
225 	"pic int 22 (irq22)",		/* 22 */
226 	"pic int 23 (irq23)",		/* 23 */
227 	"pic int 24 (irq24)",		/* 24 */
228 	"pic int 25 (bridge_err)",	/* 25 */
229 	"pic int 26 (pcie_link0)",	/* 26 */
230 	"pic int 27 (pcie_link1)",	/* 27 */
231 	"pic int 28 (irq28)",		/* 28 */
232 	"pic int 29 (pcie_err)",	/* 29 */
233 	"pic int 30 (gpio_b)",		/* 30 */
234 	"pic int 31 (usb)",		/* 31 */
235 };
236 
237 /*
238  * rmixl_irtnames_xls4xx:
239  * - use for XLS4xx, XLS6xx
240  */
241 static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
242 	"pic int 0 (watchdog)",		/*  0 */
243 	"pic int 1 (timer0)",		/*  1 */
244 	"pic int 2 (timer1)",		/*  2 */
245 	"pic int 3 (timer2)",		/*  3 */
246 	"pic int 4 (timer3)",		/*  4 */
247 	"pic int 5 (timer4)",		/*  5 */
248 	"pic int 6 (timer5)",		/*  6 */
249 	"pic int 7 (timer6)",		/*  7 */
250 	"pic int 8 (timer7)",		/*  8 */
251 	"pic int 9 (uart0)",		/*  9 */
252 	"pic int 10 (uart1)",		/* 10 */
253 	"pic int 11 (i2c0)",		/* 11 */
254 	"pic int 12 (i2c1)",		/* 12 */
255 	"pic int 13 (pcmcia)",		/* 13 */
256 	"pic int 14 (gpio_a)",		/* 14 */
257 	"pic int 15 (irq15)",		/* 15 */
258 	"pic int 16 (bridge_tb)",	/* 16 */
259 	"pic int 17 (gmac0)",		/* 17 */
260 	"pic int 18 (gmac1)",		/* 18 */
261 	"pic int 19 (gmac2)",		/* 19 */
262 	"pic int 20 (gmac3)",		/* 20 */
263 	"pic int 21 (irq21)",		/* 21 */
264 	"pic int 22 (irq22)",		/* 22 */
265 	"pic int 23 (irq23)",		/* 23 */
266 	"pic int 24 (irq24)",		/* 24 */
267 	"pic int 25 (bridge_err)",	/* 25 */
268 	"pic int 26 (pcie_link0)",	/* 26 */
269 	"pic int 27 (pcie_link1)",	/* 27 */
270 	"pic int 28 (pcie_link2)",	/* 28 */
271 	"pic int 29 (pcie_link3)",	/* 29 */
272 	"pic int 30 (gpio_b)",		/* 30 */
273 	"pic int 31 (usb)",		/* 31 */
274 };
275 
276 /*
277  * rmixl_vecnames_common:
278  * - use for unknown cpu implementation
279  * - covers all vectors, not just IRT intrs
280  */
281 static const char * const rmixl_vecnames_common[NINTRVECS] = {
282 	"vec 0",		/*  0 */
283 	"vec 1",		/*  1 */
284 	"vec 2",		/*  2 */
285 	"vec 3",		/*  3 */
286 	"vec 4",		/*  4 */
287 	"vec 5",		/*  5 */
288 	"vec 6",		/*  6 */
289 	"vec 7",		/*  7 */
290 	"vec 8 (ipi 0)",	/*  8 */
291 	"vec 9 (ipi 1)",	/*  9 */
292 	"vec 10 (ipi 2)",	/* 10 */
293 	"vec 11 (ipi 3)",	/* 11 */
294 	"vec 12 (ipi 4)",	/* 12 */
295 	"vec 13 (ipi 5)",	/* 13 */
296 	"vec 14 (ipi 6)",	/* 14 */
297 	"vec 15 (fmn)",		/* 15 */
298 	"vec 16",		/* 16 */
299 	"vec 17",		/* 17 */
300 	"vec 18",		/* 18 */
301 	"vec 19",		/* 19 */
302 	"vec 20",		/* 20 */
303 	"vec 21",		/* 21 */
304 	"vec 22",		/* 22 */
305 	"vec 23",		/* 23 */
306 	"vec 24",		/* 24 */
307 	"vec 25",		/* 25 */
308 	"vec 26",		/* 26 */
309 	"vec 27",		/* 27 */
310 	"vec 28",		/* 28 */
311 	"vec 29",		/* 29 */
312 	"vec 30",		/* 30 */
313 	"vec 31",		/* 31 */
314 	"vec 32",		/* 32 */
315 	"vec 33",		/* 33 */
316 	"vec 34",		/* 34 */
317 	"vec 35",		/* 35 */
318 	"vec 36",		/* 36 */
319 	"vec 37",		/* 37 */
320 	"vec 38",		/* 38 */
321 	"vec 39",		/* 39 */
322 	"vec 40",		/* 40 */
323 	"vec 41",		/* 41 */
324 	"vec 42",		/* 42 */
325 	"vec 43",		/* 43 */
326 	"vec 44",		/* 44 */
327 	"vec 45",		/* 45 */
328 	"vec 46",		/* 46 */
329 	"vec 47",		/* 47 */
330 	"vec 48",		/* 48 */
331 	"vec 49",		/* 49 */
332 	"vec 50",		/* 50 */
333 	"vec 51",		/* 51 */
334 	"vec 52",		/* 52 */
335 	"vec 53",		/* 53 */
336 	"vec 54",		/* 54 */
337 	"vec 55",		/* 55 */
338 	"vec 56",		/* 56 */
339 	"vec 57",		/* 57 */
340 	"vec 58",		/* 58 */
341 	"vec 59",		/* 59 */
342 	"vec 60",		/* 60 */
343 	"vec 61",		/* 61 */
344 	"vec 62",		/* 63 */
345 	"vec 63",		/* 63 */
346 };
347 
348 /*
349  * mask of CPUs attached
 350  * once they are attached, this var is read-only, so it is MP-safe
351  */
352 static uint32_t cpu_present_mask;
353 
354 kmutex_t rmixl_ipi_lock __cacheline_aligned;
355 				/* covers RMIXL_PIC_IPIBASE */
356 kmutex_t rmixl_intr_lock __cacheline_aligned;
357 				/* covers rest of PIC, and rmixl_intrhand[] */
358 rmixl_intrhand_t rmixl_intrhand[NINTRVECS];
359 
360 #ifdef DIAGNOSTIC
361 static int rmixl_pic_init_done;
362 #endif
363 
364 
365 static const char *rmixl_intr_string_xlr(int);
366 static const char *rmixl_intr_string_xls(int);
367 static uint32_t rmixl_irt_thread_mask(int);
368 static void rmixl_irt_init(int);
369 static void rmixl_irt_disestablish(int);
370 static void rmixl_irt_establish(int, int, int,
371 		rmixl_intr_trigger_t, rmixl_intr_polarity_t);
372 
373 #ifdef MULTIPROCESSOR
374 static int rmixl_send_ipi(struct cpu_info *, int);
375 static int rmixl_ipi_intr(void *);
376 #endif
377 
378 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
379 int  rmixl_intrhand_print_subr(int);
380 int  rmixl_intrhand_print(void);
381 int  rmixl_irt_print(void);
382 void rmixl_ipl_eimr_map_print(void);
383 #endif
384 
385 
386 static inline u_int
 387 dclz(uint64_t val)
388 {
389 	int nlz;
390 
391 	asm volatile("dclz %0, %1;"
392 		: "=r"(nlz) : "r"(val));
393 
394 	return nlz;
395 }
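
/*
 * example (as used in evbmips_iointr below): for a non-zero EIRR
 * image, "63 - dclz(eirr)" is the index of the most significant set
 * bit, i.e. the highest-numbered pending vector is serviced first.
 */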
396 
397 void
 398 evbmips_intr_init(void)
399 {
400 	uint32_t r;
401 
402 	KASSERT(cpu_rmixlr(mips_options.mips_cpu)
403 	     || cpu_rmixls(mips_options.mips_cpu));
404 
405 
406 #ifdef DIAGNOSTIC
407 	if (rmixl_pic_init_done != 0)
408 		panic("%s: rmixl_pic_init_done %d",
409 			__func__, rmixl_pic_init_done);
410 #endif
411 
412 	mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
413 	mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
414 
415 	mutex_enter(&rmixl_intr_lock);
416 
417 	/*
418 	 * initialize (zero) all IRT Entries in the PIC
419 	 */
420 	for (u_int i = 0; i < NIRTS; i++) {
421 		rmixl_irt_init(i);
422 	}
423 
424 	/*
425 	 * disable watchdog NMI, timers
426 	 *
427 	 * XXX
428 	 *  WATCHDOG_ENB is preserved because clearing it causes
429 	 *  hang on the XLS616 (but not on the XLS408)
430 	 */
431 	r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
432 	r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
433 	RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);
434 
435 #ifdef DIAGNOSTIC
436 	rmixl_pic_init_done = 1;
437 #endif
438 	mutex_exit(&rmixl_intr_lock);
439 
440 }
441 
442 /*
443  * establish vector for mips3 count/compare clock interrupt
 444  * this ensures the vector is enabled in EIRR,
445  * even though cpu_intr() handles the interrupt
446  * note the 'mpsafe' arg here is a placeholder only
447  */
448 void
 449 rmixl_intr_init_clk(void)
450 {
451 	const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;
452 
453 	mutex_enter(&rmixl_intr_lock);
454 
455 	void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
456 	if (ih == NULL)
457 		panic("%s: establish vec %d failed", __func__, vec);
458 
459 	mutex_exit(&rmixl_intr_lock);
460 }
461 
462 #ifdef MULTIPROCESSOR
463 /*
464  * establish IPI interrupt and send function
465  */
466 void
 467 rmixl_intr_init_ipi(void)
468 {
469 	mutex_enter(&rmixl_intr_lock);
470 
471 	for (u_int ipi = 0; ipi < NIPIS; ipi++) {
472 		const u_int vec = RMIXL_INTRVEC_IPI + ipi;
473 		void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
474 			rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
475 		if (ih == NULL)
476 			panic("%s: establish ipi %d at vec %d failed",
477 				__func__, ipi, vec);
478 	}
479 
480 	mips_locoresw.lsw_send_ipi = rmixl_send_ipi;
481 
482 	mutex_exit(&rmixl_intr_lock);
483 }
484 #endif 	/* MULTIPROCESSOR */
485 
486 /*
 487  * initialize per-cpu interrupt state in the softc
488  * accumulate per-cpu bits in 'cpu_present_mask'
489  */
490 void
 491 rmixl_intr_init_cpu(struct cpu_info *ci)
492 {
493 	struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;
494 
495 	KASSERT(sc != NULL);
496 
497 	for (int vec=0; vec < NINTRVECS; vec++)
498 		evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
499 			EVCNT_TYPE_INTR, NULL,
500 			device_xname(sc->sc_dev),
501 			rmixl_intr_string(vec));
502 
503 	KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
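	/* cpu_present_mask is later consumed by rmixl_irt_thread_mask() */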
504 	atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
505 }
506 
507 /*
508  * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
509  */
510 const char *
 511 rmixl_intr_string(int vec)
512 {
513 	int irt;
514 
515 	if (vec < 0 || vec >= NINTRVECS)
516 		panic("%s: vec index %d out of range, max %d",
517 			__func__, vec, NINTRVECS - 1);
518 
519 	if (! RMIXL_VECTOR_IS_IRT(vec))
520 		return rmixl_vecnames_common[vec];
521 
522 	irt = RMIXL_VECTOR_IRT(vec);
523 	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
524 	case CIDFL_RMI_TYPE_XLR:
525 		return rmixl_intr_string_xlr(irt);
526 	case CIDFL_RMI_TYPE_XLS:
527 		return rmixl_intr_string_xls(irt);
528 	case CIDFL_RMI_TYPE_XLP:
529 		panic("%s: RMI XLP not yet supported", __func__);
530 	}
531 
532 	return "undefined";	/* appease gcc */
533 }
534 
535 static const char *
 536 rmixl_intr_string_xlr(int irt)
537 {
538 	return rmixl_irtnames_xlrxxx[irt];
539 }
540 
541 static const char *
 542 rmixl_intr_string_xls(int irt)
543 {
544 	const char *name;
545 
546 	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
547 	case MIPS_XLS104:
548 	case MIPS_XLS108:
549 	case MIPS_XLS404LITE:
550 	case MIPS_XLS408LITE:
551 		name = rmixl_irtnames_xls1xx[irt];
552 		break;
553 	case MIPS_XLS204:
554 	case MIPS_XLS208:
555 		name = rmixl_irtnames_xls2xx[irt];
556 		break;
557 	case MIPS_XLS404:
558 	case MIPS_XLS408:
559 	case MIPS_XLS416:
560 	case MIPS_XLS608:
561 	case MIPS_XLS616:
562 		name = rmixl_irtnames_xls4xx[irt];
563 		break;
564 	default:
565 		name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
566 		break;
567 	}
568 
569 	return name;
570 }
571 
572 /*
573  * rmixl_irt_thread_mask
574  *
 575  *	given a bitmask of cpus, return an IRT thread mask
576  */
577 static uint32_t
 578 rmixl_irt_thread_mask(int cpumask)
579 {
580 	uint32_t irtc0;
581 
582 #if defined(MULTIPROCESSOR)
583 #ifndef NOTYET
584 	if (cpumask == -1)
585 		return 1;	/* XXX TMP FIXME */
586 #endif
587 
588 	/*
589 	 * discount cpus not present
590 	 */
591 	cpumask &= cpu_present_mask;
592 
593 	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
594 	case MIPS_XLS104:
595 	case MIPS_XLS204:
596 	case MIPS_XLS404:
597 	case MIPS_XLS404LITE:
598 		irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
599 		irtc0 &= (__BITS(5,4) | __BITS(1,0));
600 		break;
601 	case MIPS_XLS108:
602 	case MIPS_XLS208:
603 	case MIPS_XLS408:
604 	case MIPS_XLS408LITE:
605 	case MIPS_XLS608:
606 		irtc0 = cpumask & __BITS(7,0);
607 		break;
608 	case MIPS_XLS416:
609 	case MIPS_XLS616:
610 		irtc0 = cpumask & __BITS(15,0);
611 		break;
612 	default:
613 		panic("%s: unknown cpu ID %#x\n", __func__,
614 			mips_options.mips_cpu_id);
615 	}
616 #else
617 	irtc0 = 1;
618 #endif	/* MULTIPROCESSOR */
619 
620 	return irtc0;
621 }
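
/*
 * illustrative example for the first case above
 * (XLS104/XLS204/XLS404/XLS404LITE): cpumask 0x5 (cpu0 and cpu2)
 * yields irtc0 0x11, since cpu indices 1:0 occupy bits 1:0 and
 * cpu indices 3:2 occupy bits 5:4 of the IRT thread-enable field.
 */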
622 
623 /*
624  * rmixl_irt_init
625  * - initialize IRT Entry for given index
626  * - unmask Thread#0 in low word (assume we only have 1 thread)
627  */
628 static void
 629 rmixl_irt_init(int irt)
630 {
631 	KASSERT(irt < NIRTS);
632 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0);	/* high word */
633 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0);	/* low  word */
634 }
635 
636 /*
637  * rmixl_irt_disestablish
638  * - invalidate IRT Entry for given index
639  */
640 static void
 641 rmixl_irt_disestablish(int irt)
642 {
643 	KASSERT(mutex_owned(&rmixl_intr_lock));
644 	DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
645 	rmixl_irt_init(irt);
646 }
647 
648 /*
649  * rmixl_irt_establish
650  * - construct an IRT Entry for irt and write to PIC
651  */
652 static void
 653 rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
654 	rmixl_intr_polarity_t polarity)
655 {
656 	uint32_t irtc1;
657 	uint32_t irtc0;
658 
659 	KASSERT(mutex_owned(&rmixl_intr_lock));
660 
661 	if (irt >= NIRTS)
662 		panic("%s: bad irt %d\n", __func__, irt);
663 
664 	if (! RMIXL_VECTOR_IS_IRT(vec))
665 		panic("%s: bad vec %d\n", __func__, vec);
666 
667 	switch (trigger) {
668 	case RMIXL_TRIG_EDGE:
669 	case RMIXL_TRIG_LEVEL:
670 		break;
671 	default:
672 		panic("%s: bad trigger %d\n", __func__, trigger);
673 	}
674 
675 	switch (polarity) {
676 	case RMIXL_POLR_RISING:
677 	case RMIXL_POLR_HIGH:
678 	case RMIXL_POLR_FALLING:
679 	case RMIXL_POLR_LOW:
680 		break;
681 	default:
682 		panic("%s: bad polarity %d\n", __func__, polarity);
683 	}
684 
685 	/*
686 	 * XXX IRT entries are not shared
687 	 */
688 	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
689 	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);
690 
691 	irtc0 = rmixl_irt_thread_mask(cpumask);
692 
693 	irtc1  = RMIXL_PIC_IRTENTRYC1_VALID;
694 	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */
695 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
696 
697 	if (trigger == RMIXL_TRIG_LEVEL)
698 		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
699 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
700 
701 	if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
702 		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
703 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
704 
705 	irtc1 |= vec;			/* vector in EIRR */
706 	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);
707 
708 	/*
709 	 * write IRT Entry to PIC
710 	 */
711 	DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n",
712 		__func__, vec, vec, irt, irtc0, irtc1));
713 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0);	/* low  word */
714 	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1);	/* high word */
715 }
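
/*
 * e.g. a level-triggered, active-high interrupt routed to vector
 * 'vec' ends up with
 *	irtc1 = RMIXL_PIC_IRTENTRYC1_VALID | RMIXL_PIC_IRTENTRYC1_GL |
 *		RMIXL_PIC_IRTENTRYC1_TRG | vec;
 * (illustrative only; see the assembly of irtc1 above)
 */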
716 
717 void *
 718 rmixl_vec_establish(int vec, int cpumask, int ipl,
719 	int (*func)(void *), void *arg, bool mpsafe)
720 {
721 	rmixl_intrhand_t *ih;
722 	uint64_t eimr_bit;
723 	int s;
724 
725 	KASSERT(mutex_owned(&rmixl_intr_lock));
726 
727 	DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n",
728 			__func__, vec, cpumask, ipl, func, arg, mpsafe));
729 #ifdef DIAGNOSTIC
730 	if (rmixl_pic_init_done == 0)
731 		panic("%s: called before evbmips_intr_init", __func__);
732 #endif
733 
734 	/*
735 	 * check args
736 	 */
737 	if (vec < 0 || vec >= NINTRVECS)
738 		panic("%s: vec %d out of range, max %d",
739 			__func__, vec, NINTRVECS - 1);
740 	if (ipl <= 0 || ipl >= _IPL_N)
741 		panic("%s: ipl %d out of range, min %d, max %d",
742 			__func__, ipl, 1, _IPL_N - 1);
743 
744 	s = splhigh();
745 
746 	ih = &rmixl_intrhand[vec];
747 	if (ih->ih_func != NULL) {
748 #ifdef DIAGNOSTIC
749 		printf("%s: intrhand[%d] busy\n", __func__, vec);
750 #endif
751 		splx(s);
752 		return NULL;
753 	}
754 
755 	ih->ih_arg = arg;
756 	ih->ih_mpsafe = mpsafe;
757 	ih->ih_vec = vec;
758 	ih->ih_ipl = ipl;
759 	ih->ih_cpumask = cpumask;
760 
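	/*
	 * ipl_eimr_map[i] is the set of vectors enabled while running
	 * at IPL i; mark this vector enabled at every level below its
	 * handler ipl so it remains masked at ih_ipl and above.
	 */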
761 	eimr_bit = (uint64_t)1 << vec;
762 	for (int i=ih->ih_ipl; --i >= 0; ) {
763 		KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
764 		ipl_eimr_map[i] |= eimr_bit;
765 	}
766 
767 	ih->ih_func = func;	/* do this last */
768 
769 	splx(s);
770 
771 	return ih;
772 }
773 
774 /*
775  * rmixl_intr_establish
776  * - used to establish an IRT-based interrupt only
777  */
778 void *
 779 rmixl_intr_establish(int irt, int cpumask, int ipl,
780 	rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
781 	int (*func)(void *), void *arg, bool mpsafe)
782 {
783 	rmixl_intrhand_t *ih;
784 	int vec;
785 
786 #ifdef DIAGNOSTIC
787 	if (rmixl_pic_init_done == 0)
788 		panic("%s: called before rmixl_pic_init_done", __func__);
789 #endif
790 
791 	/*
792 	 * check args
793 	 */
794 	if (irt < 0 || irt >= NIRTS)
795 		panic("%s: irt %d out of range, max %d",
796 			__func__, irt, NIRTS - 1);
797 	if (ipl <= 0 || ipl >= _IPL_N)
798 		panic("%s: ipl %d out of range, min %d, max %d",
799 			__func__, ipl, 1, _IPL_N - 1);
800 
801 	vec = RMIXL_IRT_VECTOR(irt);
802 
803 	DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));
804 
805 	mutex_enter(&rmixl_intr_lock);
806 
807 	/*
808 	 * establish vector
809 	 */
810 	ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);
811 
812 	/*
813 	 * establish IRT Entry
814 	 */
815 	rmixl_irt_establish(irt, vec, cpumask, trigger, polarity);
816 
817 	mutex_exit(&rmixl_intr_lock);
818 
819 	return ih;
820 }
821 
822 void
 823 rmixl_vec_disestablish(void *cookie)
824 {
825 	rmixl_intrhand_t *ih = cookie;
826 	uint64_t eimr_bit;
827 
828 	KASSERT(mutex_owned(&rmixl_intr_lock));
829 	KASSERT(ih->ih_vec < NINTRVECS);
830 	KASSERT(ih == &rmixl_intrhand[ih->ih_vec]);
831 
832 	ih->ih_func = NULL;	/* do this first */
833 
834 	eimr_bit = (uint64_t)1 << ih->ih_vec;
835 	for (int i=ih->ih_ipl; --i >= 0; ) {
836 		KASSERT((ipl_eimr_map[i] & eimr_bit) != 0);
837 		ipl_eimr_map[i] ^= eimr_bit;
838 	}
839 }
840 
841 void
 842 rmixl_intr_disestablish(void *cookie)
843 {
844 	rmixl_intrhand_t *ih = cookie;
845 	const int vec = ih->ih_vec;
846 
847 	KASSERT(vec < NINTRVECS);
848 	KASSERT(ih == &rmixl_intrhand[vec]);
849 
850 	mutex_enter(&rmixl_intr_lock);
851 
852 	/*
853 	 * disable/invalidate the IRT Entry if needed
854 	 */
855 	if (RMIXL_VECTOR_IS_IRT(vec))
856 		rmixl_irt_disestablish(vec);
857 
858 	/*
 859 	 * disassociate from vector and free the handle
860 	 */
861 	rmixl_vec_disestablish(cookie);
862 
863 	mutex_exit(&rmixl_intr_lock);
864 }
865 
866 void
 867 evbmips_iointr(int ipl, uint32_t pending, struct clockframe *cf)
868 {
869 	struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc;
870 
871 	DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
872 		__func__, cpu_number(), ipl, cf->pc, pending));
873 
874 	/*
 875 	 * the 'pending' arg merely signals that there is something to do;
 876 	 * the real pending status is obtained from EIRR
877 	 */
878 	KASSERT(pending == MIPS_INT_MASK_1);
879 
880 	for (;;) {
881 		rmixl_intrhand_t *ih;
882 		uint64_t eirr;
883 		uint64_t eimr;
884 		uint64_t vecbit;
885 		int vec;
886 
887 		asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr));
888 		asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr));
889 
890 #ifdef IOINTR_DEBUG
891 		printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n",
892 			__func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]);
893 #endif	/* IOINTR_DEBUG */
894 
895 		/*
896 		 * reduce eirr to
897 		 * - ints that are enabled at or below this ipl
898 		 * - exclude count/compare clock and soft ints
899 		 *   they are handled elsewhere
900 		 */
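		/*
		 * a vector with handler ipl p has its bit set in
		 * ipl_eimr_map[0..p-1] only (see rmixl_vec_establish),
		 * so the two map ANDs below leave exactly the vectors
		 * whose handler ipl equals this 'ipl'.
		 */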
901 		eirr &= ipl_eimr_map[ipl-1];
902 		eirr &= ~ipl_eimr_map[ipl];
903 		eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8);
904 		if (eirr == 0)
905 			break;
906 
907 		vec = 63 - dclz(eirr);
908 		ih = &rmixl_intrhand[vec];
909 		vecbit = 1ULL << vec;
910 		KASSERT (ih->ih_ipl == ipl);
911 		KASSERT ((vecbit & eimr) == 0);
912 		KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0);
913 
914 		/*
915 		 * ack in EIRR, and in PIC if needed,
916 		 * the irq we are about to handle
917 		 */
918 		rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK);
919 		if (RMIXL_VECTOR_IS_IRT(vec))
920 			RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK,
921 				1 << RMIXL_VECTOR_IRT(vec));
922 
923 		if (ih->ih_func != NULL) {
924 #ifdef MULTIPROCESSOR
925 			if (ih->ih_mpsafe) {
926 				(void)(*ih->ih_func)(ih->ih_arg);
927 			} else {
928 				KASSERTMSG(ipl == IPL_VM,
929 				    "%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK",
930 				    __func__, sc->sc_vec_evcnts[vec].ev_name,
931 				    ipl);
932 				KERNEL_LOCK(1, NULL);
933 				(void)(*ih->ih_func)(ih->ih_arg);
934 				KERNEL_UNLOCK_ONE(NULL);
935 			}
936 #else
937 			(void)(*ih->ih_func)(ih->ih_arg);
938 #endif /* MULTIPROCESSOR */
939 		}
940 		KASSERT(ipl == ih->ih_ipl);
941 		KASSERTMSG(curcpu()->ci_cpl >= ipl,
942 		    "%s: after %s: cpl (%d) < ipl %d",
943 		    __func__, sc->sc_vec_evcnts[vec].ev_name,
944 		    ipl, curcpu()->ci_cpl);
945 		sc->sc_vec_evcnts[vec].ev_count++;
946 	}
947 }
948 
949 #ifdef MULTIPROCESSOR
950 static int
 951 rmixl_send_ipi(struct cpu_info *ci, int tag)
952 {
953 	const cpuid_t cpuid = ci->ci_cpuid;
954 	uint32_t core = (uint32_t)(cpuid >> 2);
955 	uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
956 	uint64_t req = 1 << tag;
957 	uint32_t r;
958 
959 	if (!kcpuset_isset(cpus_running, cpu_index(ci)))
960 		return -1;
961 
962 	KASSERT((tag >= 0) && (tag < NIPIS));
963 
964 	r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT)
965 	  | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT)
966 	  | (RMIXL_INTRVEC_IPI + tag);
967 
968 	mutex_enter(&rmixl_ipi_lock);
969 	membar_release();
970 	atomic_or_64(&ci->ci_request_ipis, req);
971 	RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r);
972 	mutex_exit(&rmixl_ipi_lock);
973 
974 	return 0;
975 }
976 
977 static int
 978 rmixl_ipi_intr(void *arg)
979 {
980 	struct cpu_info * const ci = curcpu();
981 	const uint64_t ipi_mask = 1ULL << (uintptr_t)arg;
982 
983 	KASSERT(ci->ci_cpl >= IPL_SCHED);
984 	KASSERT((uintptr_t)arg < NIPIS);
985 
986 	/* if the request is clear, it was previously processed */
987 	if ((atomic_load_relaxed(&ci->ci_request_ipis) & ipi_mask) == 0)
988 		return 0;
989 	membar_acquire();
990 
991 	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
992 	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
993 
994 	ipi_process(ci, ipi_mask);
995 
996 	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
997 
998 	return 1;
999 }
1000 #endif	/* MULTIPROCESSOR */
1001 
1002 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
1003 int
 1004 rmixl_intrhand_print_subr(int vec)
1005 {
1006 	rmixl_intrhand_t *ih = &rmixl_intrhand[vec];
1007 	printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n",
1008 		vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl,
1009 		ih->ih_cpumask);
1010 	return 0;
1011 }
1012 int
 1013 rmixl_intrhand_print(void)
1014 {
1015 	for (int vec=0; vec < NINTRVECS ; vec++)
1016 		rmixl_intrhand_print_subr(vec);
1017 	return 0;
1018 }
1019 
1020 static inline void
 1021 rmixl_irt_entry_print(u_int irt)
1022 {
1023 	uint32_t c0, c1;
1024 
 1025 	if (irt >= NIRTS)
1026 		return;
1027 	c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt));
1028 	c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt));
1029 	printf("irt[%d]: %#x, %#x\n", irt, c0, c1);
1030 }
1031 
1032 int
 1033 rmixl_irt_print(void)
1034 {
1035 	printf("%s:\n", __func__);
1036 	for (int irt=0; irt < NIRTS ; irt++)
1037 		rmixl_irt_entry_print(irt);
1038 	return 0;
1039 }
1040 
1041 void
 1042 rmixl_ipl_eimr_map_print(void)
1043 {
1044 	printf("IPL_NONE=%d, mask %#"PRIx64"\n",
1045 		IPL_NONE, ipl_eimr_map[IPL_NONE]);
1046 	printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
1047 		IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
1048 	printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
1049 		IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
1050 	printf("IPL_VM=%d, mask %#"PRIx64"\n",
1051 		IPL_VM, ipl_eimr_map[IPL_VM]);
1052 	printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
1053 		IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
1054 	printf("IPL_DDB=%d, mask %#"PRIx64"\n",
1055 		IPL_DDB, ipl_eimr_map[IPL_DDB]);
1056 	printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
1057 		IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
1058 }
1059 
1060 #endif
1061