1 /* $NetBSD: rmixl_intr.c,v 1.4 2011/04/14 05:16:00 cliff Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /*- 34 * Copyright (c) 2001 The NetBSD Foundation, Inc. 35 * All rights reserved. 36 * 37 * This code is derived from software contributed to The NetBSD Foundation 38 * by Jason R. Thorpe. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 59 * POSSIBILITY OF SUCH DAMAGE. 60 */ 61 62 /* 63 * Platform-specific interrupt support for the RMI XLP, XLR, XLS 64 */ 65 66 #include <sys/cdefs.h> 67 __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.4 2011/04/14 05:16:00 cliff Exp $"); 68 69 #include "opt_ddb.h" 70 #include "opt_multiprocessor.h" 71 #define __INTR_PRIVATE 72 73 #include <sys/param.h> 74 #include <sys/queue.h> 75 #include <sys/malloc.h> 76 #include <sys/systm.h> 77 #include <sys/device.h> 78 #include <sys/kernel.h> 79 #include <sys/atomic.h> 80 #include <sys/mutex.h> 81 #include <sys/cpu.h> 82 83 #include <machine/bus.h> 84 #include <machine/intr.h> 85 86 #include <mips/cpu.h> 87 #include <mips/cpuset.h> 88 #include <mips/locore.h> 89 90 #include <mips/rmi/rmixlreg.h> 91 #include <mips/rmi/rmixlvar.h> 92 93 #include <mips/rmi/rmixl_cpuvar.h> 94 #include <mips/rmi/rmixl_intr.h> 95 96 #include <dev/pci/pcireg.h> 97 #include <dev/pci/pcivar.h> 98 99 //#define IOINTR_DEBUG 1 100 #ifdef IOINTR_DEBUG 101 int iointr_debug = IOINTR_DEBUG; 102 # define DPRINTF(x) do { if (iointr_debug) printf x ; } while(0) 103 #else 104 # define DPRINTF(x) 105 #endif 106 107 #define RMIXL_PICREG_READ(off) \ 108 RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off)) 109 #define RMIXL_PICREG_WRITE(off, val) \ 110 RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val)) 111 112 /* 113 * do not clear these when acking EIRR 114 * (otherwise they get lost) 115 */ 116 #define RMIXL_EIRR_PRESERVE_MASK \ 117 ((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8) 118 119 /* 120 * IRT assignments depends on the RMI chip family 121 * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx) 122 * use the right display string table for the CPU that's running. 123 */ 124 125 /* 126 * rmixl_irtnames_xlrxxx 127 * - use for XLRxxx 128 */ 129 static const char * const rmixl_irtnames_xlrxxx[NIRTS] = { 130 "pic int 0 (watchdog)", /* 0 */ 131 "pic int 1 (timer0)", /* 1 */ 132 "pic int 2 (timer1)", /* 2 */ 133 "pic int 3 (timer2)", /* 3 */ 134 "pic int 4 (timer3)", /* 4 */ 135 "pic int 5 (timer4)", /* 5 */ 136 "pic int 6 (timer5)", /* 6 */ 137 "pic int 7 (timer6)", /* 7 */ 138 "pic int 8 (timer7)", /* 8 */ 139 "pic int 9 (uart0)", /* 9 */ 140 "pic int 10 (uart1)", /* 10 */ 141 "pic int 11 (i2c0)", /* 11 */ 142 "pic int 12 (i2c1)", /* 12 */ 143 "pic int 13 (pcmcia)", /* 13 */ 144 "pic int 14 (gpio)", /* 14 */ 145 "pic int 15 (hyper)", /* 15 */ 146 "pic int 16 (pcix)", /* 16 */ 147 "pic int 17 (gmac0)", /* 17 */ 148 "pic int 18 (gmac1)", /* 18 */ 149 "pic int 19 (gmac2)", /* 19 */ 150 "pic int 20 (gmac3)", /* 20 */ 151 "pic int 21 (xgs0)", /* 21 */ 152 "pic int 22 (xgs1)", /* 22 */ 153 "pic int 23 (irq23)", /* 23 */ 154 "pic int 24 (hyper_fatal)", /* 24 */ 155 "pic int 25 (bridge_aerr)", /* 25 */ 156 "pic int 26 (bridge_berr)", /* 26 */ 157 "pic int 27 (bridge_tb)", /* 27 */ 158 "pic int 28 (bridge_nmi)", /* 28 */ 159 "pic int 29 (bridge_sram_derr)",/* 29 */ 160 "pic int 30 (gpio_fatal)", /* 30 */ 161 "pic int 31 (reserved)", /* 31 */ 162 }; 163 164 /* 165 * rmixl_irtnames_xls2xx 166 * - use for XLS2xx 167 */ 168 static const char * const rmixl_irtnames_xls2xx[NIRTS] = { 169 "pic int 0 (watchdog)", /* 0 */ 170 "pic int 1 (timer0)", /* 1 */ 171 "pic int 2 (timer1)", /* 2 */ 172 "pic int 3 (timer2)", /* 3 */ 173 "pic int 4 (timer3)", /* 4 */ 174 "pic int 5 (timer4)", /* 5 */ 175 "pic int 6 (timer5)", /* 6 */ 176 "pic int 7 (timer6)", /* 7 */ 177 "pic int 8 (timer7)", /* 8 */ 178 "pic int 9 (uart0)", /* 9 */ 179 "pic int 10 (uart1)", /* 10 */ 180 "pic int 11 (i2c0)", /* 11 */ 181 "pic int 12 (i2c1)", /* 12 */ 182 "pic int 13 (pcmcia)", /* 13 */ 183 "pic int 14 (gpio_a)", /* 14 */ 184 "pic int 15 (irq15)", /* 15 */ 185 "pic int 16 (bridge_tb)", /* 16 */ 186 "pic int 17 (gmac0)", /* 17 */ 187 "pic int 18 (gmac1)", /* 18 */ 188 "pic int 19 (gmac2)", /* 19 */ 189 "pic int 20 (gmac3)", /* 20 */ 190 "pic int 21 (irq21)", /* 21 */ 191 "pic int 22 (irq22)", /* 22 */ 192 "pic int 23 (pcie_link2)", /* 23 */ 193 "pic int 24 (pcie_link3)", /* 24 */ 194 "pic int 25 (bridge_err)", /* 25 */ 195 "pic int 26 (pcie_link0)", /* 26 */ 196 "pic int 27 (pcie_link1)", /* 27 */ 197 "pic int 28 (irq28)", /* 28 */ 198 "pic int 29 (pcie_err)", /* 29 */ 199 "pic int 30 (gpio_b)", /* 30 */ 200 "pic int 31 (usb)", /* 31 */ 201 }; 202 203 /* 204 * rmixl_irtnames_xls1xx 205 * - use for XLS1xx, XLS4xx-Lite 206 */ 207 static const char * const rmixl_irtnames_xls1xx[NIRTS] = { 208 "pic int 0 (watchdog)", /* 0 */ 209 "pic int 1 (timer0)", /* 1 */ 210 "pic int 2 (timer1)", /* 2 */ 211 "pic int 3 (timer2)", /* 3 */ 212 "pic int 4 (timer3)", /* 4 */ 213 "pic int 5 (timer4)", /* 5 */ 214 "pic int 6 (timer5)", /* 6 */ 215 "pic int 7 (timer6)", /* 7 */ 216 "pic int 8 (timer7)", /* 8 */ 217 "pic int 9 (uart0)", /* 9 */ 218 "pic int 10 (uart1)", /* 10 */ 219 "pic int 11 (i2c0)", /* 11 */ 220 "pic int 12 (i2c1)", /* 12 */ 221 "pic int 13 (pcmcia)", /* 13 */ 222 "pic int 14 (gpio_a)", /* 14 */ 223 "pic int 15 (irq15)", /* 15 */ 224 "pic int 16 (bridge_tb)", /* 16 */ 225 "pic int 17 (gmac0)", /* 17 */ 226 "pic int 18 (gmac1)", /* 18 */ 227 "pic int 19 (gmac2)", /* 19 */ 228 "pic int 20 (gmac3)", /* 20 */ 229 "pic int 21 (irq21)", /* 21 */ 230 "pic int 22 (irq22)", /* 22 */ 231 "pic int 23 (irq23)", /* 23 */ 232 "pic int 24 (irq24)", /* 24 */ 233 "pic int 25 (bridge_err)", /* 25 */ 234 "pic int 26 (pcie_link0)", /* 26 */ 235 "pic int 27 (pcie_link1)", /* 27 */ 236 "pic int 28 (irq28)", /* 28 */ 237 "pic int 29 (pcie_err)", /* 29 */ 238 "pic int 30 (gpio_b)", /* 30 */ 239 "pic int 31 (usb)", /* 31 */ 240 }; 241 242 /* 243 * rmixl_irtnames_xls4xx: 244 * - use for XLS4xx, XLS6xx 245 */ 246 static const char * const rmixl_irtnames_xls4xx[NIRTS] = { 247 "pic int 0 (watchdog)", /* 0 */ 248 "pic int 1 (timer0)", /* 1 */ 249 "pic int 2 (timer1)", /* 2 */ 250 "pic int 3 (timer2)", /* 3 */ 251 "pic int 4 (timer3)", /* 4 */ 252 "pic int 5 (timer4)", /* 5 */ 253 "pic int 6 (timer5)", /* 6 */ 254 "pic int 7 (timer6)", /* 7 */ 255 "pic int 8 (timer7)", /* 8 */ 256 "pic int 9 (uart0)", /* 9 */ 257 "pic int 10 (uart1)", /* 10 */ 258 "pic int 11 (i2c0)", /* 11 */ 259 "pic int 12 (i2c1)", /* 12 */ 260 "pic int 13 (pcmcia)", /* 13 */ 261 "pic int 14 (gpio_a)", /* 14 */ 262 "pic int 15 (irq15)", /* 15 */ 263 "pic int 16 (bridge_tb)", /* 16 */ 264 "pic int 17 (gmac0)", /* 17 */ 265 "pic int 18 (gmac1)", /* 18 */ 266 "pic int 19 (gmac2)", /* 19 */ 267 "pic int 20 (gmac3)", /* 20 */ 268 "pic int 21 (irq21)", /* 21 */ 269 "pic int 22 (irq22)", /* 22 */ 270 "pic int 23 (irq23)", /* 23 */ 271 "pic int 24 (irq24)", /* 24 */ 272 "pic int 25 (bridge_err)", /* 25 */ 273 "pic int 26 (pcie_link0)", /* 26 */ 274 "pic int 27 (pcie_link1)", /* 27 */ 275 "pic int 28 (pcie_link2)", /* 28 */ 276 "pic int 29 (pcie_link3)", /* 29 */ 277 "pic int 30 (gpio_b)", /* 30 */ 278 "pic int 31 (usb)", /* 31 */ 279 }; 280 281 /* 282 * rmixl_vecnames_common: 283 * - use for unknown cpu implementation 284 * - covers all vectors, not just IRT intrs 285 */ 286 static const char * const rmixl_vecnames_common[NINTRVECS] = { 287 "vec 0", /* 0 */ 288 "vec 1", /* 1 */ 289 "vec 2", /* 2 */ 290 "vec 3", /* 3 */ 291 "vec 4", /* 4 */ 292 "vec 5", /* 5 */ 293 "vec 6", /* 6 */ 294 "vec 7", /* 7 */ 295 "vec 8 (ipi 0)", /* 8 */ 296 "vec 9 (ipi 1)", /* 9 */ 297 "vec 10 (ipi 2)", /* 10 */ 298 "vec 11 (ipi 3)", /* 11 */ 299 "vec 12 (ipi 4)", /* 12 */ 300 "vec 13 (ipi 5)", /* 13 */ 301 "vec 14 (ipi 6)", /* 14 */ 302 "vec 15 (fmn)", /* 15 */ 303 "vec 16", /* 16 */ 304 "vec 17", /* 17 */ 305 "vec 18", /* 18 */ 306 "vec 19", /* 19 */ 307 "vec 20", /* 20 */ 308 "vec 21", /* 21 */ 309 "vec 22", /* 22 */ 310 "vec 23", /* 23 */ 311 "vec 24", /* 24 */ 312 "vec 25", /* 25 */ 313 "vec 26", /* 26 */ 314 "vec 27", /* 27 */ 315 "vec 28", /* 28 */ 316 "vec 29", /* 29 */ 317 "vec 30", /* 30 */ 318 "vec 31", /* 31 */ 319 "vec 32", /* 32 */ 320 "vec 33", /* 33 */ 321 "vec 34", /* 34 */ 322 "vec 35", /* 35 */ 323 "vec 36", /* 36 */ 324 "vec 37", /* 37 */ 325 "vec 38", /* 38 */ 326 "vec 39", /* 39 */ 327 "vec 40", /* 40 */ 328 "vec 41", /* 41 */ 329 "vec 42", /* 42 */ 330 "vec 43", /* 43 */ 331 "vec 44", /* 44 */ 332 "vec 45", /* 45 */ 333 "vec 46", /* 46 */ 334 "vec 47", /* 47 */ 335 "vec 48", /* 48 */ 336 "vec 49", /* 49 */ 337 "vec 50", /* 50 */ 338 "vec 51", /* 51 */ 339 "vec 52", /* 52 */ 340 "vec 53", /* 53 */ 341 "vec 54", /* 54 */ 342 "vec 55", /* 55 */ 343 "vec 56", /* 56 */ 344 "vec 57", /* 57 */ 345 "vec 58", /* 58 */ 346 "vec 59", /* 59 */ 347 "vec 60", /* 60 */ 348 "vec 61", /* 61 */ 349 "vec 62", /* 63 */ 350 "vec 63", /* 63 */ 351 }; 352 353 /* 354 * mask of CPUs attached 355 * once they are attached, this var is read-only so mp safe 356 */ 357 static uint32_t cpu_present_mask; 358 359 kmutex_t rmixl_ipi_lock __cacheline_aligned; 360 /* covers RMIXL_PIC_IPIBASE */ 361 kmutex_t rmixl_intr_lock __cacheline_aligned; 362 /* covers rest of PIC, and rmixl_intrhand[] */ 363 rmixl_intrhand_t rmixl_intrhand[NINTRVECS]; 364 365 #ifdef DIAGNOSTIC 366 static int rmixl_pic_init_done; 367 #endif 368 369 370 static const char *rmixl_intr_string_xlr(int); 371 static const char *rmixl_intr_string_xls(int); 372 static uint32_t rmixl_irt_thread_mask(int); 373 static void rmixl_irt_init(int); 374 static void rmixl_irt_disestablish(int); 375 static void rmixl_irt_establish(int, int, int, 376 rmixl_intr_trigger_t, rmixl_intr_polarity_t); 377 378 #ifdef MULTIPROCESSOR 379 static int rmixl_send_ipi(struct cpu_info *, int); 380 static int rmixl_ipi_intr(void *); 381 #endif 382 383 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB) 384 int rmixl_intrhand_print_subr(int); 385 int rmixl_intrhand_print(void); 386 int rmixl_irt_print(void); 387 void rmixl_ipl_eimr_map_print(void); 388 #endif 389 390 391 static inline u_int 392 dclz(uint64_t val) 393 { 394 int nlz; 395 396 asm volatile("dclz %0, %1;" 397 : "=r"(nlz) : "r"(val)); 398 399 return nlz; 400 } 401 402 void 403 evbmips_intr_init(void) 404 { 405 uint32_t r; 406 407 KASSERT(cpu_rmixlr(mips_options.mips_cpu) 408 || cpu_rmixls(mips_options.mips_cpu)); 409 410 411 #ifdef DIAGNOSTIC 412 if (rmixl_pic_init_done != 0) 413 panic("%s: rmixl_pic_init_done %d", 414 __func__, rmixl_pic_init_done); 415 #endif 416 417 mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH); 418 mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH); 419 420 mutex_enter(&rmixl_intr_lock); 421 422 /* 423 * initialize (zero) all IRT Entries in the PIC 424 */ 425 for (u_int i = 0; i < NIRTS; i++) { 426 rmixl_irt_init(i); 427 } 428 429 /* 430 * disable watchdog NMI, timers 431 * 432 * XXX 433 * WATCHDOG_ENB is preserved because clearing it causes 434 * hang on the XLS616 (but not on the XLS408) 435 */ 436 r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL); 437 r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB; 438 RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r); 439 440 #ifdef DIAGNOSTIC 441 rmixl_pic_init_done = 1; 442 #endif 443 mutex_exit(&rmixl_intr_lock); 444 445 } 446 447 /* 448 * establish vector for mips3 count/compare clock interrupt 449 * this ensures we enable in EIRR, 450 * even though cpu_intr() handles the interrupt 451 * note the 'mpsafe' arg here is a placeholder only 452 */ 453 void 454 rmixl_intr_init_clk(void) 455 { 456 const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1; 457 458 mutex_enter(&rmixl_intr_lock); 459 460 void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false); 461 if (ih == NULL) 462 panic("%s: establish vec %d failed", __func__, vec); 463 464 mutex_exit(&rmixl_intr_lock); 465 } 466 467 #ifdef MULTIPROCESSOR 468 /* 469 * establish IPI interrupt and send function 470 */ 471 void 472 rmixl_intr_init_ipi(void) 473 { 474 475 mutex_enter(&rmixl_intr_lock); 476 477 for (u_int ipi = 0; ipi < NIPIS; ipi++) { 478 const u_int vec = RMIXL_INTRVEC_IPI + ipi; 479 void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED, 480 rmixl_ipi_intr, (void *)(uintptr_t)ipi, true); 481 if (ih == NULL) 482 panic("%s: establish ipi %d at vec %d failed", 483 __func__, ipi, vec); 484 } 485 486 mips_locoresw.lsw_send_ipi = rmixl_send_ipi; 487 488 mutex_exit(&rmixl_intr_lock); 489 } 490 #endif /* MULTIPROCESSOR */ 491 492 /* 493 * initialize per-cpu interrupt stuff in softc 494 * accumulate per-cpu bits in 'cpu_present_mask' 495 */ 496 void 497 rmixl_intr_init_cpu(struct cpu_info *ci) 498 { 499 struct rmixl_cpu_softc *sc = (void *)ci->ci_softc; 500 501 KASSERT(sc != NULL); 502 503 for (int vec=0; vec < NINTRVECS; vec++) 504 evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec], 505 EVCNT_TYPE_INTR, NULL, 506 device_xname(sc->sc_dev), 507 rmixl_intr_string(vec)); 508 509 KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8)); 510 atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci)); 511 } 512 513 /* 514 * rmixl_intr_string - return pointer to display name of a PIC-based interrupt 515 */ 516 const char * 517 rmixl_intr_string(int vec) 518 { 519 int irt; 520 521 if (vec < 0 || vec >= NINTRVECS) 522 panic("%s: vec index %d out of range, max %d", 523 __func__, vec, NINTRVECS - 1); 524 525 if (! RMIXL_VECTOR_IS_IRT(vec)) 526 return rmixl_vecnames_common[vec]; 527 528 irt = RMIXL_VECTOR_IRT(vec); 529 switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) { 530 case CIDFL_RMI_TYPE_XLR: 531 return rmixl_intr_string_xlr(irt); 532 case CIDFL_RMI_TYPE_XLS: 533 return rmixl_intr_string_xls(irt); 534 case CIDFL_RMI_TYPE_XLP: 535 panic("%s: RMI XLP not yet supported", __func__); 536 } 537 538 return "undefined"; /* appease gcc */ 539 } 540 541 static const char * 542 rmixl_intr_string_xlr(int irt) 543 { 544 return rmixl_irtnames_xlrxxx[irt]; 545 } 546 547 static const char * 548 rmixl_intr_string_xls(int irt) 549 { 550 const char *name; 551 552 switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) { 553 case MIPS_XLS104: 554 case MIPS_XLS108: 555 case MIPS_XLS404LITE: 556 case MIPS_XLS408LITE: 557 name = rmixl_irtnames_xls1xx[irt]; 558 break; 559 case MIPS_XLS204: 560 case MIPS_XLS208: 561 name = rmixl_irtnames_xls2xx[irt]; 562 break; 563 case MIPS_XLS404: 564 case MIPS_XLS408: 565 case MIPS_XLS416: 566 case MIPS_XLS608: 567 case MIPS_XLS616: 568 name = rmixl_irtnames_xls4xx[irt]; 569 break; 570 default: 571 name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)]; 572 break; 573 } 574 575 return name; 576 } 577 578 /* 579 * rmixl_irt_thread_mask 580 * 581 * given a bitmask of cpus, return a, IRT thread mask 582 */ 583 static uint32_t 584 rmixl_irt_thread_mask(int cpumask) 585 { 586 uint32_t irtc0; 587 588 #if defined(MULTIPROCESSOR) 589 #ifndef NOTYET 590 if (cpumask == -1) 591 return 1; /* XXX TMP FIXME */ 592 #endif 593 594 /* 595 * discount cpus not present 596 */ 597 cpumask &= cpu_present_mask; 598 599 switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) { 600 case MIPS_XLS104: 601 case MIPS_XLS204: 602 case MIPS_XLS404: 603 case MIPS_XLS404LITE: 604 irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0)); 605 irtc0 &= (__BITS(5,4) | __BITS(1,0)); 606 break; 607 case MIPS_XLS108: 608 case MIPS_XLS208: 609 case MIPS_XLS408: 610 case MIPS_XLS408LITE: 611 case MIPS_XLS608: 612 irtc0 = cpumask & __BITS(7,0); 613 break; 614 case MIPS_XLS416: 615 case MIPS_XLS616: 616 irtc0 = cpumask & __BITS(15,0); 617 break; 618 default: 619 panic("%s: unknown cpu ID %#x\n", __func__, 620 mips_options.mips_cpu_id); 621 } 622 #else 623 irtc0 = 1; 624 #endif /* MULTIPROCESSOR */ 625 626 return irtc0; 627 } 628 629 /* 630 * rmixl_irt_init 631 * - initialize IRT Entry for given index 632 * - unmask Thread#0 in low word (assume we only have 1 thread) 633 */ 634 static void 635 rmixl_irt_init(int irt) 636 { 637 KASSERT(irt < NIRTS); 638 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0); /* high word */ 639 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0); /* low word */ 640 } 641 642 /* 643 * rmixl_irt_disestablish 644 * - invalidate IRT Entry for given index 645 */ 646 static void 647 rmixl_irt_disestablish(int irt) 648 { 649 KASSERT(mutex_owned(&rmixl_intr_lock)); 650 DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0)); 651 rmixl_irt_init(irt); 652 } 653 654 /* 655 * rmixl_irt_establish 656 * - construct an IRT Entry for irt and write to PIC 657 */ 658 static void 659 rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger, 660 rmixl_intr_polarity_t polarity) 661 { 662 uint32_t irtc1; 663 uint32_t irtc0; 664 665 KASSERT(mutex_owned(&rmixl_intr_lock)); 666 667 if (irt >= NIRTS) 668 panic("%s: bad irt %d\n", __func__, irt); 669 670 if (! RMIXL_VECTOR_IS_IRT(vec)) 671 panic("%s: bad vec %d\n", __func__, vec); 672 673 switch (trigger) { 674 case RMIXL_TRIG_EDGE: 675 case RMIXL_TRIG_LEVEL: 676 break; 677 default: 678 panic("%s: bad trigger %d\n", __func__, trigger); 679 } 680 681 switch (polarity) { 682 case RMIXL_POLR_RISING: 683 case RMIXL_POLR_HIGH: 684 case RMIXL_POLR_FALLING: 685 case RMIXL_POLR_LOW: 686 break; 687 default: 688 panic("%s: bad polarity %d\n", __func__, polarity); 689 } 690 691 /* 692 * XXX IRT entries are not shared 693 */ 694 KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0); 695 KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0); 696 697 irtc0 = rmixl_irt_thread_mask(cpumask); 698 699 irtc1 = RMIXL_PIC_IRTENTRYC1_VALID; 700 irtc1 |= RMIXL_PIC_IRTENTRYC1_GL; /* local */ 701 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 702 703 if (trigger == RMIXL_TRIG_LEVEL) 704 irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG; 705 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 706 707 if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW)) 708 irtc1 |= RMIXL_PIC_IRTENTRYC1_P; 709 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 710 711 irtc1 |= vec; /* vector in EIRR */ 712 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 713 714 /* 715 * write IRT Entry to PIC 716 */ 717 DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n", 718 __func__, vec, vec, irt, irtc0, irtc1)); 719 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0); /* low word */ 720 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1); /* high word */ 721 } 722 723 void * 724 rmixl_vec_establish(int vec, int cpumask, int ipl, 725 int (*func)(void *), void *arg, bool mpsafe) 726 { 727 rmixl_intrhand_t *ih; 728 uint64_t eimr_bit; 729 int s; 730 731 KASSERT(mutex_owned(&rmixl_intr_lock)); 732 733 DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsage %d\n", 734 __func__, vec, cpumask, ipl, func, arg, mpsafe)); 735 #ifdef DIAGNOSTIC 736 if (rmixl_pic_init_done == 0) 737 panic("%s: called before evbmips_intr_init", __func__); 738 #endif 739 740 /* 741 * check args 742 */ 743 if (vec < 0 || vec >= NINTRVECS) 744 panic("%s: vec %d out of range, max %d", 745 __func__, vec, NINTRVECS - 1); 746 if (ipl <= 0 || ipl >= _IPL_N) 747 panic("%s: ipl %d out of range, min %d, max %d", 748 __func__, ipl, 1, _IPL_N - 1); 749 750 s = splhigh(); 751 752 ih = &rmixl_intrhand[vec]; 753 if (ih->ih_func != NULL) { 754 #ifdef DIAGNOSTIC 755 printf("%s: intrhand[%d] busy\n", __func__, vec); 756 #endif 757 splx(s); 758 return NULL; 759 } 760 761 ih->ih_arg = arg; 762 ih->ih_mpsafe = mpsafe; 763 ih->ih_vec = vec; 764 ih->ih_ipl = ipl; 765 ih->ih_cpumask = cpumask; 766 767 eimr_bit = (uint64_t)1 << vec; 768 for (int i=ih->ih_ipl; --i >= 0; ) { 769 KASSERT((ipl_eimr_map[i] & eimr_bit) == 0); 770 ipl_eimr_map[i] |= eimr_bit; 771 } 772 773 ih->ih_func = func; /* do this last */ 774 775 splx(s); 776 777 return ih; 778 } 779 780 /* 781 * rmixl_intr_establish 782 * - used to establish an IRT-based interrupt only 783 */ 784 void * 785 rmixl_intr_establish(int irt, int cpumask, int ipl, 786 rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity, 787 int (*func)(void *), void *arg, bool mpsafe) 788 { 789 rmixl_intrhand_t *ih; 790 int vec; 791 792 #ifdef DIAGNOSTIC 793 if (rmixl_pic_init_done == 0) 794 panic("%s: called before rmixl_pic_init_done", __func__); 795 #endif 796 797 /* 798 * check args 799 */ 800 if (irt < 0 || irt >= NIRTS) 801 panic("%s: irt %d out of range, max %d", 802 __func__, irt, NIRTS - 1); 803 if (ipl <= 0 || ipl >= _IPL_N) 804 panic("%s: ipl %d out of range, min %d, max %d", 805 __func__, ipl, 1, _IPL_N - 1); 806 807 vec = RMIXL_IRT_VECTOR(irt); 808 809 DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl)); 810 811 mutex_enter(&rmixl_intr_lock); 812 813 /* 814 * establish vector 815 */ 816 ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe); 817 818 /* 819 * establish IRT Entry 820 */ 821 rmixl_irt_establish(irt, vec, cpumask, trigger, polarity); 822 823 mutex_exit(&rmixl_intr_lock); 824 825 return ih; 826 } 827 828 void 829 rmixl_vec_disestablish(void *cookie) 830 { 831 rmixl_intrhand_t *ih = cookie; 832 uint64_t eimr_bit; 833 834 KASSERT(mutex_owned(&rmixl_intr_lock)); 835 KASSERT(ih->ih_vec < NINTRVECS); 836 KASSERT(ih == &rmixl_intrhand[ih->ih_vec]); 837 838 ih->ih_func = NULL; /* do this first */ 839 840 eimr_bit = (uint64_t)1 << ih->ih_vec; 841 for (int i=ih->ih_ipl; --i >= 0; ) { 842 KASSERT((ipl_eimr_map[i] & eimr_bit) != 0); 843 ipl_eimr_map[i] ^= eimr_bit; 844 } 845 } 846 847 void 848 rmixl_intr_disestablish(void *cookie) 849 { 850 rmixl_intrhand_t *ih = cookie; 851 int vec; 852 853 vec = ih->ih_vec; 854 855 KASSERT(vec < NINTRVECS); 856 KASSERT(ih == &rmixl_intrhand[vec]); 857 858 mutex_enter(&rmixl_intr_lock); 859 860 /* 861 * disable/invalidate the IRT Entry if needed 862 */ 863 if (RMIXL_VECTOR_IS_IRT(vec)) 864 rmixl_irt_disestablish(vec); 865 866 /* 867 * disasociate from vector and free the handle 868 */ 869 rmixl_vec_disestablish(cookie); 870 871 mutex_exit(&rmixl_intr_lock); 872 } 873 874 void 875 evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending) 876 { 877 struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc; 878 879 DPRINTF(("%s: cpu%ld: ipl %d, pc %#"PRIxVADDR", pending %#x\n", 880 __func__, cpu_number(), ipl, pc, pending)); 881 882 /* 883 * 'pending' arg is a summary that there is something to do 884 * the real pending status is obtained from EIRR 885 */ 886 KASSERT(pending == MIPS_INT_MASK_1); 887 888 for (;;) { 889 rmixl_intrhand_t *ih; 890 uint64_t eirr; 891 uint64_t eimr; 892 uint64_t vecbit; 893 int vec; 894 895 asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr)); 896 asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr)); 897 898 #ifdef IOINTR_DEBUG 899 printf("%s: cpu%ld: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n", 900 __func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]); 901 #endif /* IOINTR_DEBUG */ 902 903 /* 904 * reduce eirr to 905 * - ints that are enabled at or below this ipl 906 * - exclude count/compare clock and soft ints 907 * they are handled elsewhere 908 */ 909 eirr &= ipl_eimr_map[ipl-1]; 910 eirr &= ~ipl_eimr_map[ipl]; 911 eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8); 912 if (eirr == 0) 913 break; 914 915 vec = 63 - dclz(eirr); 916 ih = &rmixl_intrhand[vec]; 917 vecbit = 1ULL << vec; 918 KASSERT (ih->ih_ipl == ipl); 919 KASSERT ((vecbit & eimr) == 0); 920 KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0); 921 922 /* 923 * ack in EIRR, and in PIC if needed, 924 * the irq we are about to handle 925 */ 926 rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK); 927 if (RMIXL_VECTOR_IS_IRT(vec)) 928 RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK, 929 1 << RMIXL_VECTOR_IRT(vec)); 930 931 if (ih->ih_func != NULL) { 932 #ifdef MULTIPROCESSOR 933 if (ih->ih_mpsafe) { 934 (void)(*ih->ih_func)(ih->ih_arg); 935 } else { 936 KASSERTMSG(ipl == IPL_VM, 937 ("%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK", 938 __func__, sc->sc_vec_evcnts[vec].ev_name, 939 ipl)); 940 KERNEL_LOCK(1, NULL); 941 (void)(*ih->ih_func)(ih->ih_arg); 942 KERNEL_UNLOCK_ONE(NULL); 943 } 944 #else 945 (void)(*ih->ih_func)(ih->ih_arg); 946 #endif /* MULTIPROCESSOR */ 947 } 948 KASSERT(ipl == ih->ih_ipl); 949 KASSERTMSG(curcpu()->ci_cpl >= ipl, 950 ("%s: after %s: cpl (%d) < ipl %d", 951 __func__, sc->sc_vec_evcnts[vec].ev_name, 952 ipl, curcpu()->ci_cpl)); 953 sc->sc_vec_evcnts[vec].ev_count++; 954 } 955 } 956 957 #ifdef MULTIPROCESSOR 958 static int 959 rmixl_send_ipi(struct cpu_info *ci, int tag) 960 { 961 const cpuid_t cpuid = ci->ci_cpuid; 962 uint32_t core = (uint32_t)(cpuid >> 2); 963 uint32_t thread = (uint32_t)(cpuid & __BITS(1,0)); 964 uint64_t req = 1 << tag; 965 uint32_t r; 966 967 if (! CPUSET_HAS_P(cpus_running, cpu_index(ci))) 968 return -1; 969 970 KASSERT((tag >= 0) && (tag < NIPIS)); 971 972 r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT) 973 | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT) 974 | (RMIXL_INTRVEC_IPI + tag); 975 976 mutex_enter(&rmixl_ipi_lock); 977 atomic_or_64(&ci->ci_request_ipis, req); 978 RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r); 979 mutex_exit(&rmixl_ipi_lock); 980 981 return 0; 982 } 983 984 static int 985 rmixl_ipi_intr(void *arg) 986 { 987 struct cpu_info * const ci = curcpu(); 988 const uint64_t ipi_mask = 1 << (uintptr_t)arg; 989 990 KASSERT(ci->ci_cpl >= IPL_SCHED); 991 KASSERT((uintptr_t)arg < NIPIS); 992 993 /* if the request is clear, it was previously processed */ 994 if ((ci->ci_request_ipis & ipi_mask) == 0) 995 return 0; 996 997 atomic_or_64(&ci->ci_active_ipis, ipi_mask); 998 atomic_and_64(&ci->ci_request_ipis, ~ipi_mask); 999 1000 ipi_process(ci, ipi_mask); 1001 1002 atomic_and_64(&ci->ci_active_ipis, ~ipi_mask); 1003 1004 return 1; 1005 } 1006 #endif /* MULTIPROCESSOR */ 1007 1008 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB) 1009 int 1010 rmixl_intrhand_print_subr(int vec) 1011 { 1012 rmixl_intrhand_t *ih = &rmixl_intrhand[vec]; 1013 printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n", 1014 vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl, 1015 ih->ih_cpumask); 1016 return 0; 1017 } 1018 int 1019 rmixl_intrhand_print(void) 1020 { 1021 for (int vec=0; vec < NINTRVECS ; vec++) 1022 rmixl_intrhand_print_subr(vec); 1023 return 0; 1024 } 1025 1026 static inline void 1027 rmixl_irt_entry_print(u_int irt) 1028 { 1029 uint32_t c0, c1; 1030 1031 if ((irt < 0) || (irt > NIRTS)) 1032 return; 1033 c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)); 1034 c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)); 1035 printf("irt[%d]: %#x, %#x\n", irt, c0, c1); 1036 } 1037 1038 int 1039 rmixl_irt_print(void) 1040 { 1041 printf("%s:\n", __func__); 1042 for (int irt=0; irt < NIRTS ; irt++) 1043 rmixl_irt_entry_print(irt); 1044 return 0; 1045 } 1046 1047 void 1048 rmixl_ipl_eimr_map_print(void) 1049 { 1050 printf("IPL_NONE=%d, mask %#"PRIx64"\n", 1051 IPL_NONE, ipl_eimr_map[IPL_NONE]); 1052 printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n", 1053 IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]); 1054 printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n", 1055 IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]); 1056 printf("IPL_VM=%d, mask %#"PRIx64"\n", 1057 IPL_VM, ipl_eimr_map[IPL_VM]); 1058 printf("IPL_SCHED=%d, mask %#"PRIx64"\n", 1059 IPL_SCHED, ipl_eimr_map[IPL_SCHED]); 1060 printf("IPL_DDB=%d, mask %#"PRIx64"\n", 1061 IPL_DDB, ipl_eimr_map[IPL_DDB]); 1062 printf("IPL_HIGH=%d, mask %#"PRIx64"\n", 1063 IPL_HIGH, ipl_eimr_map[IPL_HIGH]); 1064 } 1065 1066 #endif 1067