1 /* $NetBSD: rmixl_intr.c,v 1.8 2011/09/27 01:02:34 jym Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /*- 34 * Copyright (c) 2001 The NetBSD Foundation, Inc. 35 * All rights reserved. 36 * 37 * This code is derived from software contributed to The NetBSD Foundation 38 * by Jason R. Thorpe. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. 
Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 59 * POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * Platform-specific interrupt support for the RMI XLP, XLR, XLS
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.8 2011/09/27 01:02:34 jym Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <mips/cpuset.h>
#include <mips/locore.h>

#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>

#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

//#define IOINTR_DEBUG 1
#ifdef IOINTR_DEBUG
int iointr_debug = IOINTR_DEBUG;
# define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
#else
# define DPRINTF(x)
#endif

/* accessors for PIC registers within the RMI I/O register space */
#define RMIXL_PICREG_READ(off) \
	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
#define RMIXL_PICREG_WRITE(off, val) \
	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))

/*
 * do not clear these when acking EIRR
 * (otherwise they get lost)
 */
#define RMIXL_EIRR_PRESERVE_MASK \
	((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)

/*
 * IRT assignments depends on the RMI chip family
 * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx)
 * use the right display string table for the CPU that's running.
 */

/*
 * rmixl_irtnames_xlrxxx
 * - use for XLRxxx
 */
static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio)",		/* 14 */
	"pic int 15 (hyper)",		/* 15 */
	"pic int 16 (pcix)",		/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (xgs0)",		/* 21 */
	"pic int 22 (xgs1)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (hyper_fatal)",	/* 24 */
	"pic int 25 (bridge_aerr)",	/* 25 */
	"pic int 26 (bridge_berr)",	/* 26 */
	"pic int 27 (bridge_tb)",	/* 27 */
	"pic int 28 (bridge_nmi)",	/* 28 */
	"pic int 29 (bridge_sram_derr)",/* 29 */
	"pic int 30 (gpio_fatal)",	/* 30 */
	"pic int 31 (reserved)",	/* 31 */
};

/*
 * rmixl_irtnames_xls2xx
 * - use for XLS2xx
 */
static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (pcie_link2)",	/* 23 */
	"pic int 24 (pcie_link3)",	/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls1xx
 * - use for XLS1xx, XLS4xx-Lite
 */
static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls4xx:
 * - use for XLS4xx, XLS6xx
 */
static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
	"pic int 0 (watchdog)",		/*  0 */
	"pic int 1 (timer0)",		/*  1 */
	"pic int 2 (timer1)",		/*  2 */
	"pic int 3 (timer2)",		/*  3 */
	"pic int 4 (timer3)",		/*  4 */
	"pic int 5 (timer4)",		/*  5 */
	"pic int 6 (timer5)",		/*  6 */
	"pic int 7 (timer6)",		/*  7 */
	"pic int 8 (timer7)",		/*  8 */
	"pic int 9 (uart0)",		/*  9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (pcie_link2)",	/* 28 */
	"pic int 29 (pcie_link3)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_vecnames_common:
 * - use for unknown cpu implementation
 * - covers all vectors, not just IRT intrs
 */
static const char * const rmixl_vecnames_common[NINTRVECS] = {
	"vec 0",		/*  0 */
	"vec 1",		/*  1 */
	"vec 2",		/*  2 */
	"vec 3",		/*  3 */
	"vec 4",		/*  4 */
	"vec 5",		/*  5 */
	"vec 6",		/*  6 */
	"vec 7",		/*  7 */
	"vec 8 (ipi 0)",	/*  8 */
	"vec 9 (ipi 1)",	/*  9 */
	"vec 10 (ipi 2)",	/* 10 */
	"vec 11 (ipi 3)",	/* 11 */
	"vec 12 (ipi 4)",	/* 12 */
	"vec 13 (ipi 5)",	/* 13 */
	"vec 14 (ipi 6)",	/* 14 */
	"vec 15 (fmn)",		/* 15 */
	"vec 16",		/* 16 */
	"vec 17",		/* 17 */
	"vec 18",		/* 18 */
	"vec 19",		/* 19 */
	"vec 20",		/* 20 */
	"vec 21",		/* 21 */
	"vec 22",		/* 22 */
	"vec 23",		/* 23 */
	"vec 24",		/* 24 */
	"vec 25",		/* 25 */
	"vec 26",		/* 26 */
	"vec 27",		/* 27 */
	"vec 28",		/* 28 */
	"vec 29",		/* 29 */
	"vec 30",		/* 30 */
	"vec 31",		/* 31 */
	"vec 32",		/* 32 */
	"vec 33",		/* 33 */
	"vec 34",		/* 34 */
	"vec 35",		/* 35 */
	"vec 36",		/* 36 */
	"vec 37",		/* 37 */
	"vec 38",		/* 38 */
	"vec 39",		/* 39 */
	"vec 40",		/* 40 */
	"vec 41",		/* 41 */
	"vec 42",		/* 42 */
	"vec 43",		/* 43 */
	"vec 44",		/* 44 */
	"vec 45",		/* 45 */
	"vec 46",		/* 46 */
	"vec 47",		/* 47 */
	"vec 48",		/* 48 */
	"vec 49",		/* 49 */
	"vec 50",		/* 50 */
	"vec 51",		/* 51 */
	"vec 52",		/* 52 */
	"vec 53",		/* 53 */
	"vec 54",		/* 54 */
	"vec 55",		/* 55 */
	"vec 56",		/* 56 */
	"vec 57",		/* 57 */
	"vec 58",		/* 58 */
	"vec 59",		/* 59 */
	"vec 60",		/* 60 */
	"vec 61",		/* 61 */
	"vec 62",		/* 62 */
	"vec 63",		/* 63 */
};

/*
 * mask of CPUs attached
 * once they are attached, this var is read-only so mp safe
 */
static uint32_t cpu_present_mask;

kmutex_t rmixl_ipi_lock __cacheline_aligned;
				/* covers RMIXL_PIC_IPIBASE */
kmutex_t rmixl_intr_lock __cacheline_aligned;
				/* covers rest of PIC, and rmixl_intrhand[] */
rmixl_intrhand_t rmixl_intrhand[NINTRVECS];

#ifdef DIAGNOSTIC
static int rmixl_pic_init_done;		/* set once by evbmips_intr_init() */
#endif


static const char *rmixl_intr_string_xlr(int);
static const char *rmixl_intr_string_xls(int);
static uint32_t rmixl_irt_thread_mask(int);
static void rmixl_irt_init(int);
static void rmixl_irt_disestablish(int);
static void rmixl_irt_establish(int, int, int,
		rmixl_intr_trigger_t, rmixl_intr_polarity_t);

#ifdef MULTIPROCESSOR
static int rmixl_send_ipi(struct cpu_info *, int);
static int rmixl_ipi_intr(void *);
#endif

#if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
int rmixl_intrhand_print_subr(int);
int rmixl_intrhand_print(void);
int
rmixl_irt_print(void);
void rmixl_ipl_eimr_map_print(void);
#endif


/*
 * dclz - count leading zeroes in a 64-bit value,
 * via the MIPS64 "dclz" instruction
 */
static inline u_int
dclz(uint64_t val)
{
	int nlz;

	asm volatile("dclz %0, %1;"
		: "=r"(nlz) : "r"(val));

	return nlz;
}

/*
 * evbmips_intr_init - once-only interrupt controller initialization:
 * set up the locks, zero all IRT entries, and quiesce the PIC control
 * register (watchdog NMI, timers)
 */
void
evbmips_intr_init(void)
{
	uint32_t r;

	/* this code supports only the XLR and XLS families */
	KASSERT(cpu_rmixlr(mips_options.mips_cpu)
	     || cpu_rmixls(mips_options.mips_cpu));


#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done != 0)
		panic("%s: rmixl_pic_init_done %d",
			__func__, rmixl_pic_init_done);
#endif

	mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH);

	mutex_enter(&rmixl_intr_lock);

	/*
	 * initialize (zero) all IRT Entries in the PIC
	 */
	for (u_int i = 0; i < NIRTS; i++) {
		rmixl_irt_init(i);
	}

	/*
	 * disable watchdog NMI, timers
	 *
	 * XXX
	 *  WATCHDOG_ENB is preserved because clearing it causes
	 *  hang on the XLS616 (but not on the XLS408)
	 */
	r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL);
	r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB;
	RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r);

#ifdef DIAGNOSTIC
	rmixl_pic_init_done = 1;
#endif
	mutex_exit(&rmixl_intr_lock);

}

/*
 * establish vector for mips3 count/compare clock interrupt
 * this ensures we enable in EIRR,
 * even though cpu_intr() handles the interrupt
 * note the 'mpsafe' arg here is a placeholder only
 */
void
rmixl_intr_init_clk(void)
{
	const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1;

	mutex_enter(&rmixl_intr_lock);

	void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false);
	if (ih == NULL)
		panic("%s: establish vec %d failed", __func__, vec);

	mutex_exit(&rmixl_intr_lock);
}

#ifdef MULTIPROCESSOR
/*
 * establish IPI interrupt and send function
 */
void
rmixl_intr_init_ipi(void)
{
	mutex_enter(&rmixl_intr_lock);

	/* one vector per IPI, starting at RMIXL_INTRVEC_IPI */
	for (u_int ipi = 0; ipi < NIPIS; ipi++) {
		const u_int vec = RMIXL_INTRVEC_IPI + ipi;
		void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED,
			rmixl_ipi_intr, (void *)(uintptr_t)ipi, true);
		if (ih == NULL)
			panic("%s: establish ipi %d at vec %d failed",
				__func__, ipi, vec);
	}

	mips_locoresw.lsw_send_ipi = rmixl_send_ipi;

	mutex_exit(&rmixl_intr_lock);
}
#endif 	/* MULTIPROCESSOR */

/*
 * initialize per-cpu interrupt stuff in softc
 * accumulate per-cpu bits in 'cpu_present_mask'
 */
void
rmixl_intr_init_cpu(struct cpu_info *ci)
{
	struct rmixl_cpu_softc *sc = (void *)ci->ci_softc;

	KASSERT(sc != NULL);

	/* attach one event counter per interrupt vector */
	for (int vec=0; vec < NINTRVECS; vec++)
		evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec],
			EVCNT_TYPE_INTR, NULL,
			device_xname(sc->sc_dev),
			rmixl_intr_string(vec));

	KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8));
	atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci));
}

/*
 * rmixl_intr_string - return pointer to display name of a PIC-based interrupt
 */
const char *
rmixl_intr_string(int vec)
{
	int irt;

	if (vec < 0 || vec >= NINTRVECS)
		panic("%s: vec index %d out of range, max %d",
			__func__, vec, NINTRVECS - 1);

	/* non-IRT vectors use the generic name table */
	if (! RMIXL_VECTOR_IS_IRT(vec))
		return rmixl_vecnames_common[vec];

	irt = RMIXL_VECTOR_IRT(vec);
	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
	case CIDFL_RMI_TYPE_XLR:
		return rmixl_intr_string_xlr(irt);
	case CIDFL_RMI_TYPE_XLS:
		return rmixl_intr_string_xls(irt);
	case CIDFL_RMI_TYPE_XLP:
		panic("%s: RMI XLP not yet supported", __func__);
	}

	return "undefined";	/* appease gcc */
}

static const char *
rmixl_intr_string_xlr(int irt)
{
	return rmixl_irtnames_xlrxxx[irt];
}

/*
 * pick the IRT name table matching the XLS model
 */
static const char *
rmixl_intr_string_xls(int irt)
{
	const char *name;

	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
	case MIPS_XLS104:
	case MIPS_XLS108:
	case MIPS_XLS404LITE:
	case MIPS_XLS408LITE:
		name = rmixl_irtnames_xls1xx[irt];
		break;
	case MIPS_XLS204:
	case MIPS_XLS208:
		name = rmixl_irtnames_xls2xx[irt];
		break;
	case MIPS_XLS404:
	case MIPS_XLS408:
	case MIPS_XLS416:
	case MIPS_XLS608:
	case MIPS_XLS616:
		name = rmixl_irtnames_xls4xx[irt];
		break;
	default:
		name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)];
		break;
	}

	return name;
}

/*
 * rmixl_irt_thread_mask
 *
 * given a bitmask of cpus, return an IRT thread mask
 */
static uint32_t
rmixl_irt_thread_mask(int cpumask)
{
	uint32_t irtc0;

#if defined(MULTIPROCESSOR)
#ifndef NOTYET
	if (cpumask == -1)
		return 1;	/* XXX TMP FIXME */
#endif

	/*
	 * discount cpus not present
	 */
	cpumask &= cpu_present_mask;

	switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
	case MIPS_XLS104:
	case MIPS_XLS204:
	case MIPS_XLS404:
	case MIPS_XLS404LITE:
		/* 2 cores x 2 threads: pack into bits 5:4 and 1:0 */
		irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0));
		irtc0 &= (__BITS(5,4) | __BITS(1,0));
		break;
	case MIPS_XLS108:
	case MIPS_XLS208:
	case MIPS_XLS408:
	case MIPS_XLS408LITE:
	case MIPS_XLS608:
		irtc0 = cpumask & __BITS(7,0);
		break;
	case MIPS_XLS416:
	case MIPS_XLS616:
		irtc0 = cpumask & __BITS(15,0);
		break;
	default:
		panic("%s: unknown cpu ID %#x\n", __func__,
			mips_options.mips_cpu_id);
	}
#else
	irtc0 = 1;
#endif	/* MULTIPROCESSOR */

	return irtc0;
}

/*
 * rmixl_irt_init
 * - initialize IRT Entry for given index
 * - unmask Thread#0 in low word (assume we only have 1 thread)
 */
static void
rmixl_irt_init(int irt)
{
	KASSERT(irt < NIRTS);
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0);	/* high word */
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0);	/* low word */
}

/*
 * rmixl_irt_disestablish
 * - invalidate IRT Entry for given index
 */
static void
rmixl_irt_disestablish(int irt)
{
	KASSERT(mutex_owned(&rmixl_intr_lock));
	DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0));
	rmixl_irt_init(irt);
}

/*
 * rmixl_irt_establish
 * - construct an IRT Entry for irt and write to PIC
 */
static void
rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger,
	rmixl_intr_polarity_t polarity)
{
	uint32_t irtc1;
	uint32_t irtc0;

	KASSERT(mutex_owned(&rmixl_intr_lock));

	if (irt >= NIRTS)
		panic("%s: bad irt %d\n", __func__, irt);

	if (! RMIXL_VECTOR_IS_IRT(vec))
		panic("%s: bad vec %d\n", __func__, vec);

	switch (trigger) {
	case RMIXL_TRIG_EDGE:
	case RMIXL_TRIG_LEVEL:
		break;
	default:
		panic("%s: bad trigger %d\n", __func__, trigger);
	}

	switch (polarity) {
	case RMIXL_POLR_RISING:
	case RMIXL_POLR_HIGH:
	case RMIXL_POLR_FALLING:
	case RMIXL_POLR_LOW:
		break;
	default:
		panic("%s: bad polarity %d\n", __func__, polarity);
	}

	/*
	 * XXX IRT entries are not shared
	 */
	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0);
	KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0);

	irtc0 = rmixl_irt_thread_mask(cpumask);

	irtc1  = RMIXL_PIC_IRTENTRYC1_VALID;
	irtc1 |= RMIXL_PIC_IRTENTRYC1_GL;	/* local */
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	if (trigger == RMIXL_TRIG_LEVEL)
		irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG;
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW))
		irtc1 |= RMIXL_PIC_IRTENTRYC1_P;
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	irtc1 |= vec;			/* vector in EIRR */
	KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0);

	/*
	 * write IRT Entry to PIC
	 */
	DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n",
		__func__, vec, vec, irt, irtc0, irtc1));
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0);	/* low word */
	RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1);	/* high word */
}

/*
 * rmixl_vec_establish
 * - attach 'func(arg)' as the handler for EIRR vector 'vec' at 'ipl',
 *   and enable the vector in every EIMR map at or above that ipl
 * - caller must hold rmixl_intr_lock (asserted below)
 * - returns the intrhand cookie, or NULL if the vector is busy
 */
void *
rmixl_vec_establish(int vec, int cpumask, int ipl,
	int (*func)(void *), void *arg, bool mpsafe)
{
	rmixl_intrhand_t *ih;
	uint64_t eimr_bit;
	int s;

	KASSERT(mutex_owned(&rmixl_intr_lock));

	DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n",
			__func__, vec, cpumask, ipl, func, arg, mpsafe));
#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done == 0)
		panic("%s: called before evbmips_intr_init", __func__);
#endif

	/*
	 * check args
	 */
	if (vec < 0 || vec >= NINTRVECS)
		panic("%s: vec %d out of range, max %d",
			__func__, vec, NINTRVECS - 1);
	if (ipl <= 0 || ipl >= _IPL_N)
		panic("%s: ipl %d out of range, min %d, max %d",
			__func__, ipl, 1, _IPL_N - 1);

	s = splhigh();

	ih = &rmixl_intrhand[vec];
	if (ih->ih_func != NULL) {
#ifdef DIAGNOSTIC
		printf("%s: intrhand[%d] busy\n", __func__, vec);
#endif
		splx(s);
		return NULL;
	}

	ih->ih_arg = arg;
	ih->ih_mpsafe = mpsafe;
	ih->ih_vec = vec;
	ih->ih_ipl = ipl;
	ih->ih_cpumask = cpumask;

	/* enable this vector for all IPLs below ih_ipl */
	eimr_bit = (uint64_t)1 << vec;
	for (int i=ih->ih_ipl; --i >= 0; ) {
		KASSERT((ipl_eimr_map[i] & eimr_bit) == 0);
		ipl_eimr_map[i] |= eimr_bit;
	}

	ih->ih_func = func;	/* do this last */

	splx(s);

	return ih;
}

/*
 * rmixl_intr_establish
 * - used to establish an IRT-based interrupt only
 */
void *
rmixl_intr_establish(int irt, int cpumask, int ipl,
	rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity,
	int (*func)(void *), void *arg, bool mpsafe)
{
	rmixl_intrhand_t *ih;
	int vec;

#ifdef DIAGNOSTIC
	if (rmixl_pic_init_done == 0)
		panic("%s: called before rmixl_pic_init_done", __func__);
#endif

	/*
	 * check args
	 */
	if (irt < 0 || irt >= NIRTS)
		panic("%s: irt %d out of range, max %d",
			__func__, irt, NIRTS - 1);
	if (ipl <= 0 || ipl >= _IPL_N)
		panic("%s: ipl %d out of range, min %d, max %d",
			__func__, ipl, 1, _IPL_N - 1);

	vec = RMIXL_IRT_VECTOR(irt);

	DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl));

	mutex_enter(&rmixl_intr_lock);

	/*
	 * establish vector
	 */
	ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe);

	/*
	 * establish IRT Entry
	 *
	 * NOTE(review): ih may be NULL here (vector busy), yet the IRT
	 * entry is still programmed below -- verify this is intended;
	 * the KASSERTs in rmixl_irt_establish() only catch re-use on
	 * DIAGNOSTIC kernels.
	 */
	rmixl_irt_establish(irt, vec, cpumask, trigger,
polarity); 818 819 mutex_exit(&rmixl_intr_lock); 820 821 return ih; 822 } 823 824 void 825 rmixl_vec_disestablish(void *cookie) 826 { 827 rmixl_intrhand_t *ih = cookie; 828 uint64_t eimr_bit; 829 830 KASSERT(mutex_owned(&rmixl_intr_lock)); 831 KASSERT(ih->ih_vec < NINTRVECS); 832 KASSERT(ih == &rmixl_intrhand[ih->ih_vec]); 833 834 ih->ih_func = NULL; /* do this first */ 835 836 eimr_bit = (uint64_t)1 << ih->ih_vec; 837 for (int i=ih->ih_ipl; --i >= 0; ) { 838 KASSERT((ipl_eimr_map[i] & eimr_bit) != 0); 839 ipl_eimr_map[i] ^= eimr_bit; 840 } 841 } 842 843 void 844 rmixl_intr_disestablish(void *cookie) 845 { 846 rmixl_intrhand_t *ih = cookie; 847 const int vec = ih->ih_vec; 848 849 KASSERT(vec < NINTRVECS); 850 KASSERT(ih == &rmixl_intrhand[vec]); 851 852 mutex_enter(&rmixl_intr_lock); 853 854 /* 855 * disable/invalidate the IRT Entry if needed 856 */ 857 if (RMIXL_VECTOR_IS_IRT(vec)) 858 rmixl_irt_disestablish(vec); 859 860 /* 861 * disasociate from vector and free the handle 862 */ 863 rmixl_vec_disestablish(cookie); 864 865 mutex_exit(&rmixl_intr_lock); 866 } 867 868 void 869 evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending) 870 { 871 struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc; 872 873 DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n", 874 __func__, cpu_number(), ipl, pc, pending)); 875 876 /* 877 * 'pending' arg is a summary that there is something to do 878 * the real pending status is obtained from EIRR 879 */ 880 KASSERT(pending == MIPS_INT_MASK_1); 881 882 for (;;) { 883 rmixl_intrhand_t *ih; 884 uint64_t eirr; 885 uint64_t eimr; 886 uint64_t vecbit; 887 int vec; 888 889 asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr)); 890 asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr)); 891 892 #ifdef IOINTR_DEBUG 893 printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n", 894 __func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]); 895 #endif /* IOINTR_DEBUG */ 896 897 /* 898 * reduce eirr to 899 * - ints that are enabled at or 
below this ipl 900 * - exclude count/compare clock and soft ints 901 * they are handled elsewhere 902 */ 903 eirr &= ipl_eimr_map[ipl-1]; 904 eirr &= ~ipl_eimr_map[ipl]; 905 eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8); 906 if (eirr == 0) 907 break; 908 909 vec = 63 - dclz(eirr); 910 ih = &rmixl_intrhand[vec]; 911 vecbit = 1ULL << vec; 912 KASSERT (ih->ih_ipl == ipl); 913 KASSERT ((vecbit & eimr) == 0); 914 KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0); 915 916 /* 917 * ack in EIRR, and in PIC if needed, 918 * the irq we are about to handle 919 */ 920 rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK); 921 if (RMIXL_VECTOR_IS_IRT(vec)) 922 RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK, 923 1 << RMIXL_VECTOR_IRT(vec)); 924 925 if (ih->ih_func != NULL) { 926 #ifdef MULTIPROCESSOR 927 if (ih->ih_mpsafe) { 928 (void)(*ih->ih_func)(ih->ih_arg); 929 } else { 930 KASSERTMSG(ipl == IPL_VM, 931 "%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK", 932 __func__, sc->sc_vec_evcnts[vec].ev_name, 933 ipl); 934 KERNEL_LOCK(1, NULL); 935 (void)(*ih->ih_func)(ih->ih_arg); 936 KERNEL_UNLOCK_ONE(NULL); 937 } 938 #else 939 (void)(*ih->ih_func)(ih->ih_arg); 940 #endif /* MULTIPROCESSOR */ 941 } 942 KASSERT(ipl == ih->ih_ipl); 943 KASSERTMSG(curcpu()->ci_cpl >= ipl, 944 "%s: after %s: cpl (%d) < ipl %d", 945 __func__, sc->sc_vec_evcnts[vec].ev_name, 946 ipl, curcpu()->ci_cpl); 947 sc->sc_vec_evcnts[vec].ev_count++; 948 } 949 } 950 951 #ifdef MULTIPROCESSOR 952 static int 953 rmixl_send_ipi(struct cpu_info *ci, int tag) 954 { 955 const cpuid_t cpuid = ci->ci_cpuid; 956 uint32_t core = (uint32_t)(cpuid >> 2); 957 uint32_t thread = (uint32_t)(cpuid & __BITS(1,0)); 958 uint64_t req = 1 << tag; 959 uint32_t r; 960 961 if (! 
CPUSET_HAS_P(cpus_running, cpu_index(ci))) 962 return -1; 963 964 KASSERT((tag >= 0) && (tag < NIPIS)); 965 966 r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT) 967 | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT) 968 | (RMIXL_INTRVEC_IPI + tag); 969 970 mutex_enter(&rmixl_ipi_lock); 971 atomic_or_64(&ci->ci_request_ipis, req); 972 RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r); 973 mutex_exit(&rmixl_ipi_lock); 974 975 return 0; 976 } 977 978 static int 979 rmixl_ipi_intr(void *arg) 980 { 981 struct cpu_info * const ci = curcpu(); 982 const uint64_t ipi_mask = 1 << (uintptr_t)arg; 983 984 KASSERT(ci->ci_cpl >= IPL_SCHED); 985 KASSERT((uintptr_t)arg < NIPIS); 986 987 /* if the request is clear, it was previously processed */ 988 if ((ci->ci_request_ipis & ipi_mask) == 0) 989 return 0; 990 991 atomic_or_64(&ci->ci_active_ipis, ipi_mask); 992 atomic_and_64(&ci->ci_request_ipis, ~ipi_mask); 993 994 ipi_process(ci, ipi_mask); 995 996 atomic_and_64(&ci->ci_active_ipis, ~ipi_mask); 997 998 return 1; 999 } 1000 #endif /* MULTIPROCESSOR */ 1001 1002 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB) 1003 int 1004 rmixl_intrhand_print_subr(int vec) 1005 { 1006 rmixl_intrhand_t *ih = &rmixl_intrhand[vec]; 1007 printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n", 1008 vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl, 1009 ih->ih_cpumask); 1010 return 0; 1011 } 1012 int 1013 rmixl_intrhand_print(void) 1014 { 1015 for (int vec=0; vec < NINTRVECS ; vec++) 1016 rmixl_intrhand_print_subr(vec); 1017 return 0; 1018 } 1019 1020 static inline void 1021 rmixl_irt_entry_print(u_int irt) 1022 { 1023 uint32_t c0, c1; 1024 1025 if ((irt < 0) || (irt > NIRTS)) 1026 return; 1027 c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)); 1028 c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)); 1029 printf("irt[%d]: %#x, %#x\n", irt, c0, c1); 1030 } 1031 1032 int 1033 rmixl_irt_print(void) 1034 { 1035 printf("%s:\n", __func__); 1036 for (int irt=0; irt < NIRTS ; irt++) 1037 
		rmixl_irt_entry_print(irt);
	return 0;
}

/*
 * rmixl_ipl_eimr_map_print - dump the per-IPL EIMR enable masks
 * maintained by rmixl_vec_establish()/rmixl_vec_disestablish()
 */
void
rmixl_ipl_eimr_map_print(void)
{
	printf("IPL_NONE=%d, mask %#"PRIx64"\n",
		IPL_NONE, ipl_eimr_map[IPL_NONE]);
	printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n",
		IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]);
	printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n",
		IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]);
	printf("IPL_VM=%d, mask %#"PRIx64"\n",
		IPL_VM, ipl_eimr_map[IPL_VM]);
	printf("IPL_SCHED=%d, mask %#"PRIx64"\n",
		IPL_SCHED, ipl_eimr_map[IPL_SCHED]);
	printf("IPL_DDB=%d, mask %#"PRIx64"\n",
		IPL_DDB, ipl_eimr_map[IPL_DDB]);
	printf("IPL_HIGH=%d, mask %#"PRIx64"\n",
		IPL_HIGH, ipl_eimr_map[IPL_HIGH]);
}

#endif