1 /* $NetBSD: rmixl_intr.c,v 1.12 2016/08/26 15:45:48 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /*- 34 * Copyright (c) 2001 The NetBSD Foundation, Inc. 35 * All rights reserved. 36 * 37 * This code is derived from software contributed to The NetBSD Foundation 38 * by Jason R. Thorpe. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. 
Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 59 * POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * Platform-specific interrupt support for the RMI XLP, XLR, XLS
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.12 2016/08/26 15:45:48 skrll Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <mips/locore.h>

#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>

#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

//#define IOINTR_DEBUG 1
#ifdef IOINTR_DEBUG
int iointr_debug = IOINTR_DEBUG;
# define DPRINTF(x)	do { if (iointr_debug) printf x ; } while(0)
#else
# define DPRINTF(x)
#endif

/* access a PIC register at byte offset 'off' from the PIC device base */
#define RMIXL_PICREG_READ(off) \
	RMIXL_IOREG_READ(RMIXL_IO_DEV_PIC + (off))
#define RMIXL_PICREG_WRITE(off, val) \
	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PIC + (off), (val))

/*
 * do not clear these when acking EIRR
 * (otherwise they get lost)
 */
#define RMIXL_EIRR_PRESERVE_MASK \
	((MIPS_INT_MASK_5|MIPS_SOFT_INT_MASK) >> 8)

/*
 * IRT assignments depends on the RMI chip family
 * (XLS1xx vs. XLS2xx vs. XLS3xx vs. XLS6xx)
 * use the right display string table for the CPU that's running.
 */

/*
 * rmixl_irtnames_xlrxxx
 * - use for XLRxxx
 */
static const char * const rmixl_irtnames_xlrxxx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio)",		/* 14 */
	"pic int 15 (hyper)",		/* 15 */
	"pic int 16 (pcix)",		/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (xgs0)",		/* 21 */
	"pic int 22 (xgs1)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (hyper_fatal)",	/* 24 */
	"pic int 25 (bridge_aerr)",	/* 25 */
	"pic int 26 (bridge_berr)",	/* 26 */
	"pic int 27 (bridge_tb)",	/* 27 */
	"pic int 28 (bridge_nmi)",	/* 28 */
	"pic int 29 (bridge_sram_derr)",/* 29 */
	"pic int 30 (gpio_fatal)",	/* 30 */
	"pic int 31 (reserved)",	/* 31 */
};

/*
 * rmixl_irtnames_xls2xx
 * - use for XLS2xx
 */
static const char * const rmixl_irtnames_xls2xx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (pcie_link2)",	/* 23 */
	"pic int 24 (pcie_link3)",	/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls1xx
 * - use for XLS1xx, XLS4xx-Lite
 */
static const char * const rmixl_irtnames_xls1xx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (irq28)",		/* 28 */
	"pic int 29 (pcie_err)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_irtnames_xls4xx:
 * - use for XLS4xx, XLS6xx
 */
static const char * const rmixl_irtnames_xls4xx[NIRTS] = {
	"pic int 0 (watchdog)",		/* 0 */
	"pic int 1 (timer0)",		/* 1 */
	"pic int 2 (timer1)",		/* 2 */
	"pic int 3 (timer2)",		/* 3 */
	"pic int 4 (timer3)",		/* 4 */
	"pic int 5 (timer4)",		/* 5 */
	"pic int 6 (timer5)",		/* 6 */
	"pic int 7 (timer6)",		/* 7 */
	"pic int 8 (timer7)",		/* 8 */
	"pic int 9 (uart0)",		/* 9 */
	"pic int 10 (uart1)",		/* 10 */
	"pic int 11 (i2c0)",		/* 11 */
	"pic int 12 (i2c1)",		/* 12 */
	"pic int 13 (pcmcia)",		/* 13 */
	"pic int 14 (gpio_a)",		/* 14 */
	"pic int 15 (irq15)",		/* 15 */
	"pic int 16 (bridge_tb)",	/* 16 */
	"pic int 17 (gmac0)",		/* 17 */
	"pic int 18 (gmac1)",		/* 18 */
	"pic int 19 (gmac2)",		/* 19 */
	"pic int 20 (gmac3)",		/* 20 */
	"pic int 21 (irq21)",		/* 21 */
	"pic int 22 (irq22)",		/* 22 */
	"pic int 23 (irq23)",		/* 23 */
	"pic int 24 (irq24)",		/* 24 */
	"pic int 25 (bridge_err)",	/* 25 */
	"pic int 26 (pcie_link0)",	/* 26 */
	"pic int 27 (pcie_link1)",	/* 27 */
	"pic int 28 (pcie_link2)",	/* 28 */
	"pic int 29 (pcie_link3)",	/* 29 */
	"pic int 30 (gpio_b)",		/* 30 */
	"pic int 31 (usb)",		/* 31 */
};

/*
 * rmixl_vecnames_common:
 * - use for unknown cpu implementation
 * - covers all vectors, not just IRT intrs
 */
static const char * const rmixl_vecnames_common[NINTRVECS] = {
	"vec 0",		/* 0 */
	"vec 1",		/* 1 */
	"vec 2",		/* 2 */
	"vec 3",		/* 3 */
	"vec 4",		/* 4 */
	"vec 5",		/* 5 */
	"vec 6",		/* 6 */
	"vec 7",		/* 7 */
	"vec 8 (ipi 0)",	/* 8 */
	"vec 9 (ipi 1)",	/* 9 */
	"vec 10 (ipi 2)",	/* 10 */
	"vec 11 (ipi 3)",	/* 11 */
	"vec 12 (ipi 4)",	/* 12 */
	"vec 13 (ipi 5)",	/* 13 */
	"vec 14 (ipi 6)",	/* 14 */
	"vec 15 (fmn)",		/* 15 */
	"vec 16",		/* 16 */
	"vec 17",		/* 17 */
	"vec 18",		/* 18 */
	"vec 19",		/* 19 */
	"vec 20",		/* 20 */
	"vec 21",		/* 21 */
	"vec 22",		/* 22 */
	"vec 23",		/* 23 */
	"vec 24",		/* 24 */
	"vec 25",		/* 25 */
	"vec 26",		/* 26 */
	"vec 27",		/* 27 */
	"vec 28",		/* 28 */
	"vec 29",		/* 29 */
	"vec 30",		/* 30 */
	"vec 31",		/* 31 */
	"vec 32",		/* 32 */
	"vec 33",		/* 33 */
	"vec 34",		/* 34 */
	"vec 35",		/* 35 */
	"vec 36",		/* 36 */
	"vec 37",		/* 37 */
	"vec 38",		/* 38 */
	"vec 39",		/* 39 */
	"vec 40",		/* 40 */
	"vec 41",		/* 41 */
	"vec 42",		/* 42 */
	"vec 43",		/* 43 */
	"vec 44",		/* 44 */
	"vec 45",		/* 45 */
	"vec 46",		/* 46 */
	"vec 47",		/* 47 */
	"vec 48",		/* 48 */
	"vec 49",		/* 49 */
	"vec 50",		/* 50 */
	"vec 51",		/* 51 */
	"vec 52",		/* 52 */
	"vec 53",		/* 53 */
	"vec 54",		/* 54 */
	"vec 55",		/* 55 */
	"vec 56",		/* 56 */
	"vec 57",		/* 57 */
	"vec 58",		/* 58 */
	"vec 59",		/* 59 */
	"vec 60",		/* 60 */
	"vec 61",		/* 61 */
	"vec 62",		/* 62 */
	"vec 63",		/* 63 */
};

/*
 * mask of CPUs attached
 * once they are attached, this var is read-only so mp safe
 */
static uint32_t cpu_present_mask;

kmutex_t rmixl_ipi_lock __cacheline_aligned;
				/* covers RMIXL_PIC_IPIBASE */
kmutex_t rmixl_intr_lock __cacheline_aligned;
				/* covers rest of PIC, and rmixl_intrhand[] */
rmixl_intrhand_t rmixl_intrhand[NINTRVECS];

#ifdef DIAGNOSTIC
static int rmixl_pic_init_done;
#endif


static const char *rmixl_intr_string_xlr(int);
static const char *rmixl_intr_string_xls(int);
static uint32_t rmixl_irt_thread_mask(int);
static void rmixl_irt_init(int);
static void rmixl_irt_disestablish(int);
static void rmixl_irt_establish(int, int, int,
		rmixl_intr_trigger_t, rmixl_intr_polarity_t);

#ifdef MULTIPROCESSOR
static int rmixl_send_ipi(struct cpu_info *, int);
static int rmixl_ipi_intr(void *);
#endif

#if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB)
int rmixl_intrhand_print_subr(int);
int rmixl_intrhand_print(void);
int
rmixl_irt_print(void); 383 void rmixl_ipl_eimr_map_print(void); 384 #endif 385 386 387 static inline u_int 388 dclz(uint64_t val) 389 { 390 int nlz; 391 392 asm volatile("dclz %0, %1;" 393 : "=r"(nlz) : "r"(val)); 394 395 return nlz; 396 } 397 398 void 399 evbmips_intr_init(void) 400 { 401 uint32_t r; 402 403 KASSERT(cpu_rmixlr(mips_options.mips_cpu) 404 || cpu_rmixls(mips_options.mips_cpu)); 405 406 407 #ifdef DIAGNOSTIC 408 if (rmixl_pic_init_done != 0) 409 panic("%s: rmixl_pic_init_done %d", 410 __func__, rmixl_pic_init_done); 411 #endif 412 413 mutex_init(&rmixl_ipi_lock, MUTEX_DEFAULT, IPL_HIGH); 414 mutex_init(&rmixl_intr_lock, MUTEX_DEFAULT, IPL_HIGH); 415 416 mutex_enter(&rmixl_intr_lock); 417 418 /* 419 * initialize (zero) all IRT Entries in the PIC 420 */ 421 for (u_int i = 0; i < NIRTS; i++) { 422 rmixl_irt_init(i); 423 } 424 425 /* 426 * disable watchdog NMI, timers 427 * 428 * XXX 429 * WATCHDOG_ENB is preserved because clearing it causes 430 * hang on the XLS616 (but not on the XLS408) 431 */ 432 r = RMIXL_PICREG_READ(RMIXL_PIC_CONTROL); 433 r &= RMIXL_PIC_CONTROL_RESV|RMIXL_PIC_CONTROL_WATCHDOG_ENB; 434 RMIXL_PICREG_WRITE(RMIXL_PIC_CONTROL, r); 435 436 #ifdef DIAGNOSTIC 437 rmixl_pic_init_done = 1; 438 #endif 439 mutex_exit(&rmixl_intr_lock); 440 441 } 442 443 /* 444 * establish vector for mips3 count/compare clock interrupt 445 * this ensures we enable in EIRR, 446 * even though cpu_intr() handles the interrupt 447 * note the 'mpsafe' arg here is a placeholder only 448 */ 449 void 450 rmixl_intr_init_clk(void) 451 { 452 const int vec = ffs(MIPS_INT_MASK_5 >> MIPS_INT_MASK_SHIFT) - 1; 453 454 mutex_enter(&rmixl_intr_lock); 455 456 void *ih = rmixl_vec_establish(vec, 0, IPL_SCHED, NULL, NULL, false); 457 if (ih == NULL) 458 panic("%s: establish vec %d failed", __func__, vec); 459 460 mutex_exit(&rmixl_intr_lock); 461 } 462 463 #ifdef MULTIPROCESSOR 464 /* 465 * establish IPI interrupt and send function 466 */ 467 void 468 rmixl_intr_init_ipi(void) 469 
{ 470 mutex_enter(&rmixl_intr_lock); 471 472 for (u_int ipi = 0; ipi < NIPIS; ipi++) { 473 const u_int vec = RMIXL_INTRVEC_IPI + ipi; 474 void * const ih = rmixl_vec_establish(vec, -1, IPL_SCHED, 475 rmixl_ipi_intr, (void *)(uintptr_t)ipi, true); 476 if (ih == NULL) 477 panic("%s: establish ipi %d at vec %d failed", 478 __func__, ipi, vec); 479 } 480 481 mips_locoresw.lsw_send_ipi = rmixl_send_ipi; 482 483 mutex_exit(&rmixl_intr_lock); 484 } 485 #endif /* MULTIPROCESSOR */ 486 487 /* 488 * initialize per-cpu interrupt stuff in softc 489 * accumulate per-cpu bits in 'cpu_present_mask' 490 */ 491 void 492 rmixl_intr_init_cpu(struct cpu_info *ci) 493 { 494 struct rmixl_cpu_softc *sc = (void *)ci->ci_softc; 495 496 KASSERT(sc != NULL); 497 498 for (int vec=0; vec < NINTRVECS; vec++) 499 evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec], 500 EVCNT_TYPE_INTR, NULL, 501 device_xname(sc->sc_dev), 502 rmixl_intr_string(vec)); 503 504 KASSERT(cpu_index(ci) < (sizeof(cpu_present_mask) * 8)); 505 atomic_or_32((volatile uint32_t *)&cpu_present_mask, 1 << cpu_index(ci)); 506 } 507 508 /* 509 * rmixl_intr_string - return pointer to display name of a PIC-based interrupt 510 */ 511 const char * 512 rmixl_intr_string(int vec) 513 { 514 int irt; 515 516 if (vec < 0 || vec >= NINTRVECS) 517 panic("%s: vec index %d out of range, max %d", 518 __func__, vec, NINTRVECS - 1); 519 520 if (! 
RMIXL_VECTOR_IS_IRT(vec)) 521 return rmixl_vecnames_common[vec]; 522 523 irt = RMIXL_VECTOR_IRT(vec); 524 switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) { 525 case CIDFL_RMI_TYPE_XLR: 526 return rmixl_intr_string_xlr(irt); 527 case CIDFL_RMI_TYPE_XLS: 528 return rmixl_intr_string_xls(irt); 529 case CIDFL_RMI_TYPE_XLP: 530 panic("%s: RMI XLP not yet supported", __func__); 531 } 532 533 return "undefined"; /* appease gcc */ 534 } 535 536 static const char * 537 rmixl_intr_string_xlr(int irt) 538 { 539 return rmixl_irtnames_xlrxxx[irt]; 540 } 541 542 static const char * 543 rmixl_intr_string_xls(int irt) 544 { 545 const char *name; 546 547 switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) { 548 case MIPS_XLS104: 549 case MIPS_XLS108: 550 case MIPS_XLS404LITE: 551 case MIPS_XLS408LITE: 552 name = rmixl_irtnames_xls1xx[irt]; 553 break; 554 case MIPS_XLS204: 555 case MIPS_XLS208: 556 name = rmixl_irtnames_xls2xx[irt]; 557 break; 558 case MIPS_XLS404: 559 case MIPS_XLS408: 560 case MIPS_XLS416: 561 case MIPS_XLS608: 562 case MIPS_XLS616: 563 name = rmixl_irtnames_xls4xx[irt]; 564 break; 565 default: 566 name = rmixl_vecnames_common[RMIXL_IRT_VECTOR(irt)]; 567 break; 568 } 569 570 return name; 571 } 572 573 /* 574 * rmixl_irt_thread_mask 575 * 576 * given a bitmask of cpus, return a, IRT thread mask 577 */ 578 static uint32_t 579 rmixl_irt_thread_mask(int cpumask) 580 { 581 uint32_t irtc0; 582 583 #if defined(MULTIPROCESSOR) 584 #ifndef NOTYET 585 if (cpumask == -1) 586 return 1; /* XXX TMP FIXME */ 587 #endif 588 589 /* 590 * discount cpus not present 591 */ 592 cpumask &= cpu_present_mask; 593 594 switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) { 595 case MIPS_XLS104: 596 case MIPS_XLS204: 597 case MIPS_XLS404: 598 case MIPS_XLS404LITE: 599 irtc0 = ((cpumask >> 2) << 4) | (cpumask & __BITS(1,0)); 600 irtc0 &= (__BITS(5,4) | __BITS(1,0)); 601 break; 602 case MIPS_XLS108: 603 case MIPS_XLS208: 604 case MIPS_XLS408: 605 case MIPS_XLS408LITE: 606 case MIPS_XLS608: 607 
irtc0 = cpumask & __BITS(7,0); 608 break; 609 case MIPS_XLS416: 610 case MIPS_XLS616: 611 irtc0 = cpumask & __BITS(15,0); 612 break; 613 default: 614 panic("%s: unknown cpu ID %#x\n", __func__, 615 mips_options.mips_cpu_id); 616 } 617 #else 618 irtc0 = 1; 619 #endif /* MULTIPROCESSOR */ 620 621 return irtc0; 622 } 623 624 /* 625 * rmixl_irt_init 626 * - initialize IRT Entry for given index 627 * - unmask Thread#0 in low word (assume we only have 1 thread) 628 */ 629 static void 630 rmixl_irt_init(int irt) 631 { 632 KASSERT(irt < NIRTS); 633 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), 0); /* high word */ 634 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), 0); /* low word */ 635 } 636 637 /* 638 * rmixl_irt_disestablish 639 * - invalidate IRT Entry for given index 640 */ 641 static void 642 rmixl_irt_disestablish(int irt) 643 { 644 KASSERT(mutex_owned(&rmixl_intr_lock)); 645 DPRINTF(("%s: irt %d, irtc1 %#x\n", __func__, irt, 0)); 646 rmixl_irt_init(irt); 647 } 648 649 /* 650 * rmixl_irt_establish 651 * - construct an IRT Entry for irt and write to PIC 652 */ 653 static void 654 rmixl_irt_establish(int irt, int vec, int cpumask, rmixl_intr_trigger_t trigger, 655 rmixl_intr_polarity_t polarity) 656 { 657 uint32_t irtc1; 658 uint32_t irtc0; 659 660 KASSERT(mutex_owned(&rmixl_intr_lock)); 661 662 if (irt >= NIRTS) 663 panic("%s: bad irt %d\n", __func__, irt); 664 665 if (! 
RMIXL_VECTOR_IS_IRT(vec)) 666 panic("%s: bad vec %d\n", __func__, vec); 667 668 switch (trigger) { 669 case RMIXL_TRIG_EDGE: 670 case RMIXL_TRIG_LEVEL: 671 break; 672 default: 673 panic("%s: bad trigger %d\n", __func__, trigger); 674 } 675 676 switch (polarity) { 677 case RMIXL_POLR_RISING: 678 case RMIXL_POLR_HIGH: 679 case RMIXL_POLR_FALLING: 680 case RMIXL_POLR_LOW: 681 break; 682 default: 683 panic("%s: bad polarity %d\n", __func__, polarity); 684 } 685 686 /* 687 * XXX IRT entries are not shared 688 */ 689 KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)) == 0); 690 KASSERT(RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)) == 0); 691 692 irtc0 = rmixl_irt_thread_mask(cpumask); 693 694 irtc1 = RMIXL_PIC_IRTENTRYC1_VALID; 695 irtc1 |= RMIXL_PIC_IRTENTRYC1_GL; /* local */ 696 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 697 698 if (trigger == RMIXL_TRIG_LEVEL) 699 irtc1 |= RMIXL_PIC_IRTENTRYC1_TRG; 700 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 701 702 if ((polarity == RMIXL_POLR_FALLING) || (polarity == RMIXL_POLR_LOW)) 703 irtc1 |= RMIXL_PIC_IRTENTRYC1_P; 704 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 705 706 irtc1 |= vec; /* vector in EIRR */ 707 KASSERT((irtc1 & RMIXL_PIC_IRTENTRYC1_NMI) == 0); 708 709 /* 710 * write IRT Entry to PIC 711 */ 712 DPRINTF(("%s: vec %d (%#x), irt %d, irtc0 %#x, irtc1 %#x\n", 713 __func__, vec, vec, irt, irtc0, irtc1)); 714 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC0(irt), irtc0); /* low word */ 715 RMIXL_PICREG_WRITE(RMIXL_PIC_IRTENTRYC1(irt), irtc1); /* high word */ 716 } 717 718 void * 719 rmixl_vec_establish(int vec, int cpumask, int ipl, 720 int (*func)(void *), void *arg, bool mpsafe) 721 { 722 rmixl_intrhand_t *ih; 723 uint64_t eimr_bit; 724 int s; 725 726 KASSERT(mutex_owned(&rmixl_intr_lock)); 727 728 DPRINTF(("%s: vec %d cpumask %#x ipl %d func %p arg %p mpsafe %d\n", 729 __func__, vec, cpumask, ipl, func, arg, mpsafe)); 730 #ifdef DIAGNOSTIC 731 if (rmixl_pic_init_done == 0) 732 panic("%s: called before 
evbmips_intr_init", __func__); 733 #endif 734 735 /* 736 * check args 737 */ 738 if (vec < 0 || vec >= NINTRVECS) 739 panic("%s: vec %d out of range, max %d", 740 __func__, vec, NINTRVECS - 1); 741 if (ipl <= 0 || ipl >= _IPL_N) 742 panic("%s: ipl %d out of range, min %d, max %d", 743 __func__, ipl, 1, _IPL_N - 1); 744 745 s = splhigh(); 746 747 ih = &rmixl_intrhand[vec]; 748 if (ih->ih_func != NULL) { 749 #ifdef DIAGNOSTIC 750 printf("%s: intrhand[%d] busy\n", __func__, vec); 751 #endif 752 splx(s); 753 return NULL; 754 } 755 756 ih->ih_arg = arg; 757 ih->ih_mpsafe = mpsafe; 758 ih->ih_vec = vec; 759 ih->ih_ipl = ipl; 760 ih->ih_cpumask = cpumask; 761 762 eimr_bit = (uint64_t)1 << vec; 763 for (int i=ih->ih_ipl; --i >= 0; ) { 764 KASSERT((ipl_eimr_map[i] & eimr_bit) == 0); 765 ipl_eimr_map[i] |= eimr_bit; 766 } 767 768 ih->ih_func = func; /* do this last */ 769 770 splx(s); 771 772 return ih; 773 } 774 775 /* 776 * rmixl_intr_establish 777 * - used to establish an IRT-based interrupt only 778 */ 779 void * 780 rmixl_intr_establish(int irt, int cpumask, int ipl, 781 rmixl_intr_trigger_t trigger, rmixl_intr_polarity_t polarity, 782 int (*func)(void *), void *arg, bool mpsafe) 783 { 784 rmixl_intrhand_t *ih; 785 int vec; 786 787 #ifdef DIAGNOSTIC 788 if (rmixl_pic_init_done == 0) 789 panic("%s: called before rmixl_pic_init_done", __func__); 790 #endif 791 792 /* 793 * check args 794 */ 795 if (irt < 0 || irt >= NIRTS) 796 panic("%s: irt %d out of range, max %d", 797 __func__, irt, NIRTS - 1); 798 if (ipl <= 0 || ipl >= _IPL_N) 799 panic("%s: ipl %d out of range, min %d, max %d", 800 __func__, ipl, 1, _IPL_N - 1); 801 802 vec = RMIXL_IRT_VECTOR(irt); 803 804 DPRINTF(("%s: irt %d, vec %d, ipl %d\n", __func__, irt, vec, ipl)); 805 806 mutex_enter(&rmixl_intr_lock); 807 808 /* 809 * establish vector 810 */ 811 ih = rmixl_vec_establish(vec, cpumask, ipl, func, arg, mpsafe); 812 813 /* 814 * establish IRT Entry 815 */ 816 rmixl_irt_establish(irt, vec, cpumask, trigger, 
polarity); 817 818 mutex_exit(&rmixl_intr_lock); 819 820 return ih; 821 } 822 823 void 824 rmixl_vec_disestablish(void *cookie) 825 { 826 rmixl_intrhand_t *ih = cookie; 827 uint64_t eimr_bit; 828 829 KASSERT(mutex_owned(&rmixl_intr_lock)); 830 KASSERT(ih->ih_vec < NINTRVECS); 831 KASSERT(ih == &rmixl_intrhand[ih->ih_vec]); 832 833 ih->ih_func = NULL; /* do this first */ 834 835 eimr_bit = (uint64_t)1 << ih->ih_vec; 836 for (int i=ih->ih_ipl; --i >= 0; ) { 837 KASSERT((ipl_eimr_map[i] & eimr_bit) != 0); 838 ipl_eimr_map[i] ^= eimr_bit; 839 } 840 } 841 842 void 843 rmixl_intr_disestablish(void *cookie) 844 { 845 rmixl_intrhand_t *ih = cookie; 846 const int vec = ih->ih_vec; 847 848 KASSERT(vec < NINTRVECS); 849 KASSERT(ih == &rmixl_intrhand[vec]); 850 851 mutex_enter(&rmixl_intr_lock); 852 853 /* 854 * disable/invalidate the IRT Entry if needed 855 */ 856 if (RMIXL_VECTOR_IS_IRT(vec)) 857 rmixl_irt_disestablish(vec); 858 859 /* 860 * disasociate from vector and free the handle 861 */ 862 rmixl_vec_disestablish(cookie); 863 864 mutex_exit(&rmixl_intr_lock); 865 } 866 867 void 868 evbmips_iointr(int ipl, uint32_t pending, struct clockframe *cf) 869 { 870 struct rmixl_cpu_softc *sc = (void *)curcpu()->ci_softc; 871 872 DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n", 873 __func__, cpu_number(), ipl, cf->pc, pending)); 874 875 /* 876 * 'pending' arg is a summary that there is something to do 877 * the real pending status is obtained from EIRR 878 */ 879 KASSERT(pending == MIPS_INT_MASK_1); 880 881 for (;;) { 882 rmixl_intrhand_t *ih; 883 uint64_t eirr; 884 uint64_t eimr; 885 uint64_t vecbit; 886 int vec; 887 888 asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr)); 889 asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr)); 890 891 #ifdef IOINTR_DEBUG 892 printf("%s: cpu%u: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n", 893 __func__, cpu_number(), eirr, eimr, ipl_eimr_map[ipl-1]); 894 #endif /* IOINTR_DEBUG */ 895 896 /* 897 * reduce eirr to 898 * - ints that are 
enabled at or below this ipl 899 * - exclude count/compare clock and soft ints 900 * they are handled elsewhere 901 */ 902 eirr &= ipl_eimr_map[ipl-1]; 903 eirr &= ~ipl_eimr_map[ipl]; 904 eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8); 905 if (eirr == 0) 906 break; 907 908 vec = 63 - dclz(eirr); 909 ih = &rmixl_intrhand[vec]; 910 vecbit = 1ULL << vec; 911 KASSERT (ih->ih_ipl == ipl); 912 KASSERT ((vecbit & eimr) == 0); 913 KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0); 914 915 /* 916 * ack in EIRR, and in PIC if needed, 917 * the irq we are about to handle 918 */ 919 rmixl_eirr_ack(eimr, vecbit, RMIXL_EIRR_PRESERVE_MASK); 920 if (RMIXL_VECTOR_IS_IRT(vec)) 921 RMIXL_PICREG_WRITE(RMIXL_PIC_INTRACK, 922 1 << RMIXL_VECTOR_IRT(vec)); 923 924 if (ih->ih_func != NULL) { 925 #ifdef MULTIPROCESSOR 926 if (ih->ih_mpsafe) { 927 (void)(*ih->ih_func)(ih->ih_arg); 928 } else { 929 KASSERTMSG(ipl == IPL_VM, 930 "%s: %s: ipl (%d) != IPL_VM for KERNEL_LOCK", 931 __func__, sc->sc_vec_evcnts[vec].ev_name, 932 ipl); 933 KERNEL_LOCK(1, NULL); 934 (void)(*ih->ih_func)(ih->ih_arg); 935 KERNEL_UNLOCK_ONE(NULL); 936 } 937 #else 938 (void)(*ih->ih_func)(ih->ih_arg); 939 #endif /* MULTIPROCESSOR */ 940 } 941 KASSERT(ipl == ih->ih_ipl); 942 KASSERTMSG(curcpu()->ci_cpl >= ipl, 943 "%s: after %s: cpl (%d) < ipl %d", 944 __func__, sc->sc_vec_evcnts[vec].ev_name, 945 ipl, curcpu()->ci_cpl); 946 sc->sc_vec_evcnts[vec].ev_count++; 947 } 948 } 949 950 #ifdef MULTIPROCESSOR 951 static int 952 rmixl_send_ipi(struct cpu_info *ci, int tag) 953 { 954 const cpuid_t cpuid = ci->ci_cpuid; 955 uint32_t core = (uint32_t)(cpuid >> 2); 956 uint32_t thread = (uint32_t)(cpuid & __BITS(1,0)); 957 uint64_t req = 1 << tag; 958 uint32_t r; 959 960 if (!kcpuset_isset(cpus_running, cpu_index(ci))) 961 return -1; 962 963 KASSERT((tag >= 0) && (tag < NIPIS)); 964 965 r = (thread << RMIXL_PIC_IPIBASE_ID_THREAD_SHIFT) 966 | (core << RMIXL_PIC_IPIBASE_ID_CORE_SHIFT) 967 | (RMIXL_INTRVEC_IPI + tag); 968 969 
mutex_enter(&rmixl_ipi_lock); 970 atomic_or_64(&ci->ci_request_ipis, req); 971 RMIXL_PICREG_WRITE(RMIXL_PIC_IPIBASE, r); 972 mutex_exit(&rmixl_ipi_lock); 973 974 return 0; 975 } 976 977 static int 978 rmixl_ipi_intr(void *arg) 979 { 980 struct cpu_info * const ci = curcpu(); 981 const uint64_t ipi_mask = 1ULL << (uintptr_t)arg; 982 983 KASSERT(ci->ci_cpl >= IPL_SCHED); 984 KASSERT((uintptr_t)arg < NIPIS); 985 986 /* if the request is clear, it was previously processed */ 987 if ((ci->ci_request_ipis & ipi_mask) == 0) 988 return 0; 989 990 atomic_or_64(&ci->ci_active_ipis, ipi_mask); 991 atomic_and_64(&ci->ci_request_ipis, ~ipi_mask); 992 993 ipi_process(ci, ipi_mask); 994 995 atomic_and_64(&ci->ci_active_ipis, ~ipi_mask); 996 997 return 1; 998 } 999 #endif /* MULTIPROCESSOR */ 1000 1001 #if defined(DIAGNOSTIC) || defined(IOINTR_DEBUG) || defined(DDB) 1002 int 1003 rmixl_intrhand_print_subr(int vec) 1004 { 1005 rmixl_intrhand_t *ih = &rmixl_intrhand[vec]; 1006 printf("vec %d: func %p, arg %p, vec %d, ipl %d, mask %#x\n", 1007 vec, ih->ih_func, ih->ih_arg, ih->ih_vec, ih->ih_ipl, 1008 ih->ih_cpumask); 1009 return 0; 1010 } 1011 int 1012 rmixl_intrhand_print(void) 1013 { 1014 for (int vec=0; vec < NINTRVECS ; vec++) 1015 rmixl_intrhand_print_subr(vec); 1016 return 0; 1017 } 1018 1019 static inline void 1020 rmixl_irt_entry_print(u_int irt) 1021 { 1022 uint32_t c0, c1; 1023 1024 if ((irt < 0) || (irt > NIRTS)) 1025 return; 1026 c0 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC0(irt)); 1027 c1 = RMIXL_PICREG_READ(RMIXL_PIC_IRTENTRYC1(irt)); 1028 printf("irt[%d]: %#x, %#x\n", irt, c0, c1); 1029 } 1030 1031 int 1032 rmixl_irt_print(void) 1033 { 1034 printf("%s:\n", __func__); 1035 for (int irt=0; irt < NIRTS ; irt++) 1036 rmixl_irt_entry_print(irt); 1037 return 0; 1038 } 1039 1040 void 1041 rmixl_ipl_eimr_map_print(void) 1042 { 1043 printf("IPL_NONE=%d, mask %#"PRIx64"\n", 1044 IPL_NONE, ipl_eimr_map[IPL_NONE]); 1045 printf("IPL_SOFTCLOCK=%d, mask %#"PRIx64"\n", 1046 
IPL_SOFTCLOCK, ipl_eimr_map[IPL_SOFTCLOCK]); 1047 printf("IPL_SOFTNET=%d, mask %#"PRIx64"\n", 1048 IPL_SOFTNET, ipl_eimr_map[IPL_SOFTNET]); 1049 printf("IPL_VM=%d, mask %#"PRIx64"\n", 1050 IPL_VM, ipl_eimr_map[IPL_VM]); 1051 printf("IPL_SCHED=%d, mask %#"PRIx64"\n", 1052 IPL_SCHED, ipl_eimr_map[IPL_SCHED]); 1053 printf("IPL_DDB=%d, mask %#"PRIx64"\n", 1054 IPL_DDB, ipl_eimr_map[IPL_DDB]); 1055 printf("IPL_HIGH=%d, mask %#"PRIx64"\n", 1056 IPL_HIGH, ipl_eimr_map[IPL_HIGH]); 1057 } 1058 1059 #endif 1060