1 /* $NetBSD: e500_intr.c,v 1.49 2024/09/15 19:08:34 andvar Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #define __INTR_PRIVATE 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: e500_intr.c,v 1.49 2024/09/15 19:08:34 andvar Exp $"); 41 42 #ifdef _KERNEL_OPT 43 #include "opt_mpc85xx.h" 44 #include "opt_multiprocessor.h" 45 #endif 46 47 #include <sys/param.h> 48 #include <sys/proc.h> 49 #include <sys/intr.h> 50 #include <sys/cpu.h> 51 #include <sys/kmem.h> 52 #include <sys/atomic.h> 53 #include <sys/bus.h> 54 #include <sys/xcall.h> 55 #include <sys/ipi.h> 56 #include <sys/bitops.h> 57 #include <sys/interrupt.h> 58 #include <sys/systm.h> 59 60 #include <uvm/uvm_extern.h> 61 62 #ifdef __HAVE_FAST_SOFTINTS 63 #include <powerpc/softint.h> 64 #endif 65 66 #include <powerpc/spr.h> 67 #include <powerpc/booke/spr.h> 68 69 #include <powerpc/booke/cpuvar.h> 70 #include <powerpc/booke/e500reg.h> 71 #include <powerpc/booke/e500var.h> 72 #include <powerpc/booke/openpicreg.h> 73 74 #define IPL2CTPR(ipl) ((ipl) + 15 - IPL_HIGH) 75 #define CTPR2IPL(ctpr) ((ctpr) - (15 - IPL_HIGH)) 76 77 #define IST_PERCPU_P(ist) ((ist) >= IST_TIMER) 78 79 struct e500_intr_irq_info { 80 bus_addr_t irq_vpr; 81 bus_addr_t irq_dr; 82 u_int irq_vector; 83 }; 84 85 struct intr_source { 86 int (*is_func)(void *); 87 void *is_arg; 88 int8_t is_ipl; 89 uint8_t is_ist; 90 uint8_t is_irq; 91 uint8_t is_refcnt; 92 bus_size_t is_vpr; 93 bus_size_t is_dr; 94 char is_source[INTRIDBUF]; 95 char is_xname[INTRDEVNAMEBUF]; 96 }; 97 98 #define INTR_SOURCE_INITIALIZER \ 99 { .is_func = e500_intr_spurious, .is_arg = NULL, \ 100 .is_irq = -1, .is_ipl = IPL_NONE, .is_ist = IST_NONE, \ 101 .is_source = "", .is_xname = "", } 102 103 struct e500_intr_name { 104 uint8_t in_irq; 105 const char in_name[15]; 106 }; 107 108 static const struct e500_intr_name e500_onchip_intr_names[] = { 109 { ISOURCE_L2, "l2" }, 110 { ISOURCE_ECM, "ecm" }, 111 { ISOURCE_DDR, "ddr" }, 112 { ISOURCE_LBC, "lbc" }, 113 { ISOURCE_DMA_CHAN1, "dma-chan1" }, 114 { ISOURCE_DMA_CHAN2, "dma-chan2" }, 115 { ISOURCE_DMA_CHAN3, "dma-chan3" }, 116 { ISOURCE_DMA_CHAN4, "dma-chan4" }, 117 { ISOURCE_PCI1, "pci1" }, 118 { ISOURCE_PCIEX2, "pcie2" }, 119 { ISOURCE_PCIEX , "pcie1" }, 120 { ISOURCE_PCIEX3, "pcie3" }, 121 { ISOURCE_USB1, "usb1" }, 122 { ISOURCE_ETSEC1_TX, "etsec1-tx" }, 123 { ISOURCE_ETSEC1_RX, "etsec1-rx" }, 124 { ISOURCE_ETSEC3_TX, "etsec3-tx" }, 125 { ISOURCE_ETSEC3_RX, "etsec3-rx" }, 126 { ISOURCE_ETSEC3_ERR, "etsec3-err" }, 127 { ISOURCE_ETSEC1_ERR, "etsec1-err" }, 128 { ISOURCE_ETSEC2_TX, "etsec2-tx" }, 129 { ISOURCE_ETSEC2_RX, "etsec2-rx" }, 130 { ISOURCE_ETSEC4_TX, "etsec4-tx" }, 131 { ISOURCE_ETSEC4_RX, "etsec4-rx" }, 132 { ISOURCE_ETSEC4_ERR, "etsec4-err" }, 133 { ISOURCE_ETSEC2_ERR, "etsec2-err" }, 134 { ISOURCE_DUART, "duart" }, 135 { ISOURCE_I2C, "i2c" }, 136 { ISOURCE_PERFMON, "perfmon" }, 137 { ISOURCE_SECURITY1, "sec1" }, 138 { ISOURCE_GPIO, "gpio" }, 139 { ISOURCE_SRIO_EWPU, "srio-ewpu" }, 140 { ISOURCE_SRIO_ODBELL, "srio-odbell" }, 141 { ISOURCE_SRIO_IDBELL, "srio-idbell" }, 142 { ISOURCE_SRIO_OMU1, "srio-omu1" }, 143 { ISOURCE_SRIO_IMU1, "srio-imu1" }, 144 { ISOURCE_SRIO_OMU2, "srio-omu2" }, 145 { ISOURCE_SRIO_IMU2, "srio-imu2" }, 146 { ISOURCE_SECURITY2, "sec2" }, 147 { ISOURCE_SPI, "spi" }, 148 { ISOURCE_ETSEC1_PTP, "etsec1-ptp" }, 149 { ISOURCE_ETSEC2_PTP, "etsec2-ptp" }, 150 { ISOURCE_ETSEC3_PTP, "etsec3-ptp" }, 151 { ISOURCE_ETSEC4_PTP, "etsec4-ptp" }, 152 { ISOURCE_ESDHC, "esdhc" }, 153 { 0, "" }, 154 }; 155 156 const struct e500_intr_name default_external_intr_names[] = { 157 { 0, "" }, 158 }; 159 160 static const struct e500_intr_name e500_msigroup_intr_names[] = { 161 { 0, "msigroup0" }, 162 { 1, "msigroup1" }, 163 { 2, "msigroup2" }, 164 { 3, "msigroup3" }, 165 { 4, "msigroup4" }, 166 { 5, "msigroup5" }, 167 { 6, "msigroup6" }, 168 { 7, "msigroup7" }, 169 { 0, "" }, 170 }; 171 172 static const struct e500_intr_name e500_timer_intr_names[] = { 173 { 0, "timer0" }, 174 { 1, "timer1" }, 175 { 2, "timer2" }, 176 { 3, "timer3" }, 177 { 0, "" }, 178 }; 179 180 static const struct e500_intr_name e500_ipi_intr_names[] = { 181 { 0, "ipi0" }, 182 { 1, "ipi1" }, 183 { 2, "ipi2" }, 184 { 3, "ipi3" }, 185 { 0, "" }, 186 }; 187 188 static const struct e500_intr_name e500_mi_intr_names[] = { 189 { 0, "mi0" }, 190 { 1, "mi1" }, 191 { 2, "mi2" }, 192 { 3, "mi3" }, 193 { 0, "" }, 194 }; 195 196 struct e500_intr_info { 197 u_int ii_external_sources; 198 uint32_t ii_onchip_bitmap[2]; 199 u_int ii_onchip_sources; 200 u_int ii_msigroup_sources; 201 u_int ii_ipi_sources; /* per-cpu */ 202 u_int ii_timer_sources; /* per-cpu */ 203 u_int ii_mi_sources; /* per-cpu */ 204 u_int ii_percpu_sources; 205 const struct e500_intr_name *ii_external_intr_names; 206 const struct e500_intr_name *ii_onchip_intr_names; 207 u_int8_t ii_ist_vectors[IST_MAX+1]; 208 }; 209 210 static kmutex_t e500_intr_lock __cacheline_aligned; 211 static struct e500_intr_info e500_intr_info; 212 213 #define INTR_INFO_DECL(lc_chip, UC_CHIP) \ 214 static const struct e500_intr_info lc_chip##_intr_info = { \ 215 .ii_external_sources = UC_CHIP ## _EXTERNALSOURCES, \ 216 .ii_onchip_bitmap = UC_CHIP ## _ONCHIPBITMAP, \ 217 .ii_onchip_sources = UC_CHIP ## _ONCHIPSOURCES, \ 218 .ii_msigroup_sources = UC_CHIP ## _MSIGROUPSOURCES, \ 219 .ii_timer_sources = UC_CHIP ## _TIMERSOURCES, \ 220 .ii_ipi_sources = UC_CHIP ## _IPISOURCES, \ 221 .ii_mi_sources = UC_CHIP ## _MISOURCES, \ 222 .ii_percpu_sources = UC_CHIP ## _TIMERSOURCES \ 223 + UC_CHIP ## _IPISOURCES + UC_CHIP ## _MISOURCES, \ 224 .ii_external_intr_names = lc_chip ## _external_intr_names, \ 225 .ii_onchip_intr_names = lc_chip ## _onchip_intr_names, \ 226 .ii_ist_vectors = { \ 227 [IST_NONE] = ~0, \ 228 [IST_EDGE] = 0, \ 229 [IST_LEVEL_LOW] = 0, \ 230 [IST_LEVEL_HIGH] = 0, \ 231 [IST_PULSE] = 0, \ 232 [IST_ONCHIP] = UC_CHIP ## _EXTERNALSOURCES, \ 233 [IST_MSIGROUP] = UC_CHIP ## _EXTERNALSOURCES \ 234 + UC_CHIP ## _ONCHIPSOURCES, \ 235 [IST_TIMER] = UC_CHIP ## _EXTERNALSOURCES \ 236 + UC_CHIP ## _ONCHIPSOURCES \ 237 + UC_CHIP ## _MSIGROUPSOURCES, \ 238 [IST_IPI] = UC_CHIP ## _EXTERNALSOURCES \ 239 + UC_CHIP ## _ONCHIPSOURCES \ 240 + UC_CHIP ## _MSIGROUPSOURCES \ 241 + UC_CHIP ## _TIMERSOURCES, \ 242 [IST_MI] = UC_CHIP ## _EXTERNALSOURCES \ 243 + UC_CHIP ## _ONCHIPSOURCES \ 244 + UC_CHIP ## _MSIGROUPSOURCES \ 245 + UC_CHIP ## _TIMERSOURCES \ 246 + UC_CHIP ## _IPISOURCES, \ 247 [IST_MAX] = UC_CHIP ## _EXTERNALSOURCES \ 248 + UC_CHIP ## _ONCHIPSOURCES \ 249 + UC_CHIP ## _MSIGROUPSOURCES \ 250 + UC_CHIP ## _TIMERSOURCES \ 251 + UC_CHIP ## _IPISOURCES \ 252 + UC_CHIP ## _MISOURCES, \ 253 }, \ 254 } 255 256 #ifdef MPC8536 257 #define mpc8536_external_intr_names default_external_intr_names 258 const struct e500_intr_name mpc8536_onchip_intr_names[] = { 259 { ISOURCE_SATA2, "sata2" }, 260 { ISOURCE_USB2, "usb2" }, 261 { ISOURCE_USB3, "usb3" }, 262 { ISOURCE_SATA1, "sata1" }, 263 { 0, "" }, 264 }; 265 266 INTR_INFO_DECL(mpc8536, MPC8536); 267 #endif 268 269 #ifdef MPC8544 270 #define mpc8544_external_intr_names default_external_intr_names 271 const struct e500_intr_name mpc8544_onchip_intr_names[] = { 272 { 0, "" }, 273 }; 274 275 INTR_INFO_DECL(mpc8544, MPC8544); 276 #endif 277 #ifdef MPC8548 278 #define mpc8548_external_intr_names default_external_intr_names 279 const struct e500_intr_name mpc8548_onchip_intr_names[] = { 280 { ISOURCE_PCI1, "pci1" }, 281 { ISOURCE_PCI2, "pci2" }, 282 { 0, "" }, 283 }; 284 285 INTR_INFO_DECL(mpc8548, MPC8548); 286 #endif 287 #ifdef MPC8555 288 #define mpc8555_external_intr_names default_external_intr_names 289 const struct e500_intr_name mpc8555_onchip_intr_names[] = { 290 { ISOURCE_PCI2, "pci2" }, 291 { ISOURCE_CPM, "CPM" }, 292 { 0, "" }, 293 }; 294 295 INTR_INFO_DECL(mpc8555, MPC8555); 296 #endif 297 #ifdef MPC8568 298 #define mpc8568_external_intr_names default_external_intr_names 299 const struct e500_intr_name mpc8568_onchip_intr_names[] = { 300 { ISOURCE_QEB_LOW, "QEB low" }, 301 { ISOURCE_QEB_PORT, "QEB port" }, 302 { ISOURCE_QEB_IECC, "QEB iram ecc" }, 303 { ISOURCE_QEB_MUECC, "QEB ram ecc" }, 304 { ISOURCE_TLU1, "tlu1" }, 305 { ISOURCE_QEB_HIGH, "QEB high" }, 306 { 0, "" }, 307 }; 308 309 INTR_INFO_DECL(mpc8568, MPC8568); 310 #endif 311 #ifdef MPC8572 312 #define mpc8572_external_intr_names default_external_intr_names 313 const struct e500_intr_name mpc8572_onchip_intr_names[] = { 314 { ISOURCE_PCIEX3_MPC8572, "pcie3" }, 315 { ISOURCE_FEC, "fec" }, 316 { ISOURCE_PME_GENERAL, "pme" }, 317 { ISOURCE_TLU1, "tlu1" }, 318 { ISOURCE_TLU2, "tlu2" }, 319 { ISOURCE_PME_CHAN1, "pme-chan1" }, 320 { ISOURCE_PME_CHAN2, "pme-chan2" }, 321 { ISOURCE_PME_CHAN3, "pme-chan3" }, 322 { ISOURCE_PME_CHAN4, "pme-chan4" }, 323 { ISOURCE_DMA2_CHAN1, "dma2-chan1" }, 324 { ISOURCE_DMA2_CHAN2, "dma2-chan2" }, 325 { ISOURCE_DMA2_CHAN3, "dma2-chan3" }, 326 { ISOURCE_DMA2_CHAN4, "dma2-chan4" }, 327 { 0, "" }, 328 }; 329 330 INTR_INFO_DECL(mpc8572, MPC8572); 331 #endif 332 333 #ifdef P1025 334 #define p1025_external_intr_names default_external_intr_names 335 const struct e500_intr_name p1025_onchip_intr_names[] = { 336 { ISOURCE_PCIEX3_MPC8572, "pcie3" }, 337 { ISOURCE_ETSEC1_G1_TX, "etsec1-g1-tx" }, 338 { ISOURCE_ETSEC1_G1_RX, "etsec1-g1-rx" }, 339 { ISOURCE_ETSEC1_G1_ERR, "etsec1-g1-error" }, 340 { ISOURCE_ETSEC2_G1_TX, "etsec2-g1-tx" }, 341 { ISOURCE_ETSEC2_G1_RX, "etsec2-g1-rx" }, 342 { ISOURCE_ETSEC2_G1_ERR, "etsec2-g1-error" }, 343 { ISOURCE_ETSEC3_G1_TX, "etsec3-g1-tx" }, 344 { ISOURCE_ETSEC3_G1_RX, "etsec3-g1-rx" }, 345 { ISOURCE_ETSEC3_G1_ERR, "etsec3-g1-error" }, 346 { ISOURCE_QEB_MUECC, "qeb-low" }, 347 { ISOURCE_QEB_HIGH, "qeb-crit" }, 348 { ISOURCE_DMA2_CHAN1, "dma2-chan1" }, 349 { ISOURCE_DMA2_CHAN2, "dma2-chan2" }, 350 { ISOURCE_DMA2_CHAN3, "dma2-chan3" }, 351 { ISOURCE_DMA2_CHAN4, "dma2-chan4" }, 352 { 0, "" }, 353 }; 354 355 INTR_INFO_DECL(p1025, P1025); 356 #endif 357 358 #ifdef P2020 359 #define p20x0_external_intr_names default_external_intr_names 360 const struct e500_intr_name p20x0_onchip_intr_names[] = { 361 { ISOURCE_PCIEX3_MPC8572, "pcie3" }, 362 { ISOURCE_DMA2_CHAN1, "dma2-chan1" }, 363 { ISOURCE_DMA2_CHAN2, "dma2-chan2" }, 364 { ISOURCE_DMA2_CHAN3, "dma2-chan3" }, 365 { ISOURCE_DMA2_CHAN4, "dma2-chan4" }, 366 { 0, "" }, 367 }; 368 369 INTR_INFO_DECL(p20x0, P20x0); 370 #endif 371 372 #ifdef P1023 373 #define p1023_external_intr_names default_external_intr_names 374 const struct e500_intr_name p1023_onchip_intr_names[] = { 375 { ISOURCE_FMAN, "fman" }, 376 { ISOURCE_MDIO, "mdio" }, 377 { ISOURCE_QMAN0, "qman0" }, 378 { ISOURCE_BMAN0, "bman0" }, 379 { ISOURCE_QMAN1, "qman1" }, 380 { ISOURCE_BMAN1, "bman1" }, 381 { ISOURCE_QMAN2, "qman2" }, 382 { ISOURCE_BMAN2, "bman2" }, 383 { ISOURCE_SECURITY2_P1023, "sec2" }, 384 { ISOURCE_SEC_GENERAL, "sec-general" }, 385 { ISOURCE_DMA2_CHAN1, "dma2-chan1" }, 386 { ISOURCE_DMA2_CHAN2, "dma2-chan2" }, 387 { ISOURCE_DMA2_CHAN3, "dma2-chan3" }, 388 { ISOURCE_DMA2_CHAN4, "dma2-chan4" }, 389 { 0, "" }, 390 }; 391 392 INTR_INFO_DECL(p1023, P1023); 393 #endif 394 395 static const char ist_names[][12] = { 396 [IST_NONE] = "none", 397 [IST_EDGE] = "edge", 398 [IST_LEVEL_LOW] = "level-", 399 [IST_LEVEL_HIGH] = "level+", 400 [IST_PULSE] = "pulse", 401 [IST_MSI] = "msi", 402 [IST_ONCHIP] = "onchip", 403 [IST_MSIGROUP] = "msigroup", 404 [IST_TIMER] = "timer", 405 [IST_IPI] = "ipi", 406 [IST_MI] = "msgint", 407 }; 408 409 static struct intr_source *e500_intr_sources; 410 static const struct intr_source *e500_intr_last_source; 411 412 static void *e500_intr_establish(int, int, int, int (*)(void *), void *, 413 const char *); 414 static void e500_intr_disestablish(void *); 415 static void e500_intr_cpu_attach(struct cpu_info *ci); 416 static void e500_intr_cpu_hatch(struct cpu_info *ci); 417 static void e500_intr_cpu_send_ipi(cpuid_t, uintptr_t); 418 static void e500_intr_init(void); 419 static void e500_intr_init_precpu(void); 420 static const char *e500_intr_string(int, int, char *, size_t); 421 static const char *e500_intr_typename(int); 422 static void e500_critintr(struct trapframe *tf); 423 static void e500_decrintr(struct trapframe *tf); 424 static void e500_extintr(struct trapframe *tf); 425 static void e500_fitintr(struct trapframe *tf); 426 static void e500_wdogintr(struct trapframe *tf); 427 static void e500_spl0(void); 428 static int e500_splraise(int); 429 static void e500_splx(int); 430 static const char *e500_intr_all_name_lookup(int, int); 431 432 const struct intrsw e500_intrsw = { 433 .intrsw_establish = e500_intr_establish, 434 .intrsw_disestablish = e500_intr_disestablish, 435 .intrsw_init = e500_intr_init, 436 .intrsw_cpu_attach = e500_intr_cpu_attach, 437 .intrsw_cpu_hatch = e500_intr_cpu_hatch, 438 .intrsw_cpu_send_ipi = e500_intr_cpu_send_ipi, 439 .intrsw_string = e500_intr_string, 440 .intrsw_typename = e500_intr_typename, 441 442 .intrsw_critintr = e500_critintr, 443 .intrsw_decrintr = e500_decrintr, 444 .intrsw_extintr = e500_extintr, 445 .intrsw_fitintr = e500_fitintr, 446 .intrsw_wdogintr = e500_wdogintr, 447 448 .intrsw_splraise = e500_splraise, 449 .intrsw_splx = e500_splx, 450 .intrsw_spl0 = e500_spl0, 451 452 #ifdef __HAVE_FAST_SOFTINTS 453 .intrsw_softint_init_md = powerpc_softint_init_md, 454 .intrsw_softint_trigger = powerpc_softint_trigger, 455 #endif 456 }; 457 458 static bool wdog_barked; 459 460 static inline uint32_t 461 openpic_read(struct cpu_softc *cpu, bus_size_t offset) 462 { 463 464 return bus_space_read_4(cpu->cpu_bst, cpu->cpu_bsh, 465 OPENPIC_BASE + offset); 466 } 467 468 static inline void 469 openpic_write(struct cpu_softc *cpu, bus_size_t offset, uint32_t val) 470 { 471 472 return bus_space_write_4(cpu->cpu_bst, cpu->cpu_bsh, 473 OPENPIC_BASE + offset, val); 474 } 475 476 static const char * 477 e500_intr_external_name_lookup(int irq) 478 { 479 prop_array_t extirqs = board_info_get_object("external-irqs"); 480 prop_string_t irqname = prop_array_get(extirqs, irq); 481 KASSERT(irqname != NULL); 482 KASSERT(prop_object_type(irqname) == PROP_TYPE_STRING); 483 484 return prop_string_value(irqname); 485 } 486 487 static const char * 488 e500_intr_name_lookup(const struct e500_intr_name *names, int irq) 489 { 490 for (; names->in_name[0] != '\0'; names++) { 491 if (names->in_irq == irq) 492 return names->in_name; 493 } 494 495 return NULL; 496 } 497 498 static const char * 499 e500_intr_onchip_name_lookup(int irq) 500 { 501 const char *name; 502 503 name = e500_intr_name_lookup(e500_intr_info.ii_onchip_intr_names, irq); 504 if (name == NULL) 505 name = e500_intr_name_lookup(e500_onchip_intr_names, irq); 506 507 return name; 508 } 509 510 static inline void 511 e500_splset(struct cpu_info *ci, int ipl) 512 { 513 struct cpu_softc * const cpu = ci->ci_softc; 514 515 #ifdef __HAVE_FAST_SOFTINTS /* XXX */ 516 KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE); 517 #endif 518 const u_int ctpr = IPL2CTPR(ipl); 519 KASSERT(openpic_read(cpu, OPENPIC_CTPR) == IPL2CTPR(ci->ci_cpl)); 520 openpic_write(cpu, OPENPIC_CTPR, ctpr); 521 KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ctpr); 522 #ifdef DIAGNOSTIC 523 cpu->cpu_spl_tb[ipl][ci->ci_cpl] = mftb(); 524 #endif 525 ci->ci_cpl = ipl; 526 } 527 528 static void 529 e500_spl0(void) 530 { 531 wrtee(0); 532 533 struct cpu_info * const ci = curcpu(); 534 535 #ifdef __HAVE_FAST_SOFTINTS 536 if (__predict_false(ci->ci_data.cpu_softints != 0)) { 537 e500_splset(ci, IPL_HIGH); 538 wrtee(PSL_EE); 539 powerpc_softint(ci, IPL_NONE, 540 (vaddr_t)__builtin_return_address(0)); 541 wrtee(0); 542 } 543 #endif /* __HAVE_FAST_SOFTINTS */ 544 e500_splset(ci, IPL_NONE); 545 546 wrtee(PSL_EE); 547 } 548 549 static void 550 e500_splx(int ipl) 551 { 552 struct cpu_info * const ci = curcpu(); 553 const int old_ipl = ci->ci_cpl; 554 555 /* if we panicked because of watchdog, PSL_CE will be clear. */ 556 KASSERT(wdog_barked || (mfmsr() & PSL_CE)); 557 558 if (ipl == old_ipl) 559 return; 560 561 if (__predict_false(ipl > old_ipl)) { 562 printf("%s: %p: cpl=%u: ignoring splx(%u) to raise ipl\n", 563 __func__, __builtin_return_address(0), old_ipl, ipl); 564 if (old_ipl == IPL_NONE) 565 console_debugger(); 566 } 567 568 // const 569 register_t msr = wrtee(0); 570 #ifdef __HAVE_FAST_SOFTINTS 571 const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << ipl); 572 if (__predict_false(softints != 0)) { 573 e500_splset(ci, IPL_HIGH); 574 wrtee(msr); 575 powerpc_softint(ci, ipl, 576 (vaddr_t)__builtin_return_address(0)); 577 wrtee(0); 578 } 579 #endif /* __HAVE_FAST_SOFTINTS */ 580 e500_splset(ci, ipl); 581 #if 1 582 if (ipl < IPL_VM && old_ipl >= IPL_VM) 583 msr = PSL_EE; 584 #endif 585 wrtee(msr); 586 } 587 588 static int 589 e500_splraise(int ipl) 590 { 591 struct cpu_info * const ci = curcpu(); 592 const int old_ipl = ci->ci_cpl; 593 594 /* if we panicked because of watchdog, PSL_CE will be clear. */ 595 KASSERT(wdog_barked || (mfmsr() & PSL_CE)); 596 597 if (old_ipl < ipl) { 598 //const 599 register_t msr = wrtee(0); 600 e500_splset(ci, ipl); 601 #if 0 602 if (old_ipl < IPL_VM && ipl >= IPL_VM) 603 msr = 0; 604 #endif 605 wrtee(msr); 606 } 607 #if 0 608 else if (ipl == IPL_NONE) { 609 panic("%s: %p: cpl=%u: attempt to splraise(IPL_NONE)", 610 __func__, __builtin_return_address(0), old_ipl); 611 } else if (old_ipl > ipl) { 612 printf("%s: %p: cpl=%u: ignoring splraise(%u) to lower ipl\n", 613 __func__, __builtin_return_address(0), old_ipl, ipl); 614 } 615 #endif 616 617 return old_ipl; 618 } 619 620 static int 621 e500_intr_spurious(void *arg) 622 { 623 return 0; 624 } 625 626 static bool 627 e500_intr_irq_info_get(struct cpu_info *ci, u_int irq, int ipl, int ist, 628 struct e500_intr_irq_info *ii) 629 { 630 const struct e500_intr_info * const info = &e500_intr_info; 631 bool ok; 632 633 #if DEBUG > 2 634 printf("%s(%p,irq=%u,ipl=%u,ist=%u,%p)\n", __func__, ci, irq, ipl, ist, ii); 635 #endif 636 637 if (ipl < IPL_VM || ipl > IPL_HIGH) { 638 #if DEBUG > 2 639 printf("%s:%d ipl=%u\n", __func__, __LINE__, ipl); 640 #endif 641 return false; 642 } 643 644 if (ist <= IST_NONE || ist >= IST_MAX) { 645 #if DEBUG > 2 646 printf("%s:%d ist=%u\n", __func__, __LINE__, ist); 647 #endif 648 return false; 649 } 650 651 ii->irq_vector = irq + info->ii_ist_vectors[ist]; 652 if (IST_PERCPU_P(ist) && ist != IST_IPI) 653 ii->irq_vector += ci->ci_cpuid * info->ii_percpu_sources; 654 655 switch (ist) { 656 default: 657 ii->irq_vpr = OPENPIC_EIVPR(irq); 658 ii->irq_dr = OPENPIC_EIDR(irq); 659 ok = irq < info->ii_external_sources 660 && (ist == IST_EDGE 661 || ist == IST_LEVEL_LOW 662 || ist == IST_LEVEL_HIGH); 663 break; 664 case IST_PULSE: 665 ok = false; 666 break; 667 case IST_ONCHIP: 668 ii->irq_vpr = OPENPIC_IIVPR(irq); 669 ii->irq_dr = OPENPIC_IIDR(irq); 670 ok = irq < 32 * __arraycount(info->ii_onchip_bitmap); 671 #if DEBUG > 2 672 printf("%s: irq=%u: ok=%u\n", __func__, irq, ok); 673 #endif 674 ok = ok && (info->ii_onchip_bitmap[irq/32] & (1 << (irq & 31))); 675 #if DEBUG > 2 676 printf("%s: %08x%08x -> %08x%08x: ok=%u\n", __func__, 677 irq < 32 ? 0 : (1 << irq), irq < 32 ? (1 << irq) : 0, 678 info->ii_onchip_bitmap[1], info->ii_onchip_bitmap[0], 679 ok); 680 #endif 681 break; 682 case IST_MSIGROUP: 683 ii->irq_vpr = OPENPIC_MSIVPR(irq); 684 ii->irq_dr = OPENPIC_MSIDR(irq); 685 ok = irq < info->ii_msigroup_sources 686 && ipl == IPL_VM; 687 break; 688 case IST_TIMER: 689 ii->irq_vpr = OPENPIC_GTVPR(ci->ci_cpuid, irq); 690 ii->irq_dr = OPENPIC_GTDR(ci->ci_cpuid, irq); 691 ok = irq < info->ii_timer_sources; 692 #if DEBUG > 2 693 printf("%s: IST_TIMER irq=%u: ok=%u\n", __func__, irq, ok); 694 #endif 695 break; 696 case IST_IPI: 697 ii->irq_vpr = OPENPIC_IPIVPR(irq); 698 ii->irq_dr = OPENPIC_IPIDR(irq); 699 ok = irq < info->ii_ipi_sources; 700 break; 701 case IST_MI: 702 ii->irq_vpr = OPENPIC_MIVPR(irq); 703 ii->irq_dr = OPENPIC_MIDR(irq); 704 ok = irq < info->ii_mi_sources; 705 break; 706 } 707 708 return ok; 709 } 710 711 static const char * 712 e500_intr_string(int irq, int ist, char *buf, size_t len) 713 { 714 struct cpu_info * const ci = curcpu(); 715 struct cpu_softc * const cpu = ci->ci_softc; 716 struct e500_intr_irq_info ii; 717 718 if (!e500_intr_irq_info_get(ci, irq, IPL_VM, ist, &ii)) 719 return NULL; 720 721 strlcpy(buf, cpu->cpu_evcnt_intrs[ii.irq_vector].ev_name, len); 722 return buf; 723 } 724 725 __CTASSERT(__arraycount(ist_names) == IST_MAX); 726 727 static const char * 728 e500_intr_typename(int ist) 729 { 730 if (IST_NONE <= ist && ist < IST_MAX) 731 return ist_names[ist]; 732 733 return NULL; 734 } 735 736 static void * 737 e500_intr_cpu_establish(struct cpu_info *ci, int irq, int ipl, int ist, 738 int (*handler)(void *), void *arg, const char *xname) 739 { 740 struct cpu_softc * const cpu = ci->ci_softc; 741 struct e500_intr_irq_info ii; 742 743 KASSERT(ipl >= IPL_VM && ipl <= IPL_HIGH); 744 KASSERT(ist > IST_NONE && ist < IST_MAX && ist != IST_MSI); 745 746 if (!e500_intr_irq_info_get(ci, irq, ipl, ist, &ii)) { 747 printf("%s: e500_intr_irq_info_get(%p,%u,%u,%u,%p) failed\n", 748 __func__, ci, irq, ipl, ist, &ii); 749 return NULL; 750 } 751 752 if (xname == NULL) { 753 xname = e500_intr_all_name_lookup(irq, ist); 754 if (xname == NULL) 755 xname = "unknown"; 756 } 757 758 struct intr_source * const is = &e500_intr_sources[ii.irq_vector]; 759 mutex_enter(&e500_intr_lock); 760 if (is->is_ipl != IPL_NONE) { 761 /* XXX IPI0 is shared by all CPU. */ 762 if (is->is_ist != IST_IPI || 763 is->is_irq != irq || 764 is->is_ipl != ipl || 765 is->is_ist != ist || 766 is->is_func != handler || 767 is->is_arg != arg) { 768 mutex_exit(&e500_intr_lock); 769 return NULL; 770 } 771 } 772 773 is->is_func = handler; 774 is->is_arg = arg; 775 is->is_ipl = ipl; 776 is->is_ist = ist; 777 is->is_irq = irq; 778 is->is_refcnt++; 779 is->is_vpr = ii.irq_vpr; 780 is->is_dr = ii.irq_dr; 781 switch (ist) { 782 case IST_EDGE: 783 case IST_LEVEL_LOW: 784 case IST_LEVEL_HIGH: 785 snprintf(is->is_source, sizeof(is->is_source), "extirq %d", 786 irq); 787 break; 788 case IST_ONCHIP: 789 snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq); 790 break; 791 case IST_MSIGROUP: 792 snprintf(is->is_source, sizeof(is->is_source), "msigroup %d", 793 irq); 794 break; 795 case IST_TIMER: 796 snprintf(is->is_source, sizeof(is->is_source), "timer %d", irq); 797 break; 798 case IST_IPI: 799 snprintf(is->is_source, sizeof(is->is_source), "ipi %d", irq); 800 break; 801 case IST_MI: 802 snprintf(is->is_source, sizeof(is->is_source), "mi %d", irq); 803 break; 804 case IST_PULSE: 805 default: 806 panic("%s: invalid ist (%d)\n", __func__, ist); 807 } 808 strlcpy(is->is_xname, xname, sizeof(is->is_xname)); 809 810 uint32_t vpr = VPR_PRIORITY_MAKE(IPL2CTPR(ipl)) 811 | VPR_VECTOR_MAKE(((ii.irq_vector + 1) << 4) | ipl) 812 | (ist == IST_LEVEL_LOW 813 ? VPR_LEVEL_LOW 814 : (ist == IST_LEVEL_HIGH 815 ? VPR_LEVEL_HIGH 816 : (ist == IST_ONCHIP 817 ? VPR_P_HIGH 818 : 0))); 819 820 /* 821 * All interrupts go to the primary except per-cpu interrupts which get 822 * routed to the appropriate cpu. 823 */ 824 uint32_t dr = openpic_read(cpu, ii.irq_dr); 825 826 dr |= 1 << (IST_PERCPU_P(ist) ? ci->ci_cpuid : 0); 827 828 /* 829 * Update the vector/priority and destination registers keeping the 830 * interrupt masked. 831 */ 832 const register_t msr = wrtee(0); /* disable interrupts */ 833 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK); 834 openpic_write(cpu, ii.irq_dr, dr); 835 836 /* 837 * Now unmask the interrupt. 838 */ 839 openpic_write(cpu, ii.irq_vpr, vpr); 840 841 wrtee(msr); /* re-enable interrupts */ 842 843 mutex_exit(&e500_intr_lock); 844 845 return is; 846 } 847 848 static void * 849 e500_intr_establish(int irq, int ipl, int ist, int (*handler)(void *), 850 void *arg, const char *xname) 851 { 852 return e500_intr_cpu_establish(curcpu(), irq, ipl, ist, handler, arg, 853 xname); 854 } 855 856 static void 857 e500_intr_disestablish(void *vis) 858 { 859 struct cpu_softc * const cpu = curcpu()->ci_softc; 860 struct intr_source * const is = vis; 861 struct e500_intr_irq_info ii; 862 863 KASSERT(e500_intr_sources <= is); 864 KASSERT(is < e500_intr_last_source); 865 KASSERT(!cpu_intr_p()); 866 867 bool ok = e500_intr_irq_info_get(curcpu(), is->is_irq, is->is_ipl, 868 is->is_ist, &ii); 869 (void)ok; /* appease gcc */ 870 KASSERT(ok); 871 KASSERT(is - e500_intr_sources == ii.irq_vector); 872 873 mutex_enter(&e500_intr_lock); 874 875 if (is->is_refcnt-- > 1) { 876 mutex_exit(&e500_intr_lock); 877 return; 878 } 879 880 /* 881 * Mask the source using the mask (MSK) bit in the vector/priority reg. 882 */ 883 uint32_t vpr = openpic_read(cpu, ii.irq_vpr); 884 openpic_write(cpu, ii.irq_vpr, VPR_MSK | vpr); 885 886 /* 887 * Wait for the Activity (A) bit for the source to be cleared. 888 */ 889 while (openpic_read(cpu, ii.irq_vpr) & VPR_A) 890 ; 891 892 /* 893 * Now the source can be modified. 894 */ 895 openpic_write(cpu, ii.irq_dr, 0); /* stop delivery */ 896 openpic_write(cpu, ii.irq_vpr, VPR_MSK); /* mask/reset it */ 897 898 *is = (struct intr_source)INTR_SOURCE_INITIALIZER; 899 900 mutex_exit(&e500_intr_lock); 901 } 902 903 static void 904 e500_critintr(struct trapframe *tf) 905 { 906 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1); 907 } 908 909 static void 910 e500_decrintr(struct trapframe *tf) 911 { 912 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1); 913 } 914 915 static void 916 e500_fitintr(struct trapframe *tf) 917 { 918 panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1); 919 } 920 921 static void 922 e500_wdogintr(struct trapframe *tf) 923 { 924 struct cpu_info * const ci = curcpu(); 925 mtspr(SPR_TSR, TSR_ENW|TSR_WIS); 926 wdog_barked = true; 927 dump_splhist(ci, NULL); 928 dump_trapframe(tf, NULL); 929 panic("%s: tf=%p tb=%"PRId64" srr0/srr1=%#lx/%#lx" 930 " cpl=%d idepth=%d, mtxcount=%d", 931 __func__, tf, mftb(), tf->tf_srr0, tf->tf_srr1, 932 ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count); 933 } 934 935 static void 936 e500_extintr(struct trapframe *tf) 937 { 938 struct cpu_info * const ci = curcpu(); 939 struct cpu_softc * const cpu = ci->ci_softc; 940 const int old_ipl = ci->ci_cpl; 941 942 /* if we panicked because of watchdog, PSL_CE will be clear. */ 943 KASSERT(wdog_barked || (mfmsr() & PSL_CE)); 944 945 #if 0 946 // printf("%s(%p): idepth=%d enter\n", __func__, tf, ci->ci_idepth); 947 if ((register_t)tf >= (register_t)curlwp->l_addr + USPACE 948 || (register_t)tf < (register_t)curlwp->l_addr + NBPG) { 949 printf("%s(entry): pid %d.%d (%s): srr0/srr1=%#lx/%#lx: invalid tf addr %p\n", 950 __func__, curlwp->l_proc->p_pid, curlwp->l_lid, 951 curlwp->l_proc->p_comm, tf->tf_srr0, tf->tf_srr1, tf); 952 } 953 #endif 954 955 956 ci->ci_data.cpu_nintr++; 957 tf->tf_cf.cf_idepth = ci->ci_idepth++; 958 cpu->cpu_pcpls[ci->ci_idepth] = old_ipl; 959 #if 1 960 if (mfmsr() & PSL_EE) 961 panic("%s(%p): MSR[EE] is on (%#lx)!", __func__, tf, mfmsr()); 962 if (old_ipl == IPL_HIGH 963 || IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR)) 964 panic("%s(%p): old_ipl(%u) == IPL_HIGH(%u) " 965 "|| old_ipl + %u != OPENPIC_CTPR (%u)", 966 __func__, tf, old_ipl, IPL_HIGH, 967 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR)); 968 #else 969 if (old_ipl >= IPL_VM) 970 panic("%s(%p): old_ipl(%u) >= IPL_VM(%u) CTPR=%u", 971 __func__, tf, old_ipl, IPL_VM, openpic_read(cpu, OPENPIC_CTPR)); 972 #endif 973 974 for (;;) { 975 /* 976 * Find out the pending interrupt. 977 */ 978 KASSERTMSG((mfmsr() & PSL_EE) == 0, 979 "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr()); 980 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR)) 981 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)", 982 __func__, tf, __LINE__, old_ipl, 983 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR)); 984 const uint32_t iack = openpic_read(cpu, OPENPIC_IACK); 985 const int ipl = iack & 0xf; 986 const int irq = (iack >> 4) - 1; 987 #if 0 988 printf("%s: iack=%d ipl=%d irq=%d <%s>\n", 989 __func__, iack, ipl, irq, 990 (iack != IRQ_SPURIOUS ? 991 cpu->cpu_evcnt_intrs[irq].ev_name : "spurious")); 992 #endif 993 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR)) 994 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)", 995 __func__, tf, __LINE__, old_ipl, 996 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR)); 997 if (iack == IRQ_SPURIOUS) 998 break; 999 1000 struct intr_source * const is = &e500_intr_sources[irq]; 1001 if (__predict_true(is < e500_intr_last_source)) { 1002 /* 1003 * Timer interrupts get their argument overridden with 1004 * the pointer to the trapframe. 1005 */ 1006 KASSERTMSG(is->is_ipl == ipl, 1007 "iack %#x: is %p: irq %d ipl %d != iack ipl %d", 1008 iack, is, irq, is->is_ipl, ipl); 1009 void *arg = (is->is_ist == IST_TIMER ? tf : is->is_arg); 1010 if (is->is_ipl <= old_ipl) 1011 panic("%s(%p): %s (%u): is->is_ipl (%u) <= old_ipl (%u)\n", 1012 __func__, tf, 1013 cpu->cpu_evcnt_intrs[irq].ev_name, irq, 1014 is->is_ipl, old_ipl); 1015 KASSERT(is->is_ipl > old_ipl); 1016 e500_splset(ci, is->is_ipl); /* change IPL */ 1017 if (__predict_false(is->is_func == NULL)) { 1018 aprint_error_dev(ci->ci_dev, 1019 "interrupt from unestablished irq %d\n", 1020 irq); 1021 } else { 1022 int (*func)(void *) = is->is_func; 1023 wrtee(PSL_EE); 1024 int rv = (*func)(arg); 1025 wrtee(0); 1026 #if DEBUG > 2 1027 printf("%s: %s handler %p(%p) returned %d\n", 1028 __func__, 1029 cpu->cpu_evcnt_intrs[irq].ev_name, 1030 func, arg, rv); 1031 #endif 1032 if (rv == 0) 1033 cpu->cpu_evcnt_spurious_intr.ev_count++; 1034 } 1035 e500_splset(ci, old_ipl); /* restore IPL */ 1036 cpu->cpu_evcnt_intrs[irq].ev_count++; 1037 } else { 1038 aprint_error_dev(ci->ci_dev, 1039 "interrupt from illegal irq %d\n", irq); 1040 cpu->cpu_evcnt_spurious_intr.ev_count++; 1041 } 1042 /* 1043 * If this is a nested interrupt, simply ack it and exit 1044 * because the loop we interrupted will complete looking 1045 * for interrupts. 1046 */ 1047 KASSERTMSG((mfmsr() & PSL_EE) == 0, 1048 "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr()); 1049 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR)) 1050 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)", 1051 __func__, tf, __LINE__, old_ipl, 1052 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR)); 1053 1054 openpic_write(cpu, OPENPIC_EOI, 0); 1055 if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR)) 1056 panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)", 1057 __func__, tf, __LINE__, old_ipl, 1058 15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR)); 1059 if (ci->ci_idepth > 0) 1060 break; 1061 } 1062 1063 ci->ci_idepth--; 1064 1065 #ifdef __HAVE_FAST_SOFTINTS 1066 /* 1067 * Before exiting, deal with any softints that need to be dealt with. 1068 */ 1069 const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << old_ipl); 1070 if (__predict_false(softints != 0)) { 1071 KASSERT(old_ipl < IPL_VM); 1072 e500_splset(ci, IPL_HIGH); /* pop to high */ 1073 wrtee(PSL_EE); /* reenable interrupts */ 1074 powerpc_softint(ci, old_ipl, /* deal with them */ 1075 tf->tf_srr0); 1076 wrtee(0); /* disable interrupts */ 1077 e500_splset(ci, old_ipl); /* and drop back */ 1078 } 1079 #endif /* __HAVE_FAST_SOFTINTS */ 1080 KASSERT(ci->ci_cpl == old_ipl); 1081 1082 /* 1083 * If we interrupted while power-saving and we need to exit idle, 1084 * we need to clear PSL_POW so we won't go back into power-saving. 1085 */ 1086 if (__predict_false(tf->tf_srr1 & PSL_POW) && ci->ci_want_resched) 1087 tf->tf_srr1 &= ~PSL_POW; 1088 1089 // printf("%s(%p): idepth=%d exit\n", __func__, tf, ci->ci_idepth); 1090 } 1091 1092 static void 1093 e500_intr_init(void) 1094 { 1095 struct cpu_info * const ci = curcpu(); 1096 struct cpu_softc * const cpu = ci->ci_softc; 1097 const uint32_t frr = openpic_read(cpu, OPENPIC_FRR); 1098 const u_int nirq = FRR_NIRQ_GET(frr) + 1; 1099 // const u_int ncpu = FRR_NCPU_GET(frr) + 1; 1100 struct intr_source *is; 1101 struct e500_intr_info * const ii = &e500_intr_info; 1102 1103 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 1104 switch (svr) { 1105 #ifdef MPC8536 1106 case SVR_MPC8536v1 >> 16: 1107 *ii = mpc8536_intr_info; 1108 break; 1109 #endif 1110 #ifdef MPC8544 1111 case SVR_MPC8544v1 >> 16: 1112 *ii = mpc8544_intr_info; 1113 break; 1114 #endif 1115 #ifdef MPC8548 1116 case SVR_MPC8543v1 >> 16: 1117 case SVR_MPC8548v1 >> 16: 1118 *ii = mpc8548_intr_info; 1119 break; 1120 #endif 1121 #ifdef MPC8555 1122 case SVR_MPC8541v1 >> 16: 1123 case SVR_MPC8555v1 >> 16: 1124 *ii = mpc8555_intr_info; 1125 break; 1126 #endif 1127 #ifdef MPC8568 1128 case SVR_MPC8568v1 >> 16: 1129 *ii = mpc8568_intr_info; 1130 break; 1131 #endif 1132 #ifdef MPC8572 1133 case SVR_MPC8572v1 >> 16: 1134 *ii = mpc8572_intr_info; 1135 break; 1136 #endif 1137 #ifdef P1023 1138 case SVR_P1017v1 >> 16: 1139 case SVR_P1023v1 >> 16: 1140 *ii = p1023_intr_info; 1141 break; 1142 #endif 1143 #ifdef P1025 1144 case SVR_P1016v1 >> 16: 1145 case SVR_P1025v1 >> 16: 1146 *ii = p1025_intr_info; 1147 break; 1148 #endif 1149 #ifdef P2020 1150 case SVR_P2010v2 >> 16: 1151 case SVR_P2020v2 >> 16: 1152 *ii = p20x0_intr_info; 1153 break; 1154 #endif 1155 default: 1156 panic("%s: don't know how to deal with SVR %#jx", 1157 __func__, (uintmax_t)mfspr(SPR_SVR)); 1158 } 1159 1160 /* 1161 * Initialize interrupt handler lock 1162 */ 1163 mutex_init(&e500_intr_lock, MUTEX_DEFAULT, IPL_HIGH); 1164 1165 /* 1166 * We need to be in mixed mode. 1167 */ 1168 openpic_write(cpu, OPENPIC_GCR, GCR_M); 1169 1170 /* 1171 * Make we and the openpic both agree about the current SPL level. 1172 */ 1173 e500_splset(ci, ci->ci_cpl); 1174 1175 /* 1176 * Allow the required number of interrupt sources. 1177 */ 1178 is = kmem_zalloc(nirq * sizeof(*is), KM_SLEEP); 1179 e500_intr_sources = is; 1180 e500_intr_last_source = is + nirq; 1181 1182 /* 1183 * Initialize all the external interrupts as active low. 1184 */ 1185 for (u_int irq = 0; irq < e500_intr_info.ii_external_sources; irq++) { 1186 openpic_write(cpu, OPENPIC_EIVPR(irq), 1187 VPR_VECTOR_MAKE(irq) | VPR_LEVEL_LOW); 1188 } 1189 } 1190 1191 static void 1192 e500_intr_init_precpu(void) 1193 { 1194 struct cpu_info const *ci = curcpu(); 1195 struct cpu_softc * const cpu = ci->ci_softc; 1196 bus_addr_t dr; 1197 1198 /* 1199 * timer's DR is set to be delivered to cpu0 as initial value. 1200 */ 1201 for (u_int irq = 0; irq < e500_intr_info.ii_timer_sources; irq++) { 1202 dr = OPENPIC_GTDR(ci->ci_cpuid, irq); 1203 openpic_write(cpu, dr, 0); /* stop delivery */ 1204 } 1205 } 1206 1207 static void 1208 e500_idlespin(void) 1209 { 1210 KASSERTMSG(curcpu()->ci_cpl == IPL_NONE, 1211 "%s: cpu%u: ci_cpl (%d) != 0", __func__, cpu_number(), 1212 curcpu()->ci_cpl); 1213 KASSERTMSG(CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)) == IPL_NONE, 1214 "%s: cpu%u: CTPR (%d) != IPL_NONE", __func__, cpu_number(), 1215 CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR))); 1216 KASSERT(mfmsr() & PSL_EE); 1217 1218 if (powersave > 0) 1219 mtmsr(mfmsr() | PSL_POW); 1220 } 1221 1222 static void 1223 e500_intr_cpu_attach(struct cpu_info *ci) 1224 { 1225 struct cpu_softc * const cpu = ci->ci_softc; 1226 const char * const xname = device_xname(ci->ci_dev); 1227 1228 const u_int32_t frr = openpic_read(cpu, OPENPIC_FRR); 1229 const u_int nirq = FRR_NIRQ_GET(frr) + 1; 1230 // const u_int ncpu = FRR_NCPU_GET(frr) + 1; 1231 1232 const struct e500_intr_info * const info = &e500_intr_info; 1233 1234 cpu->cpu_clock_gtbcr = OPENPIC_GTBCR(ci->ci_cpuid, E500_CLOCK_TIMER); 1235 1236 cpu->cpu_evcnt_intrs = 1237 kmem_zalloc(nirq * sizeof(cpu->cpu_evcnt_intrs[0]), KM_SLEEP); 1238 1239 struct evcnt *evcnt = cpu->cpu_evcnt_intrs; 1240 for (size_t j = 0; j < info->ii_external_sources; j++, evcnt++) { 1241 const char *name = e500_intr_external_name_lookup(j); 1242 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, xname, name); 1243 } 1244 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_ONCHIP]); 1245 for (size_t j = 0; j < info->ii_onchip_sources; j++, evcnt++) { 1246 if (info->ii_onchip_bitmap[j / 32] & __BIT(j & 31)) { 1247 const char *name = e500_intr_onchip_name_lookup(j); 1248 if (name != NULL) { 1249 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1250 NULL, xname, name); 1251 #ifdef DIAGNOSTIC 1252 } else { 1253 printf("%s: missing evcnt for onchip irq %zu\n", 1254 __func__, j); 1255 #endif 1256 } 1257 } 1258 } 1259 1260 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_MSIGROUP]); 1261 for (size_t j = 0; j < info->ii_msigroup_sources; j++, evcnt++) { 1262 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1263 NULL, xname, e500_msigroup_intr_names[j].in_name); 1264 } 1265 1266 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_TIMER]); 1267 evcnt += ci->ci_cpuid * info->ii_percpu_sources; 1268 for (size_t j = 0; j < info->ii_timer_sources; j++, evcnt++) { 1269 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1270 NULL, xname, e500_timer_intr_names[j].in_name); 1271 } 1272 1273 for (size_t j = 0; j < info->ii_ipi_sources; j++, evcnt++) { 1274 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1275 NULL, xname, e500_ipi_intr_names[j].in_name); 1276 } 1277 1278 for (size_t j = 0; j < info->ii_mi_sources; j++, evcnt++) { 1279 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1280 NULL, xname, e500_mi_intr_names[j].in_name); 1281 } 1282 1283 ci->ci_idlespin = e500_idlespin; 1284 } 1285 1286 static void 1287 e500_intr_cpu_send_ipi(cpuid_t target, uint32_t ipimsg) 1288 { 1289 struct cpu_info * const ci = curcpu(); 1290 struct cpu_softc * const cpu = ci->ci_softc; 1291 uint32_t dstmask; 1292 1293 if (target >= CPU_MAXNUM) { 1294 CPU_INFO_ITERATOR cii; 1295 struct cpu_info *dst_ci; 1296 1297 KASSERT(target == IPI_DST_NOTME || target == IPI_DST_ALL); 1298 1299 dstmask = 0; 1300 for (CPU_INFO_FOREACH(cii, dst_ci)) { 1301 if (target == IPI_DST_ALL || ci != dst_ci) { 1302 dstmask |= 1 << cpu_index(ci); 1303 if (ipimsg) 1304 atomic_or_32(&dst_ci->ci_pending_ipis, 1305 ipimsg); 1306 } 1307 } 1308 } else { 1309 struct cpu_info * const dst_ci = cpu_lookup(target); 1310 KASSERT(dst_ci != NULL); 1311 KASSERTMSG(target == cpu_index(dst_ci), 1312 "%s: target (%lu) != cpu_index(cpu%u)", 1313 __func__, target, cpu_index(dst_ci)); 1314 dstmask = (1 << target); 1315 if (ipimsg) 1316 atomic_or_32(&dst_ci->ci_pending_ipis, ipimsg); 1317 } 1318 1319 openpic_write(cpu, OPENPIC_IPIDR(0), dstmask); 1320 } 1321 1322 typedef void (*ipifunc_t)(void); 1323 1324 #ifdef __HAVE_PREEMPTION 1325 static void 1326 e500_ipi_kpreempt(void) 1327 { 1328 poowerpc_softint_trigger(1 << IPL_NONE); 1329 } 1330 #endif 1331 1332 static void 1333 e500_ipi_suspend(void) 1334 { 1335 1336 #ifdef MULTIPROCESSOR 1337 cpu_pause(NULL); 1338 #endif /* MULTIPROCESSOR */ 1339 } 1340 1341 static void 1342 e500_ipi_ast(void) 1343 { 1344 curcpu()->ci_onproc->l_md.md_astpending = 1; 1345 } 1346 1347 static const ipifunc_t e500_ipifuncs[] = { 1348 [ilog2(IPI_XCALL)] = xc_ipi_handler, 1349 [ilog2(IPI_GENERIC)] = ipi_cpu_handler, 1350 [ilog2(IPI_HALT)] = e500_ipi_halt, 1351 #ifdef __HAVE_PREEMPTION 1352 [ilog2(IPI_KPREEMPT)] = e500_ipi_kpreempt, 1353 #endif 1354 [ilog2(IPI_TLB1SYNC)] = e500_tlb1_sync, 1355 [ilog2(IPI_SUSPEND)] = e500_ipi_suspend, 1356 [ilog2(IPI_AST)] = e500_ipi_ast, 1357 }; 1358 1359 static int 1360 e500_ipi_intr(void *v) 1361 { 1362 struct cpu_info * const ci = curcpu(); 1363 1364 ci->ci_ev_ipi.ev_count++; 1365 1366 uint32_t pending_ipis = atomic_swap_32(&ci->ci_pending_ipis, 0); 1367 for (u_int ipi = 31; pending_ipis != 0; ipi--, pending_ipis <<= 1) { 1368 const u_int bits = __builtin_clz(pending_ipis); 1369 ipi -= bits; 1370 pending_ipis <<= bits; 1371 KASSERT(e500_ipifuncs[ipi] != NULL); 1372 (*e500_ipifuncs[ipi])(); 1373 } 1374 1375 return 1; 1376 } 1377 1378 static void 1379 e500_intr_cpu_hatch(struct cpu_info *ci) 1380 { 1381 char iname[INTRIDBUF]; 1382 1383 /* Initialize percpu interrupts. */ 1384 e500_intr_init_precpu(); 1385 1386 /* 1387 * Establish clock interrupt for this CPU. 1388 */ 1389 snprintf(iname, sizeof(iname), "%s clock", device_xname(ci->ci_dev)); 1390 if (e500_intr_cpu_establish(ci, E500_CLOCK_TIMER, IPL_CLOCK, IST_TIMER, 1391 e500_clock_intr, NULL, iname) == NULL) 1392 panic("%s: failed to establish clock interrupt!", __func__); 1393 1394 /* 1395 * Establish the IPI interrupts for this CPU. 1396 */ 1397 if (e500_intr_cpu_establish(ci, 0, IPL_VM, IST_IPI, e500_ipi_intr, 1398 NULL, "ipi") == NULL) 1399 panic("%s: failed to establish ipi interrupt!", __func__); 1400 1401 /* 1402 * Enable watchdog interrupts. 1403 */ 1404 uint32_t tcr = mfspr(SPR_TCR); 1405 tcr |= TCR_WIE; 1406 mtspr(SPR_TCR, tcr); 1407 } 1408 1409 static const char * 1410 e500_intr_all_name_lookup(int irq, int ist) 1411 { 1412 const struct e500_intr_info * const info = &e500_intr_info; 1413 1414 switch (ist) { 1415 default: 1416 if (irq < info->ii_external_sources && 1417 (ist == IST_EDGE || 1418 ist == IST_LEVEL_LOW || 1419 ist == IST_LEVEL_HIGH)) 1420 return e500_intr_name_lookup( 1421 info->ii_external_intr_names, irq); 1422 break; 1423 1424 case IST_PULSE: 1425 break; 1426 1427 case IST_ONCHIP: 1428 if (irq < info->ii_onchip_sources) 1429 return e500_intr_onchip_name_lookup(irq); 1430 break; 1431 1432 case IST_MSIGROUP: 1433 if (irq < info->ii_msigroup_sources) 1434 return e500_intr_name_lookup(e500_msigroup_intr_names, 1435 irq); 1436 break; 1437 1438 case IST_TIMER: 1439 if (irq < info->ii_timer_sources) 1440 return e500_intr_name_lookup(e500_timer_intr_names, 1441 irq); 1442 break; 1443 1444 case IST_IPI: 1445 if (irq < info->ii_ipi_sources) 1446 return e500_intr_name_lookup(e500_ipi_intr_names, irq); 1447 break; 1448 1449 case IST_MI: 1450 if (irq < info->ii_mi_sources) 1451 return e500_intr_name_lookup(e500_mi_intr_names, irq); 1452 break; 1453 } 1454 1455 return NULL; 1456 } 1457 1458 static void 1459 e500_intr_get_affinity(struct intr_source *is, kcpuset_t *cpuset) 1460 { 1461 struct cpu_info * const ci = curcpu(); 1462 struct cpu_softc * const cpu = ci->ci_softc; 1463 struct e500_intr_irq_info ii; 1464 1465 kcpuset_zero(cpuset); 1466 1467 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) { 1468 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, 1469 is->is_ist, &ii)) { 1470 uint32_t dr = openpic_read(cpu, ii.irq_dr); 1471 while (dr != 0) { 1472 u_int n = ffs(dr); 1473 if (n-- == 0) 1474 break; 1475 dr &= ~(1 << n); 1476 kcpuset_set(cpuset, n); 1477 } 1478 } 1479 } 1480 } 1481 1482 static int 1483 e500_intr_set_affinity(struct intr_source *is, const kcpuset_t *cpuset) 1484 { 1485 struct cpu_info * const ci = curcpu(); 1486 struct cpu_softc * const cpu = ci->ci_softc; 1487 struct e500_intr_irq_info ii; 1488 uint32_t ecpuset, tcpuset; 1489 1490 KASSERT(mutex_owned(&cpu_lock)); 1491 KASSERT(mutex_owned(&e500_intr_lock)); 1492 KASSERT(!kcpuset_iszero(cpuset)); 1493 1494 kcpuset_export_u32(cpuset, &ecpuset, sizeof(ecpuset)); 1495 tcpuset = ecpuset; 1496 while (tcpuset != 0) { 1497 u_int cpu_idx = ffs(tcpuset); 1498 if (cpu_idx-- == 0) 1499 break; 1500 1501 tcpuset &= ~(1 << cpu_idx); 1502 struct cpu_info * const newci = cpu_lookup(cpu_idx); 1503 if (newci == NULL) 1504 return EINVAL; 1505 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) 1506 return EINVAL; 1507 } 1508 1509 if (!e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, 1510 &ii)) 1511 return ENXIO; 1512 1513 /* 1514 * Update the vector/priority and destination registers keeping the 1515 * interrupt masked. 1516 */ 1517 const register_t msr = wrtee(0); /* disable interrupts */ 1518 1519 uint32_t vpr = openpic_read(cpu, ii.irq_vpr); 1520 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK); 1521 1522 /* 1523 * Wait for the Activity (A) bit for the source to be cleared. 1524 */ 1525 while (openpic_read(cpu, ii.irq_vpr) & VPR_A) 1526 continue; 1527 1528 /* 1529 * Update destination register 1530 */ 1531 openpic_write(cpu, ii.irq_dr, ecpuset); 1532 1533 /* 1534 * Now unmask the interrupt. 1535 */ 1536 openpic_write(cpu, ii.irq_vpr, vpr); 1537 1538 wrtee(msr); /* re-enable interrupts */ 1539 1540 return 0; 1541 } 1542 1543 static bool 1544 e500_intr_is_affinity_intrsource(struct intr_source *is, 1545 const kcpuset_t *cpuset) 1546 { 1547 struct cpu_info * const ci = curcpu(); 1548 struct cpu_softc * const cpu = ci->ci_softc; 1549 struct e500_intr_irq_info ii; 1550 bool result = false; 1551 1552 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) { 1553 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, 1554 is->is_ist, &ii)) { 1555 uint32_t dr = openpic_read(cpu, ii.irq_dr); 1556 while (dr != 0 && !result) { 1557 u_int n = ffs(dr); 1558 if (n-- == 0) 1559 break; 1560 dr &= ~(1 << n); 1561 result = kcpuset_isset(cpuset, n); 1562 } 1563 } 1564 } 1565 return result; 1566 } 1567 1568 static struct intr_source * 1569 e500_intr_get_source(const char *intrid) 1570 { 1571 struct intr_source *is; 1572 1573 mutex_enter(&e500_intr_lock); 1574 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) { 1575 if (is->is_source[0] == '\0') 1576 continue; 1577 1578 if (!strncmp(intrid, is->is_source, sizeof(is->is_source) - 1)) 1579 break; 1580 } 1581 if (is == e500_intr_last_source) 1582 is = NULL; 1583 mutex_exit(&e500_intr_lock); 1584 return is; 1585 } 1586 1587 uint64_t 1588 interrupt_get_count(const char *intrid, u_int cpu_idx) 1589 { 1590 struct cpu_info * const ci = cpu_lookup(cpu_idx); 1591 struct cpu_softc * const cpu = ci->ci_softc; 1592 struct intr_source *is; 1593 struct e500_intr_irq_info ii; 1594 1595 is = e500_intr_get_source(intrid); 1596 if (is == NULL) 1597 return 0; 1598 1599 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, &ii)) 1600 return cpu->cpu_evcnt_intrs[ii.irq_vector].ev_count; 1601 return 0; 1602 } 1603 1604 void 1605 interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset) 1606 { 1607 struct intr_source *is; 1608 1609 kcpuset_zero(cpuset); 1610 1611 is = e500_intr_get_source(intrid); 1612 if (is == NULL) 1613 return; 1614 1615 mutex_enter(&e500_intr_lock); 1616 e500_intr_get_affinity(is, cpuset); 1617 mutex_exit(&e500_intr_lock); 1618 } 1619 1620 void 1621 interrupt_get_available(kcpuset_t *cpuset) 1622 { 1623 CPU_INFO_ITERATOR cii; 1624 struct cpu_info *ci; 1625 1626 kcpuset_zero(cpuset); 1627 1628 mutex_enter(&cpu_lock); 1629 for (CPU_INFO_FOREACH(cii, ci)) { 1630 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) 1631 kcpuset_set(cpuset, cpu_index(ci)); 1632 } 1633 mutex_exit(&cpu_lock); 1634 } 1635 1636 void 1637 interrupt_get_devname(const char *intrid, char *buf, size_t len) 1638 { 1639 struct intr_source *is; 1640 1641 if (len == 0) 1642 return; 1643 1644 buf[0] = '\0'; 1645 1646 is = e500_intr_get_source(intrid); 1647 if (is != NULL) 1648 strlcpy(buf, is->is_xname, len); 1649 } 1650 1651 struct intrids_handler * 1652 interrupt_construct_intrids(const kcpuset_t *cpuset) 1653 { 1654 struct intr_source *is; 1655 struct intrids_handler *ii_handler; 1656 intrid_t *ids; 1657 int i, n; 1658 1659 if (kcpuset_iszero(cpuset)) 1660 return NULL; 1661 1662 n = 0; 1663 mutex_enter(&e500_intr_lock); 1664 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) { 1665 if (e500_intr_is_affinity_intrsource(is, cpuset)) 1666 ++n; 1667 } 1668 mutex_exit(&e500_intr_lock); 1669 1670 const size_t alloc_size = sizeof(int) + sizeof(intrid_t) * n; 1671 ii_handler = kmem_zalloc(alloc_size, KM_SLEEP); 1672 ii_handler->iih_nids = n; 1673 if (n == 0) 1674 return ii_handler; 1675 1676 ids = ii_handler->iih_intrids; 1677 mutex_enter(&e500_intr_lock); 1678 for (i = 0, is = e500_intr_sources; 1679 i < n && is < e500_intr_last_source; 1680 ++is) { 1681 if (!e500_intr_is_affinity_intrsource(is, cpuset)) 1682 continue; 1683 1684 if (is->is_source[0] != '\0') { 1685 strlcpy(ids[i], is->is_source, sizeof(ids[0])); 1686 ++i; 1687 } 1688 } 1689 mutex_exit(&e500_intr_lock); 1690 1691 return ii_handler; 1692 } 1693 1694 void 1695 interrupt_destruct_intrids(struct intrids_handler *ii_handler) 1696 { 1697 size_t iih_size; 1698 1699 if (ii_handler == NULL) 1700 return; 1701 1702 iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids; 1703 kmem_free(ii_handler, iih_size); 1704 } 1705 1706 static int 1707 interrupt_distribute_locked(struct intr_source *is, const kcpuset_t *newset, 1708 kcpuset_t *oldset) 1709 { 1710 int error; 1711 1712 KASSERT(mutex_owned(&cpu_lock)); 1713 1714 if (is->is_ipl == IPL_NONE || IST_PERCPU_P(is->is_ist)) 1715 return EINVAL; 1716 1717 mutex_enter(&e500_intr_lock); 1718 if (oldset != NULL) 1719 e500_intr_get_affinity(is, oldset); 1720 error = e500_intr_set_affinity(is, newset); 1721 mutex_exit(&e500_intr_lock); 1722 1723 return error; 1724 } 1725 1726 int 1727 interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset) 1728 { 1729 int error; 1730 1731 mutex_enter(&cpu_lock); 1732 error = interrupt_distribute_locked(ich, newset, oldset); 1733 mutex_exit(&cpu_lock); 1734 1735 return error; 1736 } 1737 1738 int 1739 interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset, 1740 kcpuset_t *oldset) 1741 { 1742 struct intr_source *is; 1743 int error; 1744 1745 is = e500_intr_get_source(intrid); 1746 if (is != NULL) { 1747 mutex_enter(&cpu_lock); 1748 error = interrupt_distribute_locked(is, newset, oldset); 1749 mutex_exit(&cpu_lock); 1750 } else 1751 error = ENOENT; 1752 1753 return error; 1754 } 1755