/*	$OpenBSD: octcit.c,v 1.2 2017/08/09 15:10:38 visa Exp $	*/

/*
 * Copyright (c) 2017 Visa Hankala
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for OCTEON Central Interrupt Unit version 3 (CIU3).
 *
 * CIU3 is present on CN72xx, CN73xx, CN77xx, and CN78xx.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

#define CIU3_IDT(core, ipl)		((core) * 4 + (ipl))
#define CIU3_IDT_CTL(idt)		((idt) * 8 + 0x110000u)
#define CIU3_IDT_PP(idt)		((idt) * 32 + 0x120000u)
#define CIU3_IDT_IO(idt)		((idt) * 8 + 0x130000u)
#define CIU3_DEST_PP_INT(core)		((core) * 8 + 0x200000u)
#define   CIU3_DEST_PP_INT_INTSN	0x000fffff00000000ull
#define   CIU3_DEST_PP_INT_INTSN_SHIFT	32
#define   CIU3_DEST_PP_INT_INTR	 	0x0000000000000001ull
#define CIU3_ISC_CTL(intsn)		((intsn) * 8 + 0x80000000u)
#define   CIU3_ISC_CTL_IDT		0x0000000000ff0000ull
#define   CIU3_ISC_CTL_IDT_SHIFT	16
#define   CIU3_ISC_CTL_IMP		0x0000000000008000ull
#define   CIU3_ISC_CTL_EN		0x0000000000000002ull
#define   CIU3_ISC_CTL_RAW		0x0000000000000001ull
#define CIU3_ISC_W1C(intsn)		((intsn) * 8 + 0x90000000u)
#define   CIU3_ISC_W1C_EN		0x0000000000000002ull
#define   CIU3_ISC_W1C_RAW		0x0000000000000001ull
#define CIU3_ISC_W1S(intsn)		((intsn) * 8 + 0xa0000000u)
#define   CIU3_ISC_W1S_EN		0x0000000000000002ull
#define   CIU3_ISC_W1S_RAW		0x0000000000000001ull
#define CIU3_NINTSN			(1u << 20)

#define IS_MBOX(intsn)			(((intsn) >> 12) == 4)
#define MBOX_INTSN(core)		((core) + 0x4000u)

#define CIU3_RD_8(sc, reg) \
	bus_space_read_8((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define CIU3_WR_8(sc, reg, val) \
	bus_space_write_8((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)

#define HASH_SIZE	64

struct octcit_intrhand {
	SLIST_ENTRY(octcit_intrhand)
			 ih_list;
	int		(*ih_func)(void *);
	void		*ih_arg;
	int		 ih_intsn;
	int		 ih_flags;
#define CIH_MPSAFE	0x01
#define CIH_EDGE	0x02		/* edge-triggered */
	int		 ih_level;
	struct evcount	 ih_count;
};

struct octcit_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;

	SLIST_HEAD(, octcit_intrhand)
				 sc_handlers[HASH_SIZE];
	int			 sc_minipl[MAXCPUS];
	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};

int	 octcit_match(struct device *, void *, void *);
void	 octcit_attach(struct device *, struct device *, void *);

void	 octcit_init(void);
uint32_t octcit_intr(uint32_t, struct trapframe *);
void	*octcit_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octcit_intr_establish_intsn(int, int, int, int (*)(void *),
	    void *, const char *);
void	*octcit_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octcit_intr_disestablish(void *);
void	 octcit_splx(int);

uint32_t octcit_ipi_intr(uint32_t, struct trapframe *);
int	 octcit_ipi_establish(int (*)(void *), cpuid_t);
void	 octcit_ipi_set(cpuid_t);
void	 octcit_ipi_clear(cpuid_t);

const struct cfattach octcit_ca = {
	sizeof(struct octcit_softc), octcit_match, octcit_attach
};

struct cfdriver octcit_cd = {
	NULL, "octcit", DV_DULL
};

struct octcit_softc *octcit_sc;

int
octcit_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "cavium,octeon-7890-ciu3");
}

void
octcit_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octcit_softc *sc = (struct octcit_softc *)self;
	uint64_t val;
	int hash, intsn;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	for (hash = 0; hash < HASH_SIZE; hash++)
		SLIST_INIT(&sc->sc_handlers[hash]);

	/* Disable all interrupts and acknowledge any pending ones. */
	for (intsn = 0; intsn < CIU3_NINTSN; intsn++) {
		val = CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		if (ISSET(val, CIU3_ISC_CTL_IMP)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), 0);
			(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		}
	}

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octcit_init;
	sc->sc_ic.ic_establish = octcit_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octcit_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octcit_intr_disestablish;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octcit_ipi_establish;
	sc->sc_ic.ic_ipi_set = octcit_ipi_set;
	sc->sc_ic.ic_ipi_clear = octcit_ipi_clear;
#endif

	octcit_sc = sc;

	set_intr(INTPRI_CIU_0, CR_INT_0, octcit_intr);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octcit_ipi_intr);
#endif

	octcit_init();

	register_splx_handler(octcit_splx);
	octeon_intr_register(&sc->sc_ic);
}

static inline int
intsn_hash(int intsn)
{
	int tmp;

	tmp = intsn * 0xffb;
	return ((tmp >> 14) ^ tmp) & (HASH_SIZE - 1);
}

void
octcit_init(void)
{
	struct cpu_info *ci = curcpu();
	struct octcit_softc *sc = octcit_sc;
	int core = ci->ci_cpuid;

	sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;

	/*
	 * Set up interrupt routing.
	 */

	/* Route IP2. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 0)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 0)), 0);

	/* Route IP3. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 1)), 1);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 1)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 1)), 0);

	/* Disable IP4. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 2)), 0);

	/* Disable IP5. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 3)), 0);
}

void *
octcit_intr_establish(int irq, int level, int (*func)(void *), void *arg,
    const char *name)
{
	return octcit_intr_establish_intsn(irq, level, 0, func, arg, name);
}

void *
octcit_intr_establish_intsn(int intsn, int level, int flags,
    int (*func)(void *), void *arg, const char *name)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih;
	struct octcit_softc *sc = octcit_sc;
	uint64_t val;
	int s;

	if ((unsigned int)intsn > CIU3_NINTSN)
		panic("%s: illegal intsn 0x%x", __func__, intsn);

	if (IS_MBOX(intsn))
		panic("%s: mbox intsn 0x%x not allowed", __func__, intsn);

	if (ISSET(level, IPL_MPSAFE))
		flags |= CIH_MPSAFE;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_intsn = intsn;
	evcount_attach(&ih->ih_count, name, &ih->ih_intsn);

	s = splhigh();

	SLIST_INSERT_HEAD(&sc->sc_handlers[intsn_hash(intsn)], ih, ih_list);
	if (sc->sc_minipl[ci->ci_cpuid] > level)
		sc->sc_minipl[ci->ci_cpuid] = level;

	val = CIU3_ISC_CTL_EN | (CIU3_IDT(ci->ci_cpuid, 0) <<
	    CIU3_ISC_CTL_IDT_SHIFT);
	CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
	(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));

	splx(s);

	return ih;
}

void *
octcit_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
    int (*func)(void *), void *arg, const char *name)
{
	uint32_t *cells;
	int flags = 0;
	int intsn, len, type;

	len = OF_getproplen(node, "interrupts");
	if (len / (sizeof(uint32_t) * 2) <= idx ||
	    len % (sizeof(uint32_t) * 2) != 0)
		return NULL;

	cells = malloc(len, M_TEMP, M_NOWAIT);
	if (cells == NULL)
		return NULL;

	OF_getpropintarray(node, "interrupts", cells, len);
	intsn = cells[idx * 2];
	type = cells[idx * 2 + 1];

	free(cells, M_TEMP, len);

	if (type != 4)
		flags |= CIH_EDGE;

	return octcit_intr_establish_intsn(intsn, level, flags, func, arg,
	    name);
}

void
octcit_intr_disestablish(void *_ih)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih = _ih;
	struct octcit_intrhand *tmp;
	struct octcit_softc *sc = octcit_sc;
	unsigned int count;
	int found = 0;
	int hash = intsn_hash(ih->ih_intsn);
	int i, s;

	count = 0;
	SLIST_FOREACH(tmp, &sc->sc_handlers[hash], ih_list) {
		if (tmp->ih_intsn == ih->ih_intsn)
			count++;
		if (tmp == ih)
			found = 1;
	}
	if (found == 0)
		panic("%s: intrhand %p not registered", __func__, ih);

	s = splhigh();

	/*
	 * Disable the interrupt source if this is its last handler.
	 * Note that the count above includes ih itself.
	 */
	if (count == 1) {
		CIU3_WR_8(sc, CIU3_ISC_W1C(ih->ih_intsn), CIU3_ISC_W1C_EN);
		CIU3_WR_8(sc, CIU3_ISC_CTL(ih->ih_intsn), 0);
		(void)CIU3_RD_8(sc, CIU3_ISC_CTL(ih->ih_intsn));
	}

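	/* Unlink the handler while interrupts are blocked at splhigh(). */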
	SLIST_REMOVE(&sc->sc_handlers[hash], ih, octcit_intrhand, ih_list);

	/* Recompute IPL floor if necessary. */
	if (sc->sc_minipl[ci->ci_cpuid] == ih->ih_level) {
		sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;
		for (i = 0; i < HASH_SIZE; i++) {
			SLIST_FOREACH(tmp, &sc->sc_handlers[i], ih_list) {
				if (sc->sc_minipl[ci->ci_cpuid] >
				    tmp->ih_level)
					sc->sc_minipl[ci->ci_cpuid] =
					    tmp->ih_level;
			}
		}
	}

	splx(s);

	free(ih, M_DEVBUF, sizeof(*ih));
}

uint32_t
octcit_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih;
	struct octcit_softc *sc = octcit_sc;
	uint64_t destpp;
	uint64_t intsn;
	unsigned int core = ci->ci_cpuid;
	int handled = 0;
	int ipl;
	int ret;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

	if (frame->ipl >= sc->sc_minipl[ci->ci_cpuid]) {
		/* Disable IP2. */
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 0);
		(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
		return hwpend;
	}

	destpp = CIU3_RD_8(sc, CIU3_DEST_PP_INT(core));
	if (!ISSET(destpp, CIU3_DEST_PP_INT_INTR))
		goto spurious;

	__asm__ (".set noreorder\n");
	ipl = ci->ci_ipl;
	mips_sync();
	__asm__ (".set reorder\n");

	intsn = (destpp & CIU3_DEST_PP_INT_INTSN) >>
	    CIU3_DEST_PP_INT_INTSN_SHIFT;
	SLIST_FOREACH(ih, &sc->sc_handlers[intsn_hash(intsn)], ih_list) {
		if (ih->ih_intsn != intsn)
			continue;

		splraise(ih->ih_level);

		/* Acknowledge the interrupt. */
		if (ISSET(ih->ih_flags, CIH_EDGE)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			(void)CIU3_RD_8(sc, CIU3_ISC_W1C(intsn));
		}

#ifdef MULTIPROCESSOR
		if (ih->ih_level < IPL_IPI) {
			sr = getsr();
			ENABLEIPI();
		}
		if (ISSET(ih->ih_flags, CIH_MPSAFE))
			need_lock = 0;
		else
			need_lock = ih->ih_level < IPL_CLOCK;
		if (need_lock)
			__mp_lock(&kernel_lock);
#endif
		ret = (*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
		if (need_lock)
			__mp_unlock(&kernel_lock);
		if (ih->ih_level < IPL_IPI)
			setsr(sr);
#endif

		if (ret != 0) {
			handled = 1;
			atomic_inc_long(
			    (unsigned long *)&ih->ih_count.ec_count);
		}

		/*
		 * Stop processing when one handler has claimed the interrupt.
		 * This saves cycles because interrupt sharing should not
		 * happen on this hardware.
		 */
		if (ret == 1)
			break;
	}

	__asm__ (".set noreorder\n");
	ci->ci_ipl = ipl;
	mips_sync();
	__asm__ (".set reorder\n");

spurious:
	if (handled == 0)
		printf("cpu%lu: spurious interrupt: dest 0x%016llx\n",
		    ci->ci_cpuid, destpp);

	return hwpend;
}

void
octcit_splx(int newipl)
{
	struct octcit_softc *sc = octcit_sc;
	struct cpu_info *ci = curcpu();
	unsigned int core = ci->ci_cpuid;

	/* Update IPL. Order highly important! */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	mips_sync();
	__asm__ (".set reorder\n");

	/* Re-enable IP2 delivery to this core if the new IPL allows it. */
	if (newipl < sc->sc_minipl[ci->ci_cpuid])
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);

	/* If we still have softints pending, trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}

#ifdef MULTIPROCESSOR
uint32_t
octcit_ipi_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct octcit_softc *sc = octcit_sc;
	u_long cpuid = cpu_number();

	if (sc->sc_ipi_handler != NULL)
		sc->sc_ipi_handler((void *)cpuid);

	return hwpend;
}

int
octcit_ipi_establish(int (*func)(void *), cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;
	uint64_t val;
	int intsn;

	if (cpuid == 0)
		sc->sc_ipi_handler = func;

	intsn = MBOX_INTSN(cpuid);
	val = CIU3_ISC_CTL_EN | (CIU3_IDT(cpuid, 1) << CIU3_ISC_CTL_IDT_SHIFT);
	CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
	(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));

	return 0;
}

void
octcit_ipi_set(cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;

	CIU3_WR_8(sc, CIU3_ISC_W1S(MBOX_INTSN(cpuid)), CIU3_ISC_W1S_RAW);
}

void
octcit_ipi_clear(cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;
	uint64_t reg = CIU3_ISC_W1C(MBOX_INTSN(cpuid));

	CIU3_WR_8(sc, reg, CIU3_ISC_W1C_RAW);
	(void)CIU3_RD_8(sc, reg);
}
#endif /* MULTIPROCESSOR */