/*	$OpenBSD: octciu.c,v 1.8 2017/11/03 16:19:25 visa Exp $	*/

/*
 * Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for OCTEON Central Interrupt Unit (CIU).
 *
 * CIU is present at least on CN3xxx, CN5xxx, CN60xx, CN61xx,
 * CN70xx, and CN71xx.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

/* Total number of CIU interrupt sources: 3 banks x 64 bits. */
#define OCTCIU_NINTS		192

/* Interrupt priorities for the two CIU hardware interrupt lines. */
#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)
#define INTPRI_CIU_1	(INTPRI_CLOCK + 2)

/*
 * Per-bank register addresses, captured per CPU at init time so the
 * dispatch and splx paths do not have to recompute them.
 */
struct intrbank {
	uint64_t	en;	/* enable mask register */
	uint64_t	sum;	/* service request register */
	int		id;	/* bank number */
};

#define NBANKS		3
#define BANK_SIZE	64	/* interrupt sources per bank */
#define IRQ_TO_BANK(x)	((x) >> 6)	/* irq -> bank index (irq / 64) */
#define IRQ_TO_BIT(x)	((x) & 0x3f)	/* irq -> bit within bank (irq % 64) */

/* The first 16 sources are POW work queue interrupts. */
#define IS_WORKQ_IRQ(x)	((unsigned int)(x) < 16)

/*
 * Per-CPU interrupt state.
 */
struct octciu_cpu {
	/* Register addresses of this CPU's banks. */
	struct intrbank	 scpu_ibank[NBANKS];
	/* Bits of interrupts that have a handler established on this CPU. */
	uint64_t	 scpu_intem[NBANKS];
	/* Per-IPL masks of interrupts blocked at or above that level. */
	uint64_t	 scpu_imask[NIPLS][NBANKS];
};

struct octciu_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	struct octciu_cpu	 sc_cpu[MAXCPUS];
	/* Handler chains, one list head per interrupt source. */
	struct intrhand		*sc_intrhand[OCTCIU_NINTS];
	/* 3 on OCTEON II/III, 2 on older models. */
	unsigned int		 sc_nbanks;

	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};

int	 octciu_match(struct device *, void *, void *);
void	 octciu_attach(struct device *, struct device *, void *);

void	 octciu_init(void);
void	 octciu_intr_makemasks(struct octciu_softc *);
uint32_t octciu_intr0(uint32_t, struct trapframe *);
uint32_t octciu_intr2(uint32_t, struct trapframe *);
uint32_t octciu_intr_bank(struct octciu_softc *, struct intrbank *,
	    struct trapframe *);
void	*octciu_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octciu_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octciu_intr_disestablish(void *);
void	 octciu_splx(int);
uint32_t octciu_ipi_intr(uint32_t, struct trapframe *); 108 int octciu_ipi_establish(int (*)(void *), cpuid_t); 109 void octciu_ipi_set(cpuid_t); 110 void octciu_ipi_clear(cpuid_t); 111 112 const struct cfattach octciu_ca = { 113 sizeof(struct octciu_softc), octciu_match, octciu_attach 114 }; 115 116 struct cfdriver octciu_cd = { 117 NULL, "octciu", DV_DULL 118 }; 119 120 struct octciu_softc *octciu_sc; 121 122 int 123 octciu_match(struct device *parent, void *match, void *aux) 124 { 125 struct fdt_attach_args *faa = aux; 126 127 return OF_is_compatible(faa->fa_node, "cavium,octeon-3860-ciu"); 128 } 129 130 void 131 octciu_attach(struct device *parent, struct device *self, void *aux) 132 { 133 struct fdt_attach_args *faa = aux; 134 struct octciu_softc *sc = (struct octciu_softc *)self; 135 136 if (faa->fa_nreg != 1) { 137 printf(": expected one IO space, got %d\n", faa->fa_nreg); 138 return; 139 } 140 141 sc->sc_iot = faa->fa_iot; 142 if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size, 143 0, &sc->sc_ioh)) { 144 printf(": could not map IO space\n"); 145 return; 146 } 147 148 if (octeon_ver == OCTEON_2 || octeon_ver == OCTEON_3) 149 sc->sc_nbanks = 3; 150 else 151 sc->sc_nbanks = 2; 152 153 printf("\n"); 154 155 sc->sc_ic.ic_cookie = sc; 156 sc->sc_ic.ic_node = faa->fa_node; 157 sc->sc_ic.ic_init = octciu_init; 158 sc->sc_ic.ic_establish = octciu_intr_establish; 159 sc->sc_ic.ic_establish_fdt_idx = octciu_intr_establish_fdt_idx; 160 sc->sc_ic.ic_disestablish = octciu_intr_disestablish; 161 #ifdef MULTIPROCESSOR 162 sc->sc_ic.ic_ipi_establish = octciu_ipi_establish; 163 sc->sc_ic.ic_ipi_set = octciu_ipi_set; 164 sc->sc_ic.ic_ipi_clear = octciu_ipi_clear; 165 #endif 166 167 octciu_sc = sc; 168 169 set_intr(INTPRI_CIU_0, CR_INT_0, octciu_intr0); 170 if (sc->sc_nbanks == 3) 171 set_intr(INTPRI_CIU_1, CR_INT_2, octciu_intr2); 172 #ifdef MULTIPROCESSOR 173 set_intr(INTPRI_IPI, CR_INT_1, octciu_ipi_intr); 174 #endif 175 176 octciu_init(); 177 178 
register_splx_handler(octciu_splx); 179 octeon_intr_register(&sc->sc_ic); 180 } 181 182 void 183 octciu_init(void) 184 { 185 struct octciu_softc *sc = octciu_sc; 186 struct octciu_cpu *scpu; 187 int cpuid = cpu_number(); 188 int s; 189 190 scpu = &sc->sc_cpu[cpuid]; 191 192 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN0(cpuid), 0); 193 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0); 194 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN1(cpuid), 0); 195 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN1(cpuid), 0); 196 197 if (sc->sc_nbanks == 3) 198 bus_space_write_8(sc->sc_iot, sc->sc_ioh, 199 CIU_IP4_EN2(cpuid), 0); 200 201 scpu->scpu_ibank[0].en = CIU_IP2_EN0(cpuid); 202 scpu->scpu_ibank[0].sum = CIU_IP2_SUM0(cpuid); 203 scpu->scpu_ibank[0].id = 0; 204 scpu->scpu_ibank[1].en = CIU_IP2_EN1(cpuid); 205 scpu->scpu_ibank[1].sum = CIU_INT32_SUM1; 206 scpu->scpu_ibank[1].id = 1; 207 scpu->scpu_ibank[2].en = CIU_IP4_EN2(cpuid); 208 scpu->scpu_ibank[2].sum = CIU_IP4_SUM2(cpuid); 209 scpu->scpu_ibank[2].id = 2; 210 211 s = splhigh(); 212 octciu_intr_makemasks(sc); 213 splx(s); /* causes hw mask update */ 214 } 215 216 void * 217 octciu_intr_establish(int irq, int level, int (*ih_fun)(void *), 218 void *ih_arg, const char *ih_what) 219 { 220 struct octciu_softc *sc = octciu_sc; 221 struct intrhand **p, *q, *ih; 222 int cpuid = cpu_number(); 223 int flags; 224 int s; 225 226 #ifdef DIAGNOSTIC 227 if (irq >= sc->sc_nbanks * BANK_SIZE || irq < 0) 228 panic("%s: illegal irq %d", __func__, irq); 229 #endif 230 231 #ifdef MULTIPROCESSOR 232 /* Span work queue interrupts across CPUs. */ 233 if (IS_WORKQ_IRQ(irq)) 234 cpuid = irq % ncpusfound; 235 #endif 236 237 flags = (level & IPL_MPSAFE) ? 
IH_MPSAFE : 0; 238 level &= ~IPL_MPSAFE; 239 240 ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT); 241 if (ih == NULL) 242 return NULL; 243 244 ih->ih_next = NULL; 245 ih->ih_fun = ih_fun; 246 ih->ih_arg = ih_arg; 247 ih->ih_level = level; 248 ih->ih_flags = flags; 249 ih->ih_irq = irq; 250 ih->ih_cpuid = cpuid; 251 evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq); 252 253 s = splhigh(); 254 255 /* 256 * Figure out where to put the handler. 257 * This is O(N^2), but we want to preserve the order, and N is 258 * generally small. 259 */ 260 for (p = &sc->sc_intrhand[irq]; (q = *p) != NULL; p = &q->ih_next) 261 continue; 262 *p = ih; 263 264 sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] |= 265 1UL << IRQ_TO_BIT(irq); 266 octciu_intr_makemasks(sc); 267 268 splx(s); /* causes hw mask update */ 269 270 return (ih); 271 } 272 273 void * 274 octciu_intr_establish_fdt_idx(void *cookie, int node, int idx, int level, 275 int (*ih_fun)(void *), void *ih_arg, const char *ih_what) 276 { 277 uint32_t *cells; 278 int irq, len; 279 280 len = OF_getproplen(node, "interrupts"); 281 if (len / (sizeof(uint32_t) * 2) <= idx || 282 len % (sizeof(uint32_t) * 2) != 0) 283 return NULL; 284 285 cells = malloc(len, M_TEMP, M_NOWAIT); 286 if (cells == NULL) 287 return NULL; 288 289 OF_getpropintarray(node, "interrupts", cells, len); 290 irq = cells[idx * 2] * BANK_SIZE + cells[idx * 2 + 1]; 291 292 free(cells, M_TEMP, len); 293 294 return octciu_intr_establish(irq, level, ih_fun, ih_arg, ih_what); 295 } 296 297 void 298 octciu_intr_disestablish(void *_ih) 299 { 300 struct intrhand *ih = _ih; 301 struct intrhand *p; 302 struct octciu_softc *sc = octciu_sc; 303 unsigned int irq = ih->ih_irq; 304 int cpuid = cpu_number(); 305 int s; 306 307 KASSERT(irq < sc->sc_nbanks * BANK_SIZE); 308 KASSERT(!IS_WORKQ_IRQ(irq)); 309 310 s = splhigh(); 311 312 if (ih == sc->sc_intrhand[irq]) { 313 sc->sc_intrhand[irq] = ih->ih_next; 314 315 if (sc->sc_intrhand[irq] == NULL) 316 
sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] &= 317 ~(1UL << IRQ_TO_BIT(irq)); 318 } else { 319 for (p = sc->sc_intrhand[irq]; p != NULL; p = p->ih_next) { 320 if (p->ih_next == ih) { 321 p->ih_next = ih->ih_next; 322 break; 323 } 324 } 325 if (p == NULL) 326 panic("%s: intrhand %p has not been registered", 327 __func__, ih); 328 } 329 free(ih, M_DEVBUF, sizeof(*ih)); 330 331 octciu_intr_makemasks(sc); 332 splx(s); /* causes hw mask update */ 333 } 334 335 /* 336 * Recompute interrupt masks. 337 */ 338 void 339 octciu_intr_makemasks(struct octciu_softc *sc) 340 { 341 cpuid_t cpuid = cpu_number(); 342 struct octciu_cpu *scpu = &sc->sc_cpu[cpuid]; 343 struct intrhand *q; 344 uint intrlevel[OCTCIU_NINTS]; 345 int irq, level; 346 347 /* First, figure out which levels each IRQ uses. */ 348 for (irq = 0; irq < OCTCIU_NINTS; irq++) { 349 uint levels = 0; 350 for (q = sc->sc_intrhand[irq]; q != NULL; q = q->ih_next) { 351 if (q->ih_cpuid == cpuid) 352 levels |= 1 << q->ih_level; 353 } 354 intrlevel[irq] = levels; 355 } 356 357 /* 358 * Then figure out which IRQs use each level. 359 * Note that we make sure never to overwrite imask[IPL_HIGH], in 360 * case an interrupt occurs during intr_disestablish() and causes 361 * an unfortunate splx() while we are here recomputing the masks. 362 */ 363 for (level = IPL_NONE; level < NIPLS; level++) { 364 uint64_t mask[NBANKS] = {}; 365 for (irq = 0; irq < OCTCIU_NINTS; irq++) 366 if (intrlevel[irq] & (1 << level)) 367 mask[IRQ_TO_BANK(irq)] |= 368 1UL << IRQ_TO_BIT(irq); 369 scpu->scpu_imask[level][0] = mask[0]; 370 scpu->scpu_imask[level][1] = mask[1]; 371 scpu->scpu_imask[level][2] = mask[2]; 372 } 373 /* 374 * There are tty, network and disk drivers that use free() at interrupt 375 * time, so vm > (tty | net | bio). 376 * 377 * Enforce a hierarchy that gives slow devices a better chance at not 378 * dropping data. 
379 */ 380 #define ADD_MASK(dst, src) do { \ 381 dst[0] |= src[0]; \ 382 dst[1] |= src[1]; \ 383 dst[2] |= src[2]; \ 384 } while (0) 385 ADD_MASK(scpu->scpu_imask[IPL_NET], scpu->scpu_imask[IPL_BIO]); 386 ADD_MASK(scpu->scpu_imask[IPL_TTY], scpu->scpu_imask[IPL_NET]); 387 ADD_MASK(scpu->scpu_imask[IPL_VM], scpu->scpu_imask[IPL_TTY]); 388 ADD_MASK(scpu->scpu_imask[IPL_CLOCK], scpu->scpu_imask[IPL_VM]); 389 ADD_MASK(scpu->scpu_imask[IPL_HIGH], scpu->scpu_imask[IPL_CLOCK]); 390 ADD_MASK(scpu->scpu_imask[IPL_IPI], scpu->scpu_imask[IPL_HIGH]); 391 392 /* 393 * These are pseudo-levels. 394 */ 395 scpu->scpu_imask[IPL_NONE][0] = 0; 396 scpu->scpu_imask[IPL_NONE][1] = 0; 397 scpu->scpu_imask[IPL_NONE][2] = 0; 398 } 399 400 static inline int 401 octciu_next_irq(uint64_t *isr) 402 { 403 uint64_t irq, tmp = *isr; 404 405 if (tmp == 0) 406 return -1; 407 408 asm volatile ( 409 " .set push\n" 410 " .set mips64\n" 411 " dclz %0, %0\n" 412 " .set pop\n" 413 : "=r" (tmp) : "0" (tmp)); 414 415 irq = 63u - tmp; 416 *isr &= ~(1u << irq); 417 return irq; 418 } 419 420 /* 421 * Dispatch interrupts in given bank. 422 */ 423 uint32_t 424 octciu_intr_bank(struct octciu_softc *sc, struct intrbank *bank, 425 struct trapframe *frame) 426 { 427 struct cpu_info *ci = curcpu(); 428 struct intrhand *ih; 429 struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid]; 430 uint64_t imr, isr, mask; 431 int handled, ipl, irq; 432 #ifdef MULTIPROCESSOR 433 register_t sr; 434 int need_lock; 435 #endif 436 437 isr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->sum); 438 imr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->en); 439 440 isr &= imr; 441 if (isr == 0) 442 return 0; /* not for us */ 443 444 /* 445 * Mask all pending interrupts. 446 */ 447 bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr & ~isr); 448 449 /* 450 * If interrupts are spl-masked, mask them and wait for splx() 451 * to reenable them when necessary. 
452 */ 453 if ((mask = isr & scpu->scpu_imask[frame->ipl][bank->id]) 454 != 0) { 455 isr &= ~mask; 456 imr &= ~mask; 457 } 458 if (isr == 0) 459 return 1; 460 461 /* 462 * Now process allowed interrupts. 463 */ 464 465 __asm__ (".set noreorder\n"); 466 ipl = ci->ci_ipl; 467 mips_sync(); 468 __asm__ (".set reorder\n"); 469 470 while ((irq = octciu_next_irq(&isr)) >= 0) { 471 irq += bank->id * BANK_SIZE; 472 handled = 0; 473 for (ih = sc->sc_intrhand[irq]; ih != NULL; ih = ih->ih_next) { 474 splraise(ih->ih_level); 475 #ifdef MULTIPROCESSOR 476 if (ih->ih_level < IPL_IPI) { 477 sr = getsr(); 478 ENABLEIPI(); 479 } 480 if (ih->ih_flags & IH_MPSAFE) 481 need_lock = 0; 482 else 483 need_lock = ih->ih_level < IPL_CLOCK; 484 if (need_lock) 485 __mp_lock(&kernel_lock); 486 #endif 487 if ((*ih->ih_fun)(ih->ih_arg) != 0) { 488 handled = 1; 489 atomic_inc_long( 490 (unsigned long *)&ih->ih_count.ec_count); 491 } 492 #ifdef MULTIPROCESSOR 493 if (need_lock) 494 __mp_unlock(&kernel_lock); 495 if (ih->ih_level < IPL_IPI) 496 setsr(sr); 497 #endif 498 } 499 if (!handled) 500 printf("spurious interrupt %d\n", irq); 501 } 502 503 __asm__ (".set noreorder\n"); 504 ci->ci_ipl = ipl; 505 mips_sync(); 506 __asm__ (".set reorder\n"); 507 508 /* 509 * Reenable interrupts which have been serviced. 510 */ 511 bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr); 512 513 return 1; 514 } 515 516 uint32_t 517 octciu_intr0(uint32_t hwpend, struct trapframe *frame) 518 { 519 struct octciu_softc *sc = octciu_sc; 520 struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()]; 521 int handled; 522 523 handled = octciu_intr_bank(sc, &scpu->scpu_ibank[0], frame); 524 handled |= octciu_intr_bank(sc, &scpu->scpu_ibank[1], frame); 525 return handled ? 
hwpend : 0; 526 } 527 528 uint32_t 529 octciu_intr2(uint32_t hwpend, struct trapframe *frame) 530 { 531 struct octciu_softc *sc = octciu_sc; 532 struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()]; 533 int handled; 534 535 handled = octciu_intr_bank(sc, &scpu->scpu_ibank[2], frame); 536 return handled ? hwpend : 0; 537 } 538 539 void 540 octciu_splx(int newipl) 541 { 542 struct cpu_info *ci = curcpu(); 543 struct octciu_softc *sc = octciu_sc; 544 struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid]; 545 546 /* Update IPL. Order highly important! */ 547 __asm__ (".set noreorder\n"); 548 ci->ci_ipl = newipl; 549 mips_sync(); 550 __asm__ (".set reorder\n"); 551 552 /* Set hardware masks. */ 553 bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[0].en, 554 scpu->scpu_intem[0] & ~scpu->scpu_imask[newipl][0]); 555 bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[1].en, 556 scpu->scpu_intem[1] & ~scpu->scpu_imask[newipl][1]); 557 558 if (sc->sc_nbanks == 3) 559 bus_space_write_8(sc->sc_iot, sc->sc_ioh, 560 scpu->scpu_ibank[2].en, 561 scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]); 562 563 /* If we still have softints pending trigger processing. */ 564 if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT) 565 setsoftintr0(); 566 } 567 568 #ifdef MULTIPROCESSOR 569 uint32_t 570 octciu_ipi_intr(uint32_t hwpend, struct trapframe *frame) 571 { 572 struct octciu_softc *sc = octciu_sc; 573 u_long cpuid = cpu_number(); 574 575 /* 576 * Mask all pending interrupts. 577 */ 578 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0); 579 580 if (sc->sc_ipi_handler == NULL) 581 return hwpend; 582 583 sc->sc_ipi_handler((void *)cpuid); 584 585 /* 586 * Reenable interrupts which have been serviced. 
587 */ 588 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 589 (1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1)); 590 return hwpend; 591 } 592 593 int 594 octciu_ipi_establish(int (*func)(void *), cpuid_t cpuid) 595 { 596 struct octciu_softc *sc = octciu_sc; 597 598 if (cpuid == 0) 599 sc->sc_ipi_handler = func; 600 601 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid), 602 0xffffffff); 603 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 604 (1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1)); 605 606 return 0; 607 } 608 609 void 610 octciu_ipi_set(cpuid_t cpuid) 611 { 612 struct octciu_softc *sc = octciu_sc; 613 614 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_SET(cpuid), 1); 615 } 616 617 void 618 octciu_ipi_clear(cpuid_t cpuid) 619 { 620 struct octciu_softc *sc = octciu_sc; 621 uint64_t clr; 622 623 clr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid)); 624 bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid), clr); 625 } 626 #endif /* MULTIPROCESSOR */ 627