1 /* $NetBSD: iop.c,v 1.36 2003/05/03 18:11:10 wiz Exp $ */ 2 3 /*- 4 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Support for I2O IOPs (intelligent I/O processors). 41 */ 42 43 #include <sys/cdefs.h> 44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.36 2003/05/03 18:11:10 wiz Exp $"); 45 46 #include "opt_i2o.h" 47 #include "iop.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/kernel.h> 52 #include <sys/device.h> 53 #include <sys/queue.h> 54 #include <sys/proc.h> 55 #include <sys/malloc.h> 56 #include <sys/ioctl.h> 57 #include <sys/endian.h> 58 #include <sys/conf.h> 59 #include <sys/kthread.h> 60 61 #include <uvm/uvm_extern.h> 62 63 #include <machine/bus.h> 64 65 #include <dev/i2o/i2o.h> 66 #include <dev/i2o/iopio.h> 67 #include <dev/i2o/iopreg.h> 68 #include <dev/i2o/iopvar.h> 69 70 #define POLL(ms, cond) \ 71 do { \ 72 int i; \ 73 for (i = (ms) * 10; i; i--) { \ 74 if (cond) \ 75 break; \ 76 DELAY(100); \ 77 } \ 78 } while (/* CONSTCOND */0); 79 80 #ifdef I2ODEBUG 81 #define DPRINTF(x) printf x 82 #else 83 #define DPRINTF(x) 84 #endif 85 86 #ifdef I2OVERBOSE 87 #define IFVERBOSE(x) x 88 #define COMMENT(x) NULL 89 #else 90 #define IFVERBOSE(x) 91 #define COMMENT(x) 92 #endif 93 94 #define IOP_ICTXHASH_NBUCKETS 16 95 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash]) 96 97 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1) 98 99 #define IOP_TCTX_SHIFT 12 100 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1) 101 102 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl; 103 static u_long iop_ictxhash; 104 static void *iop_sdh; 105 static struct i2o_systab *iop_systab; 106 static int iop_systab_size; 107 108 extern struct cfdriver iop_cd; 109 110 dev_type_open(iopopen); 111 dev_type_close(iopclose); 112 dev_type_ioctl(iopioctl); 113 114 const struct cdevsw iop_cdevsw = { 115 iopopen, iopclose, noread, nowrite, iopioctl, 116 nostop, notty, nopoll, nommap, nokqfilter, 117 }; 118 119 #define IC_CONFIGURE 0x01 120 #define IC_PRIORITY 0x02 121 122 struct iop_class { 123 u_short ic_class; 124 u_short ic_flags; 125 #ifdef I2OVERBOSE 126 const char *ic_caption; 127 #endif 128 } static const iop_class[] = { 129 { 130 I2O_CLASS_EXECUTIVE, 131 0, 132 IFVERBOSE("executive") 133 }, 134 { 135 I2O_CLASS_DDM, 136 0, 137 COMMENT("device driver module") 138 }, 139 { 140 I2O_CLASS_RANDOM_BLOCK_STORAGE, 141 IC_CONFIGURE | IC_PRIORITY, 142 IFVERBOSE("random block storage") 143 }, 144 { 145 I2O_CLASS_SEQUENTIAL_STORAGE, 146 IC_CONFIGURE | IC_PRIORITY, 147 IFVERBOSE("sequential storage") 148 }, 149 { 150 I2O_CLASS_LAN, 151 IC_CONFIGURE | IC_PRIORITY, 152 IFVERBOSE("LAN port") 153 }, 154 { 155 I2O_CLASS_WAN, 156 IC_CONFIGURE | IC_PRIORITY, 157 IFVERBOSE("WAN port") 158 }, 159 { 160 I2O_CLASS_FIBRE_CHANNEL_PORT, 161 IC_CONFIGURE, 162 IFVERBOSE("fibrechannel port") 163 }, 164 { 165 I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL, 166 0, 167 COMMENT("fibrechannel peripheral") 168 }, 169 { 170 I2O_CLASS_SCSI_PERIPHERAL, 171 0, 172 COMMENT("SCSI peripheral") 173 }, 174 { 175 I2O_CLASS_ATE_PORT, 176 IC_CONFIGURE, 177 IFVERBOSE("ATE port") 178 }, 179 { 180 I2O_CLASS_ATE_PERIPHERAL, 181 0, 182 COMMENT("ATE peripheral") 183 }, 184 { 185 I2O_CLASS_FLOPPY_CONTROLLER, 186 IC_CONFIGURE, 187 IFVERBOSE("floppy controller") 188 }, 189 { 190 I2O_CLASS_FLOPPY_DEVICE, 191 0, 192 COMMENT("floppy device") 193 }, 194 { 195 I2O_CLASS_BUS_ADAPTER_PORT, 196 IC_CONFIGURE, 197 IFVERBOSE("bus adapter port" ) 198 }, 199 }; 200 201 #if defined(I2ODEBUG) && defined(I2OVERBOSE) 202 static const char * const iop_status[] = { 203 "success", 204 "abort (dirty)", 205 "abort (no data transfer)", 206 "abort (partial transfer)", 207 "error (dirty)", 208 "error (no data transfer)", 209 "error (partial transfer)", 210 "undefined error code", 211 "process abort (dirty)", 212 "process abort (no data transfer)", 213 "process abort (partial transfer)", 214 "transaction error", 215 }; 216 #endif 217 218 static inline u_int32_t iop_inl(struct iop_softc *, int); 219 static inline void iop_outl(struct iop_softc *, int, u_int32_t); 220 221 static inline u_int32_t iop_inl_msg(struct iop_softc *, int); 222 static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t); 223 224 static void iop_config_interrupts(struct device *); 225 static void iop_configure_devices(struct iop_softc *, int, int); 226 static void iop_devinfo(int, char *); 227 static int iop_print(void *, const char *); 228 static void iop_shutdown(void *); 229 static int iop_submatch(struct device *, struct cfdata *, void *); 230 231 static void iop_adjqparam(struct iop_softc *, int); 232 static void iop_create_reconf_thread(void *); 233 static int iop_handle_reply(struct iop_softc *, u_int32_t); 234 static int iop_hrt_get(struct iop_softc *); 235 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int); 236 static void iop_intr_event(struct device *, struct iop_msg *, void *); 237 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int, 238 u_int32_t); 239 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int); 240 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int); 241 static int iop_ofifo_init(struct iop_softc *); 242 static int iop_passthrough(struct iop_softc *, struct ioppt *, 243 struct proc *); 244 static void iop_reconf_thread(void *); 245 static void iop_release_mfa(struct iop_softc *, u_int32_t); 246 static int iop_reset(struct iop_softc *); 247 static int iop_systab_set(struct iop_softc *); 248 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *); 249 250 #ifdef I2ODEBUG 251 static void iop_reply_print(struct iop_softc *, struct i2o_reply *); 252 #endif 253 254 static inline u_int32_t 255 iop_inl(struct iop_softc *sc, int off) 256 { 257 258 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, 259 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); 260 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off)); 261 } 262 263 static inline void 264 iop_outl(struct iop_softc *sc, int off, u_int32_t val) 265 { 266 267 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); 268 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, 269 BUS_SPACE_BARRIER_WRITE); 270 } 271 272 static inline u_int32_t 273 iop_inl_msg(struct iop_softc *sc, int off) 274 { 275 276 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4, 277 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); 278 return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off)); 279 } 280 281 static inline void 282 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val) 283 { 284 285 bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val); 286 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4, 287 BUS_SPACE_BARRIER_WRITE); 288 } 289 290 /* 291 * Initialise the IOP and our interface. 292 */ 293 void 294 iop_init(struct iop_softc *sc, const char *intrstr) 295 { 296 struct iop_msg *im; 297 int rv, i, j, state, nsegs; 298 u_int32_t mask; 299 char ident[64]; 300 301 state = 0; 302 303 printf("I2O adapter"); 304 305 if (iop_ictxhashtbl == NULL) 306 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST, 307 M_DEVBUF, M_NOWAIT, &iop_ictxhash); 308 309 /* Disable interrupts at the IOP. */ 310 mask = iop_inl(sc, IOP_REG_INTR_MASK); 311 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO); 312 313 /* Allocate a scratch DMA map for small miscellaneous shared data. */ 314 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0, 315 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) { 316 printf("%s: cannot create scratch dmamap\n", 317 sc->sc_dv.dv_xname); 318 return; 319 } 320 321 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, 322 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) { 323 printf("%s: cannot alloc scratch dmamem\n", 324 sc->sc_dv.dv_xname); 325 goto bail_out; 326 } 327 state++; 328 329 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE, 330 &sc->sc_scr, 0)) { 331 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname); 332 goto bail_out; 333 } 334 state++; 335 336 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr, 337 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) { 338 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname); 339 goto bail_out; 340 } 341 state++; 342 343 #ifdef I2ODEBUG 344 /* So that our debug checks don't choke. */ 345 sc->sc_framesize = 128; 346 #endif 347 348 /* Reset the adapter and request status. */ 349 if ((rv = iop_reset(sc)) != 0) { 350 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname); 351 goto bail_out; 352 } 353 354 if ((rv = iop_status_get(sc, 1)) != 0) { 355 printf("%s: not responding (get status)\n", 356 sc->sc_dv.dv_xname); 357 goto bail_out; 358 } 359 360 sc->sc_flags |= IOP_HAVESTATUS; 361 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid), 362 ident, sizeof(ident)); 363 printf(" <%s>\n", ident); 364 365 #ifdef I2ODEBUG 366 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname, 367 le16toh(sc->sc_status.orgid), 368 (le32toh(sc->sc_status.segnumber) >> 12) & 15); 369 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname); 370 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname, 371 le32toh(sc->sc_status.desiredprivmemsize), 372 le32toh(sc->sc_status.currentprivmemsize), 373 le32toh(sc->sc_status.currentprivmembase)); 374 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname, 375 le32toh(sc->sc_status.desiredpriviosize), 376 le32toh(sc->sc_status.currentpriviosize), 377 le32toh(sc->sc_status.currentpriviobase)); 378 #endif 379 380 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes); 381 if (sc->sc_maxob > IOP_MAX_OUTBOUND) 382 sc->sc_maxob = IOP_MAX_OUTBOUND; 383 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes); 384 if (sc->sc_maxib > IOP_MAX_INBOUND) 385 sc->sc_maxib = IOP_MAX_INBOUND; 386 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2; 387 if (sc->sc_framesize > IOP_MAX_MSG_SIZE) 388 sc->sc_framesize = IOP_MAX_MSG_SIZE; 389 390 #if defined(I2ODEBUG) || defined(DIAGNOSTIC) 391 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) { 392 printf("%s: frame size too small (%d)\n", 393 sc->sc_dv.dv_xname, sc->sc_framesize); 394 goto bail_out; 395 } 396 #endif 397 398 /* Allocate message wrappers. */ 399 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO); 400 if (im == NULL) { 401 printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname); 402 goto bail_out; 403 } 404 state++; 405 sc->sc_ims = im; 406 SLIST_INIT(&sc->sc_im_freelist); 407 408 for (i = 0, state++; i < sc->sc_maxib; i++, im++) { 409 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, 410 IOP_MAX_SEGS, IOP_MAX_XFER, 0, 411 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 412 &im->im_xfer[0].ix_map); 413 if (rv != 0) { 414 printf("%s: couldn't create dmamap (%d)", 415 sc->sc_dv.dv_xname, rv); 416 goto bail_out; 417 } 418 419 im->im_tctx = i; 420 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain); 421 } 422 423 /* Initialise the IOP's outbound FIFO. */ 424 if (iop_ofifo_init(sc) != 0) { 425 printf("%s: unable to init oubound FIFO\n", 426 sc->sc_dv.dv_xname); 427 goto bail_out; 428 } 429 430 /* 431 * Defer further configuration until (a) interrupts are working and 432 * (b) we have enough information to build the system table. 433 */ 434 config_interrupts((struct device *)sc, iop_config_interrupts); 435 436 /* Configure shutdown hook before we start any device activity. */ 437 if (iop_sdh == NULL) 438 iop_sdh = shutdownhook_establish(iop_shutdown, NULL); 439 440 /* Ensure interrupts are enabled at the IOP. */ 441 mask = iop_inl(sc, IOP_REG_INTR_MASK); 442 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO); 443 444 if (intrstr != NULL) 445 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, 446 intrstr); 447 448 #ifdef I2ODEBUG 449 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n", 450 sc->sc_dv.dv_xname, sc->sc_maxib, 451 le32toh(sc->sc_status.maxinboundmframes), 452 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes)); 453 #endif 454 455 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0); 456 return; 457 458 bail_out: 459 if (state > 3) { 460 for (j = 0; j < i; j++) 461 bus_dmamap_destroy(sc->sc_dmat, 462 sc->sc_ims[j].im_xfer[0].ix_map); 463 free(sc->sc_ims, M_DEVBUF); 464 } 465 if (state > 2) 466 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap); 467 if (state > 1) 468 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE); 469 if (state > 0) 470 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs); 471 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap); 472 } 473 474 /* 475 * Perform autoconfiguration tasks. 476 */ 477 static void 478 iop_config_interrupts(struct device *self) 479 { 480 struct iop_attach_args ia; 481 struct iop_softc *sc, *iop; 482 struct i2o_systab_entry *ste; 483 int rv, i, niop; 484 485 sc = (struct iop_softc *)self; 486 LIST_INIT(&sc->sc_iilist); 487 488 printf("%s: configuring...\n", sc->sc_dv.dv_xname); 489 490 if (iop_hrt_get(sc) != 0) { 491 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname); 492 return; 493 } 494 495 /* 496 * Build the system table. 497 */ 498 if (iop_systab == NULL) { 499 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) { 500 if ((iop = device_lookup(&iop_cd, i)) == NULL) 501 continue; 502 if ((iop->sc_flags & IOP_HAVESTATUS) == 0) 503 continue; 504 if (iop_status_get(iop, 1) != 0) { 505 printf("%s: unable to retrieve status\n", 506 sc->sc_dv.dv_xname); 507 iop->sc_flags &= ~IOP_HAVESTATUS; 508 continue; 509 } 510 niop++; 511 } 512 if (niop == 0) 513 return; 514 515 i = sizeof(struct i2o_systab_entry) * (niop - 1) + 516 sizeof(struct i2o_systab); 517 iop_systab_size = i; 518 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO); 519 520 iop_systab->numentries = niop; 521 iop_systab->version = I2O_VERSION_11; 522 523 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) { 524 if ((iop = device_lookup(&iop_cd, i)) == NULL) 525 continue; 526 if ((iop->sc_flags & IOP_HAVESTATUS) == 0) 527 continue; 528 529 ste->orgid = iop->sc_status.orgid; 530 ste->iopid = iop->sc_dv.dv_unit + 2; 531 ste->segnumber = 532 htole32(le32toh(iop->sc_status.segnumber) & ~4095); 533 ste->iopcaps = iop->sc_status.iopcaps; 534 ste->inboundmsgframesize = 535 iop->sc_status.inboundmframesize; 536 ste->inboundmsgportaddresslow = 537 htole32(iop->sc_memaddr + IOP_REG_IFIFO); 538 ste++; 539 } 540 } 541 542 /* 543 * Post the system table to the IOP and bring it to the OPERATIONAL 544 * state. 545 */ 546 if (iop_systab_set(sc) != 0) { 547 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname); 548 return; 549 } 550 if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1, 551 30000) != 0) { 552 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname); 553 return; 554 } 555 556 /* 557 * Set up an event handler for this IOP. 558 */ 559 sc->sc_eventii.ii_dv = self; 560 sc->sc_eventii.ii_intr = iop_intr_event; 561 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY; 562 sc->sc_eventii.ii_tid = I2O_TID_IOP; 563 iop_initiator_register(sc, &sc->sc_eventii); 564 565 rv = iop_util_eventreg(sc, &sc->sc_eventii, 566 I2O_EVENT_EXEC_RESOURCE_LIMITS | 567 I2O_EVENT_EXEC_CONNECTION_FAIL | 568 I2O_EVENT_EXEC_ADAPTER_FAULT | 569 I2O_EVENT_EXEC_POWER_FAIL | 570 I2O_EVENT_EXEC_RESET_PENDING | 571 I2O_EVENT_EXEC_RESET_IMMINENT | 572 I2O_EVENT_EXEC_HARDWARE_FAIL | 573 I2O_EVENT_EXEC_XCT_CHANGE | 574 I2O_EVENT_EXEC_DDM_AVAILIBILITY | 575 I2O_EVENT_GEN_DEVICE_RESET | 576 I2O_EVENT_GEN_STATE_CHANGE | 577 I2O_EVENT_GEN_GENERAL_WARNING); 578 if (rv != 0) { 579 printf("%s: unable to register for events", sc->sc_dv.dv_xname); 580 return; 581 } 582 583 /* 584 * Attempt to match and attach a product-specific extension. 585 */ 586 ia.ia_class = I2O_CLASS_ANY; 587 ia.ia_tid = I2O_TID_IOP; 588 config_found_sm(self, &ia, iop_print, iop_submatch); 589 590 /* 591 * Start device configuration. 592 */ 593 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL); 594 if ((rv = iop_reconfigure(sc, 0)) == -1) { 595 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv); 596 return; 597 } 598 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL); 599 600 kthread_create(iop_create_reconf_thread, sc); 601 } 602 603 /* 604 * Create the reconfiguration thread. Called after the standard kernel 605 * threads have been created. 606 */ 607 static void 608 iop_create_reconf_thread(void *cookie) 609 { 610 struct iop_softc *sc; 611 int rv; 612 613 sc = cookie; 614 sc->sc_flags |= IOP_ONLINE; 615 616 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc, 617 "%s", sc->sc_dv.dv_xname); 618 if (rv != 0) { 619 printf("%s: unable to create reconfiguration thread (%d)", 620 sc->sc_dv.dv_xname, rv); 621 return; 622 } 623 } 624 625 /* 626 * Reconfiguration thread; listens for LCT change notification, and 627 * initiates re-configuration if received. 628 */ 629 static void 630 iop_reconf_thread(void *cookie) 631 { 632 struct iop_softc *sc; 633 struct lwp *l; 634 struct i2o_lct lct; 635 u_int32_t chgind; 636 int rv; 637 638 sc = cookie; 639 chgind = sc->sc_chgind + 1; 640 l = curlwp; 641 642 for (;;) { 643 DPRINTF(("%s: async reconfig: requested 0x%08x\n", 644 sc->sc_dv.dv_xname, chgind)); 645 646 PHOLD(l); 647 rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind); 648 PRELE(l); 649 650 DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n", 651 sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv)); 652 653 if (rv == 0 && 654 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) { 655 iop_reconfigure(sc, le32toh(lct.changeindicator)); 656 chgind = sc->sc_chgind + 1; 657 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL); 658 } 659 660 tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5); 661 } 662 } 663 664 /* 665 * Reconfigure: find new and removed devices. 666 */ 667 int 668 iop_reconfigure(struct iop_softc *sc, u_int chgind) 669 { 670 struct iop_msg *im; 671 struct i2o_hba_bus_scan mf; 672 struct i2o_lct_entry *le; 673 struct iop_initiator *ii, *nextii; 674 int rv, tid, i; 675 676 /* 677 * If the reconfiguration request isn't the result of LCT change 678 * notification, then be more thorough: ask all bus ports to scan 679 * their busses. Wait up to 5 minutes for each bus port to complete 680 * the request. 681 */ 682 if (chgind == 0) { 683 if ((rv = iop_lct_get(sc)) != 0) { 684 DPRINTF(("iop_reconfigure: unable to read LCT\n")); 685 return (rv); 686 } 687 688 le = sc->sc_lct->entry; 689 for (i = 0; i < sc->sc_nlctent; i++, le++) { 690 if ((le16toh(le->classid) & 4095) != 691 I2O_CLASS_BUS_ADAPTER_PORT) 692 continue; 693 tid = le16toh(le->localtid) & 4095; 694 695 im = iop_msg_alloc(sc, IM_WAIT); 696 697 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan); 698 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN); 699 mf.msgictx = IOP_ICTX; 700 mf.msgtctx = im->im_tctx; 701 702 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname, 703 tid)); 704 705 rv = iop_msg_post(sc, im, &mf, 5*60*1000); 706 iop_msg_free(sc, im); 707 #ifdef I2ODEBUG 708 if (rv != 0) 709 printf("%s: bus scan failed\n", 710 sc->sc_dv.dv_xname); 711 #endif 712 } 713 } else if (chgind <= sc->sc_chgind) { 714 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname)); 715 return (0); 716 } 717 718 /* Re-read the LCT and determine if it has changed. */ 719 if ((rv = iop_lct_get(sc)) != 0) { 720 DPRINTF(("iop_reconfigure: unable to re-read LCT\n")); 721 return (rv); 722 } 723 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent)); 724 725 chgind = le32toh(sc->sc_lct->changeindicator); 726 if (chgind == sc->sc_chgind) { 727 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname)); 728 return (0); 729 } 730 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname)); 731 sc->sc_chgind = chgind; 732 733 if (sc->sc_tidmap != NULL) 734 free(sc->sc_tidmap, M_DEVBUF); 735 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap), 736 M_DEVBUF, M_NOWAIT|M_ZERO); 737 738 /* Allow 1 queued command per device while we're configuring. */ 739 iop_adjqparam(sc, 1); 740 741 /* 742 * Match and attach child devices. We configure high-level devices 743 * first so that any claims will propagate throughout the LCT, 744 * hopefully masking off aliased devices as a result. 745 * 746 * Re-reading the LCT at this point is a little dangerous, but we'll 747 * trust the IOP (and the operator) to behave itself... 748 */ 749 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY, 750 IC_CONFIGURE | IC_PRIORITY); 751 if ((rv = iop_lct_get(sc)) != 0) 752 DPRINTF(("iop_reconfigure: unable to re-read LCT\n")); 753 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY, 754 IC_CONFIGURE); 755 756 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) { 757 nextii = LIST_NEXT(ii, ii_list); 758 759 /* Detach devices that were configured, but are now gone. */ 760 for (i = 0; i < sc->sc_nlctent; i++) 761 if (ii->ii_tid == sc->sc_tidmap[i].it_tid) 762 break; 763 if (i == sc->sc_nlctent || 764 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) 765 config_detach(ii->ii_dv, DETACH_FORCE); 766 767 /* 768 * Tell initiators that existed before the re-configuration 769 * to re-configure. 770 */ 771 if (ii->ii_reconfig == NULL) 772 continue; 773 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0) 774 printf("%s: %s failed reconfigure (%d)\n", 775 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv); 776 } 777 778 /* Re-adjust queue parameters and return. */ 779 if (sc->sc_nii != 0) 780 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE) 781 / sc->sc_nii); 782 783 return (0); 784 } 785 786 /* 787 * Configure I2O devices into the system. 788 */ 789 static void 790 iop_configure_devices(struct iop_softc *sc, int mask, int maskval) 791 { 792 struct iop_attach_args ia; 793 struct iop_initiator *ii; 794 const struct i2o_lct_entry *le; 795 struct device *dv; 796 int i, j, nent; 797 u_int usertid; 798 799 nent = sc->sc_nlctent; 800 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) { 801 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095; 802 803 /* Ignore the device if it's in use. */ 804 usertid = le32toh(le->usertid) & 4095; 805 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST) 806 continue; 807 808 ia.ia_class = le16toh(le->classid) & 4095; 809 ia.ia_tid = sc->sc_tidmap[i].it_tid; 810 811 /* Ignore uninteresting devices. */ 812 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++) 813 if (iop_class[j].ic_class == ia.ia_class) 814 break; 815 if (j < sizeof(iop_class) / sizeof(iop_class[0]) && 816 (iop_class[j].ic_flags & mask) != maskval) 817 continue; 818 819 /* 820 * Try to configure the device only if it's not already 821 * configured. 822 */ 823 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) { 824 if (ia.ia_tid == ii->ii_tid) { 825 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED; 826 strcpy(sc->sc_tidmap[i].it_dvname, 827 ii->ii_dv->dv_xname); 828 break; 829 } 830 } 831 if (ii != NULL) 832 continue; 833 834 dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch); 835 if (dv != NULL) { 836 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED; 837 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname); 838 } 839 } 840 } 841 842 /* 843 * Adjust queue parameters for all child devices. 844 */ 845 static void 846 iop_adjqparam(struct iop_softc *sc, int mpi) 847 { 848 struct iop_initiator *ii; 849 850 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) 851 if (ii->ii_adjqparam != NULL) 852 (*ii->ii_adjqparam)(ii->ii_dv, mpi); 853 } 854 855 static void 856 iop_devinfo(int class, char *devinfo) 857 { 858 #ifdef I2OVERBOSE 859 int i; 860 861 for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++) 862 if (class == iop_class[i].ic_class) 863 break; 864 865 if (i == sizeof(iop_class) / sizeof(iop_class[0])) 866 sprintf(devinfo, "device (class 0x%x)", class); 867 else 868 strcpy(devinfo, iop_class[i].ic_caption); 869 #else 870 871 sprintf(devinfo, "device (class 0x%x)", class); 872 #endif 873 } 874 875 static int 876 iop_print(void *aux, const char *pnp) 877 { 878 struct iop_attach_args *ia; 879 char devinfo[256]; 880 881 ia = aux; 882 883 if (pnp != NULL) { 884 iop_devinfo(ia->ia_class, devinfo); 885 aprint_normal("%s at %s", devinfo, pnp); 886 } 887 aprint_normal(" tid %d", ia->ia_tid); 888 return (UNCONF); 889 } 890 891 static int 892 iop_submatch(struct device *parent, struct cfdata *cf, void *aux) 893 { 894 struct iop_attach_args *ia; 895 896 ia = aux; 897 898 if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid) 899 return (0); 900 901 return (config_match(parent, cf, aux)); 902 } 903 904 /* 905 * Shut down all configured IOPs. 906 */ 907 static void 908 iop_shutdown(void *junk) 909 { 910 struct iop_softc *sc; 911 int i; 912 913 printf("shutting down iop devices..."); 914 915 for (i = 0; i < iop_cd.cd_ndevs; i++) { 916 if ((sc = device_lookup(&iop_cd, i)) == NULL) 917 continue; 918 if ((sc->sc_flags & IOP_ONLINE) == 0) 919 continue; 920 921 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX, 922 0, 5000); 923 924 if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) { 925 /* 926 * Some AMI firmware revisions will go to sleep and 927 * never come back after this. 928 */ 929 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, 930 IOP_ICTX, 0, 1000); 931 } 932 } 933 934 /* Wait. Some boards could still be flushing, stupidly enough. */ 935 delay(5000*1000); 936 printf(" done\n"); 937 } 938 939 /* 940 * Retrieve IOP status. 941 */ 942 int 943 iop_status_get(struct iop_softc *sc, int nosleep) 944 { 945 struct i2o_exec_status_get mf; 946 struct i2o_status *st; 947 paddr_t pa; 948 int rv, i; 949 950 pa = sc->sc_scr_seg->ds_addr; 951 st = (struct i2o_status *)sc->sc_scr; 952 953 mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get); 954 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET); 955 mf.reserved[0] = 0; 956 mf.reserved[1] = 0; 957 mf.reserved[2] = 0; 958 mf.reserved[3] = 0; 959 mf.addrlow = (u_int32_t)pa; 960 mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32); 961 mf.length = sizeof(sc->sc_status); 962 963 memset(st, 0, sizeof(*st)); 964 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st), 965 BUS_DMASYNC_PREREAD); 966 967 if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0) 968 return (rv); 969 970 for (i = 25; i != 0; i--) { 971 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, 972 sizeof(*st), BUS_DMASYNC_POSTREAD); 973 if (st->syncbyte == 0xff) 974 break; 975 if (nosleep) 976 DELAY(100*1000); 977 else 978 tsleep(iop_status_get, PWAIT, "iopstat", hz / 10); 979 } 980 981 if (st->syncbyte != 0xff) { 982 printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname); 983 rv = EIO; 984 } else { 985 memcpy(&sc->sc_status, st, sizeof(sc->sc_status)); 986 rv = 0; 987 } 988 989 return (rv); 990 } 991 992 /* 993 * Initialize and populate the IOP's outbound FIFO. 994 */ 995 static int 996 iop_ofifo_init(struct iop_softc *sc) 997 { 998 bus_addr_t addr; 999 bus_dma_segment_t seg; 1000 struct i2o_exec_outbound_init *mf; 1001 int i, rseg, rv; 1002 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw; 1003 1004 sw = (u_int32_t *)sc->sc_scr; 1005 1006 mf = (struct i2o_exec_outbound_init *)mb; 1007 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init); 1008 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT); 1009 mf->msgictx = IOP_ICTX; 1010 mf->msgtctx = 0; 1011 mf->pagesize = PAGE_SIZE; 1012 mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16); 1013 1014 /* 1015 * The I2O spec says that there are two SGLs: one for the status 1016 * word, and one for a list of discarded MFAs. It continues to say 1017 * that if you don't want to get the list of MFAs, an IGNORE SGL is 1018 * necessary; this isn't the case (and is in fact a bad thing). 1019 */ 1020 mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) | 1021 I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END; 1022 mb[sizeof(*mf) / sizeof(u_int32_t) + 1] = 1023 (u_int32_t)sc->sc_scr_seg->ds_addr; 1024 mb[0] += 2 << 16; 1025 1026 *sw = 0; 1027 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1028 BUS_DMASYNC_PREREAD); 1029 1030 if ((rv = iop_post(sc, mb)) != 0) 1031 return (rv); 1032 1033 POLL(5000, 1034 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1035 BUS_DMASYNC_POSTREAD), 1036 *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE))); 1037 1038 if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) { 1039 printf("%s: outbound FIFO init failed (%d)\n", 1040 sc->sc_dv.dv_xname, le32toh(*sw)); 1041 return (EIO); 1042 } 1043 1044 /* Allocate DMA safe memory for the reply frames. */ 1045 if (sc->sc_rep_phys == 0) { 1046 sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize; 1047 1048 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE, 1049 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); 1050 if (rv != 0) { 1051 printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname, 1052 rv); 1053 return (rv); 1054 } 1055 1056 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size, 1057 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 1058 if (rv != 0) { 1059 printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv); 1060 return (rv); 1061 } 1062 1063 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1, 1064 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap); 1065 if (rv != 0) { 1066 printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname, 1067 rv); 1068 return (rv); 1069 } 1070 1071 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, 1072 sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT); 1073 if (rv != 0) { 1074 printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv); 1075 return (rv); 1076 } 1077 1078 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr; 1079 } 1080 1081 /* Populate the outbound FIFO. */ 1082 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) { 1083 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr); 1084 addr += sc->sc_framesize; 1085 } 1086 1087 return (0); 1088 } 1089 1090 /* 1091 * Read the specified number of bytes from the IOP's hardware resource table. 1092 */ 1093 static int 1094 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size) 1095 { 1096 struct iop_msg *im; 1097 int rv; 1098 struct i2o_exec_hrt_get *mf; 1099 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1100 1101 im = iop_msg_alloc(sc, IM_WAIT); 1102 mf = (struct i2o_exec_hrt_get *)mb; 1103 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get); 1104 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET); 1105 mf->msgictx = IOP_ICTX; 1106 mf->msgtctx = im->im_tctx; 1107 1108 iop_msg_map(sc, im, mb, hrt, size, 0, NULL); 1109 rv = iop_msg_post(sc, im, mb, 30000); 1110 iop_msg_unmap(sc, im); 1111 iop_msg_free(sc, im); 1112 return (rv); 1113 } 1114 1115 /* 1116 * Read the IOP's hardware resource table. 1117 */ 1118 static int 1119 iop_hrt_get(struct iop_softc *sc) 1120 { 1121 struct i2o_hrt hrthdr, *hrt; 1122 int size, rv; 1123 1124 PHOLD(curlwp); 1125 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr)); 1126 PRELE(curlwp); 1127 if (rv != 0) 1128 return (rv); 1129 1130 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname, 1131 le16toh(hrthdr.numentries))); 1132 1133 size = sizeof(struct i2o_hrt) + 1134 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry); 1135 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT); 1136 1137 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) { 1138 free(hrt, M_DEVBUF); 1139 return (rv); 1140 } 1141 1142 if (sc->sc_hrt != NULL) 1143 free(sc->sc_hrt, M_DEVBUF); 1144 sc->sc_hrt = hrt; 1145 return (0); 1146 } 1147 1148 /* 1149 * Request the specified number of bytes from the IOP's logical 1150 * configuration table. If a change indicator is specified, this 1151 * is a verbatim notification request, so the caller is prepared 1152 * to wait indefinitely. 1153 */ 1154 static int 1155 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size, 1156 u_int32_t chgind) 1157 { 1158 struct iop_msg *im; 1159 struct i2o_exec_lct_notify *mf; 1160 int rv; 1161 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1162 1163 im = iop_msg_alloc(sc, IM_WAIT); 1164 memset(lct, 0, size); 1165 1166 mf = (struct i2o_exec_lct_notify *)mb; 1167 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify); 1168 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY); 1169 mf->msgictx = IOP_ICTX; 1170 mf->msgtctx = im->im_tctx; 1171 mf->classid = I2O_CLASS_ANY; 1172 mf->changeindicator = chgind; 1173 1174 #ifdef I2ODEBUG 1175 printf("iop_lct_get0: reading LCT"); 1176 if (chgind != 0) 1177 printf(" (async)"); 1178 printf("\n"); 1179 #endif 1180 1181 iop_msg_map(sc, im, mb, lct, size, 0, NULL); 1182 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0)); 1183 iop_msg_unmap(sc, im); 1184 iop_msg_free(sc, im); 1185 return (rv); 1186 } 1187 1188 /* 1189 * Read the IOP's logical configuration table. 1190 */ 1191 int 1192 iop_lct_get(struct iop_softc *sc) 1193 { 1194 int esize, size, rv; 1195 struct i2o_lct *lct; 1196 1197 esize = le32toh(sc->sc_status.expectedlctsize); 1198 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK); 1199 if (lct == NULL) 1200 return (ENOMEM); 1201 1202 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) { 1203 free(lct, M_DEVBUF); 1204 return (rv); 1205 } 1206 1207 size = le16toh(lct->tablesize) << 2; 1208 if (esize != size) { 1209 free(lct, M_DEVBUF); 1210 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK); 1211 if (lct == NULL) 1212 return (ENOMEM); 1213 1214 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) { 1215 free(lct, M_DEVBUF); 1216 return (rv); 1217 } 1218 } 1219 1220 /* Swap in the new LCT. */ 1221 if (sc->sc_lct != NULL) 1222 free(sc->sc_lct, M_DEVBUF); 1223 sc->sc_lct = lct; 1224 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) - 1225 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) / 1226 sizeof(struct i2o_lct_entry); 1227 return (0); 1228 } 1229 1230 /* 1231 * Request the specified parameter group from the target. If an initiator 1232 * is specified (a) don't wait for the operation to complete, but instead 1233 * let the initiator's interrupt handler deal with the reply and (b) place a 1234 * pointer to the parameter group op in the wrapper's `im_dvcontext' field. 1235 */ 1236 int 1237 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf, 1238 int size, struct iop_initiator *ii) 1239 { 1240 struct iop_msg *im; 1241 struct i2o_util_params_op *mf; 1242 struct i2o_reply *rf; 1243 int rv; 1244 struct iop_pgop *pgop; 1245 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1246 1247 im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS); 1248 if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) { 1249 iop_msg_free(sc, im); 1250 return (ENOMEM); 1251 } 1252 if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) { 1253 iop_msg_free(sc, im); 1254 free(pgop, M_DEVBUF); 1255 return (ENOMEM); 1256 } 1257 im->im_dvcontext = pgop; 1258 im->im_rb = rf; 1259 1260 mf = (struct i2o_util_params_op *)mb; 1261 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1262 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET); 1263 mf->msgictx = IOP_ICTX; 1264 mf->msgtctx = im->im_tctx; 1265 mf->flags = 0; 1266 1267 pgop->olh.count = htole16(1); 1268 pgop->olh.reserved = htole16(0); 1269 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET); 1270 pgop->oat.fieldcount = htole16(0xffff); 1271 pgop->oat.group = htole16(group); 1272 1273 if (ii == NULL) 1274 PHOLD(curlwp); 1275 1276 memset(buf, 0, size); 1277 iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL); 1278 iop_msg_map(sc, im, mb, buf, size, 0, NULL); 1279 rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0)); 1280 1281 if (ii == NULL) 1282 PRELE(curlwp); 1283 1284 /* Detect errors; let partial transfers to count as success. */ 1285 if (ii == NULL && rv == 0) { 1286 if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER && 1287 le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR) 1288 rv = 0; 1289 else 1290 rv = (rf->reqstatus != 0 ? EIO : 0); 1291 1292 if (rv != 0) 1293 printf("%s: FIELD_GET failed for tid %d group %d\n", 1294 sc->sc_dv.dv_xname, tid, group); 1295 } 1296 1297 if (ii == NULL || rv != 0) { 1298 iop_msg_unmap(sc, im); 1299 iop_msg_free(sc, im); 1300 free(pgop, M_DEVBUF); 1301 free(rf, M_DEVBUF); 1302 } 1303 1304 return (rv); 1305 } 1306 1307 /* 1308 * Set a single field in a scalar parameter group. 1309 */ 1310 int 1311 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf, 1312 int size, int field) 1313 { 1314 struct iop_msg *im; 1315 struct i2o_util_params_op *mf; 1316 struct iop_pgop *pgop; 1317 int rv, totsize; 1318 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1319 1320 totsize = sizeof(*pgop) + size; 1321 1322 im = iop_msg_alloc(sc, IM_WAIT); 1323 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) { 1324 iop_msg_free(sc, im); 1325 return (ENOMEM); 1326 } 1327 1328 mf = (struct i2o_util_params_op *)mb; 1329 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1330 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1331 mf->msgictx = IOP_ICTX; 1332 mf->msgtctx = im->im_tctx; 1333 mf->flags = 0; 1334 1335 pgop->olh.count = htole16(1); 1336 pgop->olh.reserved = htole16(0); 1337 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET); 1338 pgop->oat.fieldcount = htole16(1); 1339 pgop->oat.group = htole16(group); 1340 pgop->oat.fields[0] = htole16(field); 1341 memcpy(pgop + 1, buf, size); 1342 1343 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL); 1344 rv = iop_msg_post(sc, im, mb, 30000); 1345 if (rv != 0) 1346 printf("%s: FIELD_SET failed for tid %d group %d\n", 1347 sc->sc_dv.dv_xname, tid, group); 1348 1349 iop_msg_unmap(sc, im); 1350 iop_msg_free(sc, im); 1351 free(pgop, M_DEVBUF); 1352 return (rv); 1353 } 1354 1355 /* 1356 * Delete all rows in a tablular parameter group. 1357 */ 1358 int 1359 iop_table_clear(struct iop_softc *sc, int tid, int group) 1360 { 1361 struct iop_msg *im; 1362 struct i2o_util_params_op *mf; 1363 struct iop_pgop pgop; 1364 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1365 int rv; 1366 1367 im = iop_msg_alloc(sc, IM_WAIT); 1368 1369 mf = (struct i2o_util_params_op *)mb; 1370 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1371 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1372 mf->msgictx = IOP_ICTX; 1373 mf->msgtctx = im->im_tctx; 1374 mf->flags = 0; 1375 1376 pgop.olh.count = htole16(1); 1377 pgop.olh.reserved = htole16(0); 1378 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR); 1379 pgop.oat.fieldcount = htole16(0); 1380 pgop.oat.group = htole16(group); 1381 pgop.oat.fields[0] = htole16(0); 1382 1383 PHOLD(curlwp); 1384 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL); 1385 rv = iop_msg_post(sc, im, mb, 30000); 1386 if (rv != 0) 1387 printf("%s: TABLE_CLEAR failed for tid %d group %d\n", 1388 sc->sc_dv.dv_xname, tid, group); 1389 1390 iop_msg_unmap(sc, im); 1391 PRELE(curlwp); 1392 iop_msg_free(sc, im); 1393 return (rv); 1394 } 1395 1396 /* 1397 * Add a single row to a tabular parameter group. The row can have only one 1398 * field. 1399 */ 1400 int 1401 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf, 1402 int size, int row) 1403 { 1404 struct iop_msg *im; 1405 struct i2o_util_params_op *mf; 1406 struct iop_pgop *pgop; 1407 int rv, totsize; 1408 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1409 1410 totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size; 1411 1412 im = iop_msg_alloc(sc, IM_WAIT); 1413 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) { 1414 iop_msg_free(sc, im); 1415 return (ENOMEM); 1416 } 1417 1418 mf = (struct i2o_util_params_op *)mb; 1419 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1420 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1421 mf->msgictx = IOP_ICTX; 1422 mf->msgtctx = im->im_tctx; 1423 mf->flags = 0; 1424 1425 pgop->olh.count = htole16(1); 1426 pgop->olh.reserved = htole16(0); 1427 pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD); 1428 pgop->oat.fieldcount = htole16(1); 1429 pgop->oat.group = htole16(group); 1430 pgop->oat.fields[0] = htole16(0); /* FieldIdx */ 1431 pgop->oat.fields[1] = htole16(1); /* RowCount */ 1432 pgop->oat.fields[2] = htole16(row); /* KeyValue */ 1433 memcpy(&pgop->oat.fields[3], buf, size); 1434 1435 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL); 1436 rv = iop_msg_post(sc, im, mb, 30000); 1437 if (rv != 0) 1438 printf("%s: ADD_ROW failed for tid %d group %d row %d\n", 1439 sc->sc_dv.dv_xname, tid, group, row); 1440 1441 iop_msg_unmap(sc, im); 1442 iop_msg_free(sc, im); 1443 free(pgop, M_DEVBUF); 1444 return (rv); 1445 } 1446 1447 /* 1448 * Execute a simple command (no parameters). 1449 */ 1450 int 1451 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx, 1452 int async, int timo) 1453 { 1454 struct iop_msg *im; 1455 struct i2o_msg mf; 1456 int rv, fl; 1457 1458 fl = (async != 0 ? IM_WAIT : IM_POLL); 1459 im = iop_msg_alloc(sc, fl); 1460 1461 mf.msgflags = I2O_MSGFLAGS(i2o_msg); 1462 mf.msgfunc = I2O_MSGFUNC(tid, function); 1463 mf.msgictx = ictx; 1464 mf.msgtctx = im->im_tctx; 1465 1466 rv = iop_msg_post(sc, im, &mf, timo); 1467 iop_msg_free(sc, im); 1468 return (rv); 1469 } 1470 1471 /* 1472 * Post the system table to the IOP. 1473 */ 1474 static int 1475 iop_systab_set(struct iop_softc *sc) 1476 { 1477 struct i2o_exec_sys_tab_set *mf; 1478 struct iop_msg *im; 1479 bus_space_handle_t bsh; 1480 bus_addr_t boo; 1481 u_int32_t mema[2], ioa[2]; 1482 int rv; 1483 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1484 1485 im = iop_msg_alloc(sc, IM_WAIT); 1486 1487 mf = (struct i2o_exec_sys_tab_set *)mb; 1488 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set); 1489 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET); 1490 mf->msgictx = IOP_ICTX; 1491 mf->msgtctx = im->im_tctx; 1492 mf->iopid = (sc->sc_dv.dv_unit + 2) << 12; 1493 mf->segnumber = 0; 1494 1495 mema[1] = sc->sc_status.desiredprivmemsize; 1496 ioa[1] = sc->sc_status.desiredpriviosize; 1497 1498 if (mema[1] != 0) { 1499 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff, 1500 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh); 1501 mema[0] = htole32(boo); 1502 if (rv != 0) { 1503 printf("%s: can't alloc priv mem space, err = %d\n", 1504 sc->sc_dv.dv_xname, rv); 1505 mema[0] = 0; 1506 mema[1] = 0; 1507 } 1508 } 1509 1510 if (ioa[1] != 0) { 1511 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff, 1512 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh); 1513 ioa[0] = htole32(boo); 1514 if (rv != 0) { 1515 printf("%s: can't alloc priv i/o space, err = %d\n", 1516 sc->sc_dv.dv_xname, rv); 1517 ioa[0] = 0; 1518 ioa[1] = 0; 1519 } 1520 } 1521 1522 PHOLD(curlwp); 1523 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL); 1524 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL); 1525 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL); 1526 rv = iop_msg_post(sc, im, mb, 5000); 1527 iop_msg_unmap(sc, im); 1528 iop_msg_free(sc, im); 1529 PRELE(curlwp); 1530 return (rv); 1531 } 1532 1533 /* 1534 * Reset the IOP. Must be called with interrupts disabled. 1535 */ 1536 static int 1537 iop_reset(struct iop_softc *sc) 1538 { 1539 u_int32_t mfa, *sw; 1540 struct i2o_exec_iop_reset mf; 1541 int rv; 1542 paddr_t pa; 1543 1544 sw = (u_int32_t *)sc->sc_scr; 1545 pa = sc->sc_scr_seg->ds_addr; 1546 1547 mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset); 1548 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET); 1549 mf.reserved[0] = 0; 1550 mf.reserved[1] = 0; 1551 mf.reserved[2] = 0; 1552 mf.reserved[3] = 0; 1553 mf.statuslow = (u_int32_t)pa; 1554 mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32); 1555 1556 *sw = htole32(0); 1557 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1558 BUS_DMASYNC_PREREAD); 1559 1560 if ((rv = iop_post(sc, (u_int32_t *)&mf))) 1561 return (rv); 1562 1563 POLL(2500, 1564 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1565 BUS_DMASYNC_POSTREAD), *sw != 0)); 1566 if (*sw != htole32(I2O_RESET_IN_PROGRESS)) { 1567 printf("%s: reset rejected, status 0x%x\n", 1568 sc->sc_dv.dv_xname, le32toh(*sw)); 1569 return (EIO); 1570 } 1571 1572 /* 1573 * IOP is now in the INIT state. Wait no more than 10 seconds for 1574 * the inbound queue to become responsive. 1575 */ 1576 POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY); 1577 if (mfa == IOP_MFA_EMPTY) { 1578 printf("%s: reset failed\n", sc->sc_dv.dv_xname); 1579 return (EIO); 1580 } 1581 1582 iop_release_mfa(sc, mfa); 1583 return (0); 1584 } 1585 1586 /* 1587 * Register a new initiator. Must be called with the configuration lock 1588 * held. 1589 */ 1590 void 1591 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii) 1592 { 1593 static int ictxgen; 1594 int s; 1595 1596 /* 0 is reserved (by us) for system messages. */ 1597 ii->ii_ictx = ++ictxgen; 1598 1599 /* 1600 * `Utility initiators' don't make it onto the per-IOP initiator list 1601 * (which is used only for configuration), but do get one slot on 1602 * the inbound queue. 1603 */ 1604 if ((ii->ii_flags & II_UTILITY) == 0) { 1605 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list); 1606 sc->sc_nii++; 1607 } else 1608 sc->sc_nuii++; 1609 1610 s = splbio(); 1611 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash); 1612 splx(s); 1613 } 1614 1615 /* 1616 * Unregister an initiator. Must be called with the configuration lock 1617 * held. 1618 */ 1619 void 1620 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii) 1621 { 1622 int s; 1623 1624 if ((ii->ii_flags & II_UTILITY) == 0) { 1625 LIST_REMOVE(ii, ii_list); 1626 sc->sc_nii--; 1627 } else 1628 sc->sc_nuii--; 1629 1630 s = splbio(); 1631 LIST_REMOVE(ii, ii_hash); 1632 splx(s); 1633 } 1634 1635 /* 1636 * Handle a reply frame from the IOP. 1637 */ 1638 static int 1639 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa) 1640 { 1641 struct iop_msg *im; 1642 struct i2o_reply *rb; 1643 struct i2o_fault_notify *fn; 1644 struct iop_initiator *ii; 1645 u_int off, ictx, tctx, status, size; 1646 1647 off = (int)(rmfa - sc->sc_rep_phys); 1648 rb = (struct i2o_reply *)(sc->sc_rep + off); 1649 1650 /* Perform reply queue DMA synchronisation. */ 1651 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, 1652 sc->sc_framesize, BUS_DMASYNC_POSTREAD); 1653 if (--sc->sc_curib != 0) 1654 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 1655 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD); 1656 1657 #ifdef I2ODEBUG 1658 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0) 1659 panic("iop_handle_reply: 64-bit reply"); 1660 #endif 1661 /* 1662 * Find the initiator. 1663 */ 1664 ictx = le32toh(rb->msgictx); 1665 if (ictx == IOP_ICTX) 1666 ii = NULL; 1667 else { 1668 ii = LIST_FIRST(IOP_ICTXHASH(ictx)); 1669 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash)) 1670 if (ii->ii_ictx == ictx) 1671 break; 1672 if (ii == NULL) { 1673 #ifdef I2ODEBUG 1674 iop_reply_print(sc, rb); 1675 #endif 1676 printf("%s: WARNING: bad ictx returned (%x)\n", 1677 sc->sc_dv.dv_xname, ictx); 1678 return (-1); 1679 } 1680 } 1681 1682 /* 1683 * If we received a transport failure notice, we've got to dig the 1684 * transaction context (if any) out of the original message frame, 1685 * and then release the original MFA back to the inbound FIFO. 1686 */ 1687 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) { 1688 status = I2O_STATUS_SUCCESS; 1689 1690 fn = (struct i2o_fault_notify *)rb; 1691 tctx = iop_inl_msg(sc, fn->lowmfa + 12); 1692 iop_release_mfa(sc, fn->lowmfa); 1693 iop_tfn_print(sc, fn); 1694 } else { 1695 status = rb->reqstatus; 1696 tctx = le32toh(rb->msgtctx); 1697 } 1698 1699 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) { 1700 /* 1701 * This initiator tracks state using message wrappers. 1702 * 1703 * Find the originating message wrapper, and if requested 1704 * notify the initiator. 1705 */ 1706 im = sc->sc_ims + (tctx & IOP_TCTX_MASK); 1707 if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib || 1708 (im->im_flags & IM_ALLOCED) == 0 || 1709 tctx != im->im_tctx) { 1710 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n", 1711 sc->sc_dv.dv_xname, tctx, im); 1712 if (im != NULL) 1713 printf("%s: flags=0x%08x tctx=0x%08x\n", 1714 sc->sc_dv.dv_xname, im->im_flags, 1715 im->im_tctx); 1716 #ifdef I2ODEBUG 1717 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0) 1718 iop_reply_print(sc, rb); 1719 #endif 1720 return (-1); 1721 } 1722 1723 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) 1724 im->im_flags |= IM_FAIL; 1725 1726 #ifdef I2ODEBUG 1727 if ((im->im_flags & IM_REPLIED) != 0) 1728 panic("%s: dup reply", sc->sc_dv.dv_xname); 1729 #endif 1730 im->im_flags |= IM_REPLIED; 1731 1732 #ifdef I2ODEBUG 1733 if (status != I2O_STATUS_SUCCESS) 1734 iop_reply_print(sc, rb); 1735 #endif 1736 im->im_reqstatus = status; 1737 1738 /* Copy the reply frame, if requested. */ 1739 if (im->im_rb != NULL) { 1740 size = (le32toh(rb->msgflags) >> 14) & ~3; 1741 #ifdef I2ODEBUG 1742 if (size > sc->sc_framesize) 1743 panic("iop_handle_reply: reply too large"); 1744 #endif 1745 memcpy(im->im_rb, rb, size); 1746 } 1747 1748 /* Notify the initiator. */ 1749 if ((im->im_flags & IM_WAIT) != 0) 1750 wakeup(im); 1751 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) 1752 (*ii->ii_intr)(ii->ii_dv, im, rb); 1753 } else { 1754 /* 1755 * This initiator discards message wrappers. 1756 * 1757 * Simply pass the reply frame to the initiator. 1758 */ 1759 (*ii->ii_intr)(ii->ii_dv, NULL, rb); 1760 } 1761 1762 return (status); 1763 } 1764 1765 /* 1766 * Handle an interrupt from the IOP. 1767 */ 1768 int 1769 iop_intr(void *arg) 1770 { 1771 struct iop_softc *sc; 1772 u_int32_t rmfa; 1773 1774 sc = arg; 1775 1776 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) 1777 return (0); 1778 1779 for (;;) { 1780 /* Double read to account for IOP bug. */ 1781 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) { 1782 rmfa = iop_inl(sc, IOP_REG_OFIFO); 1783 if (rmfa == IOP_MFA_EMPTY) 1784 break; 1785 } 1786 iop_handle_reply(sc, rmfa); 1787 iop_outl(sc, IOP_REG_OFIFO, rmfa); 1788 } 1789 1790 return (1); 1791 } 1792 1793 /* 1794 * Handle an event signalled by the executive. 1795 */ 1796 static void 1797 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply) 1798 { 1799 struct i2o_util_event_register_reply *rb; 1800 struct iop_softc *sc; 1801 u_int event; 1802 1803 sc = (struct iop_softc *)dv; 1804 rb = reply; 1805 1806 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) 1807 return; 1808 1809 event = le32toh(rb->event); 1810 printf("%s: event 0x%08x received\n", dv->dv_xname, event); 1811 } 1812 1813 /* 1814 * Allocate a message wrapper. 1815 */ 1816 struct iop_msg * 1817 iop_msg_alloc(struct iop_softc *sc, int flags) 1818 { 1819 struct iop_msg *im; 1820 static u_int tctxgen; 1821 int s, i; 1822 1823 #ifdef I2ODEBUG 1824 if ((flags & IM_SYSMASK) != 0) 1825 panic("iop_msg_alloc: system flags specified"); 1826 #endif 1827 1828 s = splbio(); 1829 im = SLIST_FIRST(&sc->sc_im_freelist); 1830 #if defined(DIAGNOSTIC) || defined(I2ODEBUG) 1831 if (im == NULL) 1832 panic("iop_msg_alloc: no free wrappers"); 1833 #endif 1834 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain); 1835 splx(s); 1836 1837 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen; 1838 tctxgen += (1 << IOP_TCTX_SHIFT); 1839 im->im_flags = flags | IM_ALLOCED; 1840 im->im_rb = NULL; 1841 i = 0; 1842 do { 1843 im->im_xfer[i++].ix_size = 0; 1844 } while (i < IOP_MAX_MSG_XFERS); 1845 1846 return (im); 1847 } 1848 1849 /* 1850 * Free a message wrapper. 1851 */ 1852 void 1853 iop_msg_free(struct iop_softc *sc, struct iop_msg *im) 1854 { 1855 int s; 1856 1857 #ifdef I2ODEBUG 1858 if ((im->im_flags & IM_ALLOCED) == 0) 1859 panic("iop_msg_free: wrapper not allocated"); 1860 #endif 1861 1862 im->im_flags = 0; 1863 s = splbio(); 1864 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain); 1865 splx(s); 1866 } 1867 1868 /* 1869 * Map a data transfer. Write a scatter-gather list into the message frame. 1870 */ 1871 int 1872 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb, 1873 void *xferaddr, int xfersize, int out, struct proc *up) 1874 { 1875 bus_dmamap_t dm; 1876 bus_dma_segment_t *ds; 1877 struct iop_xfer *ix; 1878 u_int rv, i, nsegs, flg, off, xn; 1879 u_int32_t *p; 1880 1881 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++) 1882 if (ix->ix_size == 0) 1883 break; 1884 1885 #ifdef I2ODEBUG 1886 if (xfersize == 0) 1887 panic("iop_msg_map: null transfer"); 1888 if (xfersize > IOP_MAX_XFER) 1889 panic("iop_msg_map: transfer too large"); 1890 if (xn == IOP_MAX_MSG_XFERS) 1891 panic("iop_msg_map: too many xfers"); 1892 #endif 1893 1894 /* 1895 * Only the first DMA map is static. 1896 */ 1897 if (xn != 0) { 1898 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, 1899 IOP_MAX_SEGS, IOP_MAX_XFER, 0, 1900 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map); 1901 if (rv != 0) 1902 return (rv); 1903 } 1904 1905 dm = ix->ix_map; 1906 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up, 1907 (up == NULL ? BUS_DMA_NOWAIT : 0)); 1908 if (rv != 0) 1909 goto bad; 1910 1911 /* 1912 * How many SIMPLE SG elements can we fit in this message? 1913 */ 1914 off = mb[0] >> 16; 1915 p = mb + off; 1916 nsegs = ((sc->sc_framesize >> 2) - off) >> 1; 1917 1918 if (dm->dm_nsegs > nsegs) { 1919 bus_dmamap_unload(sc->sc_dmat, ix->ix_map); 1920 rv = EFBIG; 1921 DPRINTF(("iop_msg_map: too many segs\n")); 1922 goto bad; 1923 } 1924 1925 nsegs = dm->dm_nsegs; 1926 xfersize = 0; 1927 1928 /* 1929 * Write out the SG list. 1930 */ 1931 if (out) 1932 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT; 1933 else 1934 flg = I2O_SGL_SIMPLE; 1935 1936 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) { 1937 p[0] = (u_int32_t)ds->ds_len | flg; 1938 p[1] = (u_int32_t)ds->ds_addr; 1939 xfersize += ds->ds_len; 1940 } 1941 1942 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER; 1943 p[1] = (u_int32_t)ds->ds_addr; 1944 xfersize += ds->ds_len; 1945 1946 /* Fix up the transfer record, and sync the map. */ 1947 ix->ix_flags = (out ? IX_OUT : IX_IN); 1948 ix->ix_size = xfersize; 1949 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize, 1950 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD); 1951 1952 /* 1953 * If this is the first xfer we've mapped for this message, adjust 1954 * the SGL offset field in the message header. 1955 */ 1956 if ((im->im_flags & IM_SGLOFFADJ) == 0) { 1957 mb[0] += (mb[0] >> 12) & 0xf0; 1958 im->im_flags |= IM_SGLOFFADJ; 1959 } 1960 mb[0] += (nsegs << 17); 1961 return (0); 1962 1963 bad: 1964 if (xn != 0) 1965 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map); 1966 return (rv); 1967 } 1968 1969 /* 1970 * Map a block I/O data transfer (different in that there's only one per 1971 * message maximum, and PAGE addressing may be used). Write a scatter 1972 * gather list into the message frame. 1973 */ 1974 int 1975 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb, 1976 void *xferaddr, int xfersize, int out) 1977 { 1978 bus_dma_segment_t *ds; 1979 bus_dmamap_t dm; 1980 struct iop_xfer *ix; 1981 u_int rv, i, nsegs, off, slen, tlen, flg; 1982 paddr_t saddr, eaddr; 1983 u_int32_t *p; 1984 1985 #ifdef I2ODEBUG 1986 if (xfersize == 0) 1987 panic("iop_msg_map_bio: null transfer"); 1988 if (xfersize > IOP_MAX_XFER) 1989 panic("iop_msg_map_bio: transfer too large"); 1990 if ((im->im_flags & IM_SGLOFFADJ) != 0) 1991 panic("iop_msg_map_bio: SGLOFFADJ"); 1992 #endif 1993 1994 ix = im->im_xfer; 1995 dm = ix->ix_map; 1996 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 1997 BUS_DMA_NOWAIT | BUS_DMA_STREAMING); 1998 if (rv != 0) 1999 return (rv); 2000 2001 off = mb[0] >> 16; 2002 nsegs = ((sc->sc_framesize >> 2) - off) >> 1; 2003 2004 /* 2005 * If the transfer is highly fragmented and won't fit using SIMPLE 2006 * elements, use PAGE_LIST elements instead. SIMPLE elements are 2007 * potentially more efficient, both for us and the IOP. 2008 */ 2009 if (dm->dm_nsegs > nsegs) { 2010 nsegs = 1; 2011 p = mb + off + 1; 2012 2013 /* XXX This should be done with a bus_space flag. */ 2014 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) { 2015 slen = ds->ds_len; 2016 saddr = ds->ds_addr; 2017 2018 while (slen > 0) { 2019 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1); 2020 tlen = min(eaddr - saddr, slen); 2021 slen -= tlen; 2022 *p++ = le32toh(saddr); 2023 saddr = eaddr; 2024 nsegs++; 2025 } 2026 } 2027 2028 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER | 2029 I2O_SGL_END; 2030 if (out) 2031 mb[off] |= I2O_SGL_DATA_OUT; 2032 } else { 2033 p = mb + off; 2034 nsegs = dm->dm_nsegs; 2035 2036 if (out) 2037 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT; 2038 else 2039 flg = I2O_SGL_SIMPLE; 2040 2041 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) { 2042 p[0] = (u_int32_t)ds->ds_len | flg; 2043 p[1] = (u_int32_t)ds->ds_addr; 2044 } 2045 2046 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER | 2047 I2O_SGL_END; 2048 p[1] = (u_int32_t)ds->ds_addr; 2049 nsegs <<= 1; 2050 } 2051 2052 /* Fix up the transfer record, and sync the map. */ 2053 ix->ix_flags = (out ? IX_OUT : IX_IN); 2054 ix->ix_size = xfersize; 2055 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize, 2056 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD); 2057 2058 /* 2059 * Adjust the SGL offset and total message size fields. We don't 2060 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements. 2061 */ 2062 mb[0] += ((off << 4) + (nsegs << 16)); 2063 return (0); 2064 } 2065 2066 /* 2067 * Unmap all data transfers associated with a message wrapper. 2068 */ 2069 void 2070 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im) 2071 { 2072 struct iop_xfer *ix; 2073 int i; 2074 2075 #ifdef I2ODEBUG 2076 if (im->im_xfer[0].ix_size == 0) 2077 panic("iop_msg_unmap: no transfers mapped"); 2078 #endif 2079 2080 for (ix = im->im_xfer, i = 0;;) { 2081 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size, 2082 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE : 2083 BUS_DMASYNC_POSTREAD); 2084 bus_dmamap_unload(sc->sc_dmat, ix->ix_map); 2085 2086 /* Only the first DMA map is static. */ 2087 if (i != 0) 2088 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map); 2089 if ((++ix)->ix_size == 0) 2090 break; 2091 if (++i >= IOP_MAX_MSG_XFERS) 2092 break; 2093 } 2094 } 2095 2096 /* 2097 * Post a message frame to the IOP's inbound queue. 2098 */ 2099 int 2100 iop_post(struct iop_softc *sc, u_int32_t *mb) 2101 { 2102 u_int32_t mfa; 2103 int s; 2104 2105 #ifdef I2ODEBUG 2106 if ((mb[0] >> 16) > (sc->sc_framesize >> 2)) 2107 panic("iop_post: frame too large"); 2108 #endif 2109 2110 s = splbio(); 2111 2112 /* Allocate a slot with the IOP. */ 2113 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) 2114 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) { 2115 splx(s); 2116 printf("%s: mfa not forthcoming\n", 2117 sc->sc_dv.dv_xname); 2118 return (EAGAIN); 2119 } 2120 2121 /* Perform reply buffer DMA synchronisation. */ 2122 if (sc->sc_curib++ == 0) 2123 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0, 2124 sc->sc_rep_size, BUS_DMASYNC_PREREAD); 2125 2126 /* Copy out the message frame. */ 2127 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb, 2128 mb[0] >> 16); 2129 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, 2130 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE); 2131 2132 /* Post the MFA back to the IOP. */ 2133 iop_outl(sc, IOP_REG_IFIFO, mfa); 2134 2135 splx(s); 2136 return (0); 2137 } 2138 2139 /* 2140 * Post a message to the IOP and deal with completion. 2141 */ 2142 int 2143 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo) 2144 { 2145 u_int32_t *mb; 2146 int rv, s; 2147 2148 mb = xmb; 2149 2150 /* Terminate the scatter/gather list chain. */ 2151 if ((im->im_flags & IM_SGLOFFADJ) != 0) 2152 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END; 2153 2154 if ((rv = iop_post(sc, mb)) != 0) 2155 return (rv); 2156 2157 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) { 2158 if ((im->im_flags & IM_POLL) != 0) 2159 iop_msg_poll(sc, im, timo); 2160 else 2161 iop_msg_wait(sc, im, timo); 2162 2163 s = splbio(); 2164 if ((im->im_flags & IM_REPLIED) != 0) { 2165 if ((im->im_flags & IM_NOSTATUS) != 0) 2166 rv = 0; 2167 else if ((im->im_flags & IM_FAIL) != 0) 2168 rv = ENXIO; 2169 else if (im->im_reqstatus != I2O_STATUS_SUCCESS) 2170 rv = EIO; 2171 else 2172 rv = 0; 2173 } else 2174 rv = EBUSY; 2175 splx(s); 2176 } else 2177 rv = 0; 2178 2179 return (rv); 2180 } 2181 2182 /* 2183 * Spin until the specified message is replied to. 2184 */ 2185 static void 2186 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo) 2187 { 2188 u_int32_t rmfa; 2189 int s; 2190 2191 s = splbio(); 2192 2193 /* Wait for completion. */ 2194 for (timo *= 10; timo != 0; timo--) { 2195 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) { 2196 /* Double read to account for IOP bug. */ 2197 rmfa = iop_inl(sc, IOP_REG_OFIFO); 2198 if (rmfa == IOP_MFA_EMPTY) 2199 rmfa = iop_inl(sc, IOP_REG_OFIFO); 2200 if (rmfa != IOP_MFA_EMPTY) { 2201 iop_handle_reply(sc, rmfa); 2202 2203 /* 2204 * Return the reply frame to the IOP's 2205 * outbound FIFO. 2206 */ 2207 iop_outl(sc, IOP_REG_OFIFO, rmfa); 2208 } 2209 } 2210 if ((im->im_flags & IM_REPLIED) != 0) 2211 break; 2212 DELAY(100); 2213 } 2214 2215 if (timo == 0) { 2216 #ifdef I2ODEBUG 2217 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname); 2218 if (iop_status_get(sc, 1) != 0) 2219 printf("iop_msg_poll: unable to retrieve status\n"); 2220 else 2221 printf("iop_msg_poll: IOP state = %d\n", 2222 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff); 2223 #endif 2224 } 2225 2226 splx(s); 2227 } 2228 2229 /* 2230 * Sleep until the specified message is replied to. 2231 */ 2232 static void 2233 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo) 2234 { 2235 int s, rv; 2236 2237 s = splbio(); 2238 if ((im->im_flags & IM_REPLIED) != 0) { 2239 splx(s); 2240 return; 2241 } 2242 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo)); 2243 splx(s); 2244 2245 #ifdef I2ODEBUG 2246 if (rv != 0) { 2247 printf("iop_msg_wait: tsleep() == %d\n", rv); 2248 if (iop_status_get(sc, 0) != 0) 2249 printf("iop_msg_wait: unable to retrieve status\n"); 2250 else 2251 printf("iop_msg_wait: IOP state = %d\n", 2252 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff); 2253 } 2254 #endif 2255 } 2256 2257 /* 2258 * Release an unused message frame back to the IOP's inbound fifo. 2259 */ 2260 static void 2261 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa) 2262 { 2263 2264 /* Use the frame to issue a no-op. */ 2265 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16)); 2266 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP)); 2267 iop_outl_msg(sc, mfa + 8, 0); 2268 iop_outl_msg(sc, mfa + 12, 0); 2269 2270 iop_outl(sc, IOP_REG_IFIFO, mfa); 2271 } 2272 2273 #ifdef I2ODEBUG 2274 /* 2275 * Dump a reply frame header. 2276 */ 2277 static void 2278 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb) 2279 { 2280 u_int function, detail; 2281 #ifdef I2OVERBOSE 2282 const char *statusstr; 2283 #endif 2284 2285 function = (le32toh(rb->msgfunc) >> 24) & 0xff; 2286 detail = le16toh(rb->detail); 2287 2288 printf("%s: reply:\n", sc->sc_dv.dv_xname); 2289 2290 #ifdef I2OVERBOSE 2291 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0])) 2292 statusstr = iop_status[rb->reqstatus]; 2293 else 2294 statusstr = "undefined error code"; 2295 2296 printf("%s: function=0x%02x status=0x%02x (%s)\n", 2297 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr); 2298 #else 2299 printf("%s: function=0x%02x status=0x%02x\n", 2300 sc->sc_dv.dv_xname, function, rb->reqstatus); 2301 #endif 2302 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n", 2303 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx), 2304 le32toh(rb->msgtctx)); 2305 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname, 2306 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095, 2307 (le32toh(rb->msgflags) >> 8) & 0xff); 2308 } 2309 #endif 2310 2311 /* 2312 * Dump a transport failure reply. 2313 */ 2314 static void 2315 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn) 2316 { 2317 2318 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname); 2319 2320 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname, 2321 le32toh(fn->msgictx), le32toh(fn->msgtctx)); 2322 printf("%s: failurecode=0x%02x severity=0x%02x\n", 2323 sc->sc_dv.dv_xname, fn->failurecode, fn->severity); 2324 printf("%s: highestver=0x%02x lowestver=0x%02x\n", 2325 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver); 2326 } 2327 2328 /* 2329 * Translate an I2O ASCII field into a C string. 2330 */ 2331 void 2332 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen) 2333 { 2334 int hc, lc, i, nit; 2335 2336 dlen--; 2337 lc = 0; 2338 hc = 0; 2339 i = 0; 2340 2341 /* 2342 * DPT use NUL as a space, whereas AMI use it as a terminator. The 2343 * spec has nothing to say about it. Since AMI fields are usually 2344 * filled with junk after the terminator, ... 2345 */ 2346 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT); 2347 2348 while (slen-- != 0 && dlen-- != 0) { 2349 if (nit && *src == '\0') 2350 break; 2351 else if (*src <= 0x20 || *src >= 0x7f) { 2352 if (hc) 2353 dst[i++] = ' '; 2354 } else { 2355 hc = 1; 2356 dst[i++] = *src; 2357 lc = i; 2358 } 2359 src++; 2360 } 2361 2362 dst[lc] = '\0'; 2363 } 2364 2365 /* 2366 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it. 2367 */ 2368 int 2369 iop_print_ident(struct iop_softc *sc, int tid) 2370 { 2371 struct { 2372 struct i2o_param_op_results pr; 2373 struct i2o_param_read_results prr; 2374 struct i2o_param_device_identity di; 2375 } __attribute__ ((__packed__)) p; 2376 char buf[32]; 2377 int rv; 2378 2379 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p, 2380 sizeof(p), NULL); 2381 if (rv != 0) 2382 return (rv); 2383 2384 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf, 2385 sizeof(buf)); 2386 printf(" <%s, ", buf); 2387 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf, 2388 sizeof(buf)); 2389 printf("%s, ", buf); 2390 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf)); 2391 printf("%s>", buf); 2392 2393 return (0); 2394 } 2395 2396 /* 2397 * Claim or unclaim the specified TID. 2398 */ 2399 int 2400 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release, 2401 int flags) 2402 { 2403 struct iop_msg *im; 2404 struct i2o_util_claim mf; 2405 int rv, func; 2406 2407 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM; 2408 im = iop_msg_alloc(sc, IM_WAIT); 2409 2410 /* We can use the same structure, as they're identical. */ 2411 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim); 2412 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func); 2413 mf.msgictx = ii->ii_ictx; 2414 mf.msgtctx = im->im_tctx; 2415 mf.flags = flags; 2416 2417 rv = iop_msg_post(sc, im, &mf, 5000); 2418 iop_msg_free(sc, im); 2419 return (rv); 2420 } 2421 2422 /* 2423 * Perform an abort. 2424 */ 2425 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func, 2426 int tctxabort, int flags) 2427 { 2428 struct iop_msg *im; 2429 struct i2o_util_abort mf; 2430 int rv; 2431 2432 im = iop_msg_alloc(sc, IM_WAIT); 2433 2434 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort); 2435 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT); 2436 mf.msgictx = ii->ii_ictx; 2437 mf.msgtctx = im->im_tctx; 2438 mf.flags = (func << 24) | flags; 2439 mf.tctxabort = tctxabort; 2440 2441 rv = iop_msg_post(sc, im, &mf, 5000); 2442 iop_msg_free(sc, im); 2443 return (rv); 2444 } 2445 2446 /* 2447 * Enable or disable reception of events for the specified device. 2448 */ 2449 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask) 2450 { 2451 struct i2o_util_event_register mf; 2452 2453 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register); 2454 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER); 2455 mf.msgictx = ii->ii_ictx; 2456 mf.msgtctx = 0; 2457 mf.eventmask = mask; 2458 2459 /* This message is replied to only when events are signalled. */ 2460 return (iop_post(sc, (u_int32_t *)&mf)); 2461 } 2462 2463 int 2464 iopopen(dev_t dev, int flag, int mode, struct proc *p) 2465 { 2466 struct iop_softc *sc; 2467 2468 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL) 2469 return (ENXIO); 2470 if ((sc->sc_flags & IOP_ONLINE) == 0) 2471 return (ENXIO); 2472 if ((sc->sc_flags & IOP_OPEN) != 0) 2473 return (EBUSY); 2474 sc->sc_flags |= IOP_OPEN; 2475 2476 return (0); 2477 } 2478 2479 int 2480 iopclose(dev_t dev, int flag, int mode, struct proc *p) 2481 { 2482 struct iop_softc *sc; 2483 2484 sc = device_lookup(&iop_cd, minor(dev)); 2485 sc->sc_flags &= ~IOP_OPEN; 2486 2487 return (0); 2488 } 2489 2490 int 2491 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) 2492 { 2493 struct iop_softc *sc; 2494 struct iovec *iov; 2495 int rv, i; 2496 2497 if (securelevel >= 2) 2498 return (EPERM); 2499 2500 sc = device_lookup(&iop_cd, minor(dev)); 2501 2502 switch (cmd) { 2503 case IOPIOCPT: 2504 return (iop_passthrough(sc, (struct ioppt *)data, p)); 2505 2506 case IOPIOCGSTATUS: 2507 iov = (struct iovec *)data; 2508 i = sizeof(struct i2o_status); 2509 if (i > iov->iov_len) 2510 i = iov->iov_len; 2511 else 2512 iov->iov_len = i; 2513 if ((rv = iop_status_get(sc, 0)) == 0) 2514 rv = copyout(&sc->sc_status, iov->iov_base, i); 2515 return (rv); 2516 2517 case IOPIOCGLCT: 2518 case IOPIOCGTIDMAP: 2519 case IOPIOCRECONFIG: 2520 break; 2521 2522 default: 2523 #if defined(DIAGNOSTIC) || defined(I2ODEBUG) 2524 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd); 2525 #endif 2526 return (ENOTTY); 2527 } 2528 2529 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0) 2530 return (rv); 2531 2532 switch (cmd) { 2533 case IOPIOCGLCT: 2534 iov = (struct iovec *)data; 2535 i = le16toh(sc->sc_lct->tablesize) << 2; 2536 if (i > iov->iov_len) 2537 i = iov->iov_len; 2538 else 2539 iov->iov_len = i; 2540 rv = copyout(sc->sc_lct, iov->iov_base, i); 2541 break; 2542 2543 case IOPIOCRECONFIG: 2544 rv = iop_reconfigure(sc, 0); 2545 break; 2546 2547 case IOPIOCGTIDMAP: 2548 iov = (struct iovec *)data; 2549 i = sizeof(struct iop_tidmap) * sc->sc_nlctent; 2550 if (i > iov->iov_len) 2551 i = iov->iov_len; 2552 else 2553 iov->iov_len = i; 2554 rv = copyout(sc->sc_tidmap, iov->iov_base, i); 2555 break; 2556 } 2557 2558 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL); 2559 return (rv); 2560 } 2561 2562 static int 2563 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p) 2564 { 2565 struct iop_msg *im; 2566 struct i2o_msg *mf; 2567 struct ioppt_buf *ptb; 2568 int rv, i, mapped; 2569 2570 mf = NULL; 2571 im = NULL; 2572 mapped = 1; 2573 2574 if (pt->pt_msglen > sc->sc_framesize || 2575 pt->pt_msglen < sizeof(struct i2o_msg) || 2576 pt->pt_nbufs > IOP_MAX_MSG_XFERS || 2577 pt->pt_nbufs < 0 || pt->pt_replylen < 0 || 2578 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000) 2579 return (EINVAL); 2580 2581 for (i = 0; i < pt->pt_nbufs; i++) 2582 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) { 2583 rv = ENOMEM; 2584 goto bad; 2585 } 2586 2587 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK); 2588 if (mf == NULL) 2589 return (ENOMEM); 2590 2591 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0) 2592 goto bad; 2593 2594 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS); 2595 im->im_rb = (struct i2o_reply *)mf; 2596 mf->msgictx = IOP_ICTX; 2597 mf->msgtctx = im->im_tctx; 2598 2599 for (i = 0; i < pt->pt_nbufs; i++) { 2600 ptb = &pt->pt_bufs[i]; 2601 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data, 2602 ptb->ptb_datalen, ptb->ptb_out != 0, p); 2603 if (rv != 0) 2604 goto bad; 2605 mapped = 1; 2606 } 2607 2608 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0) 2609 goto bad; 2610 2611 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3; 2612 if (i > sc->sc_framesize) 2613 i = sc->sc_framesize; 2614 if (i > pt->pt_replylen) 2615 i = pt->pt_replylen; 2616 rv = copyout(im->im_rb, pt->pt_reply, i); 2617 2618 bad: 2619 if (mapped != 0) 2620 iop_msg_unmap(sc, im); 2621 if (im != NULL) 2622 iop_msg_free(sc, im); 2623 if (mf != NULL) 2624 free(mf, M_DEVBUF); 2625 return (rv); 2626 } 2627