/*	$NetBSD: iop.c,v 1.20 2001/11/13 12:24:58 lukem Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.20 2001/11/13 12:24:58 lukem Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif
static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		return;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL) {
		printf("%s: cannot allocate message wrappers\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}
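
	/*
	 * Note: each wrapper's initial im_tctx is simply its index into
	 * sc_ims[].  iop_msg_alloc() later preserves the low IOP_TCTX_SHIFT
	 * bits and ORs a generation count into the upper bits, so the
	 * index can always be recovered as (im_tctx & IOP_TCTX_MASK).
	 */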

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 3)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 2)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 1)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
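
/*
 * Note on change indicators: the thread above requests notification with
 * sc_chgind + 1, and the IOP holds the EXEC_LCT_NOTIFY reply until its own
 * change indicator reaches that value (iop_lct_get0() uses an indefinite
 * timeout in that case).  The tsleep() between iterations only paces
 * re-requests; it is not a polling interval.
 */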
/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}
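
/*
 * For illustration only: a child driver matched via iop_submatch() sees
 * the iop_attach_args built in iop_configure_devices().  A hypothetical
 * match routine might look roughly like this:
 *
 *	static int
 *	xx_match(struct device *parent, struct cfdata *cf, void *aux)
 *	{
 *		struct iop_attach_args *ia = aux;
 *
 *		return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
 *	}
 */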

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}
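
/*
 * iop_hrt_get0() above shows the canonical message wrapper lifecycle used
 * throughout this driver.  As a sketch (error handling omitted):
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	... fill in a struct i2o_* frame, with msgtctx = im->im_tctx ...
 *	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
 *	rv = iop_msg_post(sc, im, mb, timo);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, im);
 */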

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
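
/*
 * For illustration only, assuming the usual i2o_param_* result structures
 * from <dev/i2o/i2o.h>: a synchronous read of a device's identity
 * parameter group via the routine above might look roughly like this.
 *
 *	struct {
 *		struct	i2o_param_op_results pr;
 *		struct	i2o_param_read_results prr;
 *		struct	i2o_param_device_identity di;
 *	} __attribute__ ((__packed__)) p;
 *
 *	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
 *	    sizeof(p), NULL);
 */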

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
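
/*
 * Note that iop_simple_cmd() is synchronous either way: `async' only
 * selects how the reply is awaited, sleeping (IM_WAIT) when non-zero and
 * busy-waiting (IM_POLL) when zero.  iop_shutdown() relies on the polled
 * variant, since it runs when sleeping is no longer an option.
 */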

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
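		/*
		 * The low IOP_TCTX_SHIFT bits of the transaction context
		 * index sc_ims[]; the upper bits carry the generation
		 * number assigned by iop_msg_alloc(), so stale or corrupt
		 * replies fail the full im_tctx comparison below.
		 */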
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
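
/*
 * For reference, a two-segment SIMPLE SGL written by iop_msg_map() above
 * occupies four words at the current SGL offset, roughly:
 *
 *	len0 | I2O_SGL_SIMPLE [| I2O_SGL_DATA_OUT]
 *	addr0
 *	len1 | I2O_SGL_SIMPLE [| I2O_SGL_DATA_OUT] | I2O_SGL_END_BUFFER
 *	addr1
 *
 * iop_msg_post() later ORs I2O_SGL_END into the final element's flag word.
 */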
/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		/*
		 * Check the count before looking at the next transfer
		 * record, so that we never read past the end of the
		 * im_xfer array.
		 */
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
	    BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv, s;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		s = splbio();
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		splx(s);
	} else
		rv = 0;

	return (rv);
}

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}
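/*
 * Illustrative sketch only (not compiled): choosing a completion
 * strategy for iop_msg_post().  Autoconfiguration-time callers cannot
 * rely on interrupts and so poll; later callers sleep and are woken by
 * the reply handler.  The 30s timeout is a placeholder value.
 */
#ifdef notdef
static int
iop_example_exec(struct iop_softc *sc, u_int32_t *mb)
{
	struct iop_msg *im;
	int rv;

	/* IM_POLL spins in iop_msg_poll(); IM_WAIT sleeps in iop_msg_wait(). */
	im = iop_msg_alloc(sc, cold ? IM_POLL : IM_WAIT);
	mb[3] = im->im_tctx;	/* transaction context word of the header */
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_free(sc, im);
	return (rv);
}
#endif	/* notdef */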
/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound FIFO.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}
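/*
 * Illustrative helper matching the field extraction in the two dump
 * routines above: the (little-endian) msgfunc word packs the function
 * code in bits 24-31, the initiator TID in bits 12-23 and the target
 * TID in bits 0-11.
 */
#ifdef notdef
static void
iop_example_decode(u_int32_t msgfunc, u_int *func, u_int *itid, u_int *ttid)
{

	*func = (msgfunc >> 24) & 0xff;
	*itid = (msgfunc >> 12) & 0xfff;
	*ttid = msgfunc & 0xfff;
}
#endif	/* notdef */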
/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, ...
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}
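/*
 * Illustrative sketch only (not compiled): an initiator registering for
 * event notification after attach.  The mask values are placeholders
 * drawn from the I2O_EVENT_* definitions in <dev/i2o/i2o.h>; the reply
 * arrives asynchronously through the initiator's interrupt handler.
 */
#ifdef notdef
static void
iop_example_evreg(struct iop_softc *sc, struct iop_initiator *ii)
{

	if (iop_util_eventreg(sc, ii,
	    I2O_EVENT_GEN_STATE_CHANGE | I2O_EVENT_GEN_DEVICE_RESET) != 0)
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
}
#endif	/* notdef */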
int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data, p));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
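/*
 * Illustrative sketch only (not compiled; userland code): reading the
 * IOP status block through the IOPIOCGSTATUS ioctl handled above, in
 * the style of iopctl(8).  The device path is an assumption.
 */
#ifdef notdef
	struct i2o_status st;
	struct iovec iov;
	int fd;

	if ((fd = open("/dev/iop0", O_RDWR)) < 0)
		err(EXIT_FAILURE, "open");
	iov.iov_base = &st;
	iov.iov_len = sizeof(st);
	if (ioctl(fd, IOPIOCGSTATUS, &iov) < 0)
		err(EXIT_FAILURE, "IOPIOCGSTATUS");
	close(fd);
#endif	/* notdef */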
static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}
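/*
 * Illustrative sketch only (not compiled; userland code): the shape of
 * a pass-through request as validated by iop_passthrough() above.  All
 * values are placeholders; the structures come from <dev/i2o/iopio.h>.
 */
#ifdef notdef
	struct ioppt pt;
	u_int32_t msg[16], reply[64];

	/* Build an I2O message frame in msg[] first (not shown). */
	pt.pt_msg = msg;
	pt.pt_msglen = sizeof(msg);	/* >= sizeof(struct i2o_msg) */
	pt.pt_reply = reply;
	pt.pt_replylen = sizeof(reply);
	pt.pt_timo = 10000;		/* ms; must be in [1000, 300000] */
	pt.pt_nbufs = 0;		/* no data transfer */
	if (ioctl(fd, IOPIOCPT, &pt) < 0)
		err(EXIT_FAILURE, "IOPIOCPT");
#endif	/* notdef */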