/*	$NetBSD: iop.c,v 1.51 2005/12/11 12:21:23 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */
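
/*
 * A note on the messaging model as implemented below: to issue a request,
 * the host pulls a message frame address (MFA) from the IOP's inbound
 * FIFO (IOP_REG_IFIFO), copies the message frame to it and posts the MFA
 * back to the same FIFO (see iop_post()).  Replies are DMA'd into host
 * memory; the IOP hands each reply's offset back through the outbound
 * FIFO (IOP_REG_OFIFO), and iop_handle_reply() matches it to an initiator
 * by initiator context (ictx) and to a message wrapper by transaction
 * context (tctx).
 */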

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.51 2005/12/11 12:21:23 christos Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define	POLL(ms, cond)						\
do {								\
	int xi;							\
	for (xi = (ms) * 10; xi; xi--) {			\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif
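
	/*
	 * Message wrappers are identified by transaction context (tctx):
	 * the low IOP_TCTX_SHIFT bits index sc_ims[], and the upper bits
	 * hold a generation number (see tctxgen in iop_msg_alloc()), so
	 * that a stale or duplicated reply is caught when
	 * iop_handle_reply() compares the reply's tctx with the wrapper's.
	 */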

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}
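
	/*
	 * The system table built below is shared by every IOP in the
	 * system: it carries one i2o_systab_entry per IOP that has
	 * successfully returned status (IOP numbers are unit + 2), and
	 * is pushed to each IOP by iop_systab_set().
	 */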

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}
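
	/*
	 * A change indicator of zero requests the current LCT outright; a
	 * non-zero value asks the IOP to defer its reply until the LCT's
	 * change indicator exceeds it, which is how iop_reconf_thread()
	 * above sleeps until the configuration actually changes.
	 */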

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
#else
	snprintf(devinfo, l, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, so the caller must be prepared to wait
 * indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}
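
/*
 * A sketch of typical iop_field_get_all() usage (the group number and
 * result structure here are hypothetical; real callers use the
 * I2O_PARAM_* constants and structures from i2o.h):
 *
 *	struct i2o_param_somegroup pg;
 *	int rv;
 *
 *	rv = iop_field_get_all(sc, tid, I2O_PARAM_SOMEGROUP, &pg,
 *	    sizeof(pg), NULL);
 *
 * Passing a NULL initiator makes the call synchronous, as described in
 * the comment below.
 */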

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}
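
/*
 * Initiators (child devices and utility consumers such as the event
 * handler) are keyed by a non-zero initiator context (ictx);
 * iop_handle_reply() finds the owner of a reply by hashing the ictx into
 * iop_ictxhashtbl.
 */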

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}
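
/*
 * Message wrapper life cycle, in outline: iop_msg_alloc() takes a
 * wrapper off the free list and stamps a fresh tctx generation;
 * iop_msg_map() loads DMA maps and writes the SGL; iop_msg_post() posts
 * the frame and, for IM_POLL/IM_WAIT messages, waits for completion;
 * iop_msg_unmap() and iop_msg_free() then return the resources.
 */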

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}
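
/*
 * A note on the message header word mb[0], as used below: the high 16
 * bits hold the current frame size in 32-bit words, and bits 4-7 (the
 * upper nibble of the VersionOffset byte) hold the SGL offset.  This is
 * why iop_msg_map() computes the SGL position as mb[0] >> 16 and, for
 * the first transfer mapped, folds the header size into the offset
 * nibble with (mb[0] >> 12) & 0xf0.
 */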

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = htole32(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
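
/*
 * Note the sync pairing: iop_msg_map() and iop_msg_map_bio() perform the
 * PREREAD/PREWRITE sync when the SGL is built, and iop_msg_unmap() below
 * performs the matching POSTREAD/POSTWRITE sync before unloading each map.
 */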
2088  */
2089 void
2090 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2091 {
2092 	struct iop_xfer *ix;
2093 	int i;
2094 
2095 #ifdef I2ODEBUG
2096 	if (im->im_xfer[0].ix_size == 0)
2097 		panic("iop_msg_unmap: no transfers mapped");
2098 #endif
2099 
2100 	for (ix = im->im_xfer, i = 0;;) {
2101 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2102 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2103 		    BUS_DMASYNC_POSTREAD);
2104 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2105 
2106 		/* Only the first DMA map is static. */
2107 		if (i != 0)
2108 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2109 		if ((++ix)->ix_size == 0)
2110 			break;
2111 		if (++i >= IOP_MAX_MSG_XFERS)
2112 			break;
2113 	}
2114 }
2115 
2116 /*
2117  * Post a message frame to the IOP's inbound queue.
2118  */
2119 int
2120 iop_post(struct iop_softc *sc, u_int32_t *mb)
2121 {
2122 	u_int32_t mfa;
2123 	int s;
2124 
2125 #ifdef I2ODEBUG
2126 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2127 		panic("iop_post: frame too large");
2128 #endif
2129 
2130 	s = splbio();
2131 
2132 	/* Allocate a slot with the IOP. */
2133 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2134 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2135 			splx(s);
2136 			printf("%s: mfa not forthcoming\n",
2137 			    sc->sc_dv.dv_xname);
2138 			return (EAGAIN);
2139 		}
2140 
2141 	/* Perform reply buffer DMA synchronisation. */
2142 	if (sc->sc_curib++ == 0)
2143 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2144 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2145 
2146 	/* Copy out the message frame. */
2147 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2148 	    mb[0] >> 16);
2149 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2150 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2151 
2152 	/* Post the MFA back to the IOP. */
2153 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2154 
2155 	splx(s);
2156 	return (0);
2157 }
2158 
2159 /*
2160  * Post a message to the IOP and deal with completion.
2161  */
2162 int
2163 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2164 {
2165 	u_int32_t *mb;
2166 	int rv, s;
2167 
2168 	mb = xmb;
2169 
2170 	/* Terminate the scatter/gather list chain. */
2171 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
2172 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2173 
2174 	if ((rv = iop_post(sc, mb)) != 0)
2175 		return (rv);
2176 
2177 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2178 		if ((im->im_flags & IM_POLL) != 0)
2179 			iop_msg_poll(sc, im, timo);
2180 		else
2181 			iop_msg_wait(sc, im, timo);
2182 
2183 		s = splbio();
2184 		if ((im->im_flags & IM_REPLIED) != 0) {
2185 			if ((im->im_flags & IM_NOSTATUS) != 0)
2186 				rv = 0;
2187 			else if ((im->im_flags & IM_FAIL) != 0)
2188 				rv = ENXIO;
2189 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2190 				rv = EIO;
2191 			else
2192 				rv = 0;
2193 		} else
2194 			rv = EBUSY;
2195 		splx(s);
2196 	} else
2197 		rv = 0;
2198 
2199 	return (rv);
2200 }
2201 
2202 /*
2203  * Spin until the specified message is replied to.
2204  */
2205 static void
2206 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2207 {
2208 	u_int32_t rmfa;
2209 	int s;
2210 
2211 	s = splbio();
2212 
2213 	/* Wait for completion. */
2214 	for (timo *= 10; timo != 0; timo--) {
2215 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2216 			/* Double read to account for IOP bug. */
2217 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
2218 			if (rmfa == IOP_MFA_EMPTY)
2219 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
2220 			if (rmfa != IOP_MFA_EMPTY) {
2221 				iop_handle_reply(sc, rmfa);
2222 
2223 				/*
2224 				 * Return the reply frame to the IOP's
2225 				 * outbound FIFO.
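				 * Writing the MFA to IOP_REG_OFIFO hands
				 * the frame back to the IOP's outbound
				 * free list, mirroring the way iop_post()
				 * pulls inbound MFAs from IOP_REG_IFIFO.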
2226 				 */
2227 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
2228 			}
2229 		}
2230 		if ((im->im_flags & IM_REPLIED) != 0)
2231 			break;
2232 		DELAY(100);
2233 	}
2234 
2235 	if (timo == 0) {
2236 #ifdef I2ODEBUG
2237 		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2238 		if (iop_status_get(sc, 1) != 0)
2239 			printf("iop_msg_poll: unable to retrieve status\n");
2240 		else
2241 			printf("iop_msg_poll: IOP state = %d\n",
2242 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2243 #endif
2244 	}
2245 
2246 	splx(s);
2247 }
2248 
2249 /*
2250  * Sleep until the specified message is replied to.
2251  */
2252 static void
2253 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2254 {
2255 	int s, rv;
2256 
2257 	s = splbio();
2258 	if ((im->im_flags & IM_REPLIED) != 0) {
2259 		splx(s);
2260 		return;
2261 	}
2262 	rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2263 	splx(s);
2264 
2265 #ifdef I2ODEBUG
2266 	if (rv != 0) {
2267 		printf("iop_msg_wait: tsleep() == %d\n", rv);
2268 		if (iop_status_get(sc, 0) != 0)
2269 			printf("iop_msg_wait: unable to retrieve status\n");
2270 		else
2271 			printf("iop_msg_wait: IOP state = %d\n",
2272 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2273 	}
2274 #endif
2275 }
2276 
2277 /*
2278  * Release an unused message frame back to the IOP's inbound fifo.
2279  */
2280 static void
2281 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2282 {
2283 
2284 	/* Use the frame to issue a no-op. */
2285 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2286 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2287 	iop_outl_msg(sc, mfa + 8, 0);
2288 	iop_outl_msg(sc, mfa + 12, 0);
2289 
2290 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2291 }
2292 
2293 #ifdef I2ODEBUG
2294 /*
2295  * Dump a reply frame header.
2296  */
2297 static void
2298 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2299 {
2300 	u_int function, detail;
2301 #ifdef I2OVERBOSE
2302 	const char *statusstr;
2303 #endif
2304 
2305 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2306 	detail = le16toh(rb->detail);
2307 
2308 	printf("%s: reply:\n", sc->sc_dv.dv_xname);
2309 
2310 #ifdef I2OVERBOSE
2311 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2312 		statusstr = iop_status[rb->reqstatus];
2313 	else
2314 		statusstr = "undefined error code";
2315 
2316 	printf("%s: function=0x%02x status=0x%02x (%s)\n",
2317 	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2318 #else
2319 	printf("%s: function=0x%02x status=0x%02x\n",
2320 	    sc->sc_dv.dv_xname, function, rb->reqstatus);
2321 #endif
2322 	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2323 	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2324 	    le32toh(rb->msgtctx));
2325 	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2326 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2327 	    (le32toh(rb->msgflags) >> 8) & 0xff);
2328 }
2329 #endif
2330 
2331 /*
2332  * Dump a transport failure reply.
2333  */
2334 static void
2335 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2336 {
2337 
2338 	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2339 
2340 	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2341 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
2342 	printf("%s: failurecode=0x%02x severity=0x%02x\n",
2343 	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2344 	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2345 	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2346 }
2347 
2348 /*
2349  * Translate an I2O ASCII field into a C string.
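 *
 * For example (values illustrative), a 16-byte identity field holding
 * "RAID-5" padded with blanks -- or, on non-DPT IOPs, followed by a NUL
 * and junk -- yields the C string "RAID-5": unprintable bytes become
 * spaces, and anything after the last printable character is trimmed.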
2350  */
2351 void
2352 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2353 {
2354 	int hc, lc, i, nit;
2355 
2356 	dlen--;
2357 	lc = 0;
2358 	hc = 0;
2359 	i = 0;
2360 
2361 	/*
2362 	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
2363 	 * spec has nothing to say about it.  Since AMI fields are usually
2364 	 * filled with junk after the terminator, ...
2365 	 */
2366 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2367 
2368 	while (slen-- != 0 && dlen-- != 0) {
2369 		if (nit && *src == '\0')
2370 			break;
2371 		else if (*src <= 0x20 || *src >= 0x7f) {
2372 			if (hc)
2373 				dst[i++] = ' ';
2374 		} else {
2375 			hc = 1;
2376 			dst[i++] = *src;
2377 			lc = i;
2378 		}
2379 		src++;
2380 	}
2381 
2382 	dst[lc] = '\0';
2383 }
2384 
2385 /*
2386  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2387  */
2388 int
2389 iop_print_ident(struct iop_softc *sc, int tid)
2390 {
2391 	struct {
2392 		struct i2o_param_op_results pr;
2393 		struct i2o_param_read_results prr;
2394 		struct i2o_param_device_identity di;
2395 	} __attribute__ ((__packed__)) p;
2396 	char buf[32];
2397 	int rv;
2398 
2399 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2400 	    sizeof(p), NULL);
2401 	if (rv != 0)
2402 		return (rv);
2403 
2404 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2405 	    sizeof(buf));
2406 	printf(" <%s, ", buf);
2407 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2408 	    sizeof(buf));
2409 	printf("%s, ", buf);
2410 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2411 	printf("%s>", buf);
2412 
2413 	return (0);
2414 }
2415 
2416 /*
2417  * Claim or unclaim the specified TID.
2418  */
2419 int
2420 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2421 	       int flags)
2422 {
2423 	struct iop_msg *im;
2424 	struct i2o_util_claim mf;
2425 	int rv, func;
2426 
2427 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2428 	im = iop_msg_alloc(sc, IM_WAIT);
2429 
2430 	/* We can use the same structure, as they're identical. */
2431 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2432 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2433 	mf.msgictx = ii->ii_ictx;
2434 	mf.msgtctx = im->im_tctx;
2435 	mf.flags = flags;
2436 
2437 	rv = iop_msg_post(sc, im, &mf, 5000);
2438 	iop_msg_free(sc, im);
2439 	return (rv);
2440 }
2441 
2442 /*
2443  * Perform an abort.
2444  */
2445 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2446 		   int tctxabort, int flags)
2447 {
2448 	struct iop_msg *im;
2449 	struct i2o_util_abort mf;
2450 	int rv;
2451 
2452 	im = iop_msg_alloc(sc, IM_WAIT);
2453 
2454 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2455 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2456 	mf.msgictx = ii->ii_ictx;
2457 	mf.msgtctx = im->im_tctx;
2458 	mf.flags = (func << 24) | flags;
2459 	mf.tctxabort = tctxabort;
2460 
2461 	rv = iop_msg_post(sc, im, &mf, 5000);
2462 	iop_msg_free(sc, im);
2463 	return (rv);
2464 }
2465 
2466 /*
2467  * Enable or disable reception of events for the specified device.
2468  */
2469 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2470 {
2471 	struct i2o_util_event_register mf;
2472 
2473 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2474 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2475 	mf.msgictx = ii->ii_ictx;
2476 	mf.msgtctx = 0;
2477 	mf.eventmask = mask;
2478 
2479 	/* This message is replied to only when events are signalled. */
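	/*
	 * Typical use, as a sketch: a registered initiator enables event
	 * delivery with a non-zero mask (~0 for all events, say) and
	 * later disables it again with a mask of zero:
	 *
	 *	(void)iop_util_eventreg(sc, ii, ~0);
	 */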
2480 	return (iop_post(sc, (u_int32_t *)&mf));
2481 }
2482 
2483 int
2484 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2485 {
2486 	struct iop_softc *sc;
2487 
2488 	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2489 		return (ENXIO);
2490 	if ((sc->sc_flags & IOP_ONLINE) == 0)
2491 		return (ENXIO);
2492 	if ((sc->sc_flags & IOP_OPEN) != 0)
2493 		return (EBUSY);
2494 	sc->sc_flags |= IOP_OPEN;
2495 
2496 	return (0);
2497 }
2498 
2499 int
2500 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2501 {
2502 	struct iop_softc *sc;
2503 
2504 	sc = device_lookup(&iop_cd, minor(dev));
2505 	sc->sc_flags &= ~IOP_OPEN;
2506 
2507 	return (0);
2508 }
2509 
2510 int
2511 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2512 {
2513 	struct iop_softc *sc;
2514 	struct iovec *iov;
2515 	int rv, i;
2516 
2517 	if (securelevel >= 2)
2518 		return (EPERM);
2519 
2520 	sc = device_lookup(&iop_cd, minor(dev));
2521 
2522 	switch (cmd) {
2523 	case IOPIOCPT:
2524 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2525 
2526 	case IOPIOCGSTATUS:
2527 		iov = (struct iovec *)data;
2528 		i = sizeof(struct i2o_status);
2529 		if (i > iov->iov_len)
2530 			i = iov->iov_len;
2531 		else
2532 			iov->iov_len = i;
2533 		if ((rv = iop_status_get(sc, 0)) == 0)
2534 			rv = copyout(&sc->sc_status, iov->iov_base, i);
2535 		return (rv);
2536 
2537 	case IOPIOCGLCT:
2538 	case IOPIOCGTIDMAP:
2539 	case IOPIOCRECONFIG:
2540 		break;
2541 
2542 	default:
2543 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2544 		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2545 #endif
2546 		return (ENOTTY);
2547 	}
2548 
2549 	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2550 		return (rv);
2551 
2552 	switch (cmd) {
2553 	case IOPIOCGLCT:
2554 		iov = (struct iovec *)data;
2555 		i = le16toh(sc->sc_lct->tablesize) << 2;
2556 		if (i > iov->iov_len)
2557 			i = iov->iov_len;
2558 		else
2559 			iov->iov_len = i;
2560 		rv = copyout(sc->sc_lct, iov->iov_base, i);
2561 		break;
2562 
2563 	case IOPIOCRECONFIG:
2564 		if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
2565 			rv = iop_reconfigure(sc, 0);
2566 		break;
2567 
2568 	case IOPIOCGTIDMAP:
2569 		iov = (struct iovec *)data;
2570 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2571 		if (i > iov->iov_len)
2572 			i = iov->iov_len;
2573 		else
2574 			iov->iov_len = i;
2575 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2576 		break;
2577 	}
2578 
2579 	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2580 	return (rv);
2581 }
2582 
2583 static int
2584 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2585 {
2586 	struct iop_msg *im;
2587 	struct i2o_msg *mf;
2588 	struct ioppt_buf *ptb;
2589 	int rv, i, mapped;
2590 
2591 	mf = NULL;
2592 	im = NULL;
2593 	mapped = 0;
2594 
2595 	if (pt->pt_msglen > sc->sc_framesize ||
2596 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
2597 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2598 	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2599 	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2600 		return (EINVAL);
2601 
2602 	for (i = 0; i < pt->pt_nbufs; i++)
2603 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2604 			rv = ENOMEM;
2605 			goto bad;
2606 		}
2607 
2608 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2609 	if (mf == NULL)
2610 		return (ENOMEM);
2611 
2612 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2613 		goto bad;
2614 
2615 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2616 	im->im_rb = (struct i2o_reply *)mf;
2617 	mf->msgictx = IOP_ICTX;
2618 	mf->msgtctx = im->im_tctx;
2619 
2620 	for (i = 0; i < pt->pt_nbufs; i++) {
2621 		ptb = &pt->pt_bufs[i];
2622 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2623 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
2624 		if (rv != 0)
2625 			goto bad;
2626 		mapped = 1;
2627 	}
2628 
2629 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2630 		goto bad;
2631 
2632 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2633 	if (i > sc->sc_framesize)
2634 		i = sc->sc_framesize;
2635 	if (i > pt->pt_replylen)
2636 		i = pt->pt_replylen;
2637 	rv = copyout(im->im_rb, pt->pt_reply, i);
2638 
2639  bad:
2640 	if (mapped != 0)
2641 		iop_msg_unmap(sc, im);
2642 	if (im != NULL)
2643 		iop_msg_free(sc, im);
2644 	if (mf != NULL)
2645 		free(mf, M_DEVBUF);
2646 	return (rv);
2647 }
2648 
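/*
 * A sketch of driving the passthrough interface from userland; the
 * device name, frame contents and sizes are illustrative only.  The
 * caller supplies a raw I2O message frame; on success, up to
 * pt_replylen bytes of the reply are copied back out:
 *
 *	struct ioppt pt;
 *	u_int32_t mf[32], rf[32];
 *	int fd;
 *
 *	fd = open("/dev/iop0", O_RDWR);
 *	... build a message frame in mf ...
 *	pt.pt_msg = mf;
 *	pt.pt_msglen = (mf[0] >> 16) << 2;
 *	pt.pt_reply = rf;
 *	pt.pt_replylen = sizeof(rf);
 *	pt.pt_timo = 10000;
 *	pt.pt_nbufs = 0;
 *	if (ioctl(fd, IOPIOCPT, &pt) != 0)
 *		err(1, "IOPIOCPT");
 */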