1 /* $NetBSD: iop.c,v 1.47 2005/02/27 00:27:00 perry Exp $ */ 2 3 /*- 4 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Support for I2O IOPs (intelligent I/O processors). 41 */ 42 43 #include <sys/cdefs.h> 44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.47 2005/02/27 00:27:00 perry Exp $"); 45 46 #include "opt_i2o.h" 47 #include "iop.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/kernel.h> 52 #include <sys/device.h> 53 #include <sys/queue.h> 54 #include <sys/proc.h> 55 #include <sys/malloc.h> 56 #include <sys/ioctl.h> 57 #include <sys/endian.h> 58 #include <sys/conf.h> 59 #include <sys/kthread.h> 60 61 #include <uvm/uvm_extern.h> 62 63 #include <machine/bus.h> 64 65 #include <dev/i2o/i2o.h> 66 #include <dev/i2o/iopio.h> 67 #include <dev/i2o/iopreg.h> 68 #include <dev/i2o/iopvar.h> 69 70 #include "locators.h" 71 72 #define POLL(ms, cond) \ 73 do { \ 74 int i; \ 75 for (i = (ms) * 10; i; i--) { \ 76 if (cond) \ 77 break; \ 78 DELAY(100); \ 79 } \ 80 } while (/* CONSTCOND */0); 81 82 #ifdef I2ODEBUG 83 #define DPRINTF(x) printf x 84 #else 85 #define DPRINTF(x) 86 #endif 87 88 #ifdef I2OVERBOSE 89 #define IFVERBOSE(x) x 90 #define COMMENT(x) NULL 91 #else 92 #define IFVERBOSE(x) 93 #define COMMENT(x) 94 #endif 95 96 #define IOP_ICTXHASH_NBUCKETS 16 97 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash]) 98 99 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1) 100 101 #define IOP_TCTX_SHIFT 12 102 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1) 103 104 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl; 105 static u_long iop_ictxhash; 106 static void *iop_sdh; 107 static struct i2o_systab *iop_systab; 108 static int iop_systab_size; 109 110 extern struct cfdriver iop_cd; 111 112 dev_type_open(iopopen); 113 dev_type_close(iopclose); 114 dev_type_ioctl(iopioctl); 115 116 const struct cdevsw iop_cdevsw = { 117 iopopen, iopclose, noread, nowrite, iopioctl, 118 nostop, notty, nopoll, nommap, nokqfilter, 119 }; 120 121 #define IC_CONFIGURE 0x01 122 #define IC_PRIORITY 0x02 123 124 struct iop_class { 125 u_short ic_class; 126 u_short ic_flags; 127 #ifdef I2OVERBOSE 128 const char *ic_caption; 129 #endif 130 } static const iop_class[] = { 131 { 132 I2O_CLASS_EXECUTIVE, 133 0, 134 IFVERBOSE("executive") 135 }, 136 { 137 I2O_CLASS_DDM, 138 0, 139 COMMENT("device driver module") 140 }, 141 { 142 I2O_CLASS_RANDOM_BLOCK_STORAGE, 143 IC_CONFIGURE | IC_PRIORITY, 144 IFVERBOSE("random block storage") 145 }, 146 { 147 I2O_CLASS_SEQUENTIAL_STORAGE, 148 IC_CONFIGURE | IC_PRIORITY, 149 IFVERBOSE("sequential storage") 150 }, 151 { 152 I2O_CLASS_LAN, 153 IC_CONFIGURE | IC_PRIORITY, 154 IFVERBOSE("LAN port") 155 }, 156 { 157 I2O_CLASS_WAN, 158 IC_CONFIGURE | IC_PRIORITY, 159 IFVERBOSE("WAN port") 160 }, 161 { 162 I2O_CLASS_FIBRE_CHANNEL_PORT, 163 IC_CONFIGURE, 164 IFVERBOSE("fibrechannel port") 165 }, 166 { 167 I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL, 168 0, 169 COMMENT("fibrechannel peripheral") 170 }, 171 { 172 I2O_CLASS_SCSI_PERIPHERAL, 173 0, 174 COMMENT("SCSI peripheral") 175 }, 176 { 177 I2O_CLASS_ATE_PORT, 178 IC_CONFIGURE, 179 IFVERBOSE("ATE port") 180 }, 181 { 182 I2O_CLASS_ATE_PERIPHERAL, 183 0, 184 COMMENT("ATE peripheral") 185 }, 186 { 187 I2O_CLASS_FLOPPY_CONTROLLER, 188 IC_CONFIGURE, 189 IFVERBOSE("floppy controller") 190 }, 191 { 192 I2O_CLASS_FLOPPY_DEVICE, 193 0, 194 COMMENT("floppy device") 195 }, 196 { 197 I2O_CLASS_BUS_ADAPTER_PORT, 198 IC_CONFIGURE, 199 IFVERBOSE("bus adapter port" ) 200 }, 201 }; 202 203 #if defined(I2ODEBUG) && defined(I2OVERBOSE) 204 static const char * const iop_status[] = { 205 "success", 206 "abort (dirty)", 207 "abort (no data transfer)", 208 "abort (partial transfer)", 209 "error (dirty)", 210 "error (no data transfer)", 211 "error (partial transfer)", 212 "undefined error code", 213 "process abort (dirty)", 214 "process abort (no data transfer)", 215 "process abort (partial transfer)", 216 "transaction error", 217 }; 218 #endif 219 220 static inline u_int32_t iop_inl(struct iop_softc *, int); 221 static inline void iop_outl(struct iop_softc *, int, u_int32_t); 222 223 static inline u_int32_t iop_inl_msg(struct iop_softc *, int); 224 static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t); 225 226 static void iop_config_interrupts(struct device *); 227 static void iop_configure_devices(struct iop_softc *, int, int); 228 static void iop_devinfo(int, char *, size_t); 229 static int iop_print(void *, const char *); 230 static void iop_shutdown(void *); 231 static int iop_submatch(struct device *, struct cfdata *, 232 const locdesc_t *, void *); 233 234 static void iop_adjqparam(struct iop_softc *, int); 235 static void iop_create_reconf_thread(void *); 236 static int iop_handle_reply(struct iop_softc *, u_int32_t); 237 static int iop_hrt_get(struct iop_softc *); 238 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int); 239 static void iop_intr_event(struct device *, struct iop_msg *, void *); 240 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int, 241 u_int32_t); 242 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int); 243 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int); 244 static int iop_ofifo_init(struct iop_softc *); 245 static int iop_passthrough(struct iop_softc *, struct ioppt *, 246 struct proc *); 247 static void iop_reconf_thread(void *); 248 static void iop_release_mfa(struct iop_softc *, u_int32_t); 249 static int iop_reset(struct iop_softc *); 250 static int iop_sys_enable(struct iop_softc *); 251 static int iop_systab_set(struct iop_softc *); 252 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *); 253 254 #ifdef I2ODEBUG 255 static void iop_reply_print(struct iop_softc *, struct i2o_reply *); 256 #endif 257 258 static inline u_int32_t 259 iop_inl(struct iop_softc *sc, int off) 260 { 261 262 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, 263 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); 264 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off)); 265 } 266 267 static inline void 268 iop_outl(struct iop_softc *sc, int off, u_int32_t val) 269 { 270 271 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); 272 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, 273 BUS_SPACE_BARRIER_WRITE); 274 } 275 276 static inline u_int32_t 277 iop_inl_msg(struct iop_softc *sc, int off) 278 { 279 280 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4, 281 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); 282 return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off)); 283 } 284 285 static inline void 286 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val) 287 { 288 289 bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val); 290 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4, 291 BUS_SPACE_BARRIER_WRITE); 292 } 293 294 /* 295 * Initialise the IOP and our interface. 296 */ 297 void 298 iop_init(struct iop_softc *sc, const char *intrstr) 299 { 300 struct iop_msg *im; 301 int rv, i, j, state, nsegs; 302 u_int32_t mask; 303 char ident[64]; 304 305 state = 0; 306 307 printf("I2O adapter"); 308 309 if (iop_ictxhashtbl == NULL) 310 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST, 311 M_DEVBUF, M_NOWAIT, &iop_ictxhash); 312 313 /* Disable interrupts at the IOP. */ 314 mask = iop_inl(sc, IOP_REG_INTR_MASK); 315 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO); 316 317 /* Allocate a scratch DMA map for small miscellaneous shared data. */ 318 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0, 319 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) { 320 printf("%s: cannot create scratch dmamap\n", 321 sc->sc_dv.dv_xname); 322 return; 323 } 324 325 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, 326 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) { 327 printf("%s: cannot alloc scratch dmamem\n", 328 sc->sc_dv.dv_xname); 329 goto bail_out; 330 } 331 state++; 332 333 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE, 334 &sc->sc_scr, 0)) { 335 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname); 336 goto bail_out; 337 } 338 state++; 339 340 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr, 341 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) { 342 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname); 343 goto bail_out; 344 } 345 state++; 346 347 #ifdef I2ODEBUG 348 /* So that our debug checks don't choke. */ 349 sc->sc_framesize = 128; 350 #endif 351 352 /* Reset the adapter and request status. */ 353 if ((rv = iop_reset(sc)) != 0) { 354 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname); 355 goto bail_out; 356 } 357 358 if ((rv = iop_status_get(sc, 1)) != 0) { 359 printf("%s: not responding (get status)\n", 360 sc->sc_dv.dv_xname); 361 goto bail_out; 362 } 363 364 sc->sc_flags |= IOP_HAVESTATUS; 365 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid), 366 ident, sizeof(ident)); 367 printf(" <%s>\n", ident); 368 369 #ifdef I2ODEBUG 370 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname, 371 le16toh(sc->sc_status.orgid), 372 (le32toh(sc->sc_status.segnumber) >> 12) & 15); 373 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname); 374 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname, 375 le32toh(sc->sc_status.desiredprivmemsize), 376 le32toh(sc->sc_status.currentprivmemsize), 377 le32toh(sc->sc_status.currentprivmembase)); 378 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname, 379 le32toh(sc->sc_status.desiredpriviosize), 380 le32toh(sc->sc_status.currentpriviosize), 381 le32toh(sc->sc_status.currentpriviobase)); 382 #endif 383 384 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes); 385 if (sc->sc_maxob > IOP_MAX_OUTBOUND) 386 sc->sc_maxob = IOP_MAX_OUTBOUND; 387 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes); 388 if (sc->sc_maxib > IOP_MAX_INBOUND) 389 sc->sc_maxib = IOP_MAX_INBOUND; 390 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2; 391 if (sc->sc_framesize > IOP_MAX_MSG_SIZE) 392 sc->sc_framesize = IOP_MAX_MSG_SIZE; 393 394 #if defined(I2ODEBUG) || defined(DIAGNOSTIC) 395 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) { 396 printf("%s: frame size too small (%d)\n", 397 sc->sc_dv.dv_xname, sc->sc_framesize); 398 goto bail_out; 399 } 400 #endif 401 402 /* Allocate message wrappers. */ 403 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO); 404 if (im == NULL) { 405 printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname); 406 goto bail_out; 407 } 408 state++; 409 sc->sc_ims = im; 410 SLIST_INIT(&sc->sc_im_freelist); 411 412 for (i = 0; i < sc->sc_maxib; i++, im++) { 413 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, 414 IOP_MAX_SEGS, IOP_MAX_XFER, 0, 415 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 416 &im->im_xfer[0].ix_map); 417 if (rv != 0) { 418 printf("%s: couldn't create dmamap (%d)", 419 sc->sc_dv.dv_xname, rv); 420 goto bail_out3; 421 } 422 423 im->im_tctx = i; 424 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain); 425 } 426 427 /* Initialise the IOP's outbound FIFO. */ 428 if (iop_ofifo_init(sc) != 0) { 429 printf("%s: unable to init oubound FIFO\n", 430 sc->sc_dv.dv_xname); 431 goto bail_out3; 432 } 433 434 /* 435 * Defer further configuration until (a) interrupts are working and 436 * (b) we have enough information to build the system table. 437 */ 438 config_interrupts((struct device *)sc, iop_config_interrupts); 439 440 /* Configure shutdown hook before we start any device activity. */ 441 if (iop_sdh == NULL) 442 iop_sdh = shutdownhook_establish(iop_shutdown, NULL); 443 444 /* Ensure interrupts are enabled at the IOP. */ 445 mask = iop_inl(sc, IOP_REG_INTR_MASK); 446 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO); 447 448 if (intrstr != NULL) 449 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, 450 intrstr); 451 452 #ifdef I2ODEBUG 453 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n", 454 sc->sc_dv.dv_xname, sc->sc_maxib, 455 le32toh(sc->sc_status.maxinboundmframes), 456 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes)); 457 #endif 458 459 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0); 460 return; 461 462 bail_out3: 463 if (state > 3) { 464 for (j = 0; j < i; j++) 465 bus_dmamap_destroy(sc->sc_dmat, 466 sc->sc_ims[j].im_xfer[0].ix_map); 467 free(sc->sc_ims, M_DEVBUF); 468 } 469 bail_out: 470 if (state > 2) 471 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap); 472 if (state > 1) 473 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE); 474 if (state > 0) 475 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs); 476 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap); 477 } 478 479 /* 480 * Perform autoconfiguration tasks. 481 */ 482 static void 483 iop_config_interrupts(struct device *self) 484 { 485 struct iop_attach_args ia; 486 struct iop_softc *sc, *iop; 487 struct i2o_systab_entry *ste; 488 int rv, i, niop; 489 int help[2]; 490 locdesc_t *ldesc = (void *)help; /* XXX */ 491 492 sc = (struct iop_softc *)self; 493 LIST_INIT(&sc->sc_iilist); 494 495 printf("%s: configuring...\n", sc->sc_dv.dv_xname); 496 497 if (iop_hrt_get(sc) != 0) { 498 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname); 499 return; 500 } 501 502 /* 503 * Build the system table. 504 */ 505 if (iop_systab == NULL) { 506 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) { 507 if ((iop = device_lookup(&iop_cd, i)) == NULL) 508 continue; 509 if ((iop->sc_flags & IOP_HAVESTATUS) == 0) 510 continue; 511 if (iop_status_get(iop, 1) != 0) { 512 printf("%s: unable to retrieve status\n", 513 sc->sc_dv.dv_xname); 514 iop->sc_flags &= ~IOP_HAVESTATUS; 515 continue; 516 } 517 niop++; 518 } 519 if (niop == 0) 520 return; 521 522 i = sizeof(struct i2o_systab_entry) * (niop - 1) + 523 sizeof(struct i2o_systab); 524 iop_systab_size = i; 525 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO); 526 527 iop_systab->numentries = niop; 528 iop_systab->version = I2O_VERSION_11; 529 530 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) { 531 if ((iop = device_lookup(&iop_cd, i)) == NULL) 532 continue; 533 if ((iop->sc_flags & IOP_HAVESTATUS) == 0) 534 continue; 535 536 ste->orgid = iop->sc_status.orgid; 537 ste->iopid = iop->sc_dv.dv_unit + 2; 538 ste->segnumber = 539 htole32(le32toh(iop->sc_status.segnumber) & ~4095); 540 ste->iopcaps = iop->sc_status.iopcaps; 541 ste->inboundmsgframesize = 542 iop->sc_status.inboundmframesize; 543 ste->inboundmsgportaddresslow = 544 htole32(iop->sc_memaddr + IOP_REG_IFIFO); 545 ste++; 546 } 547 } 548 549 /* 550 * Post the system table to the IOP and bring it to the OPERATIONAL 551 * state. 552 */ 553 if (iop_systab_set(sc) != 0) { 554 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname); 555 return; 556 } 557 if (iop_sys_enable(sc) != 0) { 558 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname); 559 return; 560 } 561 562 /* 563 * Set up an event handler for this IOP. 564 */ 565 sc->sc_eventii.ii_dv = self; 566 sc->sc_eventii.ii_intr = iop_intr_event; 567 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY; 568 sc->sc_eventii.ii_tid = I2O_TID_IOP; 569 iop_initiator_register(sc, &sc->sc_eventii); 570 571 rv = iop_util_eventreg(sc, &sc->sc_eventii, 572 I2O_EVENT_EXEC_RESOURCE_LIMITS | 573 I2O_EVENT_EXEC_CONNECTION_FAIL | 574 I2O_EVENT_EXEC_ADAPTER_FAULT | 575 I2O_EVENT_EXEC_POWER_FAIL | 576 I2O_EVENT_EXEC_RESET_PENDING | 577 I2O_EVENT_EXEC_RESET_IMMINENT | 578 I2O_EVENT_EXEC_HARDWARE_FAIL | 579 I2O_EVENT_EXEC_XCT_CHANGE | 580 I2O_EVENT_EXEC_DDM_AVAILIBILITY | 581 I2O_EVENT_GEN_DEVICE_RESET | 582 I2O_EVENT_GEN_STATE_CHANGE | 583 I2O_EVENT_GEN_GENERAL_WARNING); 584 if (rv != 0) { 585 printf("%s: unable to register for events", sc->sc_dv.dv_xname); 586 return; 587 } 588 589 /* 590 * Attempt to match and attach a product-specific extension. 591 */ 592 ia.ia_class = I2O_CLASS_ANY; 593 ia.ia_tid = I2O_TID_IOP; 594 ldesc->len = 1; 595 ldesc->locs[IOPCF_TID] = I2O_TID_IOP; 596 config_found_sm_loc(self, "iop", ldesc, &ia, iop_print, iop_submatch); 597 598 /* 599 * Start device configuration. 600 */ 601 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL); 602 if ((rv = iop_reconfigure(sc, 0)) == -1) { 603 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv); 604 return; 605 } 606 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL); 607 608 kthread_create(iop_create_reconf_thread, sc); 609 } 610 611 /* 612 * Create the reconfiguration thread. Called after the standard kernel 613 * threads have been created. 614 */ 615 static void 616 iop_create_reconf_thread(void *cookie) 617 { 618 struct iop_softc *sc; 619 int rv; 620 621 sc = cookie; 622 sc->sc_flags |= IOP_ONLINE; 623 624 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc, 625 "%s", sc->sc_dv.dv_xname); 626 if (rv != 0) { 627 printf("%s: unable to create reconfiguration thread (%d)", 628 sc->sc_dv.dv_xname, rv); 629 return; 630 } 631 } 632 633 /* 634 * Reconfiguration thread; listens for LCT change notification, and 635 * initiates re-configuration if received. 636 */ 637 static void 638 iop_reconf_thread(void *cookie) 639 { 640 struct iop_softc *sc; 641 struct lwp *l; 642 struct i2o_lct lct; 643 u_int32_t chgind; 644 int rv; 645 646 sc = cookie; 647 chgind = sc->sc_chgind + 1; 648 l = curlwp; 649 650 for (;;) { 651 DPRINTF(("%s: async reconfig: requested 0x%08x\n", 652 sc->sc_dv.dv_xname, chgind)); 653 654 PHOLD(l); 655 rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind); 656 PRELE(l); 657 658 DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n", 659 sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv)); 660 661 if (rv == 0 && 662 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) { 663 iop_reconfigure(sc, le32toh(lct.changeindicator)); 664 chgind = sc->sc_chgind + 1; 665 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL); 666 } 667 668 tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5); 669 } 670 } 671 672 /* 673 * Reconfigure: find new and removed devices. 674 */ 675 int 676 iop_reconfigure(struct iop_softc *sc, u_int chgind) 677 { 678 struct iop_msg *im; 679 struct i2o_hba_bus_scan mf; 680 struct i2o_lct_entry *le; 681 struct iop_initiator *ii, *nextii; 682 int rv, tid, i; 683 684 /* 685 * If the reconfiguration request isn't the result of LCT change 686 * notification, then be more thorough: ask all bus ports to scan 687 * their busses. Wait up to 5 minutes for each bus port to complete 688 * the request. 689 */ 690 if (chgind == 0) { 691 if ((rv = iop_lct_get(sc)) != 0) { 692 DPRINTF(("iop_reconfigure: unable to read LCT\n")); 693 return (rv); 694 } 695 696 le = sc->sc_lct->entry; 697 for (i = 0; i < sc->sc_nlctent; i++, le++) { 698 if ((le16toh(le->classid) & 4095) != 699 I2O_CLASS_BUS_ADAPTER_PORT) 700 continue; 701 tid = le16toh(le->localtid) & 4095; 702 703 im = iop_msg_alloc(sc, IM_WAIT); 704 705 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan); 706 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN); 707 mf.msgictx = IOP_ICTX; 708 mf.msgtctx = im->im_tctx; 709 710 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname, 711 tid)); 712 713 rv = iop_msg_post(sc, im, &mf, 5*60*1000); 714 iop_msg_free(sc, im); 715 #ifdef I2ODEBUG 716 if (rv != 0) 717 printf("%s: bus scan failed\n", 718 sc->sc_dv.dv_xname); 719 #endif 720 } 721 } else if (chgind <= sc->sc_chgind) { 722 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname)); 723 return (0); 724 } 725 726 /* Re-read the LCT and determine if it has changed. */ 727 if ((rv = iop_lct_get(sc)) != 0) { 728 DPRINTF(("iop_reconfigure: unable to re-read LCT\n")); 729 return (rv); 730 } 731 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent)); 732 733 chgind = le32toh(sc->sc_lct->changeindicator); 734 if (chgind == sc->sc_chgind) { 735 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname)); 736 return (0); 737 } 738 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname)); 739 sc->sc_chgind = chgind; 740 741 if (sc->sc_tidmap != NULL) 742 free(sc->sc_tidmap, M_DEVBUF); 743 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap), 744 M_DEVBUF, M_NOWAIT|M_ZERO); 745 746 /* Allow 1 queued command per device while we're configuring. */ 747 iop_adjqparam(sc, 1); 748 749 /* 750 * Match and attach child devices. We configure high-level devices 751 * first so that any claims will propagate throughout the LCT, 752 * hopefully masking off aliased devices as a result. 753 * 754 * Re-reading the LCT at this point is a little dangerous, but we'll 755 * trust the IOP (and the operator) to behave itself... 756 */ 757 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY, 758 IC_CONFIGURE | IC_PRIORITY); 759 if ((rv = iop_lct_get(sc)) != 0) 760 DPRINTF(("iop_reconfigure: unable to re-read LCT\n")); 761 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY, 762 IC_CONFIGURE); 763 764 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) { 765 nextii = LIST_NEXT(ii, ii_list); 766 767 /* Detach devices that were configured, but are now gone. */ 768 for (i = 0; i < sc->sc_nlctent; i++) 769 if (ii->ii_tid == sc->sc_tidmap[i].it_tid) 770 break; 771 if (i == sc->sc_nlctent || 772 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) 773 config_detach(ii->ii_dv, DETACH_FORCE); 774 775 /* 776 * Tell initiators that existed before the re-configuration 777 * to re-configure. 778 */ 779 if (ii->ii_reconfig == NULL) 780 continue; 781 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0) 782 printf("%s: %s failed reconfigure (%d)\n", 783 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv); 784 } 785 786 /* Re-adjust queue parameters and return. */ 787 if (sc->sc_nii != 0) 788 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE) 789 / sc->sc_nii); 790 791 return (0); 792 } 793 794 /* 795 * Configure I2O devices into the system. 796 */ 797 static void 798 iop_configure_devices(struct iop_softc *sc, int mask, int maskval) 799 { 800 struct iop_attach_args ia; 801 struct iop_initiator *ii; 802 const struct i2o_lct_entry *le; 803 struct device *dv; 804 int i, j, nent; 805 u_int usertid; 806 int help[2]; 807 locdesc_t *ldesc = (void *)help; /* XXX */ 808 809 nent = sc->sc_nlctent; 810 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) { 811 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095; 812 813 /* Ignore the device if it's in use. */ 814 usertid = le32toh(le->usertid) & 4095; 815 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST) 816 continue; 817 818 ia.ia_class = le16toh(le->classid) & 4095; 819 ia.ia_tid = sc->sc_tidmap[i].it_tid; 820 821 /* Ignore uninteresting devices. */ 822 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++) 823 if (iop_class[j].ic_class == ia.ia_class) 824 break; 825 if (j < sizeof(iop_class) / sizeof(iop_class[0]) && 826 (iop_class[j].ic_flags & mask) != maskval) 827 continue; 828 829 /* 830 * Try to configure the device only if it's not already 831 * configured. 832 */ 833 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) { 834 if (ia.ia_tid == ii->ii_tid) { 835 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED; 836 strcpy(sc->sc_tidmap[i].it_dvname, 837 ii->ii_dv->dv_xname); 838 break; 839 } 840 } 841 if (ii != NULL) 842 continue; 843 844 ldesc->len = 1; 845 ldesc->locs[IOPCF_TID] = ia.ia_tid; 846 847 dv = config_found_sm_loc(&sc->sc_dv, "iop", ldesc, &ia, 848 iop_print, iop_submatch); 849 if (dv != NULL) { 850 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED; 851 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname); 852 } 853 } 854 } 855 856 /* 857 * Adjust queue parameters for all child devices. 858 */ 859 static void 860 iop_adjqparam(struct iop_softc *sc, int mpi) 861 { 862 struct iop_initiator *ii; 863 864 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) 865 if (ii->ii_adjqparam != NULL) 866 (*ii->ii_adjqparam)(ii->ii_dv, mpi); 867 } 868 869 static void 870 iop_devinfo(int class, char *devinfo, size_t l) 871 { 872 #ifdef I2OVERBOSE 873 int i; 874 875 for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++) 876 if (class == iop_class[i].ic_class) 877 break; 878 879 if (i == sizeof(iop_class) / sizeof(iop_class[0])) 880 snprintf(devinfo, l, "device (class 0x%x)", class); 881 else 882 strlcpy(devinfo, iop_class[i].ic_caption, l); 883 #else 884 885 snprintf(devinfo, l, "device (class 0x%x)", class); 886 #endif 887 } 888 889 static int 890 iop_print(void *aux, const char *pnp) 891 { 892 struct iop_attach_args *ia; 893 char devinfo[256]; 894 895 ia = aux; 896 897 if (pnp != NULL) { 898 iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo)); 899 aprint_normal("%s at %s", devinfo, pnp); 900 } 901 aprint_normal(" tid %d", ia->ia_tid); 902 return (UNCONF); 903 } 904 905 static int 906 iop_submatch(struct device *parent, struct cfdata *cf, 907 const locdesc_t *ldesc, void *aux) 908 { 909 910 if (cf->cf_loc[IOPCF_TID] != IOPCF_TID_DEFAULT && 911 cf->cf_loc[IOPCF_TID] != ldesc->locs[IOPCF_TID]) 912 return (0); 913 914 return (config_match(parent, cf, aux)); 915 } 916 917 /* 918 * Shut down all configured IOPs. 919 */ 920 static void 921 iop_shutdown(void *junk) 922 { 923 struct iop_softc *sc; 924 int i; 925 926 printf("shutting down iop devices..."); 927 928 for (i = 0; i < iop_cd.cd_ndevs; i++) { 929 if ((sc = device_lookup(&iop_cd, i)) == NULL) 930 continue; 931 if ((sc->sc_flags & IOP_ONLINE) == 0) 932 continue; 933 934 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX, 935 0, 5000); 936 937 if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) { 938 /* 939 * Some AMI firmware revisions will go to sleep and 940 * never come back after this. 941 */ 942 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, 943 IOP_ICTX, 0, 1000); 944 } 945 } 946 947 /* Wait. Some boards could still be flushing, stupidly enough. */ 948 delay(5000*1000); 949 printf(" done\n"); 950 } 951 952 /* 953 * Retrieve IOP status. 954 */ 955 int 956 iop_status_get(struct iop_softc *sc, int nosleep) 957 { 958 struct i2o_exec_status_get mf; 959 struct i2o_status *st; 960 paddr_t pa; 961 int rv, i; 962 963 pa = sc->sc_scr_seg->ds_addr; 964 st = (struct i2o_status *)sc->sc_scr; 965 966 mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get); 967 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET); 968 mf.reserved[0] = 0; 969 mf.reserved[1] = 0; 970 mf.reserved[2] = 0; 971 mf.reserved[3] = 0; 972 mf.addrlow = (u_int32_t)pa; 973 mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32); 974 mf.length = sizeof(sc->sc_status); 975 976 memset(st, 0, sizeof(*st)); 977 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st), 978 BUS_DMASYNC_PREREAD); 979 980 if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0) 981 return (rv); 982 983 for (i = 25; i != 0; i--) { 984 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, 985 sizeof(*st), BUS_DMASYNC_POSTREAD); 986 if (st->syncbyte == 0xff) 987 break; 988 if (nosleep) 989 DELAY(100*1000); 990 else 991 tsleep(iop_status_get, PWAIT, "iopstat", hz / 10); 992 } 993 994 if (st->syncbyte != 0xff) { 995 printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname); 996 rv = EIO; 997 } else { 998 memcpy(&sc->sc_status, st, sizeof(sc->sc_status)); 999 rv = 0; 1000 } 1001 1002 return (rv); 1003 } 1004 1005 /* 1006 * Initialize and populate the IOP's outbound FIFO. 1007 */ 1008 static int 1009 iop_ofifo_init(struct iop_softc *sc) 1010 { 1011 bus_addr_t addr; 1012 bus_dma_segment_t seg; 1013 struct i2o_exec_outbound_init *mf; 1014 int i, rseg, rv; 1015 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw; 1016 1017 sw = (u_int32_t *)sc->sc_scr; 1018 1019 mf = (struct i2o_exec_outbound_init *)mb; 1020 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init); 1021 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT); 1022 mf->msgictx = IOP_ICTX; 1023 mf->msgtctx = 0; 1024 mf->pagesize = PAGE_SIZE; 1025 mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16); 1026 1027 /* 1028 * The I2O spec says that there are two SGLs: one for the status 1029 * word, and one for a list of discarded MFAs. It continues to say 1030 * that if you don't want to get the list of MFAs, an IGNORE SGL is 1031 * necessary; this isn't the case (and is in fact a bad thing). 1032 */ 1033 mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) | 1034 I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END; 1035 mb[sizeof(*mf) / sizeof(u_int32_t) + 1] = 1036 (u_int32_t)sc->sc_scr_seg->ds_addr; 1037 mb[0] += 2 << 16; 1038 1039 *sw = 0; 1040 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1041 BUS_DMASYNC_PREREAD); 1042 1043 if ((rv = iop_post(sc, mb)) != 0) 1044 return (rv); 1045 1046 POLL(5000, 1047 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1048 BUS_DMASYNC_POSTREAD), 1049 *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE))); 1050 1051 if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) { 1052 printf("%s: outbound FIFO init failed (%d)\n", 1053 sc->sc_dv.dv_xname, le32toh(*sw)); 1054 return (EIO); 1055 } 1056 1057 /* Allocate DMA safe memory for the reply frames. */ 1058 if (sc->sc_rep_phys == 0) { 1059 sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize; 1060 1061 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE, 1062 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); 1063 if (rv != 0) { 1064 printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname, 1065 rv); 1066 return (rv); 1067 } 1068 1069 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size, 1070 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 1071 if (rv != 0) { 1072 printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv); 1073 return (rv); 1074 } 1075 1076 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1, 1077 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap); 1078 if (rv != 0) { 1079 printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname, 1080 rv); 1081 return (rv); 1082 } 1083 1084 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, 1085 sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT); 1086 if (rv != 0) { 1087 printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv); 1088 return (rv); 1089 } 1090 1091 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr; 1092 } 1093 1094 /* Populate the outbound FIFO. */ 1095 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) { 1096 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr); 1097 addr += sc->sc_framesize; 1098 } 1099 1100 return (0); 1101 } 1102 1103 /* 1104 * Read the specified number of bytes from the IOP's hardware resource table. 1105 */ 1106 static int 1107 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size) 1108 { 1109 struct iop_msg *im; 1110 int rv; 1111 struct i2o_exec_hrt_get *mf; 1112 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1113 1114 im = iop_msg_alloc(sc, IM_WAIT); 1115 mf = (struct i2o_exec_hrt_get *)mb; 1116 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get); 1117 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET); 1118 mf->msgictx = IOP_ICTX; 1119 mf->msgtctx = im->im_tctx; 1120 1121 iop_msg_map(sc, im, mb, hrt, size, 0, NULL); 1122 rv = iop_msg_post(sc, im, mb, 30000); 1123 iop_msg_unmap(sc, im); 1124 iop_msg_free(sc, im); 1125 return (rv); 1126 } 1127 1128 /* 1129 * Read the IOP's hardware resource table. 1130 */ 1131 static int 1132 iop_hrt_get(struct iop_softc *sc) 1133 { 1134 struct i2o_hrt hrthdr, *hrt; 1135 int size, rv; 1136 1137 PHOLD(curlwp); 1138 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr)); 1139 PRELE(curlwp); 1140 if (rv != 0) 1141 return (rv); 1142 1143 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname, 1144 le16toh(hrthdr.numentries))); 1145 1146 size = sizeof(struct i2o_hrt) + 1147 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry); 1148 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT); 1149 1150 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) { 1151 free(hrt, M_DEVBUF); 1152 return (rv); 1153 } 1154 1155 if (sc->sc_hrt != NULL) 1156 free(sc->sc_hrt, M_DEVBUF); 1157 sc->sc_hrt = hrt; 1158 return (0); 1159 } 1160 1161 /* 1162 * Request the specified number of bytes from the IOP's logical 1163 * configuration table. If a change indicator is specified, this 1164 * is a verbatim notification request, so the caller is prepared 1165 * to wait indefinitely. 1166 */ 1167 static int 1168 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size, 1169 u_int32_t chgind) 1170 { 1171 struct iop_msg *im; 1172 struct i2o_exec_lct_notify *mf; 1173 int rv; 1174 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1175 1176 im = iop_msg_alloc(sc, IM_WAIT); 1177 memset(lct, 0, size); 1178 1179 mf = (struct i2o_exec_lct_notify *)mb; 1180 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify); 1181 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY); 1182 mf->msgictx = IOP_ICTX; 1183 mf->msgtctx = im->im_tctx; 1184 mf->classid = I2O_CLASS_ANY; 1185 mf->changeindicator = chgind; 1186 1187 #ifdef I2ODEBUG 1188 printf("iop_lct_get0: reading LCT"); 1189 if (chgind != 0) 1190 printf(" (async)"); 1191 printf("\n"); 1192 #endif 1193 1194 iop_msg_map(sc, im, mb, lct, size, 0, NULL); 1195 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0)); 1196 iop_msg_unmap(sc, im); 1197 iop_msg_free(sc, im); 1198 return (rv); 1199 } 1200 1201 /* 1202 * Read the IOP's logical configuration table. 1203 */ 1204 int 1205 iop_lct_get(struct iop_softc *sc) 1206 { 1207 int esize, size, rv; 1208 struct i2o_lct *lct; 1209 1210 esize = le32toh(sc->sc_status.expectedlctsize); 1211 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK); 1212 if (lct == NULL) 1213 return (ENOMEM); 1214 1215 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) { 1216 free(lct, M_DEVBUF); 1217 return (rv); 1218 } 1219 1220 size = le16toh(lct->tablesize) << 2; 1221 if (esize != size) { 1222 free(lct, M_DEVBUF); 1223 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK); 1224 if (lct == NULL) 1225 return (ENOMEM); 1226 1227 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) { 1228 free(lct, M_DEVBUF); 1229 return (rv); 1230 } 1231 } 1232 1233 /* Swap in the new LCT. */ 1234 if (sc->sc_lct != NULL) 1235 free(sc->sc_lct, M_DEVBUF); 1236 sc->sc_lct = lct; 1237 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) - 1238 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) / 1239 sizeof(struct i2o_lct_entry); 1240 return (0); 1241 } 1242 1243 /* 1244 * Post a SYS_ENABLE message to the adapter. 1245 */ 1246 int 1247 iop_sys_enable(struct iop_softc *sc) 1248 { 1249 struct iop_msg *im; 1250 struct i2o_msg mf; 1251 int rv; 1252 1253 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS); 1254 1255 mf.msgflags = I2O_MSGFLAGS(i2o_msg); 1256 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE); 1257 mf.msgictx = IOP_ICTX; 1258 mf.msgtctx = im->im_tctx; 1259 1260 rv = iop_msg_post(sc, im, &mf, 30000); 1261 if (rv == 0) { 1262 if ((im->im_flags & IM_FAIL) != 0) 1263 rv = ENXIO; 1264 else if (im->im_reqstatus == I2O_STATUS_SUCCESS || 1265 (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER && 1266 im->im_detstatus == I2O_DSC_INVALID_REQUEST)) 1267 rv = 0; 1268 else 1269 rv = EIO; 1270 } 1271 1272 iop_msg_free(sc, im); 1273 return (rv); 1274 } 1275 1276 /* 1277 * Request the specified parameter group from the target. If an initiator 1278 * is specified (a) don't wait for the operation to complete, but instead 1279 * let the initiator's interrupt handler deal with the reply and (b) place a 1280 * pointer to the parameter group op in the wrapper's `im_dvcontext' field. 1281 */ 1282 int 1283 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf, 1284 int size, struct iop_initiator *ii) 1285 { 1286 struct iop_msg *im; 1287 struct i2o_util_params_op *mf; 1288 int rv; 1289 struct iop_pgop *pgop; 1290 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1291 1292 im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS); 1293 if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) { 1294 iop_msg_free(sc, im); 1295 return (ENOMEM); 1296 } 1297 im->im_dvcontext = pgop; 1298 1299 mf = (struct i2o_util_params_op *)mb; 1300 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1301 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET); 1302 mf->msgictx = IOP_ICTX; 1303 mf->msgtctx = im->im_tctx; 1304 mf->flags = 0; 1305 1306 pgop->olh.count = htole16(1); 1307 pgop->olh.reserved = htole16(0); 1308 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET); 1309 pgop->oat.fieldcount = htole16(0xffff); 1310 pgop->oat.group = htole16(group); 1311 1312 if (ii == NULL) 1313 PHOLD(curlwp); 1314 1315 memset(buf, 0, size); 1316 iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL); 1317 iop_msg_map(sc, im, mb, buf, size, 0, NULL); 1318 rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0)); 1319 1320 if (ii == NULL) 1321 PRELE(curlwp); 1322 1323 /* Detect errors; let partial transfers to count as success. */ 1324 if (ii == NULL && rv == 0) { 1325 if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER && 1326 im->im_detstatus == I2O_DSC_UNKNOWN_ERROR) 1327 rv = 0; 1328 else 1329 rv = (im->im_reqstatus != 0 ? EIO : 0); 1330 1331 if (rv != 0) 1332 printf("%s: FIELD_GET failed for tid %d group %d\n", 1333 sc->sc_dv.dv_xname, tid, group); 1334 } 1335 1336 if (ii == NULL || rv != 0) { 1337 iop_msg_unmap(sc, im); 1338 iop_msg_free(sc, im); 1339 free(pgop, M_DEVBUF); 1340 } 1341 1342 return (rv); 1343 } 1344 1345 /* 1346 * Set a single field in a scalar parameter group. 1347 */ 1348 int 1349 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf, 1350 int size, int field) 1351 { 1352 struct iop_msg *im; 1353 struct i2o_util_params_op *mf; 1354 struct iop_pgop *pgop; 1355 int rv, totsize; 1356 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1357 1358 totsize = sizeof(*pgop) + size; 1359 1360 im = iop_msg_alloc(sc, IM_WAIT); 1361 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) { 1362 iop_msg_free(sc, im); 1363 return (ENOMEM); 1364 } 1365 1366 mf = (struct i2o_util_params_op *)mb; 1367 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1368 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1369 mf->msgictx = IOP_ICTX; 1370 mf->msgtctx = im->im_tctx; 1371 mf->flags = 0; 1372 1373 pgop->olh.count = htole16(1); 1374 pgop->olh.reserved = htole16(0); 1375 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET); 1376 pgop->oat.fieldcount = htole16(1); 1377 pgop->oat.group = htole16(group); 1378 pgop->oat.fields[0] = htole16(field); 1379 memcpy(pgop + 1, buf, size); 1380 1381 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL); 1382 rv = iop_msg_post(sc, im, mb, 30000); 1383 if (rv != 0) 1384 printf("%s: FIELD_SET failed for tid %d group %d\n", 1385 sc->sc_dv.dv_xname, tid, group); 1386 1387 iop_msg_unmap(sc, im); 1388 iop_msg_free(sc, im); 1389 free(pgop, M_DEVBUF); 1390 return (rv); 1391 } 1392 1393 /* 1394 * Delete all rows in a tablular parameter group. 1395 */ 1396 int 1397 iop_table_clear(struct iop_softc *sc, int tid, int group) 1398 { 1399 struct iop_msg *im; 1400 struct i2o_util_params_op *mf; 1401 struct iop_pgop pgop; 1402 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1403 int rv; 1404 1405 im = iop_msg_alloc(sc, IM_WAIT); 1406 1407 mf = (struct i2o_util_params_op *)mb; 1408 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1409 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1410 mf->msgictx = IOP_ICTX; 1411 mf->msgtctx = im->im_tctx; 1412 mf->flags = 0; 1413 1414 pgop.olh.count = htole16(1); 1415 pgop.olh.reserved = htole16(0); 1416 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR); 1417 pgop.oat.fieldcount = htole16(0); 1418 pgop.oat.group = htole16(group); 1419 pgop.oat.fields[0] = htole16(0); 1420 1421 PHOLD(curlwp); 1422 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL); 1423 rv = iop_msg_post(sc, im, mb, 30000); 1424 if (rv != 0) 1425 printf("%s: TABLE_CLEAR failed for tid %d group %d\n", 1426 sc->sc_dv.dv_xname, tid, group); 1427 1428 iop_msg_unmap(sc, im); 1429 PRELE(curlwp); 1430 iop_msg_free(sc, im); 1431 return (rv); 1432 } 1433 1434 /* 1435 * Add a single row to a tabular parameter group. The row can have only one 1436 * field. 1437 */ 1438 int 1439 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf, 1440 int size, int row) 1441 { 1442 struct iop_msg *im; 1443 struct i2o_util_params_op *mf; 1444 struct iop_pgop *pgop; 1445 int rv, totsize; 1446 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1447 1448 totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size; 1449 1450 im = iop_msg_alloc(sc, IM_WAIT); 1451 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) { 1452 iop_msg_free(sc, im); 1453 return (ENOMEM); 1454 } 1455 1456 mf = (struct i2o_util_params_op *)mb; 1457 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1458 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1459 mf->msgictx = IOP_ICTX; 1460 mf->msgtctx = im->im_tctx; 1461 mf->flags = 0; 1462 1463 pgop->olh.count = htole16(1); 1464 pgop->olh.reserved = htole16(0); 1465 pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD); 1466 pgop->oat.fieldcount = htole16(1); 1467 pgop->oat.group = htole16(group); 1468 pgop->oat.fields[0] = htole16(0); /* FieldIdx */ 1469 pgop->oat.fields[1] = htole16(1); /* RowCount */ 1470 pgop->oat.fields[2] = htole16(row); /* KeyValue */ 1471 memcpy(&pgop->oat.fields[3], buf, size); 1472 1473 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL); 1474 rv = iop_msg_post(sc, im, mb, 30000); 1475 if (rv != 0) 1476 printf("%s: ADD_ROW failed for tid %d group %d row %d\n", 1477 sc->sc_dv.dv_xname, tid, group, row); 1478 1479 iop_msg_unmap(sc, im); 1480 iop_msg_free(sc, im); 1481 free(pgop, M_DEVBUF); 1482 return (rv); 1483 } 1484 1485 /* 1486 * Execute a simple command (no parameters). 1487 */ 1488 int 1489 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx, 1490 int async, int timo) 1491 { 1492 struct iop_msg *im; 1493 struct i2o_msg mf; 1494 int rv, fl; 1495 1496 fl = (async != 0 ? IM_WAIT : IM_POLL); 1497 im = iop_msg_alloc(sc, fl); 1498 1499 mf.msgflags = I2O_MSGFLAGS(i2o_msg); 1500 mf.msgfunc = I2O_MSGFUNC(tid, function); 1501 mf.msgictx = ictx; 1502 mf.msgtctx = im->im_tctx; 1503 1504 rv = iop_msg_post(sc, im, &mf, timo); 1505 iop_msg_free(sc, im); 1506 return (rv); 1507 } 1508 1509 /* 1510 * Post the system table to the IOP. 1511 */ 1512 static int 1513 iop_systab_set(struct iop_softc *sc) 1514 { 1515 struct i2o_exec_sys_tab_set *mf; 1516 struct iop_msg *im; 1517 bus_space_handle_t bsh; 1518 bus_addr_t boo; 1519 u_int32_t mema[2], ioa[2]; 1520 int rv; 1521 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1522 1523 im = iop_msg_alloc(sc, IM_WAIT); 1524 1525 mf = (struct i2o_exec_sys_tab_set *)mb; 1526 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set); 1527 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET); 1528 mf->msgictx = IOP_ICTX; 1529 mf->msgtctx = im->im_tctx; 1530 mf->iopid = (sc->sc_dv.dv_unit + 2) << 12; 1531 mf->segnumber = 0; 1532 1533 mema[1] = sc->sc_status.desiredprivmemsize; 1534 ioa[1] = sc->sc_status.desiredpriviosize; 1535 1536 if (mema[1] != 0) { 1537 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff, 1538 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh); 1539 mema[0] = htole32(boo); 1540 if (rv != 0) { 1541 printf("%s: can't alloc priv mem space, err = %d\n", 1542 sc->sc_dv.dv_xname, rv); 1543 mema[0] = 0; 1544 mema[1] = 0; 1545 } 1546 } 1547 1548 if (ioa[1] != 0) { 1549 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff, 1550 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh); 1551 ioa[0] = htole32(boo); 1552 if (rv != 0) { 1553 printf("%s: can't alloc priv i/o space, err = %d\n", 1554 sc->sc_dv.dv_xname, rv); 1555 ioa[0] = 0; 1556 ioa[1] = 0; 1557 } 1558 } 1559 1560 PHOLD(curlwp); 1561 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL); 1562 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL); 1563 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL); 1564 rv = iop_msg_post(sc, im, mb, 5000); 1565 iop_msg_unmap(sc, im); 1566 iop_msg_free(sc, im); 1567 PRELE(curlwp); 1568 return (rv); 1569 } 1570 1571 /* 1572 * Reset the IOP. Must be called with interrupts disabled. 1573 */ 1574 static int 1575 iop_reset(struct iop_softc *sc) 1576 { 1577 u_int32_t mfa, *sw; 1578 struct i2o_exec_iop_reset mf; 1579 int rv; 1580 paddr_t pa; 1581 1582 sw = (u_int32_t *)sc->sc_scr; 1583 pa = sc->sc_scr_seg->ds_addr; 1584 1585 mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset); 1586 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET); 1587 mf.reserved[0] = 0; 1588 mf.reserved[1] = 0; 1589 mf.reserved[2] = 0; 1590 mf.reserved[3] = 0; 1591 mf.statuslow = (u_int32_t)pa; 1592 mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32); 1593 1594 *sw = htole32(0); 1595 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1596 BUS_DMASYNC_PREREAD); 1597 1598 if ((rv = iop_post(sc, (u_int32_t *)&mf))) 1599 return (rv); 1600 1601 POLL(2500, 1602 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw), 1603 BUS_DMASYNC_POSTREAD), *sw != 0)); 1604 if (*sw != htole32(I2O_RESET_IN_PROGRESS)) { 1605 printf("%s: reset rejected, status 0x%x\n", 1606 sc->sc_dv.dv_xname, le32toh(*sw)); 1607 return (EIO); 1608 } 1609 1610 /* 1611 * IOP is now in the INIT state. Wait no more than 10 seconds for 1612 * the inbound queue to become responsive. 1613 */ 1614 POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY); 1615 if (mfa == IOP_MFA_EMPTY) { 1616 printf("%s: reset failed\n", sc->sc_dv.dv_xname); 1617 return (EIO); 1618 } 1619 1620 iop_release_mfa(sc, mfa); 1621 return (0); 1622 } 1623 1624 /* 1625 * Register a new initiator. Must be called with the configuration lock 1626 * held. 1627 */ 1628 void 1629 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii) 1630 { 1631 static int ictxgen; 1632 int s; 1633 1634 /* 0 is reserved (by us) for system messages. */ 1635 ii->ii_ictx = ++ictxgen; 1636 1637 /* 1638 * `Utility initiators' don't make it onto the per-IOP initiator list 1639 * (which is used only for configuration), but do get one slot on 1640 * the inbound queue. 1641 */ 1642 if ((ii->ii_flags & II_UTILITY) == 0) { 1643 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list); 1644 sc->sc_nii++; 1645 } else 1646 sc->sc_nuii++; 1647 1648 s = splbio(); 1649 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash); 1650 splx(s); 1651 } 1652 1653 /* 1654 * Unregister an initiator. Must be called with the configuration lock 1655 * held. 1656 */ 1657 void 1658 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii) 1659 { 1660 int s; 1661 1662 if ((ii->ii_flags & II_UTILITY) == 0) { 1663 LIST_REMOVE(ii, ii_list); 1664 sc->sc_nii--; 1665 } else 1666 sc->sc_nuii--; 1667 1668 s = splbio(); 1669 LIST_REMOVE(ii, ii_hash); 1670 splx(s); 1671 } 1672 1673 /* 1674 * Handle a reply frame from the IOP. 1675 */ 1676 static int 1677 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa) 1678 { 1679 struct iop_msg *im; 1680 struct i2o_reply *rb; 1681 struct i2o_fault_notify *fn; 1682 struct iop_initiator *ii; 1683 u_int off, ictx, tctx, status, size; 1684 1685 off = (int)(rmfa - sc->sc_rep_phys); 1686 rb = (struct i2o_reply *)(sc->sc_rep + off); 1687 1688 /* Perform reply queue DMA synchronisation. */ 1689 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, 1690 sc->sc_framesize, BUS_DMASYNC_POSTREAD); 1691 if (--sc->sc_curib != 0) 1692 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 1693 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD); 1694 1695 #ifdef I2ODEBUG 1696 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0) 1697 panic("iop_handle_reply: 64-bit reply"); 1698 #endif 1699 /* 1700 * Find the initiator. 1701 */ 1702 ictx = le32toh(rb->msgictx); 1703 if (ictx == IOP_ICTX) 1704 ii = NULL; 1705 else { 1706 ii = LIST_FIRST(IOP_ICTXHASH(ictx)); 1707 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash)) 1708 if (ii->ii_ictx == ictx) 1709 break; 1710 if (ii == NULL) { 1711 #ifdef I2ODEBUG 1712 iop_reply_print(sc, rb); 1713 #endif 1714 printf("%s: WARNING: bad ictx returned (%x)\n", 1715 sc->sc_dv.dv_xname, ictx); 1716 return (-1); 1717 } 1718 } 1719 1720 /* 1721 * If we received a transport failure notice, we've got to dig the 1722 * transaction context (if any) out of the original message frame, 1723 * and then release the original MFA back to the inbound FIFO. 1724 */ 1725 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) { 1726 status = I2O_STATUS_SUCCESS; 1727 1728 fn = (struct i2o_fault_notify *)rb; 1729 tctx = iop_inl_msg(sc, fn->lowmfa + 12); 1730 iop_release_mfa(sc, fn->lowmfa); 1731 iop_tfn_print(sc, fn); 1732 } else { 1733 status = rb->reqstatus; 1734 tctx = le32toh(rb->msgtctx); 1735 } 1736 1737 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) { 1738 /* 1739 * This initiator tracks state using message wrappers. 1740 * 1741 * Find the originating message wrapper, and if requested 1742 * notify the initiator. 1743 */ 1744 im = sc->sc_ims + (tctx & IOP_TCTX_MASK); 1745 if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib || 1746 (im->im_flags & IM_ALLOCED) == 0 || 1747 tctx != im->im_tctx) { 1748 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n", 1749 sc->sc_dv.dv_xname, tctx, im); 1750 if (im != NULL) 1751 printf("%s: flags=0x%08x tctx=0x%08x\n", 1752 sc->sc_dv.dv_xname, im->im_flags, 1753 im->im_tctx); 1754 #ifdef I2ODEBUG 1755 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0) 1756 iop_reply_print(sc, rb); 1757 #endif 1758 return (-1); 1759 } 1760 1761 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) 1762 im->im_flags |= IM_FAIL; 1763 1764 #ifdef I2ODEBUG 1765 if ((im->im_flags & IM_REPLIED) != 0) 1766 panic("%s: dup reply", sc->sc_dv.dv_xname); 1767 #endif 1768 im->im_flags |= IM_REPLIED; 1769 1770 #ifdef I2ODEBUG 1771 if (status != I2O_STATUS_SUCCESS) 1772 iop_reply_print(sc, rb); 1773 #endif 1774 im->im_reqstatus = status; 1775 im->im_detstatus = le16toh(rb->detail); 1776 1777 /* Copy the reply frame, if requested. */ 1778 if (im->im_rb != NULL) { 1779 size = (le32toh(rb->msgflags) >> 14) & ~3; 1780 #ifdef I2ODEBUG 1781 if (size > sc->sc_framesize) 1782 panic("iop_handle_reply: reply too large"); 1783 #endif 1784 memcpy(im->im_rb, rb, size); 1785 } 1786 1787 /* Notify the initiator. */ 1788 if ((im->im_flags & IM_WAIT) != 0) 1789 wakeup(im); 1790 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) 1791 (*ii->ii_intr)(ii->ii_dv, im, rb); 1792 } else { 1793 /* 1794 * This initiator discards message wrappers. 1795 * 1796 * Simply pass the reply frame to the initiator. 1797 */ 1798 (*ii->ii_intr)(ii->ii_dv, NULL, rb); 1799 } 1800 1801 return (status); 1802 } 1803 1804 /* 1805 * Handle an interrupt from the IOP. 1806 */ 1807 int 1808 iop_intr(void *arg) 1809 { 1810 struct iop_softc *sc; 1811 u_int32_t rmfa; 1812 1813 sc = arg; 1814 1815 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) 1816 return (0); 1817 1818 for (;;) { 1819 /* Double read to account for IOP bug. */ 1820 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) { 1821 rmfa = iop_inl(sc, IOP_REG_OFIFO); 1822 if (rmfa == IOP_MFA_EMPTY) 1823 break; 1824 } 1825 iop_handle_reply(sc, rmfa); 1826 iop_outl(sc, IOP_REG_OFIFO, rmfa); 1827 } 1828 1829 return (1); 1830 } 1831 1832 /* 1833 * Handle an event signalled by the executive. 1834 */ 1835 static void 1836 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply) 1837 { 1838 struct i2o_util_event_register_reply *rb; 1839 u_int event; 1840 1841 rb = reply; 1842 1843 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) 1844 return; 1845 1846 event = le32toh(rb->event); 1847 printf("%s: event 0x%08x received\n", dv->dv_xname, event); 1848 } 1849 1850 /* 1851 * Allocate a message wrapper. 1852 */ 1853 struct iop_msg * 1854 iop_msg_alloc(struct iop_softc *sc, int flags) 1855 { 1856 struct iop_msg *im; 1857 static u_int tctxgen; 1858 int s, i; 1859 1860 #ifdef I2ODEBUG 1861 if ((flags & IM_SYSMASK) != 0) 1862 panic("iop_msg_alloc: system flags specified"); 1863 #endif 1864 1865 s = splbio(); 1866 im = SLIST_FIRST(&sc->sc_im_freelist); 1867 #if defined(DIAGNOSTIC) || defined(I2ODEBUG) 1868 if (im == NULL) 1869 panic("iop_msg_alloc: no free wrappers"); 1870 #endif 1871 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain); 1872 splx(s); 1873 1874 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen; 1875 tctxgen += (1 << IOP_TCTX_SHIFT); 1876 im->im_flags = flags | IM_ALLOCED; 1877 im->im_rb = NULL; 1878 i = 0; 1879 do { 1880 im->im_xfer[i++].ix_size = 0; 1881 } while (i < IOP_MAX_MSG_XFERS); 1882 1883 return (im); 1884 } 1885 1886 /* 1887 * Free a message wrapper. 1888 */ 1889 void 1890 iop_msg_free(struct iop_softc *sc, struct iop_msg *im) 1891 { 1892 int s; 1893 1894 #ifdef I2ODEBUG 1895 if ((im->im_flags & IM_ALLOCED) == 0) 1896 panic("iop_msg_free: wrapper not allocated"); 1897 #endif 1898 1899 im->im_flags = 0; 1900 s = splbio(); 1901 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain); 1902 splx(s); 1903 } 1904 1905 /* 1906 * Map a data transfer. Write a scatter-gather list into the message frame. 1907 */ 1908 int 1909 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb, 1910 void *xferaddr, int xfersize, int out, struct proc *up) 1911 { 1912 bus_dmamap_t dm; 1913 bus_dma_segment_t *ds; 1914 struct iop_xfer *ix; 1915 u_int rv, i, nsegs, flg, off, xn; 1916 u_int32_t *p; 1917 1918 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++) 1919 if (ix->ix_size == 0) 1920 break; 1921 1922 #ifdef I2ODEBUG 1923 if (xfersize == 0) 1924 panic("iop_msg_map: null transfer"); 1925 if (xfersize > IOP_MAX_XFER) 1926 panic("iop_msg_map: transfer too large"); 1927 if (xn == IOP_MAX_MSG_XFERS) 1928 panic("iop_msg_map: too many xfers"); 1929 #endif 1930 1931 /* 1932 * Only the first DMA map is static. 1933 */ 1934 if (xn != 0) { 1935 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, 1936 IOP_MAX_SEGS, IOP_MAX_XFER, 0, 1937 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map); 1938 if (rv != 0) 1939 return (rv); 1940 } 1941 1942 dm = ix->ix_map; 1943 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up, 1944 (up == NULL ? BUS_DMA_NOWAIT : 0)); 1945 if (rv != 0) 1946 goto bad; 1947 1948 /* 1949 * How many SIMPLE SG elements can we fit in this message? 1950 */ 1951 off = mb[0] >> 16; 1952 p = mb + off; 1953 nsegs = ((sc->sc_framesize >> 2) - off) >> 1; 1954 1955 if (dm->dm_nsegs > nsegs) { 1956 bus_dmamap_unload(sc->sc_dmat, ix->ix_map); 1957 rv = EFBIG; 1958 DPRINTF(("iop_msg_map: too many segs\n")); 1959 goto bad; 1960 } 1961 1962 nsegs = dm->dm_nsegs; 1963 xfersize = 0; 1964 1965 /* 1966 * Write out the SG list. 1967 */ 1968 if (out) 1969 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT; 1970 else 1971 flg = I2O_SGL_SIMPLE; 1972 1973 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) { 1974 p[0] = (u_int32_t)ds->ds_len | flg; 1975 p[1] = (u_int32_t)ds->ds_addr; 1976 xfersize += ds->ds_len; 1977 } 1978 1979 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER; 1980 p[1] = (u_int32_t)ds->ds_addr; 1981 xfersize += ds->ds_len; 1982 1983 /* Fix up the transfer record, and sync the map. */ 1984 ix->ix_flags = (out ? IX_OUT : IX_IN); 1985 ix->ix_size = xfersize; 1986 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize, 1987 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD); 1988 1989 /* 1990 * If this is the first xfer we've mapped for this message, adjust 1991 * the SGL offset field in the message header. 1992 */ 1993 if ((im->im_flags & IM_SGLOFFADJ) == 0) { 1994 mb[0] += (mb[0] >> 12) & 0xf0; 1995 im->im_flags |= IM_SGLOFFADJ; 1996 } 1997 mb[0] += (nsegs << 17); 1998 return (0); 1999 2000 bad: 2001 if (xn != 0) 2002 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map); 2003 return (rv); 2004 } 2005 2006 /* 2007 * Map a block I/O data transfer (different in that there's only one per 2008 * message maximum, and PAGE addressing may be used). Write a scatter 2009 * gather list into the message frame. 2010 */ 2011 int 2012 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb, 2013 void *xferaddr, int xfersize, int out) 2014 { 2015 bus_dma_segment_t *ds; 2016 bus_dmamap_t dm; 2017 struct iop_xfer *ix; 2018 u_int rv, i, nsegs, off, slen, tlen, flg; 2019 paddr_t saddr, eaddr; 2020 u_int32_t *p; 2021 2022 #ifdef I2ODEBUG 2023 if (xfersize == 0) 2024 panic("iop_msg_map_bio: null transfer"); 2025 if (xfersize > IOP_MAX_XFER) 2026 panic("iop_msg_map_bio: transfer too large"); 2027 if ((im->im_flags & IM_SGLOFFADJ) != 0) 2028 panic("iop_msg_map_bio: SGLOFFADJ"); 2029 #endif 2030 2031 ix = im->im_xfer; 2032 dm = ix->ix_map; 2033 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 2034 BUS_DMA_NOWAIT | BUS_DMA_STREAMING); 2035 if (rv != 0) 2036 return (rv); 2037 2038 off = mb[0] >> 16; 2039 nsegs = ((sc->sc_framesize >> 2) - off) >> 1; 2040 2041 /* 2042 * If the transfer is highly fragmented and won't fit using SIMPLE 2043 * elements, use PAGE_LIST elements instead. SIMPLE elements are 2044 * potentially more efficient, both for us and the IOP. 2045 */ 2046 if (dm->dm_nsegs > nsegs) { 2047 nsegs = 1; 2048 p = mb + off + 1; 2049 2050 /* XXX This should be done with a bus_space flag. */ 2051 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) { 2052 slen = ds->ds_len; 2053 saddr = ds->ds_addr; 2054 2055 while (slen > 0) { 2056 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1); 2057 tlen = min(eaddr - saddr, slen); 2058 slen -= tlen; 2059 *p++ = le32toh(saddr); 2060 saddr = eaddr; 2061 nsegs++; 2062 } 2063 } 2064 2065 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER | 2066 I2O_SGL_END; 2067 if (out) 2068 mb[off] |= I2O_SGL_DATA_OUT; 2069 } else { 2070 p = mb + off; 2071 nsegs = dm->dm_nsegs; 2072 2073 if (out) 2074 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT; 2075 else 2076 flg = I2O_SGL_SIMPLE; 2077 2078 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) { 2079 p[0] = (u_int32_t)ds->ds_len | flg; 2080 p[1] = (u_int32_t)ds->ds_addr; 2081 } 2082 2083 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER | 2084 I2O_SGL_END; 2085 p[1] = (u_int32_t)ds->ds_addr; 2086 nsegs <<= 1; 2087 } 2088 2089 /* Fix up the transfer record, and sync the map. */ 2090 ix->ix_flags = (out ? IX_OUT : IX_IN); 2091 ix->ix_size = xfersize; 2092 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize, 2093 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD); 2094 2095 /* 2096 * Adjust the SGL offset and total message size fields. We don't 2097 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements. 2098 */ 2099 mb[0] += ((off << 4) + (nsegs << 16)); 2100 return (0); 2101 } 2102 2103 /* 2104 * Unmap all data transfers associated with a message wrapper. 2105 */ 2106 void 2107 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im) 2108 { 2109 struct iop_xfer *ix; 2110 int i; 2111 2112 #ifdef I2ODEBUG 2113 if (im->im_xfer[0].ix_size == 0) 2114 panic("iop_msg_unmap: no transfers mapped"); 2115 #endif 2116 2117 for (ix = im->im_xfer, i = 0;;) { 2118 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size, 2119 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE : 2120 BUS_DMASYNC_POSTREAD); 2121 bus_dmamap_unload(sc->sc_dmat, ix->ix_map); 2122 2123 /* Only the first DMA map is static. */ 2124 if (i != 0) 2125 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map); 2126 if ((++ix)->ix_size == 0) 2127 break; 2128 if (++i >= IOP_MAX_MSG_XFERS) 2129 break; 2130 } 2131 } 2132 2133 /* 2134 * Post a message frame to the IOP's inbound queue. 2135 */ 2136 int 2137 iop_post(struct iop_softc *sc, u_int32_t *mb) 2138 { 2139 u_int32_t mfa; 2140 int s; 2141 2142 #ifdef I2ODEBUG 2143 if ((mb[0] >> 16) > (sc->sc_framesize >> 2)) 2144 panic("iop_post: frame too large"); 2145 #endif 2146 2147 s = splbio(); 2148 2149 /* Allocate a slot with the IOP. */ 2150 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) 2151 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) { 2152 splx(s); 2153 printf("%s: mfa not forthcoming\n", 2154 sc->sc_dv.dv_xname); 2155 return (EAGAIN); 2156 } 2157 2158 /* Perform reply buffer DMA synchronisation. */ 2159 if (sc->sc_curib++ == 0) 2160 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0, 2161 sc->sc_rep_size, BUS_DMASYNC_PREREAD); 2162 2163 /* Copy out the message frame. */ 2164 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb, 2165 mb[0] >> 16); 2166 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, 2167 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE); 2168 2169 /* Post the MFA back to the IOP. */ 2170 iop_outl(sc, IOP_REG_IFIFO, mfa); 2171 2172 splx(s); 2173 return (0); 2174 } 2175 2176 /* 2177 * Post a message to the IOP and deal with completion. 2178 */ 2179 int 2180 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo) 2181 { 2182 u_int32_t *mb; 2183 int rv, s; 2184 2185 mb = xmb; 2186 2187 /* Terminate the scatter/gather list chain. */ 2188 if ((im->im_flags & IM_SGLOFFADJ) != 0) 2189 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END; 2190 2191 if ((rv = iop_post(sc, mb)) != 0) 2192 return (rv); 2193 2194 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) { 2195 if ((im->im_flags & IM_POLL) != 0) 2196 iop_msg_poll(sc, im, timo); 2197 else 2198 iop_msg_wait(sc, im, timo); 2199 2200 s = splbio(); 2201 if ((im->im_flags & IM_REPLIED) != 0) { 2202 if ((im->im_flags & IM_NOSTATUS) != 0) 2203 rv = 0; 2204 else if ((im->im_flags & IM_FAIL) != 0) 2205 rv = ENXIO; 2206 else if (im->im_reqstatus != I2O_STATUS_SUCCESS) 2207 rv = EIO; 2208 else 2209 rv = 0; 2210 } else 2211 rv = EBUSY; 2212 splx(s); 2213 } else 2214 rv = 0; 2215 2216 return (rv); 2217 } 2218 2219 /* 2220 * Spin until the specified message is replied to. 2221 */ 2222 static void 2223 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo) 2224 { 2225 u_int32_t rmfa; 2226 int s; 2227 2228 s = splbio(); 2229 2230 /* Wait for completion. */ 2231 for (timo *= 10; timo != 0; timo--) { 2232 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) { 2233 /* Double read to account for IOP bug. */ 2234 rmfa = iop_inl(sc, IOP_REG_OFIFO); 2235 if (rmfa == IOP_MFA_EMPTY) 2236 rmfa = iop_inl(sc, IOP_REG_OFIFO); 2237 if (rmfa != IOP_MFA_EMPTY) { 2238 iop_handle_reply(sc, rmfa); 2239 2240 /* 2241 * Return the reply frame to the IOP's 2242 * outbound FIFO. 2243 */ 2244 iop_outl(sc, IOP_REG_OFIFO, rmfa); 2245 } 2246 } 2247 if ((im->im_flags & IM_REPLIED) != 0) 2248 break; 2249 DELAY(100); 2250 } 2251 2252 if (timo == 0) { 2253 #ifdef I2ODEBUG 2254 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname); 2255 if (iop_status_get(sc, 1) != 0) 2256 printf("iop_msg_poll: unable to retrieve status\n"); 2257 else 2258 printf("iop_msg_poll: IOP state = %d\n", 2259 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff); 2260 #endif 2261 } 2262 2263 splx(s); 2264 } 2265 2266 /* 2267 * Sleep until the specified message is replied to. 2268 */ 2269 static void 2270 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo) 2271 { 2272 int s, rv; 2273 2274 s = splbio(); 2275 if ((im->im_flags & IM_REPLIED) != 0) { 2276 splx(s); 2277 return; 2278 } 2279 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo)); 2280 splx(s); 2281 2282 #ifdef I2ODEBUG 2283 if (rv != 0) { 2284 printf("iop_msg_wait: tsleep() == %d\n", rv); 2285 if (iop_status_get(sc, 0) != 0) 2286 printf("iop_msg_wait: unable to retrieve status\n"); 2287 else 2288 printf("iop_msg_wait: IOP state = %d\n", 2289 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff); 2290 } 2291 #endif 2292 } 2293 2294 /* 2295 * Release an unused message frame back to the IOP's inbound fifo. 2296 */ 2297 static void 2298 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa) 2299 { 2300 2301 /* Use the frame to issue a no-op. */ 2302 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16)); 2303 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP)); 2304 iop_outl_msg(sc, mfa + 8, 0); 2305 iop_outl_msg(sc, mfa + 12, 0); 2306 2307 iop_outl(sc, IOP_REG_IFIFO, mfa); 2308 } 2309 2310 #ifdef I2ODEBUG 2311 /* 2312 * Dump a reply frame header. 2313 */ 2314 static void 2315 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb) 2316 { 2317 u_int function, detail; 2318 #ifdef I2OVERBOSE 2319 const char *statusstr; 2320 #endif 2321 2322 function = (le32toh(rb->msgfunc) >> 24) & 0xff; 2323 detail = le16toh(rb->detail); 2324 2325 printf("%s: reply:\n", sc->sc_dv.dv_xname); 2326 2327 #ifdef I2OVERBOSE 2328 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0])) 2329 statusstr = iop_status[rb->reqstatus]; 2330 else 2331 statusstr = "undefined error code"; 2332 2333 printf("%s: function=0x%02x status=0x%02x (%s)\n", 2334 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr); 2335 #else 2336 printf("%s: function=0x%02x status=0x%02x\n", 2337 sc->sc_dv.dv_xname, function, rb->reqstatus); 2338 #endif 2339 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n", 2340 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx), 2341 le32toh(rb->msgtctx)); 2342 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname, 2343 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095, 2344 (le32toh(rb->msgflags) >> 8) & 0xff); 2345 } 2346 #endif 2347 2348 /* 2349 * Dump a transport failure reply. 2350 */ 2351 static void 2352 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn) 2353 { 2354 2355 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname); 2356 2357 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname, 2358 le32toh(fn->msgictx), le32toh(fn->msgtctx)); 2359 printf("%s: failurecode=0x%02x severity=0x%02x\n", 2360 sc->sc_dv.dv_xname, fn->failurecode, fn->severity); 2361 printf("%s: highestver=0x%02x lowestver=0x%02x\n", 2362 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver); 2363 } 2364 2365 /* 2366 * Translate an I2O ASCII field into a C string. 2367 */ 2368 void 2369 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen) 2370 { 2371 int hc, lc, i, nit; 2372 2373 dlen--; 2374 lc = 0; 2375 hc = 0; 2376 i = 0; 2377 2378 /* 2379 * DPT use NUL as a space, whereas AMI use it as a terminator. The 2380 * spec has nothing to say about it. Since AMI fields are usually 2381 * filled with junk after the terminator, ... 2382 */ 2383 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT); 2384 2385 while (slen-- != 0 && dlen-- != 0) { 2386 if (nit && *src == '\0') 2387 break; 2388 else if (*src <= 0x20 || *src >= 0x7f) { 2389 if (hc) 2390 dst[i++] = ' '; 2391 } else { 2392 hc = 1; 2393 dst[i++] = *src; 2394 lc = i; 2395 } 2396 src++; 2397 } 2398 2399 dst[lc] = '\0'; 2400 } 2401 2402 /* 2403 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it. 2404 */ 2405 int 2406 iop_print_ident(struct iop_softc *sc, int tid) 2407 { 2408 struct { 2409 struct i2o_param_op_results pr; 2410 struct i2o_param_read_results prr; 2411 struct i2o_param_device_identity di; 2412 } __attribute__ ((__packed__)) p; 2413 char buf[32]; 2414 int rv; 2415 2416 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p, 2417 sizeof(p), NULL); 2418 if (rv != 0) 2419 return (rv); 2420 2421 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf, 2422 sizeof(buf)); 2423 printf(" <%s, ", buf); 2424 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf, 2425 sizeof(buf)); 2426 printf("%s, ", buf); 2427 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf)); 2428 printf("%s>", buf); 2429 2430 return (0); 2431 } 2432 2433 /* 2434 * Claim or unclaim the specified TID. 2435 */ 2436 int 2437 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release, 2438 int flags) 2439 { 2440 struct iop_msg *im; 2441 struct i2o_util_claim mf; 2442 int rv, func; 2443 2444 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM; 2445 im = iop_msg_alloc(sc, IM_WAIT); 2446 2447 /* We can use the same structure, as they're identical. */ 2448 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim); 2449 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func); 2450 mf.msgictx = ii->ii_ictx; 2451 mf.msgtctx = im->im_tctx; 2452 mf.flags = flags; 2453 2454 rv = iop_msg_post(sc, im, &mf, 5000); 2455 iop_msg_free(sc, im); 2456 return (rv); 2457 } 2458 2459 /* 2460 * Perform an abort. 2461 */ 2462 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func, 2463 int tctxabort, int flags) 2464 { 2465 struct iop_msg *im; 2466 struct i2o_util_abort mf; 2467 int rv; 2468 2469 im = iop_msg_alloc(sc, IM_WAIT); 2470 2471 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort); 2472 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT); 2473 mf.msgictx = ii->ii_ictx; 2474 mf.msgtctx = im->im_tctx; 2475 mf.flags = (func << 24) | flags; 2476 mf.tctxabort = tctxabort; 2477 2478 rv = iop_msg_post(sc, im, &mf, 5000); 2479 iop_msg_free(sc, im); 2480 return (rv); 2481 } 2482 2483 /* 2484 * Enable or disable reception of events for the specified device. 2485 */ 2486 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask) 2487 { 2488 struct i2o_util_event_register mf; 2489 2490 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register); 2491 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER); 2492 mf.msgictx = ii->ii_ictx; 2493 mf.msgtctx = 0; 2494 mf.eventmask = mask; 2495 2496 /* This message is replied to only when events are signalled. */ 2497 return (iop_post(sc, (u_int32_t *)&mf)); 2498 } 2499 2500 int 2501 iopopen(dev_t dev, int flag, int mode, struct proc *p) 2502 { 2503 struct iop_softc *sc; 2504 2505 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL) 2506 return (ENXIO); 2507 if ((sc->sc_flags & IOP_ONLINE) == 0) 2508 return (ENXIO); 2509 if ((sc->sc_flags & IOP_OPEN) != 0) 2510 return (EBUSY); 2511 sc->sc_flags |= IOP_OPEN; 2512 2513 return (0); 2514 } 2515 2516 int 2517 iopclose(dev_t dev, int flag, int mode, struct proc *p) 2518 { 2519 struct iop_softc *sc; 2520 2521 sc = device_lookup(&iop_cd, minor(dev)); 2522 sc->sc_flags &= ~IOP_OPEN; 2523 2524 return (0); 2525 } 2526 2527 int 2528 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) 2529 { 2530 struct iop_softc *sc; 2531 struct iovec *iov; 2532 int rv, i; 2533 2534 if (securelevel >= 2) 2535 return (EPERM); 2536 2537 sc = device_lookup(&iop_cd, minor(dev)); 2538 2539 switch (cmd) { 2540 case IOPIOCPT: 2541 return (iop_passthrough(sc, (struct ioppt *)data, p)); 2542 2543 case IOPIOCGSTATUS: 2544 iov = (struct iovec *)data; 2545 i = sizeof(struct i2o_status); 2546 if (i > iov->iov_len) 2547 i = iov->iov_len; 2548 else 2549 iov->iov_len = i; 2550 if ((rv = iop_status_get(sc, 0)) == 0) 2551 rv = copyout(&sc->sc_status, iov->iov_base, i); 2552 return (rv); 2553 2554 case IOPIOCGLCT: 2555 case IOPIOCGTIDMAP: 2556 case IOPIOCRECONFIG: 2557 break; 2558 2559 default: 2560 #if defined(DIAGNOSTIC) || defined(I2ODEBUG) 2561 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd); 2562 #endif 2563 return (ENOTTY); 2564 } 2565 2566 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0) 2567 return (rv); 2568 2569 switch (cmd) { 2570 case IOPIOCGLCT: 2571 iov = (struct iovec *)data; 2572 i = le16toh(sc->sc_lct->tablesize) << 2; 2573 if (i > iov->iov_len) 2574 i = iov->iov_len; 2575 else 2576 iov->iov_len = i; 2577 rv = copyout(sc->sc_lct, iov->iov_base, i); 2578 break; 2579 2580 case IOPIOCRECONFIG: 2581 if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0) 2582 rv = iop_reconfigure(sc, 0); 2583 break; 2584 2585 case IOPIOCGTIDMAP: 2586 iov = (struct iovec *)data; 2587 i = sizeof(struct iop_tidmap) * sc->sc_nlctent; 2588 if (i > iov->iov_len) 2589 i = iov->iov_len; 2590 else 2591 iov->iov_len = i; 2592 rv = copyout(sc->sc_tidmap, iov->iov_base, i); 2593 break; 2594 } 2595 2596 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL); 2597 return (rv); 2598 } 2599 2600 static int 2601 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p) 2602 { 2603 struct iop_msg *im; 2604 struct i2o_msg *mf; 2605 struct ioppt_buf *ptb; 2606 int rv, i, mapped; 2607 2608 mf = NULL; 2609 im = NULL; 2610 mapped = 1; 2611 2612 if (pt->pt_msglen > sc->sc_framesize || 2613 pt->pt_msglen < sizeof(struct i2o_msg) || 2614 pt->pt_nbufs > IOP_MAX_MSG_XFERS || 2615 pt->pt_nbufs < 0 || pt->pt_replylen < 0 || 2616 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000) 2617 return (EINVAL); 2618 2619 for (i = 0; i < pt->pt_nbufs; i++) 2620 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) { 2621 rv = ENOMEM; 2622 goto bad; 2623 } 2624 2625 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK); 2626 if (mf == NULL) 2627 return (ENOMEM); 2628 2629 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0) 2630 goto bad; 2631 2632 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS); 2633 im->im_rb = (struct i2o_reply *)mf; 2634 mf->msgictx = IOP_ICTX; 2635 mf->msgtctx = im->im_tctx; 2636 2637 for (i = 0; i < pt->pt_nbufs; i++) { 2638 ptb = &pt->pt_bufs[i]; 2639 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data, 2640 ptb->ptb_datalen, ptb->ptb_out != 0, p); 2641 if (rv != 0) 2642 goto bad; 2643 mapped = 1; 2644 } 2645 2646 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0) 2647 goto bad; 2648 2649 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3; 2650 if (i > sc->sc_framesize) 2651 i = sc->sc_framesize; 2652 if (i > pt->pt_replylen) 2653 i = pt->pt_replylen; 2654 rv = copyout(im->im_rb, pt->pt_reply, i); 2655 2656 bad: 2657 if (mapped != 0) 2658 iop_msg_unmap(sc, im); 2659 if (im != NULL) 2660 iop_msg_free(sc, im); 2661 if (mf != NULL) 2662 free(mf, M_DEVBUF); 2663 return (rv); 2664 } 2665