/*	$NetBSD: iopsp.c,v 1.30 2007/12/01 18:12:37 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Raw SCSI device support for I2O.  IOPs present SCSI devices individually;
 * we group them by controlling port.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopsp.c,v 1.30 2007/12/01 18:12:37 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/scsiio.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
#include <dev/i2o/iopspvar.h>

static void	iopsp_adjqparam(struct device *, int);
static void	iopsp_attach(struct device *, struct device *, void *);
static void	iopsp_intr(struct device *, struct iop_msg *, void *);
static int	iopsp_ioctl(struct scsipi_channel *, u_long,
			    void *, int, struct proc *);
static int	iopsp_match(struct device *, struct cfdata *, void *);
static int	iopsp_rescan(struct iopsp_softc *);
static int	iopsp_reconfig(struct device *);
static void	iopsp_scsipi_request(struct scsipi_channel *,
				     scsipi_adapter_req_t, void *);

CFATTACH_DECL(iopsp, sizeof(struct iopsp_softc),
    iopsp_match, iopsp_attach, NULL, NULL);

/*
 * Match a supported device.
 */
static int
iopsp_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		struct	i2o_param_hba_ctlr_info ci;
	} __attribute__ ((__packed__)) param;

	ia = aux;

	if (ia->ia_class != I2O_CLASS_BUS_ADAPTER_PORT)
		return (0);

	if (iop_field_get_all((struct iop_softc *)parent, ia->ia_tid,
	    I2O_PARAM_HBA_CTLR_INFO, &param, sizeof(param), NULL) != 0)
		return (0);

	return (param.ci.bustype == I2O_HBA_BUS_SCSI ||
	    param.ci.bustype == I2O_HBA_BUS_FCA);
}

/*
 * Attach a supported device.
 */
static void
iopsp_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct iopsp_softc *sc;
	struct iop_softc *iop;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_hba_ctlr_info ci;
			struct	i2o_param_hba_scsi_ctlr_info sci;
			struct	i2o_param_hba_scsi_port_info spi;
		} p;
	} __attribute__ ((__packed__)) param;
	int fc, rv;
	int size;

	ia = (struct iop_attach_args *)aux;
	sc = device_private(self);
	iop = device_private(parent);

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = iopsp_intr;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	sc->sc_ii.ii_reconfig = iopsp_reconfig;
	sc->sc_ii.ii_adjqparam = iopsp_adjqparam;
	iop_initiator_register(iop, &sc->sc_ii);

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_CTLR_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	fc = (param.p.ci.bustype == I2O_HBA_BUS_FCA);

	/*
	 * Say what the device is.  If we can find out what the controlling
	 * device is, say what that is too.
	 */
	aprint_normal(": SCSI port");
	iop_print_ident(iop, ia->ia_tid);
	aprint_normal("\n");

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_SCSI_CTLR_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	aprint_normal("%s: ", sc->sc_dv.dv_xname);
	if (fc)
		aprint_normal("FC");
	else
		aprint_normal("%d-bit", param.p.sci.maxdatawidth);
	aprint_normal(", max sync rate %dMHz, initiator ID %d\n",
	    (u_int32_t)le64toh(param.p.sci.maxsyncrate) / 1000,
	    le32toh(param.p.sci.initiatorid));

	sc->sc_openings = 1;

	sc->sc_adapter.adapt_dev = &sc->sc_dv;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 1;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = iopsp_ioctl;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = iopsp_scsipi_request;

	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = fc ?
	    IOPSP_MAX_FC_TARGET : param.p.sci.maxdatawidth;
	sc->sc_channel.chan_nluns = IOPSP_MAX_LUN;
	sc->sc_channel.chan_id = le32toh(param.p.sci.initiatorid);
	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Allocate the target map.  Currently used for informational
	 * purposes only.
	 */
	size = sc->sc_channel.chan_ntargets * sizeof(struct iopsp_target);
	sc->sc_targetmap = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);

 	/* Build the two maps, and attach to scsipi. */
	if (iopsp_reconfig(self) != 0) {
		aprint_error("%s: configure failed\n", sc->sc_dv.dv_xname);
		goto bad;
	}
	config_found(self, &sc->sc_channel, scsiprint);
	return;

 bad:
	iop_initiator_unregister(iop, &sc->sc_ii);
}

/*
 * Scan the LCT to determine which devices we control, and enter them into
 * the maps.
 */
static int
iopsp_reconfig(struct device *dv)
{
	struct iopsp_softc *sc;
	struct iop_softc *iop;
	struct i2o_lct_entry *le;
	struct scsipi_channel *sc_chan;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		struct	i2o_param_scsi_device_info sdi;
	} __attribute__ ((__packed__)) param;
	u_int tid, nent, i, targ, lun, size, rv, bptid;
	u_short *tidmap;
	void *tofree;
	struct iopsp_target *it;
	int syncrate;

	sc = (struct iopsp_softc *)dv;
	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
	sc_chan = &sc->sc_channel;

	KASSERT(mutex_owned(&iop->sc_conflock));

	/* Anything to do? */
	if (iop->sc_chgind == sc->sc_chgind)
		return (0);

	/*
	 * Allocate memory for the target/LUN -> TID map.  Use zero to
	 * denote absent targets (zero is the TID of the I2O executive,
	 * and we never address that here).
	 */
	size = sc_chan->chan_ntargets * (IOPSP_MAX_LUN) * sizeof(u_short);
	if ((tidmap = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
		return (ENOMEM);

	for (i = 0; i < sc_chan->chan_ntargets; i++)
		sc->sc_targetmap[i].it_flags &= ~IT_PRESENT;

	/*
	 * A quick hack to handle Intel's stacked bus port arrangement.
	 */
	bptid = sc->sc_ii.ii_tid;
	nent = iop->sc_nlctent;
	for (le = iop->sc_lct->entry; nent != 0; nent--, le++)
		if ((le16toh(le->classid) & 4095) ==
		    I2O_CLASS_BUS_ADAPTER_PORT &&
		    (le32toh(le->usertid) & 4095) == bptid) {
			bptid = le16toh(le->localtid) & 4095;
			break;
		}

	nent = iop->sc_nlctent;
	for (i = 0, le = iop->sc_lct->entry; i < nent; i++, le++) {
		if ((le16toh(le->classid) & 4095) != I2O_CLASS_SCSI_PERIPHERAL)
			continue;
		if (((le32toh(le->usertid) >> 12) & 4095) != bptid)
			continue;
		tid = le16toh(le->localtid) & 4095;

		rv = iop_field_get_all(iop, tid, I2O_PARAM_SCSI_DEVICE_INFO,
		    &param, sizeof(param), NULL);
		if (rv != 0)
			continue;
		targ = le32toh(param.sdi.identifier);
		lun = param.sdi.luninfo[1];
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		if (targ >= sc_chan->chan_ntargets ||
		    lun >= sc_chan->chan_nluns) {
			aprint_error("%s: target %d,%d (tid %d): "
			    "bad target/LUN\n", sc->sc_dv.dv_xname,
			    targ, lun, tid);
			continue;
		}
#endif

		/*
		 * If we've already described this target, and nothing has
		 * changed, then don't describe it again.
		 */
		it = &sc->sc_targetmap[targ];
		it->it_flags |= IT_PRESENT;
		syncrate = ((int)le64toh(param.sdi.negsyncrate) + 500) / 1000;
		if (it->it_width != param.sdi.negdatawidth ||
		    it->it_offset != param.sdi.negoffset ||
		    it->it_syncrate != syncrate) {
			it->it_width = param.sdi.negdatawidth;
			it->it_offset = param.sdi.negoffset;
			it->it_syncrate = syncrate;

			aprint_verbose("%s: target %d (tid %d): %d-bit, ",
			    sc->sc_dv.dv_xname, targ, tid, it->it_width);
			if (it->it_syncrate == 0)
				aprint_verbose("asynchronous\n");
			else
				aprint_verbose("synchronous at %dMHz, "
				    "offset 0x%x\n", it->it_syncrate,
				    it->it_offset);
		}

		/* Ignore the device if it's in use by somebody else. */
		if ((le32toh(le->usertid) & 4095) != I2O_TID_NONE) {
			if (sc->sc_tidmap == NULL ||
			    IOPSP_TIDMAP(sc->sc_tidmap, targ, lun) !=
			    IOPSP_TID_INUSE) {
				aprint_verbose("%s: target %d,%d (tid %d): "
				    "in use by tid %d\n", sc->sc_dv.dv_xname,
				    targ, lun, tid,
				    le32toh(le->usertid) & 4095);
			}
			IOPSP_TIDMAP(tidmap, targ, lun) = IOPSP_TID_INUSE;
		} else
			IOPSP_TIDMAP(tidmap, targ, lun) = (u_short)tid;
	}

	for (i = 0; i < sc_chan->chan_ntargets; i++)
		if ((sc->sc_targetmap[i].it_flags & IT_PRESENT) == 0)
			sc->sc_targetmap[i].it_width = 0;

	/* Swap in the new map and return. */
	mutex_spin_enter(&iop->sc_intrlock);
	tofree = sc->sc_tidmap;
	sc->sc_tidmap = tidmap;
	mutex_spin_exit(&iop->sc_intrlock);

	if (tofree != NULL)
		free(tofree, M_DEVBUF);
	sc->sc_chgind = iop->sc_chgind;
	return (0);
}

/*
 * Re-scan the bus; to be called from a higher level (e.g. scsipi).
 */
static int
iopsp_rescan(struct iopsp_softc *sc)
{
	struct iop_softc *iop;
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	int rv;

	iop = (struct iop_softc *)device_parent(&sc->sc_dv);

	mutex_enter(&iop->sc_conflock);
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_HBA_BUS_SCAN);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(iop, im, &mf, 5*60*1000);
	iop_msg_free(iop, im);
	if (rv != 0)
		aprint_error("%s: bus rescan failed (error %d)\n",
		    sc->sc_dv.dv_xname, rv);

	if ((rv = iop_lct_get(iop)) == 0)
		rv = iopsp_reconfig(&sc->sc_dv);

	mutex_exit(&iop->sc_conflock);
	return (rv);
}

/*
 * Start a SCSI command.
 */
static void
iopsp_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		     void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct iopsp_softc *sc;
	struct iop_msg *im;
	struct iop_softc *iop;
	struct i2o_scsi_scb_exec *mf;
	int error, flags, tid;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (void *)chan->chan_adapter->adapt_dev;
	iop = (struct iop_softc *)device_parent(&sc->sc_dv);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("iopsp_scsi_request run_xfer\n"));

		tid = IOPSP_TIDMAP(sc->sc_tidmap, periph->periph_target,
		    periph->periph_lun);
		if (tid == IOPSP_TID_ABSENT || tid == IOPSP_TID_INUSE) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			return;
		}

		/* Need to reset the target? */
		if ((flags & XS_CTL_RESET) != 0) {
			if (iop_simple_cmd(iop, tid, I2O_SCSI_DEVICE_RESET,
			    sc->sc_ii.ii_ictx, 1, 30*1000) != 0) {
				aprint_error("%s: reset failed\n",
				    sc->sc_dv.dv_xname);
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;

			scsipi_done(xs);
			return;
		}

#if defined(I2ODEBUG) || defined(SCSIDEBUG)
		if (xs->cmdlen > sizeof(mf->cdb))
			panic("%s: CDB too large", sc->sc_dv.dv_xname);
#endif

		im = iop_msg_alloc(iop, IM_POLL_INTR |
		    IM_NOSTATUS | ((flags & XS_CTL_POLL) != 0 ?
		    IM_POLL : 0));
		im->im_dvcontext = xs;

		mf = (struct i2o_scsi_scb_exec *)mb;
		mf->msgflags = I2O_MSGFLAGS(i2o_scsi_scb_exec);
		mf->msgfunc = I2O_MSGFUNC(tid, I2O_SCSI_SCB_EXEC);
		mf->msgictx = sc->sc_ii.ii_ictx;
		mf->msgtctx = im->im_tctx;
		mf->flags = xs->cmdlen | I2O_SCB_FLAG_ENABLE_DISCONNECT |
		    I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
		mf->datalen = xs->datalen;
		memcpy(mf->cdb, xs->cmd, xs->cmdlen);

		switch (xs->xs_tag_type) {
		case MSG_ORDERED_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_ORDERED_QUEUE_TAG;
			break;
		case MSG_SIMPLE_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_SIMPLE_QUEUE_TAG;
			break;
		case MSG_HEAD_OF_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_HEAD_QUEUE_TAG;
			break;
		default:
			break;
		}

		/* Map the data buffer and set the transfer direction. */
		if (xs->datalen != 0) {
			error = iop_msg_map_bio(iop, im, mb, xs->data,
			    xs->datalen, (flags & XS_CTL_DATA_OUT) == 0);
			if (error) {
				xs->error = XS_DRIVER_STUFFUP;
				iop_msg_free(iop, im);
				scsipi_done(xs);
				return;
			}
			if ((flags & XS_CTL_DATA_IN) == 0)
				mf->flags |= I2O_SCB_FLAG_XFER_TO_DEVICE;
			else
				mf->flags |= I2O_SCB_FLAG_XFER_FROM_DEVICE;
		}

		if (iop_msg_post(iop, im, mb, xs->timeout)) {
			if (xs->datalen != 0)
				iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		}
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * The DDM takes care of this, and we can't modify its
		 * behaviour.
		 */
		break;
	}
}

#ifdef notyet
/*
 * Abort the specified I2O_SCSI_SCB_EXEC message and its associated SCB.
 */
static int
iopsp_scsi_abort(struct iopsp_softc *sc, int atid, struct iop_msg *aim)
{
	struct iop_msg *im;
	struct i2o_scsi_scb_abort mf;
	struct iop_softc *iop;
	int rv, s;

	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
	im = iop_msg_alloc(iop, IM_POLL);

	mf.msgflags = I2O_MSGFLAGS(i2o_scsi_scb_abort);
	mf.msgfunc = I2O_MSGFUNC(atid, I2O_SCSI_SCB_ABORT);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.tctxabort = aim->im_tctx;

	rv = iop_msg_post(iop, im, &mf, 30000);
	iop_msg_free(iop, im);

	return (rv);
}
#endif

/*
 * We have a message which has been processed and replied to by the IOP -
 * deal with it.
 */
static void
iopsp_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct scsipi_xfer *xs;
	struct iopsp_softc *sc;
	struct i2o_scsi_reply *rb;
	struct iop_softc *iop;
	u_int sl;

	sc = (struct iopsp_softc *)dv;
	xs = (struct scsipi_xfer *)im->im_dvcontext;
	iop = (struct iop_softc *)device_parent(dv);
	rb = reply;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("iopsp_intr\n"));

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		if (rb->hbastatus != I2O_SCSI_DSC_SUCCESS) {
			switch (rb->hbastatus) {
			case I2O_SCSI_DSC_ADAPTER_BUSY:
			case I2O_SCSI_DSC_SCSI_BUS_RESET:
			case I2O_SCSI_DSC_BUS_BUSY:
				xs->error = XS_BUSY;
				break;
			case I2O_SCSI_DSC_SELECTION_TIMEOUT:
				xs->error = XS_SELTIMEOUT;
				break;
			case I2O_SCSI_DSC_COMMAND_TIMEOUT:
			case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
			case I2O_SCSI_DSC_LUN_INVALID:
			case I2O_SCSI_DSC_SCSI_TID_INVALID:
				xs->error = XS_TIMEOUT;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			aprint_error("%s: HBA status 0x%02x\n",
			    sc->sc_dv.dv_xname, rb->hbastatus);
		} else if (rb->scsistatus != SCSI_OK) {
			switch (rb->scsistatus) {
			case SCSI_CHECK:
				xs->error = XS_SENSE;
				sl = le32toh(rb->senselen);
				if (sl > sizeof(xs->sense.scsi_sense))
					sl = sizeof(xs->sense.scsi_sense);
				memcpy(&xs->sense.scsi_sense, rb->sense, sl);
				break;
			case SCSI_QUEUE_FULL:
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->error = XS_NOERROR;

		xs->resid = xs->datalen - le32toh(rb->datalen);
		xs->status = rb->scsistatus;
	}

	/* Free the message wrapper and pass the news to scsipi. */
	if (xs->datalen != 0)
		iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);

	scsipi_done(xs);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
iopsp_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
    int flag, struct proc *p)
{
	int rv;

	switch (cmd) {
	case SCBUSIOLLSCAN:
		/*
		 * If it's boot time, the bus will have been scanned and the
		 * maps built.  Locking would stop re-configuration, but we
		 * want to fake success.
		 */
		if (curlwp != &lwp0)
			rv = iopsp_rescan(
			   (struct iopsp_softc *)chan->chan_adapter->adapt_dev);
		else
			rv = 0;
		break;

	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * The number of openings available to us has changed, so inform scsipi.
 */
static void
iopsp_adjqparam(struct device *dv, int mpi)
{
	struct iopsp_softc *sc;
	struct iop_softc *iop;

	sc = device_private(dv);
	iop = device_private(device_parent(dv));

	mutex_spin_enter(&iop->sc_intrlock);
	sc->sc_adapter.adapt_openings += mpi - sc->sc_openings;
	sc->sc_openings = mpi;
	mutex_spin_exit(&iop->sc_intrlock);
}