xref: /netbsd-src/sys/dev/i2o/iopsp.c (revision 7fa608457b817eca6e0977b37f758ae064f3c99c)
1 /*	$NetBSD: iopsp.c,v 1.29 2007/10/19 11:59:44 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Raw SCSI device support for I2O.  IOPs present SCSI devices individually;
41  * we group them by controlling port.
42  */
43 
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: iopsp.c,v 1.29 2007/10/19 11:59:44 ad Exp $");
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/device.h>
51 #include <sys/queue.h>
52 #include <sys/proc.h>
53 #include <sys/buf.h>
54 #include <sys/endian.h>
55 #include <sys/malloc.h>
56 #include <sys/scsiio.h>
57 
58 #include <sys/bswap.h>
59 #include <sys/bus.h>
60 
61 #include <dev/scsipi/scsi_all.h>
62 #include <dev/scsipi/scsi_disk.h>
63 #include <dev/scsipi/scsipi_all.h>
64 #include <dev/scsipi/scsiconf.h>
65 #include <dev/scsipi/scsi_message.h>
66 
67 #include <dev/i2o/i2o.h>
68 #include <dev/i2o/iopio.h>
69 #include <dev/i2o/iopvar.h>
70 #include <dev/i2o/iopspvar.h>
71 
72 static void	iopsp_adjqparam(struct device *, int);
73 static void	iopsp_attach(struct device *, struct device *, void *);
74 static void	iopsp_intr(struct device *, struct iop_msg *, void *);
75 static int	iopsp_ioctl(struct scsipi_channel *, u_long,
76 			    void *, int, struct proc *);
77 static int	iopsp_match(struct device *, struct cfdata *, void *);
78 static int	iopsp_rescan(struct iopsp_softc *);
79 static int	iopsp_reconfig(struct device *);
80 static void	iopsp_scsipi_request(struct scsipi_channel *,
81 				     scsipi_adapter_req_t, void *);
82 
83 CFATTACH_DECL(iopsp, sizeof(struct iopsp_softc),
84     iopsp_match, iopsp_attach, NULL, NULL);
85 
86 /*
87  * Match a supported device.
88  */
89 static int
90 iopsp_match(struct device *parent, struct cfdata *match, void *aux)
91 {
92 	struct iop_attach_args *ia;
93 	struct {
94 		struct	i2o_param_op_results pr;
95 		struct	i2o_param_read_results prr;
96 		struct	i2o_param_hba_ctlr_info ci;
97 	} __attribute__ ((__packed__)) param;
98 
99 	ia = aux;
100 
101 	if (ia->ia_class != I2O_CLASS_BUS_ADAPTER_PORT)
102 		return (0);
103 
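	/*
	 * Query the port's controller information and claim the device
	 * only if it sits on a parallel SCSI or Fibre Channel bus.
	 */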
104 	if (iop_field_get_all((struct iop_softc *)parent, ia->ia_tid,
105 	    I2O_PARAM_HBA_CTLR_INFO, &param, sizeof(param), NULL) != 0)
106 		return (0);
107 
108 	return (param.ci.bustype == I2O_HBA_BUS_SCSI ||
109 	    param.ci.bustype == I2O_HBA_BUS_FCA);
110 }
111 
112 /*
113  * Attach a supported device.
114  */
115 static void
116 iopsp_attach(struct device *parent, struct device *self, void *aux)
117 {
118 	struct iop_attach_args *ia;
119 	struct iopsp_softc *sc;
120 	struct iop_softc *iop;
121 	struct {
122 		struct	i2o_param_op_results pr;
123 		struct	i2o_param_read_results prr;
124 		union {
125 			struct	i2o_param_hba_ctlr_info ci;
126 			struct	i2o_param_hba_scsi_ctlr_info sci;
127 			struct	i2o_param_hba_scsi_port_info spi;
128 		} p;
129 	} __attribute__ ((__packed__)) param;
130 	int fc, rv;
131 	int size;
132 
133 	ia = (struct iop_attach_args *)aux;
134 	sc = device_private(self);
135 	iop = device_private(parent);
136 
137 	/* Register us as an initiator. */
138 	sc->sc_ii.ii_dv = self;
139 	sc->sc_ii.ii_intr = iopsp_intr;
140 	sc->sc_ii.ii_flags = 0;
141 	sc->sc_ii.ii_tid = ia->ia_tid;
142 	sc->sc_ii.ii_reconfig = iopsp_reconfig;
143 	sc->sc_ii.ii_adjqparam = iopsp_adjqparam;
144 	iop_initiator_register(iop, &sc->sc_ii);
145 
146 	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_CTLR_INFO,
147 	    &param, sizeof(param), NULL);
148 	if (rv != 0)
149 		goto bad;
150 
151 	fc = (param.p.ci.bustype == I2O_HBA_BUS_FCA);
152 
153 	/*
154 	 * Say what the device is.  If we can find out what the controlling
155 	 * device is, say what that is too.
156 	 */
157 	printf(": SCSI port");
158 	iop_print_ident(iop, ia->ia_tid);
159 	printf("\n");
160 
161 	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_SCSI_CTLR_INFO,
162 	    &param, sizeof(param), NULL);
163 	if (rv != 0)
164 		goto bad;
165 
166 	printf("%s: ", sc->sc_dv.dv_xname);
167 	if (fc)
168 		printf("FC");
169 	else
170 		printf("%d-bit", param.p.sci.maxdatawidth);
171 	printf(", max sync rate %dMHz, initiator ID %d\n",
172 	    (u_int32_t)le64toh(param.p.sci.maxsyncrate) / 1000,
173 	    le32toh(param.p.sci.initiatorid));
174 
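	/*
	 * Start with a single opening; the IOP grants the real per-initiator
	 * command count later via iopsp_adjqparam().
	 */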
175 	sc->sc_openings = 1;
176 
177 	sc->sc_adapter.adapt_dev = &sc->sc_dv;
178 	sc->sc_adapter.adapt_nchannels = 1;
179 	sc->sc_adapter.adapt_openings = 1;
180 	sc->sc_adapter.adapt_max_periph = 1;
181 	sc->sc_adapter.adapt_ioctl = iopsp_ioctl;
182 	sc->sc_adapter.adapt_minphys = minphys;
183 	sc->sc_adapter.adapt_request = iopsp_scsipi_request;
184 
185 	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
186 	sc->sc_channel.chan_adapter = &sc->sc_adapter;
187 	sc->sc_channel.chan_bustype = &scsi_bustype;
188 	sc->sc_channel.chan_channel = 0;
189 	sc->sc_channel.chan_ntargets = fc ?
190 	    IOPSP_MAX_FC_TARGET : param.p.sci.maxdatawidth;
191 	sc->sc_channel.chan_nluns = IOPSP_MAX_LUN;
192 	sc->sc_channel.chan_id = le32toh(param.p.sci.initiatorid);
193 	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;
194 
195 	/*
196 	 * Allocate the target map.  Currently used for informational
197 	 * purposes only.
198 	 */
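	/*
	 * XXX The allocation below uses M_NOWAIT; a failed (NULL) map is
	 * XXX not checked for before it is used in iopsp_reconfig().
	 */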
199 	size = sc->sc_channel.chan_ntargets * sizeof(struct iopsp_target);
200 	sc->sc_targetmap = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);
201 
202 	/* Build the two maps, and attach to scsipi. */
203 	if (iopsp_reconfig(self) != 0) {
204 		printf("%s: configure failed\n", sc->sc_dv.dv_xname);
205 		goto bad;
206 	}
207 	config_found(self, &sc->sc_channel, scsiprint);
208 	return;
209 
210  bad:
211 	iop_initiator_unregister(iop, &sc->sc_ii);
212 }
213 
214 /*
215  * Scan the LCT to determine which devices we control, and enter them into
216  * the maps.
217  */
218 static int
219 iopsp_reconfig(struct device *dv)
220 {
221 	struct iopsp_softc *sc;
222 	struct iop_softc *iop;
223 	struct i2o_lct_entry *le;
224 	struct scsipi_channel *sc_chan;
225 	struct {
226 		struct	i2o_param_op_results pr;
227 		struct	i2o_param_read_results prr;
228 		struct	i2o_param_scsi_device_info sdi;
229 	} __attribute__ ((__packed__)) param;
230 	u_int tid, nent, i, targ, lun, size, rv, bptid;
231 	u_short *tidmap;
232 	void *tofree;
233 	struct iopsp_target *it;
234 	int syncrate;
235 
236 	sc = (struct iopsp_softc *)dv;
237 	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
238 	sc_chan = &sc->sc_channel;
239 
240 	KASSERT(mutex_owned(&iop->sc_conflock));
241 
242 	/* Anything to do? */
243 	if (iop->sc_chgind == sc->sc_chgind)
244 		return (0);
245 
246 	/*
247 	 * Allocate memory for the target/LUN -> TID map.  Use zero to
248 	 * denote absent targets (zero is the TID of the I2O executive,
249 	 * and we never address that here).
250 	 */
251 	size = sc_chan->chan_ntargets * (IOPSP_MAX_LUN) * sizeof(u_short);
252 	if ((tidmap = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
253 		return (ENOMEM);
254 
255 	for (i = 0; i < sc_chan->chan_ntargets; i++)
256 		sc->sc_targetmap[i].it_flags &= ~IT_PRESENT;
257 
258 	/*
259 	 * A quick hack to handle Intel's stacked bus port arrangement.
260 	 */
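	/*
	 * If the LCT lists a bus adapter port whose user TID is our own,
	 * the SCSI peripherals are parented by that port rather than by us,
	 * so use its local TID when matching parents below.  (TIDs are
	 * 12-bit fields, hence the & 4095 masks.)
	 */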
261 	bptid = sc->sc_ii.ii_tid;
262 	nent = iop->sc_nlctent;
263 	for (le = iop->sc_lct->entry; nent != 0; nent--, le++)
264 		if ((le16toh(le->classid) & 4095) ==
265 		    I2O_CLASS_BUS_ADAPTER_PORT &&
266 		    (le32toh(le->usertid) & 4095) == bptid) {
267 			bptid = le16toh(le->localtid) & 4095;
268 			break;
269 		}
270 
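	/*
	 * Walk the LCT.  For each SCSI peripheral parented by our bus port,
	 * fetch its device information parameter group to learn which
	 * target/LUN it answers to, and enter its TID into the new map.
	 */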
271 	nent = iop->sc_nlctent;
272 	for (i = 0, le = iop->sc_lct->entry; i < nent; i++, le++) {
273 		if ((le16toh(le->classid) & 4095) != I2O_CLASS_SCSI_PERIPHERAL)
274 			continue;
275 		if (((le32toh(le->usertid) >> 12) & 4095) != bptid)
276 			continue;
277 		tid = le16toh(le->localtid) & 4095;
278 
279 		rv = iop_field_get_all(iop, tid, I2O_PARAM_SCSI_DEVICE_INFO,
280 		    &param, sizeof(param), NULL);
281 		if (rv != 0)
282 			continue;
283 		targ = le32toh(param.sdi.identifier);
284 		lun = param.sdi.luninfo[1];
285 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
286 		if (targ >= sc_chan->chan_ntargets ||
287 		    lun >= sc_chan->chan_nluns) {
288 			printf("%s: target %d,%d (tid %d): bad target/LUN\n",
289 			    sc->sc_dv.dv_xname, targ, lun, tid);
290 			continue;
291 		}
292 #endif
293 
294 		/*
295 		 * If we've already described this target, and nothing has
296 		 * changed, then don't describe it again.
297 		 */
298 		it = &sc->sc_targetmap[targ];
299 		it->it_flags |= IT_PRESENT;
300 		syncrate = ((int)le64toh(param.sdi.negsyncrate) + 500) / 1000;
301 		if (it->it_width != param.sdi.negdatawidth ||
302 		    it->it_offset != param.sdi.negoffset ||
303 		    it->it_syncrate != syncrate) {
304 			it->it_width = param.sdi.negdatawidth;
305 			it->it_offset = param.sdi.negoffset;
306 			it->it_syncrate = syncrate;
307 
308 			printf("%s: target %d (tid %d): %d-bit, ",
309 			    sc->sc_dv.dv_xname, targ, tid, it->it_width);
310 			if (it->it_syncrate == 0)
311 				printf("asynchronous\n");
312 			else
313 				printf("synchronous at %dMHz, offset 0x%x\n",
314 				    it->it_syncrate, it->it_offset);
315 		}
316 
317 		/* Ignore the device if it's in use by somebody else. */
318 		if ((le32toh(le->usertid) & 4095) != I2O_TID_NONE) {
319 			if (sc->sc_tidmap == NULL ||
320 			    IOPSP_TIDMAP(sc->sc_tidmap, targ, lun) !=
321 			    IOPSP_TID_INUSE)
322 				printf("%s: target %d,%d (tid %d): in use by"
323 				    " tid %d\n", sc->sc_dv.dv_xname,
324 				    targ, lun, tid,
325 				    le32toh(le->usertid) & 4095);
326 			IOPSP_TIDMAP(tidmap, targ, lun) = IOPSP_TID_INUSE;
327 		} else
328 			IOPSP_TIDMAP(tidmap, targ, lun) = (u_short)tid;
329 	}
330 
331 	for (i = 0; i < sc_chan->chan_ntargets; i++)
332 		if ((sc->sc_targetmap[i].it_flags & IT_PRESENT) == 0)
333 			sc->sc_targetmap[i].it_width = 0;
334 
335 	/* Swap in the new map and return. */
336 	mutex_spin_enter(&iop->sc_intrlock);
337 	tofree = sc->sc_tidmap;
338 	sc->sc_tidmap = tidmap;
339 	mutex_spin_exit(&iop->sc_intrlock);
340 
341 	if (tofree != NULL)
342 		free(tofree, M_DEVBUF);
343 	sc->sc_chgind = iop->sc_chgind;
344 	return (0);
345 }
346 
347 /*
348  * Re-scan the bus.  Called from a higher level (e.g. scsipi).
349  */
350 static int
351 iopsp_rescan(struct iopsp_softc *sc)
352 {
353 	struct iop_softc *iop;
354 	struct iop_msg *im;
355 	struct i2o_hba_bus_scan mf;
356 	int rv;
357 
358 	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
359 
360 	mutex_enter(&iop->sc_conflock);
361 	im = iop_msg_alloc(iop, IM_WAIT);
362 
363 	mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
364 	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_HBA_BUS_SCAN);
365 	mf.msgictx = sc->sc_ii.ii_ictx;
366 	mf.msgtctx = im->im_tctx;
367 
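	/* A full bus scan can take a while; allow it up to five minutes. */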
368 	rv = iop_msg_post(iop, im, &mf, 5*60*1000);
369 	iop_msg_free(iop, im);
370 	if (rv != 0)
371 		printf("%s: bus rescan failed (error %d)\n",
372 		    sc->sc_dv.dv_xname, rv);
373 
374 	if ((rv = iop_lct_get(iop)) == 0)
375 		rv = iopsp_reconfig(&sc->sc_dv);
376 
377 	mutex_exit(&iop->sc_conflock);
378 	return (rv);
379 }
380 
381 /*
382  * Handle a scsipi adapter request; most commonly, start a SCSI command.
383  */
384 static void
385 iopsp_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
386 		     void *arg)
387 {
388 	struct scsipi_xfer *xs;
389 	struct scsipi_periph *periph;
390 	struct iopsp_softc *sc;
391 	struct iop_msg *im;
392 	struct iop_softc *iop;
393 	struct i2o_scsi_scb_exec *mf;
394 	int error, flags, tid;
395 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
396 
397 	sc = (void *)chan->chan_adapter->adapt_dev;
398 	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
399 
400 	switch (req) {
401 	case ADAPTER_REQ_RUN_XFER:
402 		xs = arg;
403 		periph = xs->xs_periph;
404 		flags = xs->xs_control;
405 
406 		SC_DEBUG(periph, SCSIPI_DB2, ("iopsp_scsi_request run_xfer\n"));
407 
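		/*
		 * Look up the TID for this target/LUN.  Absent or in-use
		 * entries are failed with a selection timeout, as if no
		 * device had answered.
		 */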
408 		tid = IOPSP_TIDMAP(sc->sc_tidmap, periph->periph_target,
409 		    periph->periph_lun);
410 		if (tid == IOPSP_TID_ABSENT || tid == IOPSP_TID_INUSE) {
411 			xs->error = XS_SELTIMEOUT;
412 			scsipi_done(xs);
413 			return;
414 		}
415 
416 		/* Need to reset the target? */
417 		if ((flags & XS_CTL_RESET) != 0) {
418 			if (iop_simple_cmd(iop, tid, I2O_SCSI_DEVICE_RESET,
419 			    sc->sc_ii.ii_ictx, 1, 30*1000) != 0) {
420 #ifdef I2ODEBUG
421 				printf("%s: reset failed\n",
422 				    sc->sc_dv.dv_xname);
423 #endif
424 				xs->error = XS_DRIVER_STUFFUP;
425 			} else
426 				xs->error = XS_NOERROR;
427 
428 			scsipi_done(xs);
429 			return;
430 		}
431 
432 #if defined(I2ODEBUG) || defined(SCSIDEBUG)
433 		if (xs->cmdlen > sizeof(mf->cdb))
434 			panic("%s: CDB too large", sc->sc_dv.dv_xname);
435 #endif
436 
437 		im = iop_msg_alloc(iop, IM_POLL_INTR |
438 		    IM_NOSTATUS | ((flags & XS_CTL_POLL) != 0 ? IM_POLL : 0));
439 		im->im_dvcontext = xs;
440 
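		/*
		 * Build the SCB_EXEC message.  The CDB is carried inline and
		 * sense data is returned in the reply; the data buffer, if
		 * any, is mapped as a scatter/gather list below.
		 */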
441 		mf = (struct i2o_scsi_scb_exec *)mb;
442 		mf->msgflags = I2O_MSGFLAGS(i2o_scsi_scb_exec);
443 		mf->msgfunc = I2O_MSGFUNC(tid, I2O_SCSI_SCB_EXEC);
444 		mf->msgictx = sc->sc_ii.ii_ictx;
445 		mf->msgtctx = im->im_tctx;
446 		mf->flags = xs->cmdlen | I2O_SCB_FLAG_ENABLE_DISCONNECT |
447 		    I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
448 		mf->datalen = xs->datalen;
449 		memcpy(mf->cdb, xs->cmd, xs->cmdlen);
450 
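		/* Translate the scsipi tag type into the I2O queue tag flag. */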
451 		switch (xs->xs_tag_type) {
452 		case MSG_ORDERED_Q_TAG:
453 			mf->flags |= I2O_SCB_FLAG_ORDERED_QUEUE_TAG;
454 			break;
455 		case MSG_SIMPLE_Q_TAG:
456 			mf->flags |= I2O_SCB_FLAG_SIMPLE_QUEUE_TAG;
457 			break;
458 		case MSG_HEAD_OF_Q_TAG:
459 			mf->flags |= I2O_SCB_FLAG_HEAD_QUEUE_TAG;
460 			break;
461 		default:
462 			break;
463 		}
464 
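		/*
		 * Map the data buffer for DMA and record the transfer
		 * direction in the SCB flags.
		 */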
465 		if (xs->datalen != 0) {
466 			error = iop_msg_map_bio(iop, im, mb, xs->data,
467 			    xs->datalen, (flags & XS_CTL_DATA_OUT) == 0);
468 			if (error) {
469 				xs->error = XS_DRIVER_STUFFUP;
470 				iop_msg_free(iop, im);
471 				scsipi_done(xs);
472 				return;
473 			}
474 			if ((flags & XS_CTL_DATA_IN) == 0)
475 				mf->flags |= I2O_SCB_FLAG_XFER_TO_DEVICE;
476 			else
477 				mf->flags |= I2O_SCB_FLAG_XFER_FROM_DEVICE;
478 		}
479 
480 		if (iop_msg_post(iop, im, mb, xs->timeout)) {
481 			if (xs->datalen != 0)
482 				iop_msg_unmap(iop, im);
483 			iop_msg_free(iop, im);
484 			xs->error = XS_DRIVER_STUFFUP;
485 			scsipi_done(xs);
486 		}
487 		break;
488 
489 	case ADAPTER_REQ_GROW_RESOURCES:
490 		/*
491 		 * Not supported.
492 		 */
493 		break;
494 
495 	case ADAPTER_REQ_SET_XFER_MODE:
496 		/*
497 		 * The DDM takes care of this, and we can't modify its
498 		 * behaviour.
499 		 */
500 		break;
501 	}
502 }
503 
504 #ifdef notyet
505 /*
506  * Abort the specified I2O_SCSI_SCB_EXEC message and its associated SCB.
507  */
508 static int
509 iopsp_scsi_abort(struct iopsp_softc *sc, int atid, struct iop_msg *aim)
510 {
511 	struct iop_msg *im;
512 	struct i2o_scsi_scb_abort mf;
513 	struct iop_softc *iop;
514 	int rv, s;
515 
516 	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
517 	im = iop_msg_alloc(iop, IM_POLL);
518 
519 	mf.msgflags = I2O_MSGFLAGS(i2o_scsi_scb_abort);
520 	mf.msgfunc = I2O_MSGFUNC(atid, I2O_SCSI_SCB_ABORT);
521 	mf.msgictx = sc->sc_ii.ii_ictx;
522 	mf.msgtctx = im->im_tctx;
523 	mf.tctxabort = aim->im_tctx;
524 
525 	rv = iop_msg_post(iop, im, &mf, 30000);
526 	iop_msg_free(iop, im);
527 
528 	return (rv);
529 }
530 #endif
531 
532 /*
533  * A message has been processed and replied to by the IOP; handle the
534  * completion.
535  */
536 static void
537 iopsp_intr(struct device *dv, struct iop_msg *im, void *reply)
538 {
539 	struct scsipi_xfer *xs;
540 	struct iopsp_softc *sc;
541 	struct i2o_scsi_reply *rb;
542 	struct iop_softc *iop;
543 	u_int sl;
544 
545 	sc = (struct iopsp_softc *)dv;
546 	xs = (struct scsipi_xfer *)im->im_dvcontext;
547 	iop = (struct iop_softc *)device_parent(dv);
548 	rb = reply;
549 
550 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("iopsp_intr\n"));
551 
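	/*
	 * A transport-level failure is flagged in the message header;
	 * otherwise translate the HBA and SCSI status codes into scsipi
	 * error values.
	 */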
552 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
553 		xs->error = XS_DRIVER_STUFFUP;
554 		xs->resid = xs->datalen;
555 	} else {
556 		if (rb->hbastatus != I2O_SCSI_DSC_SUCCESS) {
557 			switch (rb->hbastatus) {
558 			case I2O_SCSI_DSC_ADAPTER_BUSY:
559 			case I2O_SCSI_DSC_SCSI_BUS_RESET:
560 			case I2O_SCSI_DSC_BUS_BUSY:
561 				xs->error = XS_BUSY;
562 				break;
563 			case I2O_SCSI_DSC_SELECTION_TIMEOUT:
564 				xs->error = XS_SELTIMEOUT;
565 				break;
566 			case I2O_SCSI_DSC_COMMAND_TIMEOUT:
567 			case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
568 			case I2O_SCSI_DSC_LUN_INVALID:
569 			case I2O_SCSI_DSC_SCSI_TID_INVALID:
570 				xs->error = XS_TIMEOUT;
571 				break;
572 			default:
573 				xs->error = XS_DRIVER_STUFFUP;
574 				break;
575 			}
576 			printf("%s: HBA status 0x%02x\n", sc->sc_dv.dv_xname,
577 			   rb->hbastatus);
578 		} else if (rb->scsistatus != SCSI_OK) {
579 			switch (rb->scsistatus) {
580 			case SCSI_CHECK:
581 				xs->error = XS_SENSE;
582 				sl = le32toh(rb->senselen);
583 				if (sl > sizeof(xs->sense.scsi_sense))
584 					sl = sizeof(xs->sense.scsi_sense);
585 				memcpy(&xs->sense.scsi_sense, rb->sense, sl);
586 				break;
587 			case SCSI_QUEUE_FULL:
588 			case SCSI_BUSY:
589 				xs->error = XS_BUSY;
590 				break;
591 			default:
592 				xs->error = XS_DRIVER_STUFFUP;
593 				break;
594 			}
595 		} else
596 			xs->error = XS_NOERROR;
597 
598 		xs->resid = xs->datalen - le32toh(rb->datalen);
599 		xs->status = rb->scsistatus;
600 	}
601 
602 	/* Free the message wrapper and pass the news to scsipi. */
603 	if (xs->datalen != 0)
604 		iop_msg_unmap(iop, im);
605 	iop_msg_free(iop, im);
606 
607 	scsipi_done(xs);
608 }
609 
610 /*
611  * ioctl hook; used here only to initiate low-level rescans.
612  */
613 static int
614 iopsp_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
615     int flag, struct proc *p)
616 {
617 	int rv;
618 
619 	switch (cmd) {
620 	case SCBUSIOLLSCAN:
621 		/*
622 		 * If it's boot time, the bus has already been scanned and the
623 		 * maps built.  Taking the configuration lock here would stall
624 		 * re-configuration, so just fake success.
625 		 */
626 		if (curlwp != &lwp0)
627 			rv = iopsp_rescan(
628 			   (struct iopsp_softc *)chan->chan_adapter->adapt_dev);
629 		else
630 			rv = 0;
631 		break;
632 
633 	default:
634 		rv = ENOTTY;
635 		break;
636 	}
637 
638 	return (rv);
639 }
640 
641 /*
642  * The number of openings available to us has changed, so inform scsipi.
643  */
644 static void
645 iopsp_adjqparam(struct device *dv, int mpi)
646 {
647 	struct iopsp_softc *sc;
648 	struct iop_softc *iop;
649 
650 	sc = device_private(dv);
651 	iop = device_private(device_parent(dv));
652 
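	/*
	 * mpi is the new number of commands the IOP will accept from us;
	 * apply the difference from the old value to scsipi's openings.
	 */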
653 	mutex_spin_enter(&iop->sc_intrlock);
654 	sc->sc_adapter.adapt_openings += mpi - sc->sc_openings;
655 	sc->sc_openings = mpi;
656 	mutex_spin_exit(&iop->sc_intrlock);
657 }
658