/*	$NetBSD: mpt_netbsd.c,v 1.6 2003/05/01 20:18:35 thorpej Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 */

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

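	/*
	 * The IOC advertises how many requests it is willing to have
	 * in flight (its "global credits"); clamp our queue depth to
	 * the smaller of that and the driver's own request pool size.
	 */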
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	if (mpt->is_fc) {
		chan->chan_ntargets = 256;
		chan->chan_id = 256;
	} else {
		chan->chan_ntargets = 16;
		chan->chan_id = mpt->mpt_ini_id;
	}

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	caddr_t vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (caddr_t *) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (caddr_t *) &mpt->request, 0);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

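	/*
	 * Carve the request area into per-request slices.  Each request
	 * gets MPT_REQUEST_AREA bytes; the last MPT_SENSE_SIZE bytes of
	 * each slice serve as that request's autosense buffer, so the
	 * message frame and its sense data share one DMA-loaded region.
	 */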
	pptr = mpt->request_phys;
	vptr = (caddr_t) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

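	/*
	 * Drain the reply FIFO.  Each entry popped is either a context
	 * reply (the index of a request that completed cleanly) or, when
	 * MPT_CONTEXT_REPLY is set, a cookie that MPT_REPLY_PTOV() turns
	 * into a pointer to a full reply frame in the reply area.
	 */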
	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

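	/*
	 * Snapshot the request's sequence number, then run the interrupt
	 * handler by hand; if the sequence number has moved on, the
	 * "timed out" command actually completed and we can bail out.
	 */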
	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

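	/*
	 * A context reply carries only the index of a request that
	 * completed successfully; an address reply points at a full
	 * reply frame, which the IOC sends when it has status or sense
	 * information to report.
	 */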
	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

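	/*
	 * The IOC performs autosense into the per-request sense buffer;
	 * if it says the data is valid, hand it straight to the scsipi
	 * layer rather than forcing a separate REQUEST SENSE.
	 */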
	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true(mpt->is_fc ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_fc)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_fc == 0 &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

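		/*
		 * The request frame has room for MPT_NSGL_FIRST(mpt)
		 * inline simple SG elements.  If the transfer needs
		 * more, the last inline slot becomes a chain element
		 * pointing further into the request buffer, where
		 * additional runs of simple elements (each ending in
		 * another chain element if needed) describe the rest
		 * of the transfer.
		 */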
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
				}
				ce->Length = ntodo * sizeof(SGE_SIMPLE32);
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (mpt->is_fc) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

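		/*
		 * Port Page 0 Capabilities packs the minimum sync period
		 * factor into bits 8-15 and the maximum sync offset into
		 * bits 16-23; mirror them back into RequestedParameters,
		 * turning on DT (and QAS/IU for Ultra320-capable factors)
		 * as the period factor allows.
		 */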
		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

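	/*
	 * NegotiatedParameters carries the sync period factor in
	 * bits 8-15 and the sync offset in bits 16-23; a non-zero
	 * offset means synchronous transfers were negotiated.
	 */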
	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

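	/*
	 * Control operations are our own internally-generated commands;
	 * we set the high bit (0x80000000) in their MsgContext when we
	 * send them, which is how mpt_done() routed the completion here
	 * instead of to a scsipi_xfer.
	 */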
	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >>  8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

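		/*
		 * The IOC wants an explicit acknowledgement for this
		 * event, so build an EVENT_ACK message in a spare
		 * request, tagging its MsgContext with the high bit so
		 * the completion is routed back to mpt_ctlop().
		 */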
		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}