/*	$NetBSD: mpt_netbsd.c,v 1.17 2012/01/30 17:45:14 mhitch Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.17 2012/01/30 17:45:14 mhitch Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

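	/*
	 * Cap the number of outstanding commands at the lesser of the
	 * IOC's advertised global credits and the size of our request
	 * pool; either one can be the limiting resource.
	 */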
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

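	/*
	 * Carve the request area into MPT_REQUEST_AREA-sized slots.
	 * The last MPT_SENSE_SIZE bytes of each slot serve as that
	 * request's sense buffer, so no separate sense area is needed.
	 */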
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request, MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

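	/*
	 * Drain the reply FIFO.  Each entry is either a context reply
	 * (just the request's MsgContext, for normal completions) or
	 * the location of a full reply frame describing an exception.
	 */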
	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
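	/*
	 * Poll the IOC once; if this request's sequence number has
	 * advanced, the completion was merely hiding behind a missed
	 * interrupt and nothing needs to be recovered.
	 */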
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

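	/*
	 * Recover the request index from the FIFO entry.  A context
	 * reply carries the MsgContext (our request pool index)
	 * directly; an address reply instead locates a full reply
	 * frame, which MPT_REPLY_PTOV() converts back to a virtual
	 * address so the MsgContext can be read out of it.
	 */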
	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
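	/*
	 * The MPI LUN field is an 8-byte SCSI LUN structure; for the
	 * single-level LUNs used here, the LUN value goes in byte 1.
	 */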
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

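		/*
		 * If the transfer needs more scatter/gather elements
		 * than fit in the request frame (MPT_NSGL_FIRST), lay
		 * down the first MPT_NSGL_FIRST - 1 simple elements
		 * in-line, then chain to further SGE lists built in
		 * the remainder of the request buffer.  Otherwise a
		 * single in-line list suffices.
		 */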
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
						* sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
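			/*
			 * Pack the requested minimum sync period factor
			 * into bits 8-15 and the maximum offset into
			 * bits 16-23, the fields cleared by the
			 * MIN_SYNC_PERIOD/MAX_SYNC_OFFSET masks above.
			 */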
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

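	/*
	 * NegotiatedParameters packs the sync period factor into bits
	 * 8-15 and the sync offset into bits 16-23; a zero offset
	 * means the target is running asynchronous.
	 */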
	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

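	/*
	 * Replies land here because their MsgContext has the high bit
	 * set, which this driver uses to mark internally generated
	 * (non-SCSI-I/O) requests; see mpt_done() and
	 * mpt_event_notify_reply().
	 */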
	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(&mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >>  8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

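	/*
	 * Some events must be explicitly acknowledged before the IOC
	 * will report them again.  Build an EventAck in a spare
	 * request, tagging it as internal via the MsgContext high bit
	 * so its reply is routed back through mpt_ctlop().
	 */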
	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}