/*	$NetBSD: mpt_netbsd.c,v 1.14 2008/04/08 12:07:26 cegger Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.14 2008/04/08 12:07:26 cegger Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

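	/*
	 * The IOC accepts no more commands than it granted us global
	 * credits for, and we track no more than we allocated request
	 * structures for, so size the queue to the smaller of the two.
	 */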
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

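	/*
	 * Carve the request area into per-request slots.  Each request
	 * gets MPT_REQUEST_AREA bytes; the last MPT_SENSE_SIZE bytes of
	 * each slot serve as that request's autosense buffer, so no
	 * separate sense DMA area is needed.
	 */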
	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

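	/*
	 * Drain the reply post FIFO.  Each value popped is either a
	 * context reply (the request index we stashed in MsgContext)
	 * or, with the high bit set, the address of a reply frame in
	 * our reply area; mpt_done() sorts out which.
	 */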
	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
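	/*
	 * The reply FIFO hands us the frame address shifted right one
	 * bit, so shift it back before returning it to the free queue.
	 */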
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
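	/* Single-level LUN addressing puts the LUN in byte 1. */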
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;
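			/* (ChainOffset is in units of 32-bit words.) */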

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32);
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = ntodo
						* sizeof(SGE_SIMPLE32);
				}
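				/*
				 * The chained simple elements live in this
				 * same request frame, immediately after the
				 * chain element itself.
				 */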
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (!mpt->is_scsi) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

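		/*
		 * Port Page 0 packs the minimum sync period factor in
		 * bits 8-15 of Capabilities and the maximum sync offset
		 * in bits 16-23; request the fastest the port supports.
		 */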
		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

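	/*
	 * NegotiatedParameters packs the sync period factor in bits
	 * 8-15 and the sync offset in bits 16-23; a zero offset means
	 * the target is running asynchronous.
	 */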
	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(&mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >>  8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
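		/* The high bit routes the completion back to mpt_ctlop(). */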
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}
1359