/*	$NetBSD: mpt_netbsd.c,v 1.19 2012/09/23 01:13:21 chs Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.19 2012/09/23 01:13:21 chs Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
/*
 * XXX - this assumes the device_private() of the attachment starts with
 * a struct mpt_softc, so we can use the return value of device_private()
 * directly, without any offset.
 */
#define DEV_TO_MPT(DEV)	device_private(DEV)

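/*
 * mpt_scsipi_attach:
 *
 *	Attach the SCSIPI front-end to the controller.  The adapter
 *	queue depth is sized from the IOC's advertised global credits
 *	(capped at MPT_MAX_REQUESTS()), less the two requests reserved
 *	for driver-internal use, and the bus type is chosen to match
 *	the transport (SAS, FC, or parallel SCSI) before the child
 *	scsibus is attached.
 */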
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

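/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the DMA-safe memory shared with the IOC: one page of
 *	reply frames, plus a contiguous request area of
 *	MPT_REQ_MEM_SIZE() bytes.  The request area is carved into
 *	MPT_REQUEST_AREA-sized chunks, one per request_t, with the
 *	final MPT_SENSE_SIZE bytes of each chunk used for that
 *	request's autosense data.  Each request also gets its own DMA
 *	map (up to MPT_SGL_MAX segments) for mapping data transfers.
 */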
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));	/* must match the bus_dmamem_map() size */
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

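/*
 * mpt_intr:
 *
 *	Interrupt handler.  Drain the IOC's reply queue, handing each
 *	reply to mpt_done(), and claim the interrupt if at least one
 *	reply was processed.
 */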
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

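/*
 * mpt_poll:
 *
 *	Poll for completion of a command, for up to "count" milliseconds.
 *	Returns zero if the command completed, non-zero on timeout.
 */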
static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

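/*
 * mpt_timeout:
 *
 *	Command watchdog, run from the callout.  We first call the
 *	interrupt handler in case the completion was merely lost; if
 *	the request's sequence number has advanced, the command in fact
 *	completed and we have recovered.  Otherwise, dump the IOC state
 *	and fail the transfer with XS_TIMEOUT.
 */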
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt = DEV_TO_MPT(
	    periph->periph_channel->chan_adapter->adapt_dev);
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

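/*
 * mpt_done:
 *
 *	Process one reply from the IOC.  A context reply carries only
 *	the request index and means unqualified success; an address
 *	reply points at a reply frame whose IOC and SCSI status we
 *	translate into the corresponding scsipi error code, copying
 *	out autosense data if the IOC captured any.
 */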
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

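/*
 * mpt_run_xfer:
 *
 *	Build and send the SCSI I/O request for a scsipi_xfer.  The
 *	request index is stashed in MsgContext so that mpt_done() can
 *	recover the request_t on completion.  If the data transfer fits
 *	in the request frame's MPT_NSGL_FIRST() scatter/gather slots,
 *	a simple inline SGL is built; otherwise the slot after the
 *	first MPT_NSGL_FIRST() - 1 simple elements becomes a chain
 *	element pointing just past itself in the request buffer, where
 *	the remaining SGE_SIMPLE32 elements (and further chain
 *	elements, if need be) are placed.  For example, if
 *	MPT_NSGL_FIRST() were 4 (an illustrative value only), a
 *	6-segment transfer would be laid out as 3 inline simple
 *	elements, one chain element, and the 3 remaining simple
 *	elements in the chain area.
 */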
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
						* sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

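/*
 * mpt_set_xfer_mode:
 *
 *	Handle an ADAPTER_REQ_SET_XFER_MODE request from the scsipi
 *	layer: record the tagged-queueing setting for the target and,
 *	on parallel SCSI, push the requested wide/sync parameters to
 *	the IOC by rewriting SCSI Device Page 1.  The mode actually
 *	negotiated is reported back later via mpt_get_xfer_mode().
 */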
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make sense for
		 * parallel SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

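/*
 * mpt_get_xfer_mode:
 *
 *	Read the transfer parameters negotiated for a target from SCSI
 *	Device Page 0 and deliver them to the scsipi layer as an
 *	ASYNC_EVENT_XFER_MODE event.
 */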
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

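/*
 * mpt_ctlop:
 *
 *	Handle replies to control operations (requests issued with the
 *	high bit set in MsgContext): event notifications, event acks,
 *	port enable, and configuration requests.  The reply frame is
 *	handed back to the IOC, except for configuration replies, which
 *	are recorded in req->sequence for the requesting thread to
 *	consume.
 */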
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

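/*
 * mpt_event_notify_reply:
 *
 *	Decode and log an asynchronous event notification from the
 *	IOC, and queue an EVENT_ACK message back to it if one is
 *	required.
 */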
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >>  8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

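/*
 * mpt_scsipi_request:
 *
 *	Our single scsipi entry point; dispatch requests from the
 *	scsipi mid-layer to the matching handler above.
 */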
static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = DEV_TO_MPT(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
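/*
 * For example (illustrative figures only): with 4 KB pages and an SGL
 * limit of 33 segments, MPT_MAX_XFER comes to 32 * 4 KB = 128 KB; a
 * 128 KB transfer starting at a non-page-aligned address touches 33
 * pages, and so still fits in a single SGL.
 */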
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}
1385