/*	$NetBSD: mpt_netbsd.c,v 1.15 2010/04/28 22:45:27 chs Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.15 2010/04/28 22:45:27 chs Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

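/*
 * mpt_scsipi_attach:
 *
 *	Attach the adapter to the scsipi midlayer: fill in the
 *	scsipi_adapter and scsipi_channel structures and attach the
 *	child SCSI bus via config_found().  The queue depth offered to
 *	the midlayer is the smaller of the IOC's global credits and
 *	our own request pool size.
 */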
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

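/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the DMA-safe memory shared with the IOC: one page of
 *	reply frames and a contiguous region of request frames
 *	(MPT_REQ_MEM_SIZE(mpt) bytes).  Each request_t in the pool is
 *	pointed at its MPT_REQUEST_AREA-sized slice of that region,
 *	and the last MPT_SENSE_SIZE bytes of each slice double as the
 *	request's autosense buffer.  Each request also gets its own
 *	data-transfer DMA map.
 */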
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

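/*
 * mpt_intr:
 *
 *	Interrupt handler.  If the IOC has posted replies, drain the
 *	reply FIFO, dispatching each reply word to mpt_done().
 *	Returns non-zero if any replies were processed.
 */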
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

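/*
 * mpt_timeout:
 *
 *	Watchdog for a SCSI command.  Poke the interrupt handler once
 *	in case the completion is merely sitting in the reply FIFO; if
 *	the request's sequence number changed, the command actually
 *	finished and we recovered.  Otherwise dump diagnostic state and
 *	fail the transfer with XS_TIMEOUT.
 */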
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

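/*
 * mpt_done:
 *
 *	Process a single reply word popped from the reply FIFO.  A
 *	context reply (high bit clear) carries the index of the
 *	completed request directly; an address reply (high bit set)
 *	points at a reply frame holding error details.  The FIFO hands
 *	us the frame's physical address shifted right by one bit,
 *	which is why the word is shifted left again when the frame is
 *	returned to the IOC via mpt_free_reply().
 */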
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

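/*
 * mpt_run_xfer:
 *
 *	Start a SCSI command: allocate a request, build the
 *	MSG_SCSI_IO_REQUEST in its frame (tag type, direction, CDB,
 *	sense buffer address), map the data buffer and construct the
 *	scatter/gather list, then hand the request to the IOC.  Polled
 *	commands are completed here via mpt_poll().
 */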
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

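			/*
			 * The chain elements live in the unused tail of
			 * the request frame itself: each SGE_CHAIN32
			 * gives the byte length of the run of simple
			 * elements that follows it, the physical address
			 * of that run (req_pbuf plus the offset of `se'
			 * within the frame), and, counted in 32-bit
			 * words, where the next chain element will sit
			 * inside that run.
			 */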
			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
						* sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

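/*
 * mpt_set_xfer_mode:
 *
 *	Apply the transfer mode requested by the scsipi midlayer to a
 *	SPI target by rewriting SCSI Device Page 1: wide/narrow and,
 *	if sync is requested, the best period/offset the port
 *	advertises in Port Page 0, including DT/QAS/IU for U160/U320
 *	capable ports.  Fibre Channel ports silently ignore the
 *	request.
 */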
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (!mpt->is_scsi) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	host2mpt_config_page_scsi_device_1(&tmp);
	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt2host_config_page_scsi_device_1(&tmp);
	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

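/*
 * mpt_get_xfer_mode:
 *
 *	Read back SCSI Device Page 0 for a target, translate the
 *	negotiated parameters (wide, sync period/offset, tagged
 *	queueing) into a scsipi_xfer_mode, and deliver it to the
 *	midlayer as an ASYNC_EVENT_XFER_MODE event.
 */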
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

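/*
 * mpt_ctlop:
 *
 *	Handle an address reply whose MsgContext has the high bit set,
 *	i.e. a control operation rather than a SCSI I/O: event
 *	notifications, event ACKs, port-enable and configuration-page
 *	replies.  Reply frames are returned to the IOC here, except
 *	for CONFIG replies, which are held until the waiting request
 *	has picked up the result.
 */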
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

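/*
 * mpt_event_notify_reply:
 *
 *	Decode an asynchronous event notification from the IOC and log
 *	it.  If the IOC asks for an acknowledgement, allocate a
 *	request and send an EVENT_ACK back.
 */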
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(&mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >>  8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

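/*
 * mpt_scsipi_request:
 *
 *	Entry point for requests from the scsipi midlayer: dispatch
 *	transfers to mpt_run_xfer() and transfer-mode changes to
 *	mpt_set_xfer_mode().  Growing resources is not supported.
 */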
static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}
1369