/*	$NetBSD: mpt_netbsd.c,v 1.24 2014/04/17 16:08:42 christos Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.24 2014/04/17 16:08:42 christos Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */
#include <sys/scsiio.h>

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_restart(mpt_softc_t *, request_t *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static int	mpt_drain_queue(mpt_softc_t *);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
static void	mpt_bus_reset(mpt_softc_t *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
		    struct proc *);

/*
 * XXX - this assumes the device_private() of the attachment starts with
 * a struct mpt_softc, so we can use the return value of device_private()
 * directly, without any offset.
 */
#define DEV_TO_MPT(DEV)	device_private(DEV)

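/*
 * Attach the scsipi glue: fill in the scsipi_adapter and scsipi_channel
 * structures and hand the channel to config_found() so that scsibus
 * attaches beneath us.
 */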
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

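	/*
	 * Bound the queue depth by both what the IOC granted us
	 * (global credits) and what the driver itself allocated.
	 */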
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;
	adapt->adapt_ioctl = mpt_ioctl;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the device returned by config_found() so we can rescan
	 * the bus in case of errors.
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

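	/*
	 * Carve the request area into per-request chunks.  Each request
	 * gets MPT_REQUEST_AREA bytes, with the final MPT_SENSE_SIZE
	 * bytes of each chunk serving as that request's sense buffer.
	 */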
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

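/*
 * Interrupt handler: claim the interrupt only if the IOC has posted
 * replies, then drain the reply queue.
 */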
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	nrepl = mpt_drain_queue(mpt);
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

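/*
 * Watchdog for a timed-out command.  Before giving up, poll the IOC
 * once more (it has been seen to lose interrupts under heavy load);
 * if the request still hasn't completed, reset and restart the IOC.
 */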
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = DEV_TO_MPT(periph->periph_channel->chan_adapter->adapt_dev);
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data since it appears it can
	 * sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}

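/*
 * Reset and reinitialize the IOC, handing every pending request back
 * to scsipi.  Requests other than req0 are marked for requeueing;
 * req0 keeps whatever error status the caller assigned it.
 */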
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * mpt_init() below will reinitialize the request
			 * pool anyway, but free the request here so its
			 * state is consistent in the meantime.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}

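/*
 * Pop and dispatch every reply the IOC has posted; returns the number
 * of replies processed.
 */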
static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}

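/*
 * Process a single reply.  A context reply carries only the request
 * index and indicates success; an address reply points at a reply
 * frame holding error details or an asynchronous notification.
 */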
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0; /* nonzero if we need to restart the IOC */

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself. When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Call mpt_timeout which knows how to get the IOC back
		 * on its feet.
		 */
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && (le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}

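/*
 * Build and submit the MPI SCSI I/O request for a scsipi_xfer:
 * allocate a request, fill in the request frame, construct the
 * scatter/gather list (chaining if it doesn't fit in the frame)
 * and hand the whole thing to the IOC.
 */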
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;
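			/* (The offset is counted in 32-bit words.) */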
			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
						* sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	if (xs->timeout == 0) {
		mpt_prt(mpt, "mpt_run_xfer: no timeout specified for "
		    "request: 0x%x", req->index);
		xs->timeout = 500;
	}

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

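/*
 * Handle ADAPTER_REQ_SET_XFER_MODE: record the tagged-queueing and
 * disconnect policy, and on parallel SCSI push the requested wide/sync
 * parameters to the IOC through SCSI Device Page 1.
 */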
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make sense for
		 * parallel SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x "
			    "Config %x", xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

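/*
 * Read the negotiated transfer parameters back from SCSI Device Page 0
 * and report them to scsipi as an async event.
 */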
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

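/*
 * Dispatch a control-path reply (event notification, event ack,
 * port enable or configuration reply), returning the reply frame
 * to the IOC where appropriate.
 */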
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

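/*
 * Decode and log an asynchronous event notification, and acknowledge
 * it if the IOC asks us to.
 */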
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >>  8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

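/*
 * Issue a SCSI bus reset as a task-management request over the
 * handshake interface.  Only one management request may be
 * outstanding at a time.
 */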
static void
mpt_bus_reset(mpt_softc_t *mpt)
{
	request_t *req;
	MSG_SCSI_TASK_MGMT *mngt_req;
	int s;

	s = splbio();
	if (mpt->mngt_req) {
		/* request already queued; can't do more */
		splx(s);
		return;
	}
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		mpt_prt(mpt, "no mngt request");
		splx(s);
		return;
	}
	mpt->mngt_req = req;
	splx(s);
	mngt_req = req->req_vbuf;
	memset(mngt_req, 0, sizeof(*mngt_req));
	mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	mngt_req->Bus = mpt->bus;
	mngt_req->TargetID = 0;
	mngt_req->ChainOffset = 0;
	mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
	mngt_req->Reserved1 = 0;
	mngt_req->MsgFlags =
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
	mngt_req->MsgContext = req->index;
	mngt_req->TaskMsgContext = 0;
	s = splbio();
	mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
	splx(s);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = DEV_TO_MPT(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

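/*
 * Adapter ioctl entry point; only SCBUSIORESET is supported.
 */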
static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt;
	int s;

	mpt = device_private(chan->chan_adapter->adapt_dev);
	switch (cmd) {
	case SCBUSIORESET:
		mpt_bus_reset(mpt);
		s = splbio();
		mpt_intr(mpt);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}
1595