1 /*-
2  * FreeBSD/CAM-specific routines for LSI '909 FC adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in making SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also contributed a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  *
96  * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
97  */
98 
99 #include <dev/disk/mpt/mpt.h>
100 #include <dev/disk/mpt/mpt_cam.h>
101 #include <dev/disk/mpt/mpt_raid.h>
102 
103 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/disk/mpt/mpilib/mpi_init.h"
105 #include "dev/disk/mpt/mpilib/mpi_targ.h"
106 #include "dev/disk/mpt/mpilib/mpi_fc.h"
107 #include "dev/disk/mpt/mpilib/mpi_sas.h"
108 #include <sys/sysctl.h>
109 #include <sys/callout.h>
110 #include <sys/kthread.h>
111 
112 #ifndef	CAM_NEW_TRAN_CODE
113 #define	CAM_NEW_TRAN_CODE	1
114 #endif
115 
116 static void mpt_poll(struct cam_sim *);
117 static timeout_t mpt_timeout;
118 static void mpt_action(struct cam_sim *, union ccb *);
119 static int
120 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
121 static void mpt_setwidth(struct mpt_softc *, int, int);
122 static void mpt_setsync(struct mpt_softc *, int, int, int);
123 static int mpt_update_spi_config(struct mpt_softc *, int);
124 
125 static mpt_reply_handler_t mpt_scsi_reply_handler;
126 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
127 static mpt_reply_handler_t mpt_fc_els_reply_handler;
128 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
129 					MSG_DEFAULT_REPLY *);
130 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
131 static int mpt_fc_reset_link(struct mpt_softc *, int);
132 
133 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
134 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
135 static void mpt_recovery_thread(void *arg);
136 static void mpt_recover_commands(struct mpt_softc *mpt);
137 
138 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
139     u_int, u_int, u_int, int);
140 
141 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
142 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
143 static int mpt_add_els_buffers(struct mpt_softc *mpt);
144 static int mpt_add_target_commands(struct mpt_softc *mpt);
145 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
146 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
147 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
148 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
149 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
150 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
151     uint8_t, uint8_t const *);
152 static void
153 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
154     tgt_resource_t *, int);
155 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
156 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
157 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
158 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
159 
160 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
161 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
162 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
163 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
164 
165 static mpt_probe_handler_t	mpt_cam_probe;
166 static mpt_attach_handler_t	mpt_cam_attach;
167 static mpt_enable_handler_t	mpt_cam_enable;
168 static mpt_ready_handler_t	mpt_cam_ready;
169 static mpt_event_handler_t	mpt_cam_event;
170 static mpt_reset_handler_t	mpt_cam_ioc_reset;
171 static mpt_detach_handler_t	mpt_cam_detach;
172 
173 static struct mpt_personality mpt_cam_personality =
174 {
175 	.name		= "mpt_cam",
176 	.probe		= mpt_cam_probe,
177 	.attach		= mpt_cam_attach,
178 	.enable		= mpt_cam_enable,
179 	.ready		= mpt_cam_ready,
180 	.event		= mpt_cam_event,
181 	.reset		= mpt_cam_ioc_reset,
182 	.detach		= mpt_cam_detach,
183 };
184 
185 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
186 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
187 
188 int mpt_enable_sata_wc = -1;
189 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
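
/*
 * Illustrative only: the tunable can be set from /boot/loader.conf before
 * the driver attaches; the default of -1 leaves the drives untouched
 * (see mpt_set_initial_config_sas() below):
 *
 *	hw.mpt.enable_sata_wc="1"	# force SATA write caching on
 *	hw.mpt.enable_sata_wc="0"	# force SATA write caching off
 */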
190 
191 static int
192 mpt_cam_probe(struct mpt_softc *mpt)
193 {
194 	int role;
195 
196 	/*
197 	 * Only attach to nodes that support the initiator or target role
198 	 * (or want to) or have RAID physical devices that need CAM pass-thru
199 	 * support.
200 	 */
201 	if (mpt->do_cfg_role) {
202 		role = mpt->cfg_role;
203 	} else {
204 		role = mpt->role;
205 	}
206 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
207 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
208 		return (0);
209 	}
210 	return (ENODEV);
211 }
212 
213 static int
214 mpt_cam_attach(struct mpt_softc *mpt)
215 {
216 	struct cam_devq *devq;
217 	mpt_handler_t	 handler;
218 	int		 maxq;
219 	int		 error;
220 
221 	MPT_LOCK(mpt);
222 	TAILQ_INIT(&mpt->request_timeout_list);
223 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
224 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
225 
226 	handler.reply_handler = mpt_scsi_reply_handler;
227 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
228 				     &scsi_io_handler_id);
229 	if (error != 0) {
230 		MPT_UNLOCK(mpt);
231 		goto cleanup;
232 	}
233 
234 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
235 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
236 				     &scsi_tmf_handler_id);
237 	if (error != 0) {
238 		MPT_UNLOCK(mpt);
239 		goto cleanup;
240 	}
241 
242 	/*
243 	 * If we're fibre channel and could support target mode, we register
244 	 * an ELS reply handler and give it resources.
245 	 */
246 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
247 		handler.reply_handler = mpt_fc_els_reply_handler;
248 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
249 		    &fc_els_handler_id);
250 		if (error != 0) {
251 			MPT_UNLOCK(mpt);
252 			goto cleanup;
253 		}
254 		if (mpt_add_els_buffers(mpt) == FALSE) {
255 			error = ENOMEM;
256 			MPT_UNLOCK(mpt);
257 			goto cleanup;
258 		}
259 		maxq -= mpt->els_cmds_allocated;
260 	}
261 
262 	/*
263 	 * If we support target mode, we register a reply handler for it,
264 	 * but don't add command resources until we actually enable target
265 	 * mode.
266 	 */
267 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
268 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
269 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
270 		    &mpt->scsi_tgt_handler_id);
271 		if (error != 0) {
272 			MPT_UNLOCK(mpt);
273 			goto cleanup;
274 		}
275 	}
276 
277 	if (mpt->is_sas) {
278 		handler.reply_handler = mpt_sata_pass_reply_handler;
279 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
280 		    &sata_pass_handler_id);
281 		if (error != 0) {
282 			MPT_UNLOCK(mpt);
283 			goto cleanup;
284 		}
285 	}
286 
287 	/*
288 	 * We keep one request reserved for timeout TMF requests.
289 	 */
290 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
291 	if (mpt->tmf_req == NULL) {
292 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
293 		error = ENOMEM;
294 		MPT_UNLOCK(mpt);
295 		goto cleanup;
296 	}
297 
298 	/*
299 	 * Mark the request as free even though not on the free list.
300 	 * There is only one TMF request allowed to be outstanding at
301 	 * a time and the TMF routines perform their own allocation
302 	 * tracking using the standard state flags.
303 	 */
304 	mpt->tmf_req->state = REQ_STATE_FREE;
305 	maxq--;
306 
307 	/*
308 	 * The rest of this is CAM foo, for which we need to drop our lock
309 	 */
310 	MPT_UNLOCK(mpt);
311 
312 	if (mpt_spawn_recovery_thread(mpt) != 0) {
313 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
314 		error = ENOMEM;
315 		goto cleanup;
316 	}
317 
318 	/*
319 	 * Create the device queue for our SIM(s).
320 	 */
321 	devq = cam_simq_alloc(maxq);
322 	if (devq == NULL) {
323 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
324 		error = ENOMEM;
325 		goto cleanup;
326 	}
327 
328 	/*
329 	 * Construct our SIM entry.
330 	 */
331 	mpt->sim =
332 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
333 	if (mpt->sim == NULL) {
334 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
335 		cam_devq_release(devq);
336 		error = ENOMEM;
337 		goto cleanup;
338 	}
339 
340 	/*
341 	 * Register exactly this bus.
342 	 */
343 	MPT_LOCK(mpt);
344 	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
345 		mpt_prt(mpt, "Bus registration Failed!\n");
346 		error = ENOMEM;
347 		MPT_UNLOCK(mpt);
348 		goto cleanup;
349 	}
350 
351 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
352 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
353 		mpt_prt(mpt, "Unable to allocate Path!\n");
354 		error = ENOMEM;
355 		MPT_UNLOCK(mpt);
356 		goto cleanup;
357 	}
358 	MPT_UNLOCK(mpt);
359 
360 	/*
361 	 * Only register a second bus for RAID physical
362 	 * devices if the controller supports RAID.
363 	 */
364 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
365 		return (0);
366 	}
367 
368 	/*
369 	 * Create a "bus" to export all hidden disks to CAM.
370 	 */
371 	mpt->phydisk_sim =
372 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
373 	if (mpt->phydisk_sim == NULL) {
374 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
375 		error = ENOMEM;
376 		goto cleanup;
377 	}
378 
379 	/*
380 	 * Register this bus.
381 	 */
382 	MPT_LOCK(mpt);
383 	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
384 	    CAM_SUCCESS) {
385 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
386 		error = ENOMEM;
387 		MPT_UNLOCK(mpt);
388 		goto cleanup;
389 	}
390 
391 	if (xpt_create_path(&mpt->phydisk_path, NULL,
392 	    cam_sim_path(mpt->phydisk_sim),
393 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
394 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
395 		error = ENOMEM;
396 		MPT_UNLOCK(mpt);
397 		goto cleanup;
398 	}
399 	MPT_UNLOCK(mpt);
400 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
401 	return (0);
402 
403 cleanup:
404 	mpt_cam_detach(mpt);
405 	return (error);
406 }
407 
408 /*
409  * Read FC configuration information
410  */
411 static int
412 mpt_read_config_info_fc(struct mpt_softc *mpt)
413 {
414 	char *topology = NULL;
415 	int rv;
416 
417 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
418 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
419 	if (rv) {
420 		return (-1);
421 	}
422 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
423 		 mpt->mpt_fcport_page0.Header.PageVersion,
424 		 mpt->mpt_fcport_page0.Header.PageLength,
425 		 mpt->mpt_fcport_page0.Header.PageNumber,
426 		 mpt->mpt_fcport_page0.Header.PageType);
427 
428 
429 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
430 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
431 	if (rv) {
432 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
433 		return (-1);
434 	}
435 	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
436 
437 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
438 
439 	switch (mpt->mpt_fcport_page0.Flags &
440 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
441 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
442 		mpt->mpt_fcport_speed = 0;
443 		topology = "<NO LOOP>";
444 		break;
445 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
446 		topology = "N-Port";
447 		break;
448 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
449 		topology = "NL-Port";
450 		break;
451 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
452 		topology = "F-Port";
453 		break;
454 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
455 		topology = "FL-Port";
456 		break;
457 	default:
458 		mpt->mpt_fcport_speed = 0;
459 		topology = "?";
460 		break;
461 	}
462 
463 	mpt_lprt(mpt, MPT_PRT_INFO,
464 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
465 	    "Speed %u-Gbit\n", topology,
466 	    mpt->mpt_fcport_page0.WWNN.High,
467 	    mpt->mpt_fcport_page0.WWNN.Low,
468 	    mpt->mpt_fcport_page0.WWPN.High,
469 	    mpt->mpt_fcport_page0.WWPN.Low,
470 	    mpt->mpt_fcport_speed);
471 	MPT_UNLOCK(mpt);
472 	{
473 		ksnprintf(mpt->scinfo.fc.wwnn,
474 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
475 		    mpt->mpt_fcport_page0.WWNN.High,
476 		    mpt->mpt_fcport_page0.WWNN.Low);
477 
478 		ksnprintf(mpt->scinfo.fc.wwpn,
479 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
480 		    mpt->mpt_fcport_page0.WWPN.High,
481 		    mpt->mpt_fcport_page0.WWPN.Low);
482 
483 		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
484 		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
485 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
486 		       "World Wide Node Name");
487 
488 		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
489 		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
490 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
491 		       "World Wide Port Name");
492 
493 	}
494 	MPT_LOCK(mpt);
495 	return (0);
496 }
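
/*
 * The wwnn/wwpn strings registered above can then be read from userland
 * with sysctl(8); an illustrative invocation, assuming the adapter's
 * sysctl tree for unit 0 is rooted at hw.mpt0:
 *
 *	$ sysctl hw.mpt0.wwnn hw.mpt0.wwpn
 */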
497 
498 /*
499  * Set FC configuration information.
500  */
501 static int
502 mpt_set_initial_config_fc(struct mpt_softc *mpt)
503 {
504 	CONFIG_PAGE_FC_PORT_1 fc;
505 	U32 fl;
506 	int r, doit = 0;
507 	int role;
508 
509 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
510 	    &fc.Header, FALSE, 5000);
511 	if (r) {
512 		mpt_prt(mpt, "failed to read FC page 1 header\n");
513 		return (mpt_fc_reset_link(mpt, 1));
514 	}
515 
516 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
517 	    &fc.Header, sizeof (fc), FALSE, 5000);
518 	if (r) {
519 		mpt_prt(mpt, "failed to read FC page 1\n");
520 		return (mpt_fc_reset_link(mpt, 1));
521 	}
522 	mpt2host_config_page_fc_port_1(&fc);
523 
524 	/*
525 	 * Check our flags to make sure we support the role we want.
526 	 */
527 	doit = 0;
528 	role = 0;
529 	fl = fc.Flags;
530 
531 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
532 		role |= MPT_ROLE_INITIATOR;
533 	}
534 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
535 		role |= MPT_ROLE_TARGET;
536 	}
537 
538 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
539 
540 	if (mpt->do_cfg_role == 0) {
541 		role = mpt->cfg_role;
542 	} else {
543 		mpt->do_cfg_role = 0;
544 	}
545 
546 	if (role != mpt->cfg_role) {
547 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
548 			if ((role & MPT_ROLE_INITIATOR) == 0) {
549 				mpt_prt(mpt, "adding initiator role\n");
550 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
551 				doit++;
552 			} else {
553 				mpt_prt(mpt, "keeping initiator role\n");
554 			}
555 		} else if (role & MPT_ROLE_INITIATOR) {
556 			mpt_prt(mpt, "removing initiator role\n");
557 			doit++;
558 		}
559 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
560 			if ((role & MPT_ROLE_TARGET) == 0) {
561 				mpt_prt(mpt, "adding target role\n");
562 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
563 				doit++;
564 			} else {
565 				mpt_prt(mpt, "keeping target role\n");
566 			}
567 		} else if (role & MPT_ROLE_TARGET) {
568 			mpt_prt(mpt, "removing target role\n");
569 			doit++;
570 		}
571 		mpt->role = mpt->cfg_role;
572 	}
573 
574 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
575 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
576 			mpt_prt(mpt, "adding OXID option\n");
577 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
578 			doit++;
579 		}
580 	}
581 
582 	if (doit) {
583 		fc.Flags = fl;
584 		host2mpt_config_page_fc_port_1(&fc);
585 		r = mpt_write_cfg_page(mpt,
586 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
587 		    sizeof(fc), FALSE, 5000);
588 		if (r != 0) {
589 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
590 			return (0);
591 		}
592 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
593 		    "effect until next reboot or IOC reset\n");
594 	}
595 	return (0);
596 }
597 
598 static int
599 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
600 {
601 	ConfigExtendedPageHeader_t hdr;
602 	struct mptsas_phyinfo *phyinfo;
603 	SasIOUnitPage0_t *buffer;
604 	int error, len, i;
605 
606 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
607 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
608 				       &hdr, 0, 10000);
609 	if (error)
610 		goto out;
611 	if (hdr.ExtPageLength == 0) {
612 		error = ENXIO;
613 		goto out;
614 	}
615 
616 	len = hdr.ExtPageLength * 4;
617 	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
618 	if (buffer == NULL) {
619 		error = ENOMEM;
620 		goto out;
621 	}
622 
623 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
624 				     0, &hdr, buffer, len, 0, 10000);
625 	if (error) {
626 		kfree(buffer, M_DEVBUF);
627 		goto out;
628 	}
629 
630 	portinfo->num_phys = buffer->NumPhys;
631 	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
632 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
633 	if (portinfo->phy_info == NULL) {
634 		kfree(buffer, M_DEVBUF);
635 		error = ENOMEM;
636 		goto out;
637 	}
638 
639 	for (i = 0; i < portinfo->num_phys; i++) {
640 		phyinfo = &portinfo->phy_info[i];
641 		phyinfo->phy_num = i;
642 		phyinfo->port_id = buffer->PhyData[i].Port;
643 		phyinfo->negotiated_link_rate =
644 		    buffer->PhyData[i].NegotiatedLinkRate;
645 		phyinfo->handle =
646 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
647 	}
648 
649 	kfree(buffer, M_DEVBUF);
650 out:
651 	return (error);
652 }
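
/*
 * The helper above follows the extended config page protocol that
 * mptsas_sas_phy_pg0() and mptsas_sas_device_pg0() below repeat: read the
 * extended page header to learn the page length (ExtPageLength is in
 * 32-bit words, hence the "* 4"), allocate a buffer that large, read the
 * current page into it, and byte-swap multi-byte fields on the way out.
 */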
653 
654 static int
655 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
656 	uint32_t form, uint32_t form_specific)
657 {
658 	ConfigExtendedPageHeader_t hdr;
659 	SasPhyPage0_t *buffer;
660 	int error;
661 
662 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
663 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
664 				       0, 10000);
665 	if (error)
666 		goto out;
667 	if (hdr.ExtPageLength == 0) {
668 		error = ENXIO;
669 		goto out;
670 	}
671 
672 	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
673 	if (buffer == NULL) {
674 		error = ENOMEM;
675 		goto out;
676 	}
677 
678 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
679 				     form + form_specific, &hdr, buffer,
680 				     sizeof(SasPhyPage0_t), 0, 10000);
681 	if (error) {
682 		kfree(buffer, M_DEVBUF);
683 		goto out;
684 	}
685 
686 	phy_info->hw_link_rate = buffer->HwLinkRate;
687 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
688 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
689 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
690 
691 	kfree(buffer, M_DEVBUF);
692 out:
693 	return (error);
694 }
695 
696 static int
697 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
698 	uint32_t form, uint32_t form_specific)
699 {
700 	ConfigExtendedPageHeader_t hdr;
701 	SasDevicePage0_t *buffer;
702 	uint64_t sas_address;
703 	int error = 0;
704 
705 	bzero(device_info, sizeof(*device_info));
706 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
707 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
708 				       &hdr, 0, 10000);
709 	if (error)
710 		goto out;
711 	if (hdr.ExtPageLength == 0) {
712 		error = ENXIO;
713 		goto out;
714 	}
715 
716 	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
717 	if (buffer == NULL) {
718 		error = ENOMEM;
719 		goto out;
720 	}
721 
722 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
723 				     form + form_specific, &hdr, buffer,
724 				     sizeof(SasDevicePage0_t), 0, 10000);
725 	if (error) {
726 		kfree(buffer, M_DEVBUF);
727 		goto out;
728 	}
729 
730 	device_info->dev_handle = le16toh(buffer->DevHandle);
731 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
732 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
733 	device_info->slot = le16toh(buffer->Slot);
734 	device_info->phy_num = buffer->PhyNum;
735 	device_info->physical_port = buffer->PhysicalPort;
736 	device_info->target_id = buffer->TargetID;
737 	device_info->bus = buffer->Bus;
738 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
739 	device_info->sas_address = le64toh(sas_address);
740 	device_info->device_info = le32toh(buffer->DeviceInfo);
741 
742 	kfree(buffer, M_DEVBUF);
743 out:
744 	return (error);
745 }
746 
747 /*
748  * Read SAS configuration information (IO unit, phy and device pages).
749  */
750 static int
751 mpt_read_config_info_sas(struct mpt_softc *mpt)
752 {
753 	struct mptsas_portinfo *portinfo;
754 	struct mptsas_phyinfo *phyinfo;
755 	int error, i;
756 
757 	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
758 	if (portinfo == NULL)
759 		return (ENOMEM);
760 
761 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
762 	if (error) {
763 		kfree(portinfo, M_DEVBUF);
764 		return (0);
765 	}
766 
767 	for (i = 0; i < portinfo->num_phys; i++) {
768 		phyinfo = &portinfo->phy_info[i];
769 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
770 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
771 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
772 		if (error)
773 			break;
774 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
775 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
776 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
777 		    phyinfo->handle);
778 		if (error)
779 			break;
780 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
781 		if (phyinfo->attached.dev_handle)
782 			error = mptsas_sas_device_pg0(mpt,
783 			    &phyinfo->attached,
784 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
785 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
786 			    phyinfo->attached.dev_handle);
787 		if (error)
788 			break;
789 	}
790 	mpt->sas_portinfo = portinfo;
791 	return (0);
792 }
793 
794 static void
795 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
796 	int enabled)
797 {
798 	SataPassthroughRequest_t	*pass;
799 	request_t *req;
800 	int error, status;
801 
802 	req = mpt_get_request(mpt, 0);
803 	if (req == NULL)
804 		return;
805 
806 	pass = req->req_vbuf;
807 	bzero(pass, sizeof(SataPassthroughRequest_t));
808 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
809 	pass->TargetID = devinfo->target_id;
810 	pass->Bus = devinfo->bus;
811 	pass->PassthroughFlags = 0;
812 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
813 	pass->DataLength = 0;
814 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
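
	/*
	 * The CommandFIS filled in below is a SATA Register - Host to
	 * Device FIS carrying an ATA SET FEATURES command:
	 *
	 *	[0]  0x27       FIS type: Register - Host to Device
	 *	[1]  0x80       C bit: this FIS updates the command register
	 *	[2]  0xef       ATA SET FEATURES opcode
	 *	[3]  0x02/0x82  subcommand: enable/disable the write cache
	 *	[7]  0x40       device register
	 *	[15] 0x08       device control register
	 */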
815 	pass->CommandFIS[0] = 0x27;
816 	pass->CommandFIS[1] = 0x80;
817 	pass->CommandFIS[2] = 0xef;
818 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
819 	pass->CommandFIS[7] = 0x40;
820 	pass->CommandFIS[15] = 0x08;
821 
822 	mpt_check_doorbell(mpt);
823 	mpt_send_cmd(mpt, req);
824 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
825 			     10 * 1000);
826 	if (error) {
827 		mpt_free_request(mpt, req);
828 		kprintf("error %d sending passthrough\n", error);
829 		return;
830 	}
831 
832 	status = le16toh(req->IOCStatus);
833 	if (status != MPI_IOCSTATUS_SUCCESS) {
834 		mpt_free_request(mpt, req);
835 		kprintf("IOCSTATUS %d\n", status);
836 		return;
837 	}
838 
839 	mpt_free_request(mpt, req);
840 }
841 
842 /*
843  * Set SAS configuration information; currently this just applies the
 * hw.mpt.enable_sata_wc tunable to attached SATA devices.
844  */
845 static int
846 mpt_set_initial_config_sas(struct mpt_softc *mpt)
847 {
848 	struct mptsas_phyinfo *phyinfo;
849 	int i;
850 
851 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
852 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
853 			phyinfo = &mpt->sas_portinfo->phy_info[i];
854 			if (phyinfo->attached.dev_handle == 0)
855 				continue;
856 			if ((phyinfo->attached.device_info &
857 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
858 				continue;
859 			if (bootverbose)
860 				device_printf(mpt->dev,
861 				    "%sabling SATA WC on phy %d\n",
862 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
863 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
864 					   mpt_enable_sata_wc);
865 		}
866 	}
867 
868 	return (0);
869 }
870 
871 static int
872 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
873  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
874 {
875 
876 	if (req != NULL) {
877 		if (reply_frame != NULL) {
878 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
879 		}
880 		req->state &= ~REQ_STATE_QUEUED;
881 		req->state |= REQ_STATE_DONE;
882 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
883 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
884 			wakeup(req);
885 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
886 			/*
887 			 * Whew- we can free this request (late completion)
888 			 */
889 			mpt_free_request(mpt, req);
890 		}
891 	}
892 
893 	return (TRUE);
894 }
895 
896 /*
897  * Read SPI (parallel SCSI) configuration information
898  */
899 static int
900 mpt_read_config_info_spi(struct mpt_softc *mpt)
901 {
902 	int rv, i;
903 
904 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
905 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
906 	if (rv) {
907 		return (-1);
908 	}
909 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
910 	    mpt->mpt_port_page0.Header.PageVersion,
911 	    mpt->mpt_port_page0.Header.PageLength,
912 	    mpt->mpt_port_page0.Header.PageNumber,
913 	    mpt->mpt_port_page0.Header.PageType);
914 
915 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
916 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
917 	if (rv) {
918 		return (-1);
919 	}
920 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
921 	    mpt->mpt_port_page1.Header.PageVersion,
922 	    mpt->mpt_port_page1.Header.PageLength,
923 	    mpt->mpt_port_page1.Header.PageNumber,
924 	    mpt->mpt_port_page1.Header.PageType);
925 
926 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
927 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
928 	if (rv) {
929 		return (-1);
930 	}
931 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
932 	    mpt->mpt_port_page2.Header.PageVersion,
933 	    mpt->mpt_port_page2.Header.PageLength,
934 	    mpt->mpt_port_page2.Header.PageNumber,
935 	    mpt->mpt_port_page2.Header.PageType);
936 
937 	for (i = 0; i < 16; i++) {
938 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
939 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
940 		if (rv) {
941 			return (-1);
942 		}
943 		mpt_lprt(mpt, MPT_PRT_DEBUG,
944 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
945 		    mpt->mpt_dev_page0[i].Header.PageVersion,
946 		    mpt->mpt_dev_page0[i].Header.PageLength,
947 		    mpt->mpt_dev_page0[i].Header.PageNumber,
948 		    mpt->mpt_dev_page0[i].Header.PageType);
949 
950 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
951 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
952 		if (rv) {
953 			return (-1);
954 		}
955 		mpt_lprt(mpt, MPT_PRT_DEBUG,
956 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
957 		    mpt->mpt_dev_page1[i].Header.PageVersion,
958 		    mpt->mpt_dev_page1[i].Header.PageLength,
959 		    mpt->mpt_dev_page1[i].Header.PageNumber,
960 		    mpt->mpt_dev_page1[i].Header.PageType);
961 	}
962 
963 	/*
964 	 * At this point, we don't *have* to fail. As long as we have
965 	 * valid config header information, we can (barely) lurch
966 	 * along.
967 	 */
968 
969 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
970 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
971 	if (rv) {
972 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
973 	} else {
974 		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
975 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
976 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
977 		    mpt->mpt_port_page0.Capabilities,
978 		    mpt->mpt_port_page0.PhysicalInterface);
979 	}
980 
981 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
982 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
983 	if (rv) {
984 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
985 	} else {
986 		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
987 		mpt_lprt(mpt, MPT_PRT_DEBUG,
988 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
989 		    mpt->mpt_port_page1.Configuration,
990 		    mpt->mpt_port_page1.OnBusTimerValue);
991 	}
992 
993 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
994 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
995 	if (rv) {
996 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
997 	} else {
998 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
999 		    "Port Page 2: Flags %x Settings %x\n",
1000 		    mpt->mpt_port_page2.PortFlags,
1001 		    mpt->mpt_port_page2.PortSettings);
1002 		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1003 		for (i = 0; i < 16; i++) {
1004 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1005 			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1006 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1007 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1008 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1009 		}
1010 	}
1011 
1012 	for (i = 0; i < 16; i++) {
1013 		rv = mpt_read_cur_cfg_page(mpt, i,
1014 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1015 		    FALSE, 5000);
1016 		if (rv) {
1017 			mpt_prt(mpt,
1018 			    "cannot read SPI Target %d Device Page 0\n", i);
1019 			continue;
1020 		}
1021 		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1022 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1023 		    "target %d page 0: Negotiated Params %x Information %x\n",
1024 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1025 		    mpt->mpt_dev_page0[i].Information);
1026 
1027 		rv = mpt_read_cur_cfg_page(mpt, i,
1028 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1029 		    FALSE, 5000);
1030 		if (rv) {
1031 			mpt_prt(mpt,
1032 			    "cannot read SPI Target %d Device Page 1\n", i);
1033 			continue;
1034 		}
1035 		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1036 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1037 		    "target %d page 1: Requested Params %x Configuration %x\n",
1038 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1039 		    mpt->mpt_dev_page1[i].Configuration);
1040 	}
1041 	return (0);
1042 }
1043 
1044 /*
1045  * Validate SPI configuration information.
1046  *
1047  * In particular, validate SPI Port Page 1.
1048  */
1049 static int
1050 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1051 {
1052 	int error, i, pp1val;
1053 
1054 	mpt->mpt_disc_enable = 0xff;
1055 	mpt->mpt_tag_enable = 0;
1056 
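	/*
	 * SPI Port Page 1 Configuration packs the port's SCSI ID into the
	 * low half and a one-bit-per-ID response mask into the high half.
	 * As a worked example, assuming the customary MPI shift of 16, an
	 * initiator ID of 7 yields pp1val == (0x0080 << 16) | 7 == 0x00800007.
	 */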
1057 	pp1val = ((1 << mpt->mpt_ini_id) <<
1058 	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1059 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1060 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1061 
1062 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x); should "
1063 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1064 		tmp = mpt->mpt_port_page1;
1065 		tmp.Configuration = pp1val;
1066 		host2mpt_config_page_scsi_port_1(&tmp);
1067 		error = mpt_write_cur_cfg_page(mpt, 0,
1068 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1069 		if (error) {
1070 			return (-1);
1071 		}
1072 		error = mpt_read_cur_cfg_page(mpt, 0,
1073 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1074 		if (error) {
1075 			return (-1);
1076 		}
1077 		mpt2host_config_page_scsi_port_1(&tmp);
1078 		if (tmp.Configuration != pp1val) {
1079 			mpt_prt(mpt,
1080 			    "failed to reset SPI Port Page 1 Config value\n");
1081 			return (-1);
1082 		}
1083 		mpt->mpt_port_page1 = tmp;
1084 	}
1085 
1086 	/*
1087 	 * The purpose of this exercise is to get
1088 	 * all targets back to async/narrow.
1089 	 *
1090 	 * We skip this step if the BIOS has already negotiated
1091 	 * speeds with the targets.
1092 	 */
1093 	i = mpt->mpt_port_page2.PortSettings &
1094 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1095 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1096 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1097 		    "honoring BIOS transfer negotiations\n");
1098 	} else {
1099 		for (i = 0; i < 16; i++) {
1100 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1101 			mpt->mpt_dev_page1[i].Configuration = 0;
1102 			(void) mpt_update_spi_config(mpt, i);
1103 		}
1104 	}
1105 	return (0);
1106 }
1107 
1108 static int
1109 mpt_cam_enable(struct mpt_softc *mpt)
1110 {
1111 	int error;
1112 
1113 	MPT_LOCK(mpt);
1114 
1115 	error = EIO;
1116 	if (mpt->is_fc) {
1117 		if (mpt_read_config_info_fc(mpt)) {
1118 			goto out;
1119 		}
1120 		if (mpt_set_initial_config_fc(mpt)) {
1121 			goto out;
1122 		}
1123 	} else if (mpt->is_sas) {
1124 		if (mpt_read_config_info_sas(mpt)) {
1125 			goto out;
1126 		}
1127 		if (mpt_set_initial_config_sas(mpt)) {
1128 			goto out;
1129 		}
1130 	} else if (mpt->is_spi) {
1131 		if (mpt_read_config_info_spi(mpt)) {
1132 			goto out;
1133 		}
1134 		if (mpt_set_initial_config_spi(mpt)) {
1135 			goto out;
1136 		}
1137 	}
1138 	error = 0;
1139 
1140 out:
1141 	MPT_UNLOCK(mpt);
1142 	return (error);
1143 }
1144 
1145 static void
1146 mpt_cam_ready(struct mpt_softc *mpt)
1147 {
1148 
1149 	/*
1150 	 * If we're in target mode, hang out resources now
1151 	 * so we don't cause the world to hang talking to us.
1152 	 */
1153 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1154 		/*
1155 		 * Try to add some target command resources
1156 		 */
1157 		MPT_LOCK(mpt);
1158 		if (mpt_add_target_commands(mpt) == FALSE) {
1159 			mpt_prt(mpt, "failed to add target commands\n");
1160 		}
1161 		MPT_UNLOCK(mpt);
1162 	}
1163 	mpt->ready = 1;
1164 }
1165 
1166 static void
1167 mpt_cam_detach(struct mpt_softc *mpt)
1168 {
1169 	mpt_handler_t handler;
1170 
1171 	MPT_LOCK(mpt);
1172 	mpt->ready = 0;
1173 	mpt_terminate_recovery_thread(mpt);
1174 
1175 	handler.reply_handler = mpt_scsi_reply_handler;
1176 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1177 			       scsi_io_handler_id);
1178 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1179 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1180 			       scsi_tmf_handler_id);
1181 	handler.reply_handler = mpt_fc_els_reply_handler;
1182 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1183 			       fc_els_handler_id);
1184 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1185 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1186 			       mpt->scsi_tgt_handler_id);
1187 	handler.reply_handler = mpt_sata_pass_reply_handler;
1188 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1189 			       sata_pass_handler_id);
1190 
1191 	if (mpt->tmf_req != NULL) {
1192 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1193 		mpt_free_request(mpt, mpt->tmf_req);
1194 		mpt->tmf_req = NULL;
1195 	}
1196 	if (mpt->sas_portinfo != NULL) {
1197 		kfree(mpt->sas_portinfo, M_DEVBUF);
1198 		mpt->sas_portinfo = NULL;
1199 	}
1200 
1201 	if (mpt->sim != NULL) {
1202 		xpt_free_path(mpt->path);
1203 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1204 		cam_sim_free(mpt->sim);
1205 		mpt->sim = NULL;
1206 	}
1207 
1208 	if (mpt->phydisk_sim != NULL) {
1209 		xpt_free_path(mpt->phydisk_path);
1210 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1211 		cam_sim_free(mpt->phydisk_sim);
1212 		mpt->phydisk_sim = NULL;
1213 	}
1214 	MPT_UNLOCK(mpt);
1215 }
1216 
1217 /*
 * This routine is used after a system crash to dump core onto the swap
 * device.
1218  */
1219 static void
1220 mpt_poll(struct cam_sim *sim)
1221 {
1222 	struct mpt_softc *mpt;
1223 
1224 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1225 	mpt_intr(mpt);
1226 }
1227 
1228 /*
1229  * Watchdog timeout routine for SCSI requests.
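 *
 * The timed-out request is only flagged and moved to the timeout list here;
 * the actual abort/TMF handling happens in the recovery thread (see
 * mpt_recover_commands()).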
1230  */
1231 static void
1232 mpt_timeout(void *arg)
1233 {
1234 	union ccb	 *ccb;
1235 	struct mpt_softc *mpt;
1236 	request_t	 *req;
1237 
1238 	ccb = (union ccb *)arg;
1239 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1240 
1241 	MPT_LOCK(mpt);
1242 	req = ccb->ccb_h.ccb_req_ptr;
1243 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1244 	    req->serno, ccb, req->ccb);
1245 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1246 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1247 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1248 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1249 		req->state |= REQ_STATE_TIMEDOUT;
1250 		mpt_wakeup_recovery_thread(mpt);
1251 	}
1252 	MPT_UNLOCK(mpt);
1253 }
1254 
1255 /*
1256  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1257  *
1258  * Takes a list of physical segments and builds the SGL for the SCSI I/O
1259  * command, then forwards the command to the IOC after one last check that
1260  * CAM has not aborted the transaction.
1261  */
1262 static void
1263 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1264 {
1265 	request_t *req, *trq;
1266 	char *mpt_off;
1267 	union ccb *ccb;
1268 	struct mpt_softc *mpt;
1269 	bus_addr_t chain_list_addr;
1270 	int first_lim, seg, this_seg_lim;
1271 	uint32_t addr, cur_off, flags, nxt_off, tf;
1272 	void *sglp = NULL;
1273 	MSG_REQUEST_HEADER *hdrp;
1274 	SGE_SIMPLE64 *se;
1275 	SGE_CHAIN64 *ce;
1276 	int istgt = 0;
1277 
1278 	req = (request_t *)arg;
1279 	ccb = req->ccb;
1280 
1281 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1282 	req = ccb->ccb_h.ccb_req_ptr;
1283 
1284 	hdrp = req->req_vbuf;
1285 	mpt_off = req->req_vbuf;
1286 
1287 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1288 		error = EFBIG;
1289 	}
1290 
1291 	if (error == 0) {
1292 		switch (hdrp->Function) {
1293 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1294 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1295 			istgt = 0;
1296 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1297 			break;
1298 		case MPI_FUNCTION_TARGET_ASSIST:
1299 			istgt = 1;
1300 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1301 			break;
1302 		default:
1303 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1304 			    hdrp->Function);
1305 			error = EINVAL;
1306 			break;
1307 		}
1308 	}
1309 
1310 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1311 		error = EFBIG;
1312 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1313 		    nseg, mpt->max_seg_cnt);
1314 	}
1315 
1316 bad:
1317 	if (error != 0) {
1318 		if (error != EFBIG && error != ENOMEM) {
1319 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1320 		}
1321 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1322 			cam_status status;
1323 			mpt_freeze_ccb(ccb);
1324 			if (error == EFBIG) {
1325 				status = CAM_REQ_TOO_BIG;
1326 			} else if (error == ENOMEM) {
1327 				if (mpt->outofbeer == 0) {
1328 					mpt->outofbeer = 1;
1329 					xpt_freeze_simq(mpt->sim, 1);
1330 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1331 					    "FREEZEQ\n");
1332 				}
1333 				status = CAM_REQUEUE_REQ;
1334 			} else {
1335 				status = CAM_REQ_CMP_ERR;
1336 			}
1337 			mpt_set_ccb_status(ccb, status);
1338 		}
1339 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1340 			request_t *cmd_req =
1341 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1342 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1343 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1344 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1345 		}
1346 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1347 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1348 		xpt_done(ccb);
1349 		mpt_free_request(mpt, req);
1350 		return;
1351 	}
1352 
1353 	/*
1354 	 * No data to transfer?
1355 	 * Just make a single simple SGL with zero length.
1356 	 */
1357 
1358 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1359 		int tidx = ((char *)sglp) - mpt_off;
1360 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1361 	}
1362 
1363 	if (nseg == 0) {
1364 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1365 		MPI_pSGE_SET_FLAGS(se1,
1366 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1367 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1368 		se1->FlagsLength = htole32(se1->FlagsLength);
1369 		goto out;
1370 	}
1371 
1372 
1373 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1374 	if (istgt == 0) {
1375 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1376 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1377 		}
1378 	} else {
1379 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1380 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1381 		}
1382 	}
1383 
1384 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1385 		bus_dmasync_op_t op;
1386 		if (istgt == 0) {
1387 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1388 				op = BUS_DMASYNC_PREREAD;
1389 			} else {
1390 				op = BUS_DMASYNC_PREWRITE;
1391 			}
1392 		} else {
1393 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1394 				op = BUS_DMASYNC_PREWRITE;
1395 			} else {
1396 				op = BUS_DMASYNC_PREREAD;
1397 			}
1398 		}
1399 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1400 	}
1401 
1402 	/*
1403 	 * Okay, fill in what we can at the end of the command frame.
1404 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1405 	 * the command frame.
1406 	 *
1407 	 * Otherwise, we fill in the first MPT_NSGL_FIRST - 1 slots with
1408 	 * SIMPLE64 elements and start doing CHAIN64 entries after
1409 	 * that.
1410 	 */
1411 
1412 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1413 		first_lim = nseg;
1414 	} else {
1415 		/*
1416 		 * Leave room for CHAIN element
1417 		 */
1418 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1419 	}
1420 
1421 	se = (SGE_SIMPLE64 *) sglp;
1422 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1423 		tf = flags;
1424 		memset(se, 0, sizeof (*se));
1425 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1426 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1427 		if (sizeof(bus_addr_t) > 4) {
1428 			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1429 			/* SAS1078 36-40GB DMA window limitation WAR */
1430 			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1431 			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1432 				addr |= (1 << 31);
1433 				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1434 			}
1435 			se->Address.High = htole32(addr);
1436 		}
1437 		if (seg == first_lim - 1) {
1438 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1439 		}
1440 		if (seg == nseg - 1) {
1441 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1442 				MPI_SGE_FLAGS_END_OF_BUFFER;
1443 		}
1444 		MPI_pSGE_SET_FLAGS(se, tf);
1445 		se->FlagsLength = htole32(se->FlagsLength);
1446 	}
1447 
1448 	if (seg == nseg) {
1449 		goto out;
1450 	}
1451 
1452 	/*
1453 	 * Tell the IOC where to find the first chain element; the offset
	 * is counted in 32-bit words from the start of the request.
1454 	 */
1455 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1456 	nxt_off = MPT_RQSL(mpt);
1457 	trq = req;
1458 
1459 	/*
1460 	 * Make up the rest of the data segments out of a chain element
1461 	 * (contained in the current request frame) which points to
1462 	 * SIMPLE64 elements in the next request frame, possibly ending
1463 	 * with *another* chain element (if there's more).
1464 	 */
1465 	while (seg < nseg) {
1466 		/*
1467 		 * Point to the chain descriptor. Note that the chain
1468 		 * descriptor is at the end of the *previous* list (whether
1469 		 * chain or simple).
1470 		 */
1471 		ce = (SGE_CHAIN64 *) se;
1472 
1473 		/*
1474 		 * Before we change our current pointer, make sure we won't
1475 		 * overflow the request area with this frame. Note that we
1476 		 * test against 'greater than' here as it's okay in this case
1477 		 * to have next offset be just outside the request area.
1478 		 */
1479 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1480 			nxt_off = MPT_REQUEST_AREA;
1481 			goto next_chain;
1482 		}
1483 
1484 		/*
1485 		 * Set our SGE element pointer to the beginning of the chain
1486 		 * list and update our next chain list offset.
1487 		 */
1488 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1489 		cur_off = nxt_off;
1490 		nxt_off += MPT_RQSL(mpt);
1491 
1492 		/*
1493 		 * Now initialize the chain descriptor.
1494 		 */
1495 		memset(ce, 0, sizeof (*ce));
1496 
1497 		/*
1498 		 * Get the physical address of the chain list.
1499 		 */
1500 		chain_list_addr = trq->req_pbuf;
1501 		chain_list_addr += cur_off;
1502 		if (sizeof (bus_addr_t) > 4) {
1503 			ce->Address.High =
1504 			    htole32(((uint64_t)chain_list_addr) >> 32);
1505 		}
1506 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1507 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1508 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1509 
1510 		/*
1511 		 * If we have more than a frame's worth of segments left,
1512 		 * set up the chain list to have the last element be another
1513 		 * chain descriptor.
1514 		 */
1515 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1516 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1517 			/*
1518 			 * The length of the chain is the byte length of the
1519 			 * segments plus the trailing chain element.
1520 			 *
1521 			 * The next chain descriptor offset is the byte length
1522 			 * of the segments, expressed in 32-bit words.
1523 			 */
1524 			ce->Length = (this_seg_lim - seg) *
1525 			    sizeof (SGE_SIMPLE64);
1526 			ce->NextChainOffset = ce->Length >> 2;
1527 			ce->Length += sizeof (SGE_CHAIN64);
1528 		} else {
1529 			this_seg_lim = nseg;
1530 			ce->Length = (this_seg_lim - seg) *
1531 			    sizeof (SGE_SIMPLE64);
1532 		}
1533 		ce->Length = htole16(ce->Length);
1534 
1535 		/*
1536 		 * Fill in the chain list SGE elements with our segment data.
1537 		 *
1538 		 * If we're the last element in this chain list, set the last
1539 		 * element flag. If we're the completely last element period,
1540 		 * set the end of list and end of buffer flags.
1541 		 */
1542 		while (seg < this_seg_lim) {
1543 			tf = flags;
1544 			memset(se, 0, sizeof (*se));
1545 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1546 			se->Address.Low = htole32(dm_segs->ds_addr &
1547 			    0xffffffff);
1548 			if (sizeof (bus_addr_t) > 4) {
1549 				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1550 				/* SAS1078 36-40GB DMA window limitation WAR */
1551 				if (mpt->is_1078 &&
1552 				    (((uint64_t)dm_segs->ds_addr +
1553 				    MPI_SGE_LENGTH(se->FlagsLength)) >>
1554 				    32) == 9) {
1555 					addr |= (1 << 31);
1556 					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1557 				}
1558 				se->Address.High = htole32(addr);
1559 			}
1560 			if (seg == this_seg_lim - 1) {
1561 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1562 			}
1563 			if (seg == nseg - 1) {
1564 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1565 					MPI_SGE_FLAGS_END_OF_BUFFER;
1566 			}
1567 			MPI_pSGE_SET_FLAGS(se, tf);
1568 			se->FlagsLength = htole32(se->FlagsLength);
1569 			se++;
1570 			seg++;
1571 			dm_segs++;
1572 		}
1573 
1574     next_chain:
1575 		/*
1576 		 * If we have more segments to do and we've used up all of
1577 		 * the space in a request area, go allocate another one
1578 		 * and chain to that.
1579 		 */
1580 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1581 			request_t *nrq;
1582 
1583 			nrq = mpt_get_request(mpt, FALSE);
1584 
1585 			if (nrq == NULL) {
1586 				error = ENOMEM;
1587 				goto bad;
1588 			}
1589 
1590 			/*
1591 			 * Append the new request area on the tail of our list.
1592 			 */
1593 			if ((trq = req->chain) == NULL) {
1594 				req->chain = nrq;
1595 			} else {
1596 				while (trq->chain != NULL) {
1597 					trq = trq->chain;
1598 				}
1599 				trq->chain = nrq;
1600 			}
1601 			trq = nrq;
1602 			mpt_off = trq->req_vbuf;
1603 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1604 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1605 			}
1606 			nxt_off = 0;
1607 		}
1608 	}
1609 out:
1610 
1611 	/*
1612 	 * Last time we need to check if this CCB needs to be aborted.
1613 	 */
1614 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1615 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1616 			request_t *cmd_req =
1617 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1618 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1619 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1620 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1621 		}
1622 		mpt_prt(mpt,
1623 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1624 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1625 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1626 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1627 		}
1628 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1629 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1630 		xpt_done(ccb);
1631 		mpt_free_request(mpt, req);
1632 		return;
1633 	}
1634 
1635 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1636 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1637 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1638 		    mpt_timeout, ccb);
1639 	}
1640 	if (mpt->verbose > MPT_PRT_DEBUG) {
1641 		int nc = 0;
1642 		mpt_print_request(req->req_vbuf);
1643 		for (trq = req->chain; trq; trq = trq->chain) {
1644 			kprintf("  Additional Chain Area %d\n", nc++);
1645 			mpt_dump_sgl(trq->req_vbuf, 0);
1646 		}
1647 	}
1648 
1649 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1650 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1651 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1652 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1653 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1654 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1655 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1656 		} else {
1657 			tgt->state = TGT_STATE_MOVING_DATA;
1658 		}
1659 #else
1660 		tgt->state = TGT_STATE_MOVING_DATA;
1661 #endif
1662 	}
1663 	mpt_send_cmd(mpt, req);
1664 }
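
/*
 * Illustrative sketch (not the verbatim call site, which lives in
 * mpt_start() later in this file) of how the DMA callbacks above and below
 * are reached for a CCB with data to map:
 *
 *	error = bus_dmamap_load(mpt->buffer_dmat, req->dmap,
 *	    csio->data_ptr, csio->dxfer_len, cb, req, 0);
 *
 * where cb is mpt_execute_req_a64 for adapters using 64-bit SGEs and
 * mpt_execute_req otherwise; zero-length transfers invoke the callback
 * directly with nseg == 0.
 */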
1665 
1666 static void
1667 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1668 {
1669 	request_t *req, *trq;
1670 	char *mpt_off;
1671 	union ccb *ccb;
1672 	struct mpt_softc *mpt;
1673 	int seg, first_lim;
1674 	uint32_t flags, nxt_off;
1675 	void *sglp = NULL;
1676 	MSG_REQUEST_HEADER *hdrp;
1677 	SGE_SIMPLE32 *se;
1678 	SGE_CHAIN32 *ce;
1679 	int istgt = 0;
1680 
1681 	req = (request_t *)arg;
1682 	ccb = req->ccb;
1683 
1684 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1685 	req = ccb->ccb_h.ccb_req_ptr;
1686 
1687 	hdrp = req->req_vbuf;
1688 	mpt_off = req->req_vbuf;
1689 
1690 
1691 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1692 		error = EFBIG;
1693 	}
1694 
1695 	if (error == 0) {
1696 		switch (hdrp->Function) {
1697 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1698 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1699 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1700 			break;
1701 		case MPI_FUNCTION_TARGET_ASSIST:
1702 			istgt = 1;
1703 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1704 			break;
1705 		default:
1706 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1707 			    hdrp->Function);
1708 			error = EINVAL;
1709 			break;
1710 		}
1711 	}
1712 
1713 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1714 		error = EFBIG;
1715 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1716 		    nseg, mpt->max_seg_cnt);
1717 	}
1718 
1719 bad:
1720 	if (error != 0) {
1721 		if (error != EFBIG && error != ENOMEM) {
1722 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1723 		}
1724 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1725 			cam_status status;
1726 			mpt_freeze_ccb(ccb);
1727 			if (error == EFBIG) {
1728 				status = CAM_REQ_TOO_BIG;
1729 			} else if (error == ENOMEM) {
1730 				if (mpt->outofbeer == 0) {
1731 					mpt->outofbeer = 1;
1732 					xpt_freeze_simq(mpt->sim, 1);
1733 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1734 					    "FREEZEQ\n");
1735 				}
1736 				status = CAM_REQUEUE_REQ;
1737 			} else {
1738 				status = CAM_REQ_CMP_ERR;
1739 			}
1740 			mpt_set_ccb_status(ccb, status);
1741 		}
1742 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1743 			request_t *cmd_req =
1744 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1745 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1746 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1747 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1748 		}
1749 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1750 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1751 		xpt_done(ccb);
1752 		mpt_free_request(mpt, req);
1753 		return;
1754 	}
1755 
1756 	/*
1757 	 * No data to transfer?
1758 	 * Just make a single simple SGL with zero length.
1759 	 */
1760 
1761 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1762 		int tidx = ((char *)sglp) - mpt_off;
1763 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1764 	}
1765 
1766 	if (nseg == 0) {
1767 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1768 		MPI_pSGE_SET_FLAGS(se1,
1769 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1770 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1771 		se1->FlagsLength = htole32(se1->FlagsLength);
1772 		goto out;
1773 	}
1774 
1775 
1776 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1777 	if (istgt == 0) {
1778 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1779 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1780 		}
1781 	} else {
1782 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1783 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1784 		}
1785 	}
1786 
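	/*
	 * Pre-sync DMA maps for buffers we mapped ourselves.  Note that
	 * the sense of the sync inverts in target mode: there CAM_DIR_IN
	 * means we supply the data, so host memory is read by the DMA
	 * engine (PREWRITE) rather than written (PREREAD).
	 */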
1787 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1788 		bus_dmasync_op_t op;
1789 		if (istgt) {
1790 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1791 				op = BUS_DMASYNC_PREWRITE;
1792 			} else {
1793 				op = BUS_DMASYNC_PREREAD;
1794 			}
1795 		} else {
1796 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1797 				op = BUS_DMASYNC_PREREAD;
1798 			} else {
1799 				op = BUS_DMASYNC_PREWRITE;
1800 			}
1801 		}
1802 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1803 	}
1804 
1805 	/*
1806 	 * Okay, fill in what we can at the end of the command frame.
1807 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1808 	 * the command frame.
1809 	 *
1810 	 * Otherwise, we fill in MPT_NSGL_FIRST less one SIMPLE32
1811 	 * elements and start doing CHAIN32 entries after that.
1813 	 */
1814 
1815 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1816 		first_lim = nseg;
1817 	} else {
1818 		/*
1819 		 * Leave room for CHAIN element
1820 		 */
1821 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1822 	}
1823 
1824 	se = (SGE_SIMPLE32 *) sglp;
1825 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1826 		uint32_t tf;
1827 
1828 		memset(se, 0, sizeof (*se));
1829 		se->Address = htole32(dm_segs->ds_addr);
1830 
1831 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1832 		tf = flags;
1833 		if (seg == first_lim - 1) {
1834 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1835 		}
1836 		if (seg == nseg - 1) {
1837 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1838 				MPI_SGE_FLAGS_END_OF_BUFFER;
1839 		}
1840 		MPI_pSGE_SET_FLAGS(se, tf);
1841 		se->FlagsLength = htole32(se->FlagsLength);
1842 	}
1843 
1844 	if (seg == nseg) {
1845 		goto out;
1846 	}
1847 
1848 	/*
1849 	 * Tell the IOC where to find the first chain element.
1850 	 */
1851 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1852 	nxt_off = MPT_RQSL(mpt);
1853 	trq = req;
1854 
1855 	/*
1856 	 * Make up the rest of the data segments out of a chain element
1857 	 * (contained in the current request frame) which points to
1858 	 * SIMPLE32 elements in the next request frame, possibly ending
1859 	 * with *another* chain element (if there's more).
1860 	 */
1861 	while (seg < nseg) {
1862 		int this_seg_lim;
1863 		uint32_t tf, cur_off;
1864 		bus_addr_t chain_list_addr;
1865 
1866 		/*
1867 		 * Point to the chain descriptor. Note that the chain
1868 		 * descriptor is at the end of the *previous* list (whether
1869 		 * chain or simple).
1870 		 */
1871 		ce = (SGE_CHAIN32 *) se;
1872 
1873 		/*
1874 		 * Before we change our current pointer, make sure we won't
1875 		 * overflow the request area with this frame. Note that we
1876 		 * test against 'greater than' here as it's okay in this case
1877 		 * to have next offset be just outside the request area.
1878 		 */
1879 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1880 			nxt_off = MPT_REQUEST_AREA;
1881 			goto next_chain;
1882 		}
1883 
1884 		/*
1885 		 * Set our SGE element pointer to the beginning of the chain
1886 		 * list and update our next chain list offset.
1887 		 */
1888 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1889 		cur_off = nxt_off;
1890 		nxt_off += MPT_RQSL(mpt);
1891 
1892 		/*
1893 		 * Now initialize the chain descriptor.
1894 		 */
1895 		memset(ce, 0, sizeof (*ce));
1896 
1897 		/*
1898 		 * Get the physical address of the chain list.
1899 		 */
1900 		chain_list_addr = trq->req_pbuf;
1901 		chain_list_addr += cur_off;
1902 
1905 		ce->Address = htole32(chain_list_addr);
1906 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1907 
1909 		/*
1910 		 * If we have more than a frame's worth of segments left,
1911 		 * set up the chain list to have the last element be another
1912 		 * chain descriptor.
1913 		 */
1914 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1915 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1916 			/*
1917 			 * The chain's Length is the size in bytes of the
1918 			 * simple elements in this frame plus the trailing
1919 			 * chain element.  NextChainOffset is the size,
1920 			 * in 32-bit words, of the simple elements alone.
1921 			 */
1923 			ce->Length = (this_seg_lim - seg) *
1924 			    sizeof (SGE_SIMPLE32);
1925 			ce->NextChainOffset = ce->Length >> 2;
1926 			ce->Length += sizeof (SGE_CHAIN32);
1927 		} else {
1928 			this_seg_lim = nseg;
1929 			ce->Length = (this_seg_lim - seg) *
1930 			    sizeof (SGE_SIMPLE32);
1931 		}
1932 		ce->Length = htole16(ce->Length);
1933 
1934 		/*
1935 		 * Fill in the chain list SGE elements with our segment data.
1936 		 *
1937 		 * If we're the last element in this chain list, set the last
1938 		 * element flag. If we're the very last element overall,
1939 		 * set the end of list and end of buffer flags.
1940 		 */
1941 		while (seg < this_seg_lim) {
1942 			memset(se, 0, sizeof (*se));
1943 			se->Address = htole32(dm_segs->ds_addr);
1944 
1945 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1946 			tf = flags;
1947 			if (seg == this_seg_lim - 1) {
1948 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1949 			}
1950 			if (seg == nseg - 1) {
1951 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1952 					MPI_SGE_FLAGS_END_OF_BUFFER;
1953 			}
1954 			MPI_pSGE_SET_FLAGS(se, tf);
1955 			se->FlagsLength = htole32(se->FlagsLength);
1956 			se++;
1957 			seg++;
1958 			dm_segs++;
1959 		}
1960 
1961     next_chain:
1962 		/*
1963 		 * If we have more segments to do and we've used up all of
1964 		 * the space in a request area, go allocate another one
1965 		 * and chain to that.
1966 		 */
1967 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1968 			request_t *nrq;
1969 
1970 			nrq = mpt_get_request(mpt, FALSE);
1971 
1972 			if (nrq == NULL) {
1973 				error = ENOMEM;
1974 				goto bad;
1975 			}
1976 
1977 			/*
1978 			 * Append the new request area on the tail of our list.
1979 			 */
1980 			if ((trq = req->chain) == NULL) {
1981 				req->chain = nrq;
1982 			} else {
1983 				while (trq->chain != NULL) {
1984 					trq = trq->chain;
1985 				}
1986 				trq->chain = nrq;
1987 			}
1988 			trq = nrq;
1989 			mpt_off = trq->req_vbuf;
1990 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1991 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1992 			}
1993 			nxt_off = 0;
1994 		}
1995 	}
1996 out:
1997 
1998 	/*
1999 	 * Last time we need to check if this CCB needs to be aborted.
2000 	 */
2001 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2002 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2003 			request_t *cmd_req =
2004 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2005 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2006 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2007 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2008 		}
2009 		mpt_prt(mpt,
2010 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2011 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2012 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2013 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2014 		}
2015 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2016 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2017 		xpt_done(ccb);
2018 		mpt_free_request(mpt, req);
2019 		return;
2020 	}
2021 
2022 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
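	/* CAM timeouts are in milliseconds; convert to system ticks. */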
2023 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2024 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2025 		    mpt_timeout, ccb);
2026 	}
2027 	if (mpt->verbose > MPT_PRT_DEBUG) {
2028 		int nc = 0;
2029 		mpt_print_request(req->req_vbuf);
2030 		for (trq = req->chain; trq; trq = trq->chain) {
2031 			kprintf("  Additional Chain Area %d\n", nc++);
2032 			mpt_dump_sgl(trq->req_vbuf, 0);
2033 		}
2034 	}
2035 
2036 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2037 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2038 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2039 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2040 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2041 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2042 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2043 		} else {
2044 			tgt->state = TGT_STATE_MOVING_DATA;
2045 		}
2046 #else
2047 		tgt->state = TGT_STATE_MOVING_DATA;
2048 #endif
2049 	}
2050 	mpt_send_cmd(mpt, req);
2051 }
2052 
2053 static void
2054 mpt_start(struct cam_sim *sim, union ccb *ccb)
2055 {
2056 	request_t *req;
2057 	struct mpt_softc *mpt;
2058 	MSG_SCSI_IO_REQUEST *mpt_req;
2059 	struct ccb_scsiio *csio = &ccb->csio;
2060 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2061 	bus_dmamap_callback_t *cb;
2062 	target_id_t tgt;
2063 	int raid_passthru;
2064 
2065 	/* Get the pointer for the physical adapter */
2066 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2067 	raid_passthru = (sim == mpt->phydisk_sim);
2068 
2069 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2070 		if (mpt->outofbeer == 0) {
2071 			mpt->outofbeer = 1;
2072 			xpt_freeze_simq(mpt->sim, 1);
2073 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2074 		}
2075 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2076 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2077 		xpt_done(ccb);
2078 		return;
2079 	}
2080 #ifdef	INVARIANTS
2081 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2082 #endif
2083 
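	/*
	 * Pick the DMA callback that matches the SGE format we must
	 * build: 64-bit bus addresses require 64-bit SGLs.
	 */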
2084 	if (sizeof (bus_addr_t) > 4) {
2085 		cb = mpt_execute_req_a64;
2086 	} else {
2087 		cb = mpt_execute_req;
2088 	}
2089 
2090 	/*
2091 	 * Link the ccb and the request structure so we can find
2092 	 * the other knowing either the request or the ccb
2093 	 */
2094 	req->ccb = ccb;
2095 	ccb->ccb_h.ccb_req_ptr = req;
2096 
2097 	/* Now we build the command for the IOC */
2098 	mpt_req = req->req_vbuf;
2099 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2100 
2101 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2102 	if (raid_passthru) {
2103 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2104 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2105 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2106 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2107 			xpt_done(ccb);
2108 			return;
2109 		}
2110 		mpt_req->Bus = 0;	/* we never set bus here */
2111 	} else {
2112 		tgt = ccb->ccb_h.target_id;
2113 		mpt_req->Bus = 0;	/* XXX */
2115 	}
2116 	mpt_req->SenseBufferLength =
2117 		(csio->sense_len < MPT_SENSE_SIZE) ?
2118 		 csio->sense_len : MPT_SENSE_SIZE;
2119 
2120 	/*
2121 	 * We use the message context to find the request structure when we
2122 	 * get the command completion interrupt from the IOC.
2123 	 */
2124 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2125 
2126 	/* Which physical device to do the I/O on */
2127 	mpt_req->TargetID = tgt;
2128 
2129 	/* We assume single-level LUNs; large LUNs use flat space addressing. */
2130 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2131 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2132 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2133 	} else {
2134 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2135 	}
2136 
2137 	/* Set the direction of the transfer */
2138 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2139 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2140 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2141 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2142 	} else {
2143 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2144 	}
2145 
2146 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2147 		switch(ccb->csio.tag_action) {
2148 		case MSG_HEAD_OF_Q_TAG:
2149 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2150 			break;
2151 		case MSG_ACA_TASK:
2152 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2153 			break;
2154 		case MSG_ORDERED_Q_TAG:
2155 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2156 			break;
2157 		case MSG_SIMPLE_Q_TAG:
2158 		default:
2159 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2160 			break;
2161 		}
2162 	} else {
2163 		if (mpt->is_fc || mpt->is_sas) {
2164 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2165 		} else {
2166 			/* XXX No such thing for a target doing packetized. */
2167 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2168 		}
2169 	}
2170 
2171 	if (mpt->is_spi) {
2172 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2173 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2174 		}
2175 	}
2176 	mpt_req->Control = htole32(mpt_req->Control);
2177 
2178 	/* Copy the scsi command block into place */
2179 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2180 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2181 	} else {
2182 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2183 	}
2184 
2185 	mpt_req->CDBLength = csio->cdb_len;
2186 	mpt_req->DataLength = htole32(csio->dxfer_len);
2187 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2188 
2189 	/*
2190 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2191 	 */
2192 	if (mpt->verbose == MPT_PRT_DEBUG) {
2193 		U32 df;
2194 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2195 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2196 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2197 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2198 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2199 			mpt_prtc(mpt, "(%s %u byte%s ",
2200 			    (df == MPI_SCSIIO_CONTROL_READ)?
2201 			    "read" : "write",  csio->dxfer_len,
2202 			    (csio->dxfer_len == 1)? ")" : "s)");
2203 		}
2204 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2205 		    ccb->ccb_h.target_lun, req, req->serno);
2206 	}
2207 
2208 	/*
2209 	 * If we have any data to send with this command map it into bus space.
2210 	 */
2211 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2212 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2213 			/*
2214 			 * We've been given a pointer to a single buffer.
2215 			 */
2216 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2217 				/*
2218 				 * Virtual address that needs to be translated into
2219 				 * one or more physical address ranges.
2220 				 */
2221 				int error;
2222 				crit_enter();
2223 				error = bus_dmamap_load(mpt->buffer_dmat,
2224 				    req->dmap, csio->data_ptr, csio->dxfer_len,
2225 				    cb, req, 0);
2226 				crit_exit();
2227 				if (error == EINPROGRESS) {
2228 					/*
2229 					 * So as to maintain ordering,
2230 					 * freeze the controller queue
2231 					 * until our mapping is
2232 					 * returned.
2233 					 */
2234 					xpt_freeze_simq(mpt->sim, 1);
2235 					ccbh->status |= CAM_RELEASE_SIMQ;
2236 				}
2237 			} else {
2238 				/*
2239 				 * We have been given a pointer to a single
2240 				 * physical buffer.
2241 				 */
2242 				struct bus_dma_segment seg;
2243 				seg.ds_addr =
2244 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
2245 				seg.ds_len = csio->dxfer_len;
2246 				(*cb)(req, &seg, 1, 0);
2247 			}
2248 		} else {
2249 			/*
2250 			 * We have been given a list of addresses.
2251 			 * This case could easily be supported, but such lists
2252 			 * are not currently generated by the CAM subsystem,
2253 			 * so there is no point in wasting the time right now.
2254 			 */
2255 			struct bus_dma_segment *segs;
2256 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2257 				(*cb)(req, NULL, 0, EFAULT);
2258 			} else {
2259 				/* Just use the segments provided */
2260 				segs = (struct bus_dma_segment *)csio->data_ptr;
2261 				(*cb)(req, segs, csio->sglist_cnt, 0);
2262 			}
2263 		}
2264 	} else {
2265 		(*cb)(req, NULL, 0, 0);
2266 	}
2267 }
2268 
2269 static int
2270 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2271     int sleep_ok)
2272 {
2273 	int   error;
2274 	uint16_t status;
2275 	uint8_t response;
2276 
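	/*
	 * A specific target or lun selects a target reset; wildcards
	 * on both reset the whole bus.
	 */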
2277 	error = mpt_scsi_send_tmf(mpt,
2278 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2279 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2280 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2281 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2282 	    0,	/* XXX How do I get the channel ID? */
2283 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2284 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2285 	    0, sleep_ok);
2286 
2287 	if (error != 0) {
2288 		/*
2289 		 * mpt_scsi_send_tmf hard resets on failure, so no
2290 		 * need to do so here.
2291 		 */
2292 		mpt_prt(mpt,
2293 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2294 		return (EIO);
2295 	}
2296 
2297 	/* Wait for bus reset to be processed by the IOC. */
2298 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2299 	    REQ_STATE_DONE, sleep_ok, 5000);
2300 
2301 	status = le16toh(mpt->tmf_req->IOCStatus);
2302 	response = mpt->tmf_req->ResponseCode;
2303 	mpt->tmf_req->state = REQ_STATE_FREE;
2304 
2305 	if (error) {
2306 		mpt_prt(mpt, "mpt_bus_reset: Reset timed out. "
2307 		    "Resetting controller.\n");
2308 		mpt_reset(mpt, TRUE);
2309 		return (ETIMEDOUT);
2310 	}
2311 
2312 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2313 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2314 		    "Resetting controller.\n", status);
2315 		mpt_reset(mpt, TRUE);
2316 		return (EIO);
2317 	}
2318 
2319 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2320 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2321 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2322 		    "Resetting controller.\n", response);
2323 		mpt_reset(mpt, TRUE);
2324 		return (EIO);
2325 	}
2326 	return (0);
2327 }
2328 
2329 static int
2330 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2331 {
2332 	int r = 0;
2333 	request_t *req;
2334 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2335 
2336 	req = mpt_get_request(mpt, FALSE);
2337 	if (req == NULL) {
2338 		return (ENOMEM);
2339 	}
2340 	fc = req->req_vbuf;
2341 	memset(fc, 0, sizeof(*fc));
2342 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2343 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2344 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2345 	mpt_send_cmd(mpt, req);
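	/*
	 * When waiting synchronously, the request is freed here on
	 * success; otherwise the ELS reply handler disposes of it.
	 */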
2346 	if (dowait) {
2347 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2348 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2349 		if (r == 0) {
2350 			mpt_free_request(mpt, req);
2351 		}
2352 	}
2353 	return (r);
2354 }
2355 
2356 static void
2357 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2358 {
2359 	xpt_free_path(ccb->ccb_h.path);
2360 	kfree(ccb, M_TEMP);
2361 }
2362 
2363 static int
2364 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2365 	      MSG_EVENT_NOTIFY_REPLY *msg)
2366 {
2367 	uint32_t data0, data1;
2368 
2369 	data0 = le32toh(msg->Data[0]);
2370 	data1 = le32toh(msg->Data[1]);
2371 	switch(msg->Event & 0xFF) {
2372 	case MPI_EVENT_UNIT_ATTENTION:
2373 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2374 		    (data0 >> 8) & 0xff, data0 & 0xff);
2375 		break;
2376 
2377 	case MPI_EVENT_IOC_BUS_RESET:
2378 		/* We generated a bus reset */
2379 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2380 		    (data0 >> 8) & 0xff);
2381 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2382 		break;
2383 
2384 	case MPI_EVENT_EXT_BUS_RESET:
2385 		/* Someone else generated a bus reset */
2386 		mpt_prt(mpt, "External Bus Reset Detected\n");
2387 		/*
2388 		 * These replies don't return EventData like the MPI
2389 		 * spec says they do
2390 		 */
2391 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2392 		break;
2393 
2394 	case MPI_EVENT_RESCAN:
2395 	{
2396 		union ccb *ccb;
2397 		uint32_t pathid;
2398 		/*
2399 		 * In general this means a device has been added to the loop.
2400 		 */
2401 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2402 		if (mpt->ready == 0) {
2403 			break;
2404 		}
2405 		if (mpt->phydisk_sim) {
2406 			pathid = cam_sim_path(mpt->phydisk_sim);
2407 		} else {
2408 			pathid = cam_sim_path(mpt->sim);
2409 		}
2410 		/*
2411 		 * Allocate a CCB, create a wildcard path for this bus,
2412 		 * and schedule a rescan.
2413 		 */
2414 		ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
2415 
2416 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2417 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2418 			mpt_prt(mpt, "unable to create path for rescan\n");
2419 			kfree(ccb, M_TEMP);
2420 			break;
2421 		}
2422 
2423 		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
2424 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
2425 		ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2426 		ccb->crcn.flags = CAM_FLAG_NONE;
2427 		xpt_action(ccb);
2428 
2429 		/* scan is now in progress */
2430 
2431 		break;
2432 	}
2433 	case MPI_EVENT_LINK_STATUS_CHANGE:
2434 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2435 		    (data1 >> 8) & 0xff,
2436 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2437 		break;
2438 
2439 	case MPI_EVENT_LOOP_STATE_CHANGE:
2440 		switch ((data0 >> 16) & 0xff) {
2441 		case 0x01:
2442 			mpt_prt(mpt,
2443 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2444 			    "(Loop Initialization)\n",
2445 			    (data1 >> 8) & 0xff,
2446 			    (data0 >> 8) & 0xff,
2447 			    (data0     ) & 0xff);
2448 			switch ((data0 >> 8) & 0xff) {
2449 			case 0xF7:
2450 				if ((data0 & 0xff) == 0xF7) {
2451 					mpt_prt(mpt, "Device needs AL_PA\n");
2452 				} else {
2453 					mpt_prt(mpt, "Device %02x doesn't like "
2454 					    "FC performance\n",
2455 					    data0 & 0xFF);
2456 				}
2457 				break;
2458 			case 0xF8:
2459 				if ((data0 & 0xff) == 0xF7) {
2460 					mpt_prt(mpt, "Device had loop failure "
2461 					    "at its receiver prior to acquiring"
2462 					    " AL_PA\n");
2463 				} else {
2464 					mpt_prt(mpt, "Device %02x detected loop"
2465 					    " failure at its receiver\n",
2466 					    data0 & 0xFF);
2467 				}
2468 				break;
2469 			default:
2470 				mpt_prt(mpt, "Device %02x requests that device "
2471 				    "%02x reset itself\n",
2472 				    data0 & 0xFF,
2473 				    (data0 >> 8) & 0xFF);
2474 				break;
2475 			}
2476 			break;
2477 		case 0x02:
2478 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2479 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2480 			    (data1 >> 8) & 0xff, /* Port */
2481 			    (data0 >>  8) & 0xff, /* Character 3 */
2482 			    (data0      ) & 0xff  /* Character 4 */);
2483 			break;
2484 		case 0x03:
2485 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2486 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2487 			    (data1 >> 8) & 0xff, /* Port */
2488 			    (data0 >> 8) & 0xff, /* Character 3 */
2489 			    (data0     ) & 0xff  /* Character 4 */);
2490 			break;
2491 		default:
2492 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2493 			    "FC event (%02x %02x %02x)\n",
2494 			    (data1 >> 8) & 0xff, /* Port */
2495 			    (data0 >> 16) & 0xff, /* Event */
2496 			    (data0 >>  8) & 0xff, /* Character 3 */
2497 			    (data0      ) & 0xff  /* Character 4 */);
2498 		}
2499 		break;
2500 
2501 	case MPI_EVENT_LOGOUT:
2502 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2503 		    (data1 >> 8) & 0xff, data0);
2504 		break;
2505 	case MPI_EVENT_QUEUE_FULL:
2506 	{
2507 		struct cam_sim *sim;
2508 		struct cam_path *tmppath;
2509 		struct ccb_relsim crs;
2510 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2511 		lun_id_t lun_id;
2512 
2513 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2514 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2515 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2516 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2517 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2518 		    pqf->TargetID) != 0) {
2519 			sim = mpt->phydisk_sim;
2520 		} else {
2521 			sim = mpt->sim;
2522 		}
2523 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2524 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2525 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2526 				mpt_prt(mpt, "unable to create a path to send "
2527 				    "XPT_REL_SIMQ\n");
2528 				break;
2529 			}
2530 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2531 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2532 			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2533 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2534 			crs.openings = pqf->CurrentDepth - 1;
2535 			xpt_action((union ccb *)&crs);
2536 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2537 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2538 			}
2539 			xpt_free_path(tmppath);
2540 		}
2541 		break;
2542 	}
2543 	case MPI_EVENT_IR_RESYNC_UPDATE:
2544 		mpt_prt(mpt, "IR resync update %d completed\n",
2545 		    (data0 >> 16) & 0xff);
2546 		break;
2547 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2548 	{
2549 		union ccb *ccb;
2550 		struct cam_sim *sim;
2551 		struct cam_path *tmppath;
2552 		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2553 
2554 		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2555 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2556 		    psdsc->TargetID) != 0)
2557 			sim = mpt->phydisk_sim;
2558 		else
2559 			sim = mpt->sim;
2560 		switch(psdsc->ReasonCode) {
2561 		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2562 			ccb = kmalloc(sizeof(union ccb), M_TEMP,
2563 			    M_WAITOK | M_ZERO);
2564 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2565 			    cam_sim_path(sim), psdsc->TargetID,
2566 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2567 				mpt_prt(mpt,
2568 				    "unable to create path for rescan\n");
2569 				kfree(ccb, M_TEMP);
2570 				break;
2571 			}
2572 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
2573 			    5/*priority (low)*/);
2574 			ccb->ccb_h.func_code = XPT_SCAN_BUS;
2575 			ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2576 			ccb->crcn.flags = CAM_FLAG_NONE;
2577 			xpt_action(ccb);
2578 			break;
2579 		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2580 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2581 			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2582 			    CAM_REQ_CMP) {
2583 				mpt_prt(mpt,
2584 				    "unable to create path for async event");
2585 				break;
2586 			}
2587 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2588 			xpt_free_path(tmppath);
2589 			break;
2590 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2591 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2592 		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2593 			break;
2594 		default:
2595 			mpt_lprt(mpt, MPT_PRT_WARN,
2596 			    "SAS device status change: Bus: 0x%02x TargetID: "
2597 			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2598 			    psdsc->TargetID, psdsc->ReasonCode);
2599 			break;
2600 		}
2601 		break;
2602 	}
2603 	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2604 	{
2605 		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2606 
2607 		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2608 		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2609 		mpt_lprt(mpt, MPT_PRT_WARN,
2610 		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2611 		    pde->Port, pde->DiscoveryStatus);
2612 		break;
2613 	}
2614 	case MPI_EVENT_EVENT_CHANGE:
2615 	case MPI_EVENT_INTEGRATED_RAID:
2616 	case MPI_EVENT_IR2:
2617 	case MPI_EVENT_LOG_ENTRY_ADDED:
2618 	case MPI_EVENT_SAS_DISCOVERY:
2619 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2620 	case MPI_EVENT_SAS_SES:
2621 		break;
2622 	default:
2623 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2624 		    msg->Event & 0xFF);
2625 		return (0);
2626 	}
2627 	return (1);
2628 }
2629 
2630 /*
2631  * Reply path for all SCSI I/O requests, called from our
2632  * interrupt handler by extracting our handler index from
2633  * the MsgContext field of the reply from the IOC.
2634  *
2635  * This routine is optimized for the common case of a
2636  * completion without error.  All exception handling is
2637  * offloaded to non-inlined helper routines to minimize
2638  * cache footprint.
2639  */
2640 static int
2641 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2642     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2643 {
2644 	MSG_SCSI_IO_REQUEST *scsi_req;
2645 	union ccb *ccb;
2646 
2647 	if (req->state == REQ_STATE_FREE) {
2648 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2649 		return (TRUE);
2650 	}
2651 
2652 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2653 	ccb = req->ccb;
2654 	if (ccb == NULL) {
2655 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2656 		    req, req->serno);
2657 		return (TRUE);
2658 	}
2659 
2660 	mpt_req_untimeout(req, mpt_timeout, ccb);
2661 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2662 
2663 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2664 		bus_dmasync_op_t op;
2665 
2666 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2667 			op = BUS_DMASYNC_POSTREAD;
2668 		else
2669 			op = BUS_DMASYNC_POSTWRITE;
2670 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2671 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2672 	}
2673 
2674 	if (reply_frame == NULL) {
2675 		/*
2676 		 * Context only reply, completion without error status.
2677 		 */
2678 		ccb->csio.resid = 0;
2679 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2680 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2681 	} else {
2682 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2683 	}
2684 
2685 	if (mpt->outofbeer) {
2686 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2687 		mpt->outofbeer = 0;
2688 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2689 	}
2690 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2691 		struct scsi_inquiry_data *iq =
2692 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2693 		if (scsi_req->Function ==
2694 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2695 			/*
2696 			 * Fake out the device type so that only the
2697 			 * pass-thru device will attach.
2698 			 */
2699 			iq->device &= ~0x1F;
2700 			iq->device |= T_NODEVICE;
2701 		}
2702 	}
2703 	if (mpt->verbose == MPT_PRT_DEBUG) {
2704 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2705 		    req, req->serno);
2706 	}
2707 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2708 	xpt_done(ccb);
2709 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2710 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2711 	} else {
2712 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2713 		    req, req->serno);
2714 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2715 	}
2716 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2717 	    ("CCB req needed wakeup"));
2718 #ifdef	INVARIANTS
2719 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2720 #endif
2721 	mpt_free_request(mpt, req);
2722 	return (TRUE);
2723 }
2724 
2725 static int
2726 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2727     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2728 {
2729 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2730 
2731 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2732 #ifdef	INVARIANTS
2733 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2734 #endif
2735 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2736 	/* Record IOC Status and Response Code of TMF for any waiters. */
2737 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2738 	req->ResponseCode = tmf_reply->ResponseCode;
2739 
2740 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2741 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2742 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2743 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2744 		req->state |= REQ_STATE_DONE;
2745 		wakeup(req);
2746 	} else {
2747 		mpt->tmf_req->state = REQ_STATE_FREE;
2748 	}
2749 	return (TRUE);
2750 }
2751 
2752 /*
2753  * XXX: Move to definitions file
2754  */
2755 #define	ELS	0x22
2756 #define	FC4LS	0x32
2757 #define	ABTS	0x81
2758 #define	BA_ACC	0x84
2759 
2760 #define	LS_RJT	0x01
2761 #define	LS_ACC	0x02
2762 #define	PLOGI	0x03
2763 #define	LOGO	0x05
2764 #define SRR	0x14
2765 #define PRLI	0x20
2766 #define PRLO	0x21
2767 #define ADISC	0x52
2768 #define RSCN	0x61
2769 
2770 static void
2771 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2772     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2773 {
2774 	uint32_t fl;
2775 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2776 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2777 
2778 	/*
2779 	 * We are going to reuse the ELS request to send this response back.
2780 	 */
2781 	rsp = &tmp;
2782 	memset(rsp, 0, sizeof(*rsp));
2783 
2784 #ifdef	USE_IMMEDIATE_LINK_DATA
2785 	/*
2786 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2787 	 */
2788 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2789 #endif
2790 	rsp->RspLength = length;
2791 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2792 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2793 
2794 	/*
2795 	 * Copy over information from the original reply frame to
2796 	 * its correct place in the response.
2797 	 */
2798 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2799 
2800 	/*
2801 	 * And now copy back the temporary area to the original frame.
2802 	 */
2803 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2804 	rsp = req->req_vbuf;
2805 
2806 #ifdef	USE_IMMEDIATE_LINK_DATA
2807 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2808 #else
2809 {
2810 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2811 	bus_addr_t paddr = req->req_pbuf;
2812 	paddr += MPT_RQSL(mpt);
2813 
2814 	fl =
2815 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2816 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2817 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2818 		MPI_SGE_FLAGS_END_OF_LIST	|
2819 		MPI_SGE_FLAGS_END_OF_BUFFER;
2820 	fl <<= MPI_SGE_FLAGS_SHIFT;
2821 	fl |= (length);
2822 	se->FlagsLength = htole32(fl);
2823 	se->Address = htole32((uint32_t) paddr);
2824 }
2825 #endif
2826 
2827 	/*
2828 	 * Send it on...
2829 	 */
2830 	mpt_send_cmd(mpt, req);
2831 }
2832 
2833 static int
2834 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2835     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2836 {
2837 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2838 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2839 	U8 rctl;
2840 	U8 type;
2841 	U8 cmd;
2842 	U16 status = le16toh(reply_frame->IOCStatus);
2843 	U32 *elsbuf;
2844 	int ioindex;
2845 	int do_refresh = TRUE;
2846 
2847 #ifdef	INVARIANTS
2848 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2849 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2850 	    req, req->serno, rp->Function));
2851 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2852 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2853 	} else {
2854 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2855 	}
2856 #endif
2857 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2858 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2859 	    req, req->serno, reply_frame, reply_frame->Function);
2860 
2861 	if (status != MPI_IOCSTATUS_SUCCESS) {
2862 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2863 		    status, reply_frame->Function);
2864 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2865 			/*
2866 			 * XXX: to get around shutdown issue
2867 			 */
2868 			mpt->disabled = 1;
2869 			return (TRUE);
2870 		}
2871 		return (TRUE);
2872 	}
2873 
2874 	/*
2875 	 * If this is the completion of a link service response, we
2876 	 * recycle the request to post a fresh link service buffer.
2877 	 *
2878 	 * The request pointer is bogus in this case and we have to fetch
2879 	 * it based upon the TransactionContext.
2880 	 */
2881 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2882 		/* Freddie Uncle Charlie Katie */
2883 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2884 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) {
2885 			if (mpt->els_cmd_ptrs[ioindex] == req)
2886 				break;
2887 		}
2888 
2889 		KASSERT(ioindex < mpt->els_cmds_allocated,
2890 		    ("can't find my mommie!"));
2891 
2892 		/* remove from active list as we're going to re-post it */
2893 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2894 		req->state &= ~REQ_STATE_QUEUED;
2895 		req->state |= REQ_STATE_DONE;
2896 		mpt_fc_post_els(mpt, req, ioindex);
2897 		return (TRUE);
2898 	}
2899 
2900 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2901 		/* remove from active list as we're done */
2902 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2903 		req->state &= ~REQ_STATE_QUEUED;
2904 		req->state |= REQ_STATE_DONE;
2905 		if (req->state & REQ_STATE_TIMEDOUT) {
2906 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2907 			    "Sync Primitive Send Completed After Timeout\n");
2908 			mpt_free_request(mpt, req);
2909 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2910 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2911 			    "Async Primitive Send Complete\n");
2912 			mpt_free_request(mpt, req);
2913 		} else {
2914 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2915 			    "Sync Primitive Send Complete- Waking Waiter\n");
2916 			wakeup(req);
2917 		}
2918 		return (TRUE);
2919 	}
2920 
2921 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2922 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2923 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2924 		    rp->MsgLength, rp->MsgFlags);
2925 		return (TRUE);
2926 	}
2927 
2928 	if (rp->MsgLength <= 5) {
2929 		/*
2930 		 * This is just an ack of an original ELS buffer post
2931 		 */
2932 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2933 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2934 		return (TRUE);
2935 	}
2936 
2937 
2938 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2939 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2940 
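	/* The ELS payload lives one request slot past the frame start. */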
2941 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2942 	cmd = be32toh(elsbuf[0]) >> 24;
2943 
2944 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2945 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2946 		return (TRUE);
2947 	}
2948 
2949 	ioindex = le32toh(rp->TransactionContext);
2950 	req = mpt->els_cmd_ptrs[ioindex];
2951 
2952 	if (rctl == ELS && type == 1) {
2953 		switch (cmd) {
2954 		case PRLI:
2955 			/*
2956 			 * Send back a PRLI ACC
2957 			 */
2958 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2959 			    le32toh(rp->Wwn.PortNameHigh),
2960 			    le32toh(rp->Wwn.PortNameLow));
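			/*
			 * Rewrite the payload in place as an LS_ACC whose
			 * service parameter page advertises our target
			 * and/or initiator role.
			 */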
2961 			elsbuf[0] = htobe32(0x02100014);
2962 			elsbuf[1] |= htobe32(0x00000100);
2963 			elsbuf[4] = htobe32(0x00000002);
2964 			if (mpt->role & MPT_ROLE_TARGET)
2965 				elsbuf[4] |= htobe32(0x00000010);
2966 			if (mpt->role & MPT_ROLE_INITIATOR)
2967 				elsbuf[4] |= htobe32(0x00000020);
2968 			/* remove from active list as we're done */
2969 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2970 			req->state &= ~REQ_STATE_QUEUED;
2971 			req->state |= REQ_STATE_DONE;
2972 			mpt_fc_els_send_response(mpt, req, rp, 20);
2973 			do_refresh = FALSE;
2974 			break;
2975 		case PRLO:
2976 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2977 			elsbuf[0] = htobe32(0x02100014);
2978 			elsbuf[1] = htobe32(0x08000100);
2979 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2980 			    le32toh(rp->Wwn.PortNameHigh),
2981 			    le32toh(rp->Wwn.PortNameLow));
2982 			/* remove from active list as we're done */
2983 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2984 			req->state &= ~REQ_STATE_QUEUED;
2985 			req->state |= REQ_STATE_DONE;
2986 			mpt_fc_els_send_response(mpt, req, rp, 20);
2987 			do_refresh = FALSE;
2988 			break;
2989 		default:
2990 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2991 			break;
2992 		}
2993 	} else if (rctl == ABTS && type == 0) {
2994 		uint16_t rx_id = le16toh(rp->Rxid);
2995 		uint16_t ox_id = le16toh(rp->Oxid);
2996 		request_t *tgt_req = NULL;
2997 
2998 		mpt_prt(mpt,
2999 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
3000 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
3001 		    le32toh(rp->Wwn.PortNameLow));
3002 		if (rx_id >= mpt->mpt_max_tgtcmds) {
3003 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3004 		} else if (mpt->tgt_cmd_ptrs == NULL) {
3005 			mpt_prt(mpt, "No TGT CMD PTRS\n");
3006 		} else {
3007 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3008 		}
3009 		if (tgt_req) {
3010 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
3011 			union ccb *ccb;
3012 			uint32_t ct_id;
3013 
3014 			/*
3015 			 * Check to make sure we have the correct command.
3016 			 * The reply descriptor in the target state should
3017 			 * contain an IoIndex that matches the RX_ID.
3019 			 *
3020 			 * It'd be nice to have OX_ID to crosscheck with
3021 			 * as well.
3022 			 */
3023 			ct_id = GET_IO_INDEX(tgt->reply_desc);
3024 
3025 			if (ct_id != rx_id) {
3026 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
3027 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
3028 				    rx_id, ct_id);
3029 				goto skip;
3030 			}
3031 
3032 			ccb = tgt->ccb;
3033 			if (ccb) {
3034 				mpt_prt(mpt,
3035 				    "CCB (%p): lun %u flags %x status %x\n",
3036 				    ccb, ccb->ccb_h.target_lun,
3037 				    ccb->ccb_h.flags, ccb->ccb_h.status);
3038 			}
3039 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3040 			    "%x nxfers %x\n", tgt->state,
3041 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3042 			    tgt->nxfers);
3043   skip:
3044 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
3045 				mpt_prt(mpt, "unable to start TargetAbort\n");
3046 			}
3047 		} else {
3048 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3049 		}
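		/*
		 * Build a minimal BA_ACC payload that echoes the OX_ID
		 * and RX_ID of the aborted exchange.
		 */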
3050 		memset(elsbuf, 0, 5 * (sizeof (U32)));
3051 		elsbuf[0] = htobe32(0);
3052 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3053 		elsbuf[2] = htobe32(0x000ffff);
3054 		/*
3055 		 * Dork with the reply frame so that the response to it
3056 		 * will be correct.
3057 		 */
3058 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3059 		/* remove from active list as we're done */
3060 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3061 		req->state &= ~REQ_STATE_QUEUED;
3062 		req->state |= REQ_STATE_DONE;
3063 		mpt_fc_els_send_response(mpt, req, rp, 12);
3064 		do_refresh = FALSE;
3065 	} else {
3066 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3067 	}
3068 	if (do_refresh == TRUE) {
3069 		/* remove from active list as we're done */
3070 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3071 		req->state &= ~REQ_STATE_QUEUED;
3072 		req->state |= REQ_STATE_DONE;
3073 		mpt_fc_post_els(mpt, req, ioindex);
3074 	}
3075 	return (TRUE);
3076 }
3077 
3078 /*
3079  * Clean up all SCSI Initiator personality state in response
3080  * to a controller reset.
3081  */
3082 static void
3083 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3084 {
3085 
3086 	/*
3087 	 * The pending list is already run down by
3088 	 * the generic handler.  Perform the same
3089 	 * operation on the timed out request list.
3090 	 */
3091 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3092 				   MPI_IOCSTATUS_INVALID_STATE);
3093 
3094 	/*
3095 	 * XXX: We need to repost ELS and Target Command Buffers?
3096 	 */
3097 
3098 	/*
3099 	 * Inform the XPT that a bus reset has occurred.
3100 	 */
3101 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3102 }
3103 
3104 /*
3105  * Parse additional completion information in the reply
3106  * frame for SCSI I/O requests.
3107  */
3108 static int
3109 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3110 			     MSG_DEFAULT_REPLY *reply_frame)
3111 {
3112 	union ccb *ccb;
3113 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3114 	u_int ioc_status;
3115 	u_int sstate;
3116 
3117 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3118 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3119 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3120 		("MPT SCSI I/O Handler called with incorrect reply type"));
3121 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3122 		("MPT SCSI I/O Handler called with continuation reply"));
3123 
3124 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3125 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3126 	ioc_status &= MPI_IOCSTATUS_MASK;
3127 	sstate = scsi_io_reply->SCSIState;
3128 
3129 	ccb = req->ccb;
3130 	ccb->csio.resid =
3131 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3132 
3133 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3134 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3135 		uint32_t sense_returned;
3136 
3137 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3138 
3139 		sense_returned = le32toh(scsi_io_reply->SenseCount);
3140 		if (sense_returned < ccb->csio.sense_len)
3141 			ccb->csio.sense_resid = ccb->csio.sense_len -
3142 						sense_returned;
3143 		else
3144 			ccb->csio.sense_resid = 0;
3145 
3146 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3147 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3148 		    min(ccb->csio.sense_len, sense_returned));
3149 	}
3150 
3151 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3152 		/*
3153 		 * Tag messages rejected, but non-tagged retry
3154 		 * was successful.
3155 		 *
3156 		 * XXX: mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3157 		 */
3158 	}
3159 
3160 	switch(ioc_status) {
3161 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3162 		/*
3163 		 * XXX
3164 		 * Linux driver indicates that a zero
3165 		 * transfer length with this error code
3166 		 * indicates a CRC error.
3167 		 *
3168 		 * No need to swap the bytes for checking
3169 		 * against zero.
3170 		 */
3171 		if (scsi_io_reply->TransferCount == 0) {
3172 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3173 			break;
3174 		}
3175 		/* FALLTHROUGH */
3176 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3177 	case MPI_IOCSTATUS_SUCCESS:
3178 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3179 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3180 			/*
3181 			 * Status was never returned for this transaction.
3182 			 */
3183 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3184 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3185 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3186 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3187 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3188 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3189 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3190 
3191 			/* XXX Handle SPI-Packet and FCP-2 response info. */
3192 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3193 		} else
3194 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3195 		break;
3196 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3197 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3198 		break;
3199 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3200 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3201 		break;
3202 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3203 		/*
3204 		 * Since selection timeouts and "device really not
3205 		 * there" are grouped into this error code, report
3206 		 * selection timeout.  Selection timeouts are
3207 		 * typically retried before giving up on the device
3208 		 * whereas "device not there" errors are considered
3209 		 * unretryable.
3210 		 */
3211 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3212 		break;
3213 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3214 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3215 		break;
3216 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3217 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3218 		break;
3219 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3220 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3221 		break;
3222 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3223 		ccb->ccb_h.status = CAM_UA_TERMIO;
3224 		break;
3225 	case MPI_IOCSTATUS_INVALID_STATE:
3226 		/*
3227 		 * The IOC has been reset.  Emulate a bus reset.
3228 		 */
3229 		/* FALLTHROUGH */
3230 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3231 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3232 		break;
3233 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3234 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3235 		/*
3236 		 * Don't clobber any timeout status that has
3237 		 * already been set for this transaction.  We
3238 		 * want the SCSI layer to be able to differentiate
3239 		 * between the command we aborted due to timeout
3240 		 * and any innocent bystanders.
3241 		 */
3242 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3243 			break;
3244 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3245 		break;
3246 
3247 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3248 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3249 		break;
3250 	case MPI_IOCSTATUS_BUSY:
3251 		mpt_set_ccb_status(ccb, CAM_BUSY);
3252 		break;
3253 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3254 	case MPI_IOCSTATUS_INVALID_SGL:
3255 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3256 	case MPI_IOCSTATUS_INVALID_FIELD:
3257 	default:
3258 		/* XXX
3259 		 * Some of the above may need to kick
3260 		 * off a recovery action!!!!
3261 		 */
3262 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3263 		break;
3264 	}
3265 
3266 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3267 		mpt_freeze_ccb(ccb);
3268 	}
3269 
3270 	return (TRUE);
3271 }
3272 
3273 static void
3274 mpt_action(struct cam_sim *sim, union ccb *ccb)
3275 {
3276 	struct mpt_softc *mpt;
3277 	struct ccb_trans_settings *cts;
3278 	target_id_t tgt;
3279 	lun_id_t lun;
3280 	int raid_passthru;
3281 
3282 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3283 
3284 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3285 	raid_passthru = (sim == mpt->phydisk_sim);
3286 	MPT_LOCK_ASSERT(mpt);
3287 
3288 	tgt = ccb->ccb_h.target_id;
3289 	lun = ccb->ccb_h.target_lun;
3290 	if (raid_passthru &&
3291 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3292 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3293 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3294 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3295 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3296 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3297 			xpt_done(ccb);
3298 			return;
3299 		}
3300 	}
3301 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3302 
3303 	switch (ccb->ccb_h.func_code) {
3304 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3305 		/*
3306 		 * Do a couple of preliminary checks...
3307 		 */
3308 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3309 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3310 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3311 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3312 				break;
3313 			}
3314 		}
3315 		/* Max supported CDB length is 16 bytes */
3316 		/* XXX Unless we implement the new 32-byte message type */
3317 		if (ccb->csio.cdb_len >
3318 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3319 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3320 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3321 			break;
3322 		}
3323 #ifdef	MPT_TEST_MULTIPATH
3324 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3325 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3326 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3327 			break;
3328 		}
3329 #endif
3330 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3331 		mpt_start(sim, ccb);
3332 		return;
3333 
3334 	case XPT_RESET_BUS:
3335 		if (raid_passthru) {
3336 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3337 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3338 			break;
3339 		}
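		/* FALLTHROUGH */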
3340 	case XPT_RESET_DEV:
3341 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3342 			if (bootverbose) {
3343 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3344 			}
3345 		} else {
3346 			xpt_print(ccb->ccb_h.path, "reset device\n");
3347 		}
3348 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3349 
3350 		/*
3351 		 * mpt_bus_reset is always successful in that it
3352 		 * will fall back to a hard reset should a bus
3353 		 * reset attempt fail.
3354 		 */
3355 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3356 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3357 		break;
3358 
3359 	case XPT_ABORT:
3360 	{
3361 		union ccb *accb = ccb->cab.abort_ccb;
3362 		switch (accb->ccb_h.func_code) {
3363 		case XPT_ACCEPT_TARGET_IO:
3364 		case XPT_IMMED_NOTIFY:
3365 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3366 			break;
3367 		case XPT_CONT_TARGET_IO:
3368 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3369 			ccb->ccb_h.status = CAM_UA_ABORT;
3370 			break;
3371 		case XPT_SCSI_IO:
3372 			ccb->ccb_h.status = CAM_UA_ABORT;
3373 			break;
3374 		default:
3375 			ccb->ccb_h.status = CAM_REQ_INVALID;
3376 			break;
3377 		}
3378 		break;
3379 	}
3380 
3381 #ifdef	CAM_NEW_TRAN_CODE
3382 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3383 #else
3384 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3385 #endif
3386 #define	DP_DISC_ENABLE	0x1
3387 #define	DP_DISC_DISABL	0x2
3388 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3389 
3390 #define	DP_TQING_ENABLE	0x4
3391 #define	DP_TQING_DISABL	0x8
3392 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3393 
3394 #define	DP_WIDE		0x10
3395 #define	DP_NARROW	0x20
3396 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3397 
3398 #define	DP_SYNC		0x40
3399 
3400 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3401 	{
3402 #ifdef	CAM_NEW_TRAN_CODE
3403 		struct ccb_trans_settings_scsi *scsi;
3404 		struct ccb_trans_settings_spi *spi;
3405 #endif
3406 		uint8_t dval;
3407 		u_int period;
3408 		u_int offset;
3409 		int i, j;
3410 
3411 		cts = &ccb->cts;
3412 
3413 		if (mpt->is_fc || mpt->is_sas) {
3414 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3415 			break;
3416 		}
3417 
3418 #ifdef	CAM_NEW_TRAN_CODE
3419 		scsi = &cts->proto_specific.scsi;
3420 		spi = &cts->xport_specific.spi;
3421 
3422 		/*
3423 		 * We can be called just to validate transport and proto versions
3424 		 */
3425 		if (scsi->valid == 0 && spi->valid == 0) {
3426 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3427 			break;
3428 		}
3429 #endif
3430 
3431 		/*
3432 		 * Skip attempting settings on RAID volume disks.
3433 		 * Other devices on the bus get the normal treatment.
3434 		 */
3435 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3436 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3437 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3438 			    "no transfer settings for RAID vols\n");
3439 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3440 			break;
3441 		}
3442 
3443 		i = mpt->mpt_port_page2.PortSettings &
3444 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3445 		j = mpt->mpt_port_page2.PortFlags &
3446 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3447 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3448 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3449 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3450 			    "honoring BIOS transfer negotiations\n");
3451 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3452 			break;
3453 		}
3454 
3455 		dval = 0;
3456 		period = 0;
3457 		offset = 0;
3458 
3459 #ifndef	CAM_NEW_TRAN_CODE
3460 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3461 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3462 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3463 		}
3464 
3465 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3466 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3467 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3468 		}
3469 
3470 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3471 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3472 		}
3473 
3474 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3475 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3476 			dval |= DP_SYNC;
3477 			period = cts->sync_period;
3478 			offset = cts->sync_offset;
3479 		}
3480 #else
3481 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3482 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3483 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3484 		}
3485 
3486 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3487 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3488 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3489 		}
3490 
3491 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3492 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3493 			    DP_WIDE : DP_NARROW;
3494 		}
3495 
3496 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3497 			dval |= DP_SYNC;
3498 			offset = spi->sync_offset;
3499 		} else {
3500 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3501 			    &mpt->mpt_dev_page1[tgt];
3502 			offset = ptr->RequestedParameters;
3503 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3504 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3505 		}
3506 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3507 			dval |= DP_SYNC;
3508 			period = spi->sync_period;
3509 		} else {
3510 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3511 			    &mpt->mpt_dev_page1[tgt];
3512 			period = ptr->RequestedParameters;
3513 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3514 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3515 		}
3516 #endif
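		/* Apply the requested changes to our per-target state. */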
3517 		if (dval & DP_DISC_ENABLE) {
3518 			mpt->mpt_disc_enable |= (1 << tgt);
3519 		} else if (dval & DP_DISC_DISABL) {
3520 			mpt->mpt_disc_enable &= ~(1 << tgt);
3521 		}
3522 		if (dval & DP_TQING_ENABLE) {
3523 			mpt->mpt_tag_enable |= (1 << tgt);
3524 		} else if (dval & DP_TQING_DISABL) {
3525 			mpt->mpt_tag_enable &= ~(1 << tgt);
3526 		}
3527 		if (dval & DP_WIDTH) {
3528 			mpt_setwidth(mpt, tgt, (dval & DP_WIDE) != 0);
3529 		}
3530 		if (dval & DP_SYNC) {
3531 			mpt_setsync(mpt, tgt, period, offset);
3532 		}
3533 		if (dval == 0) {
3534 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3535 			break;
3536 		}
3537 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3538 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3539 		    tgt, dval, period, offset);
3540 		if (mpt_update_spi_config(mpt, tgt)) {
3541 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3542 		} else {
3543 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3544 		}
3545 		break;
3546 	}
3547 	case XPT_GET_TRAN_SETTINGS:
3548 	{
3549 #ifdef	CAM_NEW_TRAN_CODE
3550 		struct ccb_trans_settings_scsi *scsi;
3551 		cts = &ccb->cts;
3552 		cts->protocol = PROTO_SCSI;
3553 		if (mpt->is_fc) {
3554 			struct ccb_trans_settings_fc *fc =
3555 			    &cts->xport_specific.fc;
3556 			cts->protocol_version = SCSI_REV_SPC;
3557 			cts->transport = XPORT_FC;
3558 			cts->transport_version = 0;
3559 			fc->valid = CTS_FC_VALID_SPEED;
3560 			fc->bitrate = 100000;
3561 		} else if (mpt->is_sas) {
3562 			struct ccb_trans_settings_sas *sas =
3563 			    &cts->xport_specific.sas;
3564 			cts->protocol_version = SCSI_REV_SPC2;
3565 			cts->transport = XPORT_SAS;
3566 			cts->transport_version = 0;
3567 			sas->valid = CTS_SAS_VALID_SPEED;
3568 			sas->bitrate = 300000;
3569 		} else {
3570 			cts->protocol_version = SCSI_REV_2;
3571 			cts->transport = XPORT_SPI;
3572 			cts->transport_version = 2;
3573 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3574 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3575 				break;
3576 			}
3577 		}
3578 		scsi = &cts->proto_specific.scsi;
3579 		scsi->valid = CTS_SCSI_VALID_TQ;
3580 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3581 #else
3582 		cts = &ccb->cts;
3583 		if (mpt->is_fc) {
3584 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3585 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3586 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3587 		} else if (mpt->is_sas) {
3588 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3589 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3590 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3591 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3592 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3593 			break;
3594 		}
3595 #endif
3596 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3597 		break;
3598 	}
3599 	case XPT_CALC_GEOMETRY:
3600 	{
3601 		struct ccb_calc_geometry *ccg;
3602 
3603 		ccg = &ccb->ccg;
3604 		if (ccg->block_size == 0) {
3605 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3606 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3607 			break;
3608 		}
3609 		cam_calc_geometry(ccg, /*extended*/1);
3610 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3611 		break;
3612 	}
3613 	case XPT_PATH_INQ:		/* Path routing inquiry */
3614 	{
3615 		struct ccb_pathinq *cpi = &ccb->cpi;
3616 
3617 		cpi->version_num = 1;
3618 		cpi->target_sprt = 0;
3619 		cpi->hba_eng_cnt = 0;
3620 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3621 #if 0 /* XXX swildner */
3622 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3623 #endif
3624 		/*
3625 		 * FC cards report MAX_DEVICES of 512, but
3626 		 * the MSG_SCSI_IO_REQUEST target id field
3627 		 * is only 8 bits. Until we fix the driver
3628 		 * to support 'channels' for bus overflow,
3629 		 * just limit it.
3630 		 */
3631 		if (cpi->max_target > 255) {
3632 			cpi->max_target = 255;
3633 		}
3634 
3635 		/*
3636 		 * VMware ESX reports > 16 devices and then dies when we probe.
3637 		 */
3638 		if (mpt->is_spi && cpi->max_target > 15) {
3639 			cpi->max_target = 15;
3640 		}
3641 		if (mpt->is_spi)
3642 			cpi->max_lun = 7;
3643 		else
3644 			cpi->max_lun = MPT_MAX_LUNS;
3645 		cpi->initiator_id = mpt->mpt_ini_id;
3646 		cpi->bus_id = cam_sim_bus(sim);
3647 
3648 		/*
3649 		 * The base speed is the speed of the underlying connection.
3650 		 */
3651 #ifdef	CAM_NEW_TRAN_CODE
3652 		cpi->protocol = PROTO_SCSI;
3653 		if (mpt->is_fc) {
3654 			cpi->hba_misc = PIM_NOBUSRESET;
3655 			cpi->base_transfer_speed = 100000;
3656 			cpi->hba_inquiry = PI_TAG_ABLE;
3657 			cpi->transport = XPORT_FC;
3658 			cpi->transport_version = 0;
3659 			cpi->protocol_version = SCSI_REV_SPC;
3660 		} else if (mpt->is_sas) {
3661 			cpi->hba_misc = PIM_NOBUSRESET;
3662 			cpi->base_transfer_speed = 300000;
3663 			cpi->hba_inquiry = PI_TAG_ABLE;
3664 			cpi->transport = XPORT_SAS;
3665 			cpi->transport_version = 0;
3666 			cpi->protocol_version = SCSI_REV_SPC2;
3667 		} else {
3668 			cpi->hba_misc = PIM_SEQSCAN;
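			/* Async narrow SCSI; CAM wants KB/s here. */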
3669 			cpi->base_transfer_speed = 3300;
3670 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3671 			cpi->transport = XPORT_SPI;
3672 			cpi->transport_version = 2;
3673 			cpi->protocol_version = SCSI_REV_2;
3674 		}
3675 #else
3676 		if (mpt->is_fc) {
3677 			cpi->hba_misc = PIM_NOBUSRESET;
3678 			cpi->base_transfer_speed = 100000;
3679 			cpi->hba_inquiry = PI_TAG_ABLE;
3680 		} else if (mpt->is_sas) {
3681 			cpi->hba_misc = PIM_NOBUSRESET;
3682 			cpi->base_transfer_speed = 300000;
3683 			cpi->hba_inquiry = PI_TAG_ABLE;
3684 		} else {
3685 			cpi->hba_misc = PIM_SEQSCAN;
3686 			cpi->base_transfer_speed = 3300;
3687 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3688 		}
3689 #endif
3690 
3691 		/*
3692 		 * We give our fake RAID passthru bus a width that is
3693 		 * MaxPhysDisks wide and restrict it to one lun.
3694 		 */
3695 		if (raid_passthru) {
3696 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3697 			cpi->initiator_id = cpi->max_target + 1;
3698 			cpi->max_lun = 0;
3699 		}
3700 
3701 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3702 			cpi->hba_misc |= PIM_NOINITIATOR;
3703 		}
3704 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3705 			cpi->target_sprt =
3706 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3707 		} else {
3708 			cpi->target_sprt = 0;
3709 		}
3710 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3711 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3712 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3713 		cpi->unit_number = cam_sim_unit(sim);
3714 		cpi->ccb_h.status = CAM_REQ_CMP;
3715 		break;
3716 	}
3717 	case XPT_EN_LUN:		/* Enable LUN as a target */
3718 	{
3719 		int result;
3720 
3721 		if (ccb->cel.enable)
3722 			result = mpt_enable_lun(mpt,
3723 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3724 		else
3725 			result = mpt_disable_lun(mpt,
3726 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3727 		if (result == 0) {
3728 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3729 		} else {
3730 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3731 		}
3732 		break;
3733 	}
3734 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3735 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3736 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3737 	{
3738 		tgt_resource_t *trtp;
3739 		lun_id_t lun = ccb->ccb_h.target_lun;
3740 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3741 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3742 		ccb->ccb_h.flags = 0;
3743 
3744 		if (lun == CAM_LUN_WILDCARD) {
3745 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3746 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3747 				break;
3748 			}
3749 			trtp = &mpt->trt_wildcard;
3750 		} else if (lun >= MPT_MAX_LUNS) {
3751 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3752 			break;
3753 		} else {
3754 			trtp = &mpt->trt[lun];
3755 		}
3756 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3757 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3758 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3759 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3760 			    sim_links.stqe);
3761 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3762 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3763 			    "Put FREE INOT lun %d\n", lun);
3764 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3765 			    sim_links.stqe);
3766 		} else {
3767 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3768 		}
3769 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3770 		return;
3771 	}
3772 	case XPT_CONT_TARGET_IO:
3773 		mpt_target_start_io(mpt, ccb);
3774 		return;
3775 
3776 	default:
3777 		ccb->ccb_h.status = CAM_REQ_INVALID;
3778 		break;
3779 	}
3780 	xpt_done(ccb);
3781 }
3782 
3783 static int
3784 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3785 {
3786 #ifdef	CAM_NEW_TRAN_CODE
3787 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3788 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3789 #endif
3790 	target_id_t tgt;
3791 	uint32_t dval, pval, oval;
3792 	int rv;
3793 
3794 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3795 		tgt = cts->ccb_h.target_id;
3796 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3797 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3798 			return (-1);
3799 		}
3800 	} else {
3801 		tgt = cts->ccb_h.target_id;
3802 	}
3803 
3804 	/*
3805 	 * We aren't looking at Port Page 2 BIOS settings here -
3806 	 * these have sometimes been known to be bogus. XXX
3807 	 *
3808 	 * For user settings, we pick the max from port page 0.
3809 	 *
3810 	 * For current settings, we read the current settings out of
3811 	 * device page 0 for that target.
3812 	 */
3813 	if (IS_CURRENT_SETTINGS(cts)) {
3814 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3815 		dval = 0;
3816 
3817 		tmp = mpt->mpt_dev_page0[tgt];
3818 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3819 		    sizeof(tmp), FALSE, 5000);
3820 		if (rv) {
3821 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3822 			return (rv);
3823 		}
3824 		mpt2host_config_page_scsi_device_0(&tmp);
3825 
3826 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3827 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3828 		    tmp.NegotiatedParameters, tmp.Information);
3829 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3830 		    DP_WIDE : DP_NARROW;
3831 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3832 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3833 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3834 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3835 		oval = tmp.NegotiatedParameters;
3836 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3837 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3838 		pval = tmp.NegotiatedParameters;
3839 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3840 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3841 		mpt->mpt_dev_page0[tgt] = tmp;
3842 	} else {
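		/* User/NVRAM settings: report the port's maximum capabilities. */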
3843 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3844 		oval = mpt->mpt_port_page0.Capabilities;
3845 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3846 		pval = mpt->mpt_port_page0.Capabilities;
3847 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3848 	}
3849 
3850 #ifndef	CAM_NEW_TRAN_CODE
3851 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3852 	cts->valid = 0;
3853 	cts->sync_period = pval;
3854 	cts->sync_offset = oval;
3855 	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3856 	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3857 	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3858 	if (dval & DP_WIDE) {
3859 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3860 	} else {
3861 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3862 	}
3863 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3864 		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3865 		if (dval & DP_DISC_ENABLE) {
3866 			cts->flags |= CCB_TRANS_DISC_ENB;
3867 		}
3868 		if (dval & DP_TQING_ENABLE) {
3869 			cts->flags |= CCB_TRANS_TAG_ENB;
3870 		}
3871 	}
3872 #else
3873 	spi->valid = 0;
3874 	scsi->valid = 0;
3875 	spi->flags = 0;
3876 	scsi->flags = 0;
3877 	spi->sync_offset = oval;
3878 	spi->sync_period = pval;
3879 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3880 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3881 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3882 	if (dval & DP_WIDE) {
3883 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3884 	} else {
3885 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3886 	}
3887 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3888 		scsi->valid = CTS_SCSI_VALID_TQ;
3889 		if (dval & DP_TQING_ENABLE) {
3890 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3891 		}
3892 		spi->valid |= CTS_SPI_VALID_DISC;
3893 		if (dval & DP_DISC_ENABLE) {
3894 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3895 		}
3896 	}
3897 #endif
3898 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3899 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3900 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3901 	return (0);
3902 }
3903 
3904 static void
3905 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3906 {
3907 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3908 
3909 	ptr = &mpt->mpt_dev_page1[tgt];
3910 	if (onoff) {
3911 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3912 	} else {
3913 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3914 	}
3915 }
3916 
3917 static void
3918 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3919 {
3920 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3921 
3922 	ptr = &mpt->mpt_dev_page1[tgt];
3923 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3924 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3925 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3926 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3927 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3928 	if (period == 0) {
3929 		return;
3930 	}
3931 	ptr->RequestedParameters |=
3932 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3933 	ptr->RequestedParameters |=
3934 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
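	/*
	 * Sync period factors below 0xa imply DT clocking; below 0x9,
	 * packetized (IU) transfers and QAS as well.
	 */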
3935 	if (period < 0xa) {
3936 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3937 	}
3938 	if (period < 0x9) {
3939 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3940 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3941 	}
3942 }
3943 
3944 static int
3945 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3946 {
3947 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3948 	int rv;
3949 
3950 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3951 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3952 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3953 	tmp = mpt->mpt_dev_page1[tgt];
3954 	host2mpt_config_page_scsi_device_1(&tmp);
3955 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3956 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3957 	if (rv) {
3958 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3959 		return (-1);
3960 	}
3961 	return (0);
3962 }
3963 
3964 /****************************** Timeout Recovery ******************************/
3965 static int
3966 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3967 {
3968 	int error;
3969 
3970 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3971 	    &mpt->recovery_thread, /*flags*/0,
3972 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3973 	return (error);
3974 }
3975 
3976 static void
3977 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3978 {
3979 
3980 	if (mpt->recovery_thread == NULL) {
3981 		return;
3982 	}
3983 	mpt->shutdwn_recovery = 1;
3984 	wakeup(mpt);
3985 	/*
3986 	 * Sleep on a slightly different location
3987 	 * for this interlock just for added safety.
3988 	 */
3989 	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
3990 }
3991 
3992 static void
3993 mpt_recovery_thread(void *arg)
3994 {
3995 	struct mpt_softc *mpt;
3996 
3997 	mpt = (struct mpt_softc *)arg;
3998 	MPT_LOCK(mpt);
3999 	for (;;) {
4000 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4001 			if (mpt->shutdwn_recovery == 0) {
4002 				mpt_sleep(mpt, mpt, 0, "idle", 0);
4003 			}
4004 		}
4005 		if (mpt->shutdwn_recovery != 0) {
4006 			break;
4007 		}
4008 		mpt_recover_commands(mpt);
4009 	}
4010 	mpt->recovery_thread = NULL;
4011 	wakeup(&mpt->recovery_thread);
4012 	MPT_UNLOCK(mpt);
4013 	mpt_kthread_exit(0);
4014 }
4015 
4016 static int
4017 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4018     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4019 {
4020 	MSG_SCSI_TASK_MGMT *tmf_req;
4021 	int		    error;
4022 
4023 	/*
4024 	 * Wait for any current TMF request to complete.
4025 	 * We're only allowed to issue one TMF at a time.
4026 	 */
4027 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4028 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
4029 	if (error != 0) {
4030 		mpt_reset(mpt, TRUE);
4031 		return (ETIMEDOUT);
4032 	}
4033 
4034 	mpt_assign_serno(mpt, mpt->tmf_req);
4035 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4036 
4037 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4038 	memset(tmf_req, 0, sizeof(*tmf_req));
4039 	tmf_req->TargetID = target;
4040 	tmf_req->Bus = channel;
4041 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4042 	tmf_req->TaskType = type;
4043 	tmf_req->MsgFlags = flags;
4044 	tmf_req->MsgContext =
4045 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
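	/*
	 * Encode the LUN into the fixed 8-byte LUN field: flat space
	 * addressing for large LUNs, single level otherwise.
	 */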
4046 	if (lun > MPT_MAX_LUNS) {
4047 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4048 		tmf_req->LUN[1] = lun & 0xff;
4049 	} else {
4050 		tmf_req->LUN[1] = lun;
4051 	}
4052 	tmf_req->TaskMsgContext = abort_ctx;
4053 
4054 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4055 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4056 	    mpt->tmf_req->serno, tmf_req->MsgContext);
4057 	if (mpt->verbose > MPT_PRT_DEBUG) {
4058 		mpt_print_request(tmf_req);
4059 	}
4060 
4061 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4062 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4063 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4064 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4065 	if (error != MPT_OK) {
4066 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4067 		mpt->tmf_req->state = REQ_STATE_FREE;
4068 		mpt_reset(mpt, TRUE);
4069 	}
4070 	return (error);
4071 }
4072 
4073 /*
4074  * When a command times out, it is placed on the request_timeout_list
4075  * and we wake our recovery thread.  The MPT-Fusion architecture supports
4076  * only a single TMF operation at a time, so we serially abort/BDR, etc.,
4077  * the timed-out transactions.  The next TMF is issued either by the
4078  * completion handler of the current TMF waking our recovery thread,
4079  * or the TMF timeout handler causing a hard reset sequence.
4080  */
4081 static void
4082 mpt_recover_commands(struct mpt_softc *mpt)
4083 {
4084 	request_t	   *req;
4085 	union ccb	   *ccb;
4086 	int		    error;
4087 
4088 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4089 		/*
4090 		 * No work to do- leave.
4091 		 * No work to do - leave.
4092 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4093 		return;
4094 	}
4095 
4096 	/*
4097 	 * Flush any commands whose completion coincides with their timeout.
4098 	 */
4099 	mpt_intr(mpt);
4100 
4101 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4102 		/*
4103 		 * The timed-out commands have already
4104 		 * completed.  This typically means
4105 		 * that either the timeout value was on
4106 		 * the hairy edge of what the device
4107 		 * requires or - more likely - interrupts
4108 		 * are not happening.
4109 		 */
4110 		mpt_prt(mpt, "Timedout requests already complete. "
4111 		    "Interrupts may not be functioning.\n");
4112 		mpt_enable_ints(mpt);
4113 		return;
4114 	}
4115 
4116 	/*
4117 	 * We have no visibility into the current state of the
4118 	 * controller, so attempt to abort the commands in the
4119 	 * order they timed-out. For initiator commands, we
4120 	 * depend on the reply handler pulling requests off
4121 	 * the timeout list.
4122 	 */
4123 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4124 		uint16_t status;
4125 		uint8_t response;
4126 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4127 
4128 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4129 		    req, req->serno, hdrp->Function);
4130 		ccb = req->ccb;
4131 		if (ccb == NULL) {
4132 			mpt_prt(mpt, "null ccb in timed out request. "
4133 			    "Resetting Controller.\n");
4134 			mpt_reset(mpt, TRUE);
4135 			continue;
4136 		}
4137 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4138 
4139 		/*
4140 		 * Check to see if this is not an initiator command and
4141 		 * deal with it differently if it is.
4142 		 */
4143 		switch (hdrp->Function) {
4144 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4145 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4146 			break;
4147 		default:
4148 			/*
4149 			 * XXX: FIX ME: need to abort target assists...
4150 			 */
4151 			mpt_prt(mpt, "just putting it back on the pend q\n");
4152 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4153 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4154 			    links);
4155 			continue;
4156 		}
4157 
4158 		error = mpt_scsi_send_tmf(mpt,
4159 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4160 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4161 		    htole32(req->index | scsi_io_handler_id), TRUE);
4162 
4163 		if (error != 0) {
4164 			/*
4165 			 * mpt_scsi_send_tmf hard resets on failure, so no
4166 			 * need to do so here.  Our queue should be emptied
4167 			 * by the hard reset.
4168 			 */
4169 			continue;
4170 		}
4171 
4172 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4173 		    REQ_STATE_DONE, TRUE, 500);
4174 
4175 		status = le16toh(mpt->tmf_req->IOCStatus);
4176 		response = mpt->tmf_req->ResponseCode;
4177 		mpt->tmf_req->state = REQ_STATE_FREE;
4178 
4179 		if (error != 0) {
4180 			/*
4181 			 * If we've errored out, reset the controller.
4182 			 */
4183 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4184 			    "Resetting controller\n");
4185 			mpt_reset(mpt, TRUE);
4186 			continue;
4187 		}
4188 
4189 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4190 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4191 			    "Resetting controller.\n", status);
4192 			mpt_reset(mpt, TRUE);
4193 			continue;
4194 		}
4195 
4196 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4197 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4198 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4199 			    "Resetting controller.\n", response);
4200 			mpt_reset(mpt, TRUE);
4201 			continue;
4202 		}
4203 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4204 	}
4205 }
4206 
4207 /************************ Target Mode Support ****************************/
4208 static void
4209 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4210 {
4211 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4212 	PTR_SGE_TRANSACTION32 tep;
4213 	PTR_SGE_SIMPLE32 se;
4214 	bus_addr_t paddr;
4215 	uint32_t fl;
4216 
4217 	paddr = req->req_pbuf;
4218 	paddr += MPT_RQSL(mpt);
4219 
4220 	fc = req->req_vbuf;
4221 	memset(fc, 0, MPT_REQUEST_AREA);
4222 	fc->BufferCount = 1;
4223 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4224 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4225 
4226 	/*
4227 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4228 	 * consist of a TE SGL element (with details length of zero)
4229 	 * followed by a SIMPLE SGL element which holds the address
4230 	 * of the buffer.
4231 	 */
4232 
4233 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4234 
4235 	tep->ContextSize = 4;
4236 	tep->Flags = 0;
4237 	tep->TransactionContext[0] = htole32(ioindex);
4238 
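	/*
	 * One simple SGE pointing at the ELS buffer, which lives past
	 * the request message in the same request area.
	 */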
4239 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4240 	fl =
4241 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4242 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4243 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4244 		MPI_SGE_FLAGS_END_OF_LIST	|
4245 		MPI_SGE_FLAGS_END_OF_BUFFER;
4246 	fl <<= MPI_SGE_FLAGS_SHIFT;
4247 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4248 	se->FlagsLength = htole32(fl);
4249 	se->Address = htole32((uint32_t) paddr);
4250 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4251 	    "add ELS index %d ioindex %d for %p:%u\n",
4252 	    req->index, ioindex, req, req->serno);
4253 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4254 	    ("mpt_fc_post_els: request not locked"));
4255 	mpt_send_cmd(mpt, req);
4256 }
4257 
4258 static void
4259 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4260 {
4261 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4262 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4263 	bus_addr_t paddr;
4264 
4265 	paddr = req->req_pbuf;
4266 	paddr += MPT_RQSL(mpt);
4267 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4268 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4269 
4270 	fc = req->req_vbuf;
4271 	fc->BufferCount = 1;
4272 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4273 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4274 
4275 	cb = &fc->Buffer[0];
4276 	cb->IoIndex = htole16(ioindex);
4277 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4278 
4279 	mpt_check_doorbell(mpt);
4280 	mpt_send_cmd(mpt, req);
4281 }
4282 
4283 static int
4284 mpt_add_els_buffers(struct mpt_softc *mpt)
4285 {
4286 	int i;
4287 
4288 	if (mpt->is_fc == 0) {
4289 		return (TRUE);
4290 	}
4291 
4292 	if (mpt->els_cmds_allocated) {
4293 		return (TRUE);
4294 	}
4295 
4296 	mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
4297 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4298 
4299 	if (mpt->els_cmd_ptrs == NULL) {
4300 		return (FALSE);
4301 	}
4302 
4303 	/*
4304 	 * Feed the chip some ELS buffer resources
4305 	 */
4306 	for (i = 0; i < MPT_MAX_ELS; i++) {
4307 		request_t *req = mpt_get_request(mpt, FALSE);
4308 		if (req == NULL) {
4309 			break;
4310 		}
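		/*
		 * Lock the request so normal completion processing won't
		 * recycle it while the chip holds it as a buffer.
		 */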
4311 		req->state |= REQ_STATE_LOCKED;
4312 		mpt->els_cmd_ptrs[i] = req;
4313 		mpt_fc_post_els(mpt, req, i);
4314 	}
4315 
4316 	if (i == 0) {
4317 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4318 		kfree(mpt->els_cmd_ptrs, M_DEVBUF);
4319 		mpt->els_cmd_ptrs = NULL;
4320 		return (FALSE);
4321 	}
4322 	if (i != MPT_MAX_ELS) {
4323 		mpt_lprt(mpt, MPT_PRT_INFO,
4324 		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4325 	}
4326 	mpt->els_cmds_allocated = i;
4327 	return(TRUE);
4328 }
4329 
4330 static int
4331 mpt_add_target_commands(struct mpt_softc *mpt)
4332 {
4333 	int i, max;
4334 
4335 	if (mpt->tgt_cmd_ptrs) {
4336 		return (TRUE);
4337 	}
4338 
4339 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4340 	if (max > mpt->mpt_max_tgtcmds) {
4341 		max = mpt->mpt_max_tgtcmds;
4342 	}
4343 	mpt->tgt_cmd_ptrs =
4344 	    kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4345 	if (mpt->tgt_cmd_ptrs == NULL) {
4346 		mpt_prt(mpt,
4347 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4348 		return (FALSE);
4349 	}
4350 
4351 	for (i = 0; i < max; i++) {
4352 		request_t *req;
4353 
4354 		req = mpt_get_request(mpt, FALSE);
4355 		if (req == NULL) {
4356 			break;
4357 		}
4358 		req->state |= REQ_STATE_LOCKED;
4359 		mpt->tgt_cmd_ptrs[i] = req;
4360 		mpt_post_target_command(mpt, req, i);
4361 	}
4362 
4363 
4364 	if (i == 0) {
4365 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4366 		kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
4367 		mpt->tgt_cmd_ptrs = NULL;
4368 		return (FALSE);
4369 	}
4370 
4371 	mpt->tgt_cmds_allocated = i;
4372 
4373 	if (i < max) {
4374 		mpt_lprt(mpt, MPT_PRT_INFO,
4375 		    "added %d of %d target bufs\n", i, max);
4376 	}
4377 	return (i);
4378 }
4379 
4380 static int
4381 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4382 {
4383 
4384 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4385 		mpt->twildcard = 1;
4386 	} else if (lun >= MPT_MAX_LUNS) {
4387 		return (EINVAL);
4388 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4389 		return (EINVAL);
4390 	}
4391 	if (mpt->tenabled == 0) {
4392 		if (mpt->is_fc) {
4393 			(void) mpt_fc_reset_link(mpt, 0);
4394 		}
4395 		mpt->tenabled = 1;
4396 	}
4397 	if (lun == CAM_LUN_WILDCARD) {
4398 		mpt->trt_wildcard.enabled = 1;
4399 	} else {
4400 		mpt->trt[lun].enabled = 1;
4401 	}
4402 	return (0);
4403 }
4404 
4405 static int
4406 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4407 {
4408 	int i;
4409 
4410 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4411 		mpt->twildcard = 0;
4412 	} else if (lun >= MPT_MAX_LUNS) {
4413 		return (EINVAL);
4414 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4415 		return (EINVAL);
4416 	}
4417 	if (lun == CAM_LUN_WILDCARD) {
4418 		mpt->trt_wildcard.enabled = 0;
4419 	} else {
4420 		mpt->trt[lun].enabled = 0;
4421 	}
4422 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4423 		if (mpt->trt[i].enabled) {
4424 			break;
4425 		}
4426 	}
4427 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4428 		if (mpt->is_fc) {
4429 			(void) mpt_fc_reset_link(mpt, 0);
4430 		}
4431 		mpt->tenabled = 0;
4432 	}
4433 	return (0);
4434 }
4435 
4436 /*
4437  * Called with MPT lock held
4438  */
4439 static void
4440 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4441 {
4442 	struct ccb_scsiio *csio = &ccb->csio;
4443 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4444 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4445 
4446 	switch (tgt->state) {
4447 	case TGT_STATE_IN_CAM:
4448 		break;
4449 	case TGT_STATE_MOVING_DATA:
4450 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4451 		xpt_freeze_simq(mpt->sim, 1);
4452 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4453 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4454 		xpt_done(ccb);
4455 		return;
4456 	default:
4457 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4458 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4459 		mpt_tgt_dump_req_state(mpt, cmd_req);
4460 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4461 		xpt_done(ccb);
4462 		return;
4463 	}
4464 
4465 	if (csio->dxfer_len) {
4466 		bus_dmamap_callback_t *cb;
4467 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4468 		request_t *req;
4469 
4470 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4471 		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4472 
4473 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4474 			if (mpt->outofbeer == 0) {
4475 				mpt->outofbeer = 1;
4476 				xpt_freeze_simq(mpt->sim, 1);
4477 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4478 			}
4479 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4480 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4481 			xpt_done(ccb);
4482 			return;
4483 		}
4484 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
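		/* Pick the SGE builder that matches the width of bus addresses. */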
4485 		if (sizeof (bus_addr_t) > 4) {
4486 			cb = mpt_execute_req_a64;
4487 		} else {
4488 			cb = mpt_execute_req;
4489 		}
4490 
4491 		req->ccb = ccb;
4492 		ccb->ccb_h.ccb_req_ptr = req;
4493 
4494 		/*
4495 		 * Record the currently active ccb and the
4496 		 * request for it in our target state area.
4497 		 */
4498 		tgt->ccb = ccb;
4499 		tgt->req = req;
4500 
4501 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4502 		ta = req->req_vbuf;
4503 
4504 		if (mpt->is_sas) {
4505 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4506 			     cmd_req->req_vbuf;
4507 			ta->QueueTag = ssp->InitiatorTag;
4508 		} else if (mpt->is_spi) {
4509 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4510 			     cmd_req->req_vbuf;
4511 			ta->QueueTag = sp->Tag;
4512 		}
4513 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4514 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4515 		ta->ReplyWord = htole32(tgt->reply_desc);
4516 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4517 			ta->LUN[0] =
4518 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4519 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4520 		} else {
4521 			ta->LUN[1] = csio->ccb_h.target_lun;
4522 		}
4523 
4524 		ta->RelativeOffset = tgt->bytes_xfered;
4525 		ta->DataLength = ccb->csio.dxfer_len;
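		/* Never ask to move more than remains of the expected data. */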
4526 		if (ta->DataLength > tgt->resid) {
4527 			ta->DataLength = tgt->resid;
4528 		}
4529 
4530 		/*
4531 		 * XXX Should be done after data transfer completes?
4532 		 */
4533 		tgt->resid -= csio->dxfer_len;
4534 		tgt->bytes_xfered += csio->dxfer_len;
4535 
4536 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4537 			ta->TargetAssistFlags |=
4538 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4539 		}
4540 
4541 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4542 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4543 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4544 			ta->TargetAssistFlags |=
4545 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4546 		}
4547 #endif
4548 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4549 
4550 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4551 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4552 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4553 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4554 
4555 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4556 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4557 				int error;
4558 				crit_enter();
4559 				error = bus_dmamap_load(mpt->buffer_dmat,
4560 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4561 				    cb, req, 0);
4562 				crit_exit();
4563 				if (error == EINPROGRESS) {
4564 					xpt_freeze_simq(mpt->sim, 1);
4565 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4566 				}
4567 			} else {
4568 				/*
4569 				 * We have been given a pointer to single
4570 				 * physical buffer.
4571 				 */
4572 				struct bus_dma_segment seg;
4573 				seg.ds_addr = (bus_addr_t)
4574 				    (vm_offset_t)csio->data_ptr;
4575 				seg.ds_len = csio->dxfer_len;
4576 				(*cb)(req, &seg, 1, 0);
4577 			}
4578 		} else {
4579 			/*
4580 			 * We have been given a list of addresses.
4581 			 * This case could be easily supported but they are not
4582 			 * currently generated by the CAM subsystem so there
4583 			 * is no point in wasting the time right now.
4584 			 */
4585 			struct bus_dma_segment *sgs;
4586 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4587 				(*cb)(req, NULL, 0, EFAULT);
4588 			} else {
4589 				/* Just use the segments provided */
4590 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4591 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4592 			}
4593 		}
4594 	} else {
4595 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4596 
4597 		/*
4598 		 * XXX: I don't know why this seems to happen, but
4599 		 * XXX: completing the CCB seems to make things happy.
4600 		 * XXX: This seems to happen if the initiator requests
4601 		 * XXX: enough data that we have to do multiple CTIOs.
4602 		 */
4603 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4604 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4605 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4606 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4607 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4608 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4609 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4610 			xpt_done(ccb);
4611 			return;
4612 		}
4613 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4614 			sp = sense;
4615 			memcpy(sp, &csio->sense_data,
4616 			   min(csio->sense_len, MPT_SENSE_SIZE));
4617 		}
4618 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4619 	}
4620 }
4621 
4622 static void
4623 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4624     uint32_t lun, int send, uint8_t *data, size_t length)
4625 {
4626 	mpt_tgt_state_t *tgt;
4627 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4628 	SGE_SIMPLE32 *se;
4629 	uint32_t flags;
4630 	uint8_t *dptr;
4631 	bus_addr_t pptr;
4632 	request_t *req;
4633 
4634 	/*
4635 	 * We enter with resid set to the data load for the command.
4636 	 */
4637 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4638 	if (length == 0 || tgt->resid == 0) {
4639 		tgt->resid = 0;
4640 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4641 		return;
4642 	}
4643 
4644 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4645 		mpt_prt(mpt, "out of resources- dropping local response\n");
4646 		return;
4647 	}
4648 	tgt->is_local = 1;
4649 
4650 
4651 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4652 	ta = req->req_vbuf;
4653 
4654 	if (mpt->is_sas) {
4655 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4656 		ta->QueueTag = ssp->InitiatorTag;
4657 	} else if (mpt->is_spi) {
4658 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4659 		ta->QueueTag = sp->Tag;
4660 	}
4661 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4662 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4663 	ta->ReplyWord = htole32(tgt->reply_desc);
4664 	if (lun > MPT_MAX_LUNS) {
4665 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4666 		ta->LUN[1] = lun & 0xff;
4667 	} else {
4668 		ta->LUN[1] = lun;
4669 	}
4670 	ta->RelativeOffset = 0;
4671 	ta->DataLength = length;
4672 
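	/*
	 * The payload for this local response lives past the request
	 * message in the request's buffer.
	 */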
4673 	dptr = req->req_vbuf;
4674 	dptr += MPT_RQSL(mpt);
4675 	pptr = req->req_pbuf;
4676 	pptr += MPT_RQSL(mpt);
4677 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4678 
4679 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4680 	memset(se, 0,sizeof (*se));
4681 
4682 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4683 	if (send) {
4684 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4685 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4686 	}
4687 	se->Address = pptr;
4688 	MPI_pSGE_SET_LENGTH(se, length);
4689 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4690 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4691 	MPI_pSGE_SET_FLAGS(se, flags);
4692 
4693 	tgt->ccb = NULL;
4694 	tgt->req = req;
4695 	tgt->resid -= length;
4696 	tgt->bytes_xfered = length;
4697 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4698 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4699 #else
4700 	tgt->state = TGT_STATE_MOVING_DATA;
4701 #endif
4702 	mpt_send_cmd(mpt, req);
4703 }
4704 
4705 /*
4706  * Abort queued up CCBs
4707  */
4708 static cam_status
4709 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4710 {
4711 	struct mpt_hdr_stailq *lp;
4712 	struct ccb_hdr *srch;
4713 	int found = 0;
4714 	union ccb *accb = ccb->cab.abort_ccb;
4715 	tgt_resource_t *trtp;
4716 
4717 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4718 
4719 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4720 		trtp = &mpt->trt_wildcard;
4721 	} else {
4722 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4723 	}
4724 
4725 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4726 		lp = &trtp->atios;
4727 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4728 		lp = &trtp->inots;
4729 	} else {
4730 		return (CAM_REQ_INVALID);
4731 	}
4732 
4733 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4734 		if (srch == &accb->ccb_h) {
4735 			found = 1;
4736 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4737 			break;
4738 		}
4739 	}
4740 	if (found) {
4741 		accb->ccb_h.status = CAM_REQ_ABORTED;
4742 		xpt_done(accb);
4743 		return (CAM_REQ_CMP);
4744 	}
4745 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4746 	return (CAM_PATH_INVALID);
4747 }
4748 
4749 /*
4750  * Ask the MPT to abort the current target command
4751  */
4752 static int
4753 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4754 {
4755 	int error;
4756 	request_t *req;
4757 	PTR_MSG_TARGET_MODE_ABORT abtp;
4758 
4759 	req = mpt_get_request(mpt, FALSE);
4760 	if (req == NULL) {
4761 		return (-1);
4762 	}
4763 	abtp = req->req_vbuf;
4764 	memset(abtp, 0, sizeof (*abtp));
4765 
4766 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4767 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4768 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4769 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4770 	error = 0;
4771 	if (mpt->is_fc || mpt->is_sas) {
4772 		mpt_send_cmd(mpt, req);
4773 	} else {
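		/*
		 * XXX: this hands the request_t itself to the handshake
		 * routine; it likely should pass the abort message (abtp)
		 * and its size instead.
		 */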
4774 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4775 	}
4776 	return (error);
4777 }
4778 
4779 /*
4780  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4781  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4782  * FC929 to set bogus FC_RSP fields (nonzero residuals
4783  * but w/o RESID fields set). This causes QLogic initiators
4784  * to think maybe that a frame was lost.
4785  *
4786  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4787  * we use allocated requests to do TARGET_ASSIST and we
4788  * need to know when to release them.
4789  */
4790 
4791 static void
4792 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4793     uint8_t status, uint8_t const *sense_data)
4794 {
4795 	uint8_t *cmd_vbuf;
4796 	mpt_tgt_state_t *tgt;
4797 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4798 	request_t *req;
4799 	bus_addr_t paddr;
4800 	int resplen = 0;
4801 	uint32_t fl;
4802 
4803 	cmd_vbuf = cmd_req->req_vbuf;
4804 	cmd_vbuf += MPT_RQSL(mpt);
4805 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4806 
4807 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4808 		if (mpt->outofbeer == 0) {
4809 			mpt->outofbeer = 1;
4810 			xpt_freeze_simq(mpt->sim, 1);
4811 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4812 		}
4813 		if (ccb) {
4814 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4815 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4816 			xpt_done(ccb);
4817 		} else {
4818 			mpt_prt(mpt,
4819 			    "could not allocate status request- dropping\n");
4820 		}
4821 		return;
4822 	}
4823 	req->ccb = ccb;
4824 	if (ccb) {
4825 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4826 		ccb->ccb_h.ccb_req_ptr = req;
4827 	}
4828 
4829 	/*
4830 	 * Record the currently active ccb, if any, and the
4831 	 * request for it in our target state area.
4832 	 */
4833 	tgt->ccb = ccb;
4834 	tgt->req = req;
4835 	tgt->state = TGT_STATE_SENDING_STATUS;
4836 
4837 	tp = req->req_vbuf;
4838 	paddr = req->req_pbuf;
4839 	paddr += MPT_RQSL(mpt);
4840 
4841 	memset(tp, 0, sizeof (*tp));
4842 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4843 	if (mpt->is_fc) {
4844 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4845 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4846 		uint8_t *sts_vbuf;
4847 		uint32_t *rsp;
4848 
4849 		sts_vbuf = req->req_vbuf;
4850 		sts_vbuf += MPT_RQSL(mpt);
4851 		rsp = (uint32_t *) sts_vbuf;
4852 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4853 
4854 		/*
4855 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4856 		 * It has to be big-endian in memory, so we build it as
4857 		 * host-order 32 bit words and byte swap them into
4858 		 * big-endian form as needed.
4859 		 *
4860 		 * All we're filling in here is the FC_RSP payload.
4861 		 * We may just have the chip synthesize it if
4862 		 * we have no residual and an OK status.
4863 		 *
4864 		 */
4865 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4866 
4867 		rsp[2] = status;
4868 		if (tgt->resid) {
4869 			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
4870 			rsp[3] = htobe32(tgt->resid);
4871 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4872 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4873 #endif
4874 		}
4875 		if (status == SCSI_STATUS_CHECK_COND) {
4876 			int i;
4877 
4878 			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
4879 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4880 			if (sense_data) {
4881 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4882 			} else {
4883 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4884 				    "TION but no sense data?\n");
4885 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4886 			}
4887 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4888 				rsp[i] = htobe32(rsp[i]);
4889 			}
4890 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4891 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4892 #endif
4893 		}
4894 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4895 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4896 #endif
4897 		rsp[2] = htobe32(rsp[2]);
4898 	} else if (mpt->is_sas) {
4899 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4900 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4901 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4902 	} else {
4903 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4904 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4905 		tp->StatusCode = status;
4906 		tp->QueueTag = htole16(sp->Tag);
4907 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4908 	}
4909 
4910 	tp->ReplyWord = htole32(tgt->reply_desc);
4911 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4912 
4913 #ifdef	WE_CAN_USE_AUTO_REPOST
4914 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4915 #endif
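	/*
	 * With GOOD status and no response payload to send, let the
	 * chip synthesize the status frame itself.
	 */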
4916 	if (status == SCSI_STATUS_OK && resplen == 0) {
4917 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4918 	} else {
4919 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4920 		fl =
4921 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4922 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4923 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4924 			MPI_SGE_FLAGS_END_OF_LIST	|
4925 			MPI_SGE_FLAGS_END_OF_BUFFER;
4926 		fl <<= MPI_SGE_FLAGS_SHIFT;
4927 		fl |= resplen;
4928 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4929 	}
4930 
4931 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4932 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4933 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4934 	    req->serno, tgt->resid);
4935 	if (ccb) {
4936 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4937 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4938 	}
4939 	mpt_send_cmd(mpt, req);
4940 }
4941 
4942 static void
4943 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4944     tgt_resource_t *trtp, int init_id)
4945 {
4946 	struct ccb_immed_notify *inot;
4947 	mpt_tgt_state_t *tgt;
4948 
4949 	tgt = MPT_TGT_STATE(mpt, req);
4950 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4951 	if (inot == NULL) {
4952 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs - sending back BSY\n");
4953 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4954 		return;
4955 	}
4956 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4957 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4958 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4959 
4960 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4961 	inot->sense_len = 0;
4962 	memset(inot->message_args, 0, sizeof (inot->message_args));
4963 	inot->initiator_id = init_id;	/* XXX */
4964 
4965 	/*
4966 	 * This is a somewhat grotesque attempt to map from task management
4967 	 * to old style SCSI messages. God help us all.
4968 	 */
4969 	switch (fc) {
4970 	case MPT_ABORT_TASK_SET:
4971 		inot->message_args[0] = MSG_ABORT_TAG;
4972 		break;
4973 	case MPT_CLEAR_TASK_SET:
4974 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4975 		break;
4976 	case MPT_TARGET_RESET:
4977 		inot->message_args[0] = MSG_TARGET_RESET;
4978 		break;
4979 	case MPT_CLEAR_ACA:
4980 		inot->message_args[0] = MSG_CLEAR_ACA;
4981 		break;
4982 	case MPT_TERMINATE_TASK:
4983 		inot->message_args[0] = MSG_ABORT_TAG;
4984 		break;
4985 	default:
4986 		inot->message_args[0] = MSG_NOOP;
4987 		break;
4988 	}
4989 	tgt->ccb = (union ccb *) inot;
4990 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4991 	xpt_done((union ccb *)inot);
4992 }
4993 
4994 static void
4995 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4996 {
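	/*
	 * Canned INQUIRY data for unconfigured luns: peripheral
	 * qualifier 3, device type 0x1f ("no device at this lun").
	 */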
4997 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4998 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4999 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
5000 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
5001 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
5002 	     '0',  '0',  '0',  '1'
5003 	};
5004 	struct ccb_accept_tio *atiop;
5005 	lun_id_t lun;
5006 	int tag_action = 0;
5007 	mpt_tgt_state_t *tgt;
5008 	tgt_resource_t *trtp = NULL;
5009 	U8 *lunptr;
5010 	U8 *vbuf;
5011 	U16 itag;
5012 	U16 ioindex;
5013 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
5014 	uint8_t *cdbp;
5015 
5016 	/*
5017 	 * Stash info for the current command where we can get at it later.
5018 	 */
5019 	vbuf = req->req_vbuf;
5020 	vbuf += MPT_RQSL(mpt);
5021 
5022 	/*
5023 	 * Get our state pointer set up.
5024 	 */
5025 	tgt = MPT_TGT_STATE(mpt, req);
5026 	if (tgt->state != TGT_STATE_LOADED) {
5027 		mpt_tgt_dump_req_state(mpt, req);
5028 		panic("bad target state in mpt_scsi_tgt_atio");
5029 	}
5030 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
5031 	tgt->state = TGT_STATE_IN_CAM;
5032 	tgt->reply_desc = reply_desc;
5033 	ioindex = GET_IO_INDEX(reply_desc);
5034 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5035 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
5036 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
5037 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
5038 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
5039 	}
5040 	if (mpt->is_fc) {
5041 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
5042 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
5043 		if (fc->FcpCntl[2]) {
5044 			/*
5045 			 * Task Management Request
5046 			 */
5047 			switch (fc->FcpCntl[2]) {
5048 			case 0x2:
5049 				fct = MPT_ABORT_TASK_SET;
5050 				break;
5051 			case 0x4:
5052 				fct = MPT_CLEAR_TASK_SET;
5053 				break;
5054 			case 0x20:
5055 				fct = MPT_TARGET_RESET;
5056 				break;
5057 			case 0x40:
5058 				fct = MPT_CLEAR_ACA;
5059 				break;
5060 			case 0x80:
5061 				fct = MPT_TERMINATE_TASK;
5062 				break;
5063 			default:
5064 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
5065 				    fc->FcpCntl[2]);
5066 				mpt_scsi_tgt_status(mpt, 0, req,
5067 				    SCSI_STATUS_OK, 0);
5068 				return;
5069 			}
5070 		} else {
5071 			switch (fc->FcpCntl[1]) {
5072 			case 0:
5073 				tag_action = MSG_SIMPLE_Q_TAG;
5074 				break;
5075 			case 1:
5076 				tag_action = MSG_HEAD_OF_Q_TAG;
5077 				break;
5078 			case 2:
5079 				tag_action = MSG_ORDERED_Q_TAG;
5080 				break;
5081 			default:
5082 				/*
5083 				 * Bah. Ignore Untagged Queuing and ACA
5084 				 */
5085 				tag_action = MSG_SIMPLE_Q_TAG;
5086 				break;
5087 			}
5088 		}
5089 		tgt->resid = be32toh(fc->FcpDl);
5090 		cdbp = fc->FcpCdb;
5091 		lunptr = fc->FcpLun;
5092 		itag = be16toh(fc->OptionalOxid);
5093 	} else if (mpt->is_sas) {
5094 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5095 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5096 		cdbp = ssp->CDB;
5097 		lunptr = ssp->LogicalUnitNumber;
5098 		itag = ssp->InitiatorTag;
5099 	} else {
5100 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5101 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5102 		cdbp = sp->CDB;
5103 		lunptr = sp->LogicalUnitNumber;
5104 		itag = sp->Tag;
5105 	}
5106 
5107 	/*
5108 	 * Generate a simple lun (peripheral or flat space addressing only)
5109 	 */
5110 	switch (lunptr[0] & 0xc0) {
5111 	case 0x40:
5112 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5113 		break;
5114 	case 0:
5115 		lun = lunptr[1];
5116 		break;
5117 	default:
5118 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n");
5119 		lun = 0xffff;
5120 		break;
5121 	}
5122 
5123 	/*
5124 	 * Deal with non-enabled or bad luns here.
5125 	 */
5126 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5127 	    mpt->trt[lun].enabled == 0) {
5128 		if (mpt->twildcard) {
5129 			trtp = &mpt->trt_wildcard;
5130 		} else if (fct == MPT_NIL_TMT_VALUE) {
5131 			/*
5132 			 * In this case, we haven't got an upstream listener
5133 			 * for either a specific lun or wildcard luns. We
5134 			 * have to make some sensible response. For regular
5135 			 * inquiry, just return some NOT HERE inquiry data.
5136 			 * For VPD inquiry, report illegal field in cdb.
5137 			 * For REQUEST SENSE, just return NO SENSE data.
5138 			 * REPORT LUNS gets illegal command.
5139 			 * All other commands get 'no such device'.
5140 			 */
5141 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5142 			size_t len;
5143 
5144 			memset(buf, 0, MPT_SENSE_SIZE);
5145 			cond = SCSI_STATUS_CHECK_COND;
5146 			buf[0] = 0xf0;
5147 			buf[2] = 0x5;
5148 			buf[7] = 0x8;
5149 			sp = buf;
5150 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5151 
5152 			switch (cdbp[0]) {
5153 			case INQUIRY:
5154 			{
5155 				if (cdbp[1] != 0) {
5156 					buf[12] = 0x26;
5157 					buf[13] = 0x01;
5158 					break;
5159 				}
5160 				len = min(tgt->resid, cdbp[4]);
5161 				len = min(len, sizeof (null_iqd));
5162 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5163 				    "local inquiry %ld bytes\n", (long) len);
5164 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5165 				    null_iqd, len);
5166 				return;
5167 			}
5168 			case REQUEST_SENSE:
5169 			{
5170 				buf[2] = 0x0;
5171 				len = min(tgt->resid, cdbp[4]);
5172 				len = min(len, sizeof (buf));
5173 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5174 				    "local reqsense %ld bytes\n", (long) len);
5175 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5176 				    buf, len);
5177 				return;
5178 			}
5179 			case REPORT_LUNS:
5180 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5181 				buf[12] = 0x26;
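				/*
				 * XXX: returns without ever sending the
				 * prepared CHECK CONDITION back.
				 */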
5182 				return;
5183 			default:
5184 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5185 				    "CMD 0x%x to unmanaged lun %u\n",
5186 				    cdbp[0], lun);
5187 				buf[12] = 0x25;
5188 				break;
5189 			}
5190 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5191 			return;
5192 		}
5193 		/* otherwise, leave trtp NULL */
5194 	} else {
5195 		trtp = &mpt->trt[lun];
5196 	}
5197 
5198 	/*
5199 	 * Deal with any task management
5200 	 */
5201 	if (fct != MPT_NIL_TMT_VALUE) {
5202 		if (trtp == NULL) {
5203 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5204 			    fct);
5205 			mpt_scsi_tgt_status(mpt, 0, req,
5206 			    SCSI_STATUS_OK, 0);
5207 		} else {
5208 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5209 			    GET_INITIATOR_INDEX(reply_desc));
5210 		}
5211 		return;
5212 	}
5213 
5214 
5215 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5216 	if (atiop == NULL) {
5217 		mpt_lprt(mpt, MPT_PRT_WARN,
5218 		    "no ATIOs for lun %u - sending back %s\n", lun,
5219 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5220 		mpt_scsi_tgt_status(mpt, NULL, req,
5221 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5222 		    NULL);
5223 		return;
5224 	}
5225 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5226 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5227 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
5228 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5229 	atiop->ccb_h.status = CAM_CDB_RECVD;
5230 	atiop->ccb_h.target_lun = lun;
5231 	atiop->sense_len = 0;
5232 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;
		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1))? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	xpt_done((union ccb *)atiop);
}

static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}

static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * This is a turbo (context only) reply; all we have to
		 * go on is the state of the command that we stashed in
		 * the request.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch (tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
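			/*
			 * A null ccb is only legal for a command the
			 * driver generated on its own behalf (the local
			 * lun emulation above); just free the request
			 * and run the status phase.
			 */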
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense).
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
				tgt->ccb = NULL;
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

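	/*
	 * A full reply frame was posted; decode the IOC status and
	 * dispatch on the function that completed.
	 */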
	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
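		/*
		 * The command buffer is now posted to the IOC; give the
		 * request a serial number and mark it ready for a new
		 * command to arrive.
		 */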
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
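		/*
		 * cc is the IoIndex taken from the reply word of the
		 * original command that our abort request named.
		 */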
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}
