/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.77 2011/04/22 09:59:16 marius Exp $
 */

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#include <sys/sysctl.h>
#include <sys/callout.h>
#include <sys/kthread.h>

#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE 1
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;
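/*
 * CAM support is packaged as an mpt "personality": the core driver invokes
 * each of the hooks declared above (probe, attach, enable, ready, event,
 * reset, detach) at the matching point in the controller's lifecycle.  The
 * table below just binds those hooks together; the dispatch itself lives in
 * the core mpt code.
 */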
static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}
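	/*
	 * A note on the registrations above: each reply handler is assigned
	 * an id (scsi_io_handler_id, scsi_tmf_handler_id, ...) that requests
	 * later fold into their MsgContext (see mptsas_set_sata_wc() below,
	 * which sets MsgContext = req->index | sata_pass_handler_id), so the
	 * interrupt path can route a reply frame back to the code that
	 * issued the request.  This is inferred from how the ids are used in
	 * this file.
	 */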
	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}
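	/*
	 * Note that the physical disk SIM created below shares the devq
	 * allocated above with the primary SIM; both buses draw from the
	 * same device queue.
	 */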
	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	MPT_UNLOCK(mpt);
	{
		ksnprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		ksnprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
		    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		    "World Wide Node Name");

		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
		    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		    "World Wide Port Name");
	}
	MPT_LOCK(mpt);
	return (0);
}
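/*
 * The wwnn/wwpn sysctls registered above export the port's World Wide
 * Names as read-only strings under this adapter's sysctl tree.  Assuming
 * the tree is rooted at the device (e.g. something like "dev.mpt.0"),
 * "sysctl dev.mpt.0.wwpn" would be the expected way to read them; treat
 * the exact OID path as an assumption here, since it depends on how
 * mpt_sysctl_tree was created at attach time.
 */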
/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}
static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}
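/*
 * The three page readers above share one addressing convention: the
 * PageAddress argument of mpt_read_extcfg_page() is a FORM selector in the
 * high bits plus a form-specific value in the low bits, which is why each
 * helper takes "form" and "form_specific" separately and simply adds them.
 * For example, the callers below fetch SAS Device Page 0 by handle with
 *
 *	(MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT)
 *	    + handle
 */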
/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	/*
	 * Build an ATA SET FEATURES command as a host-to-device register
	 * FIS: byte 0 is the FIS type (0x27), bit 7 of byte 1 marks a
	 * command register update, byte 2 is the ATA command (0xef, SET
	 * FEATURES) and byte 3 the features subcommand (0x02 enables the
	 * write cache, 0x82 disables it).  Bytes 7 and 15 are the device
	 * and control register images.
	 */
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}
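/*
 * mpt_enable_sata_wc is a three-state knob: -1 (the default) leaves the
 * disk's write cache alone, 0 forces it off and 1 forces it on.  Given
 * the TUNABLE_INT() registration near the top of this file, setting
 *
 *	hw.mpt.enable_sata_wc=1
 *
 * in loader.conf would be the expected way to flip it at boot.
 */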
"En" : "Dis", i); 865 mptsas_set_sata_wc(mpt, &phyinfo->attached, 866 mpt_enable_sata_wc); 867 } 868 } 869 870 return (0); 871 } 872 873 static int 874 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 875 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 876 { 877 if (req != NULL) { 878 879 if (reply_frame != NULL) { 880 req->IOCStatus = le16toh(reply_frame->IOCStatus); 881 } 882 req->state &= ~REQ_STATE_QUEUED; 883 req->state |= REQ_STATE_DONE; 884 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 885 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 886 wakeup(req); 887 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 888 /* 889 * Whew- we can free this request (late completion) 890 */ 891 mpt_free_request(mpt, req); 892 } 893 } 894 895 return (TRUE); 896 } 897 898 /* 899 * Read SCSI configuration information 900 */ 901 static int 902 mpt_read_config_info_spi(struct mpt_softc *mpt) 903 { 904 int rv, i; 905 906 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 907 &mpt->mpt_port_page0.Header, FALSE, 5000); 908 if (rv) { 909 return (-1); 910 } 911 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 912 mpt->mpt_port_page0.Header.PageVersion, 913 mpt->mpt_port_page0.Header.PageLength, 914 mpt->mpt_port_page0.Header.PageNumber, 915 mpt->mpt_port_page0.Header.PageType); 916 917 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 918 &mpt->mpt_port_page1.Header, FALSE, 5000); 919 if (rv) { 920 return (-1); 921 } 922 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 923 mpt->mpt_port_page1.Header.PageVersion, 924 mpt->mpt_port_page1.Header.PageLength, 925 mpt->mpt_port_page1.Header.PageNumber, 926 mpt->mpt_port_page1.Header.PageType); 927 928 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 929 &mpt->mpt_port_page2.Header, FALSE, 5000); 930 if (rv) { 931 return (-1); 932 } 933 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 934 mpt->mpt_port_page2.Header.PageVersion, 935 mpt->mpt_port_page2.Header.PageLength, 936 mpt->mpt_port_page2.Header.PageNumber, 937 mpt->mpt_port_page2.Header.PageType); 938 939 for (i = 0; i < 16; i++) { 940 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 941 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 942 if (rv) { 943 return (-1); 944 } 945 mpt_lprt(mpt, MPT_PRT_DEBUG, 946 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 947 mpt->mpt_dev_page0[i].Header.PageVersion, 948 mpt->mpt_dev_page0[i].Header.PageLength, 949 mpt->mpt_dev_page0[i].Header.PageNumber, 950 mpt->mpt_dev_page0[i].Header.PageType); 951 952 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 953 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 954 if (rv) { 955 return (-1); 956 } 957 mpt_lprt(mpt, MPT_PRT_DEBUG, 958 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 959 mpt->mpt_dev_page1[i].Header.PageVersion, 960 mpt->mpt_dev_page1[i].Header.PageLength, 961 mpt->mpt_dev_page1[i].Header.PageNumber, 962 mpt->mpt_dev_page1[i].Header.PageType); 963 } 964 965 /* 966 * At this point, we don't *have* to fail. As long as we have 967 * valid config header information, we can (barely) lurch 968 * along. 
/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}
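/*
 * A worked example of the Port Page 1 check below: with an initiator ID
 * of 7, the expected Configuration word is
 *
 *	((1 << 7) << MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | 7
 *
 * i.e. the initiator's bit in the port response-ID mask plus the ID
 * itself in the low byte (0x00800007 with the usual shift of 16).
 */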
/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}
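/*
 * mpt_cam_enable() above is deliberately a two-step sequence for each
 * transport: read the current configuration pages first, then apply our
 * initial settings.  A failure in either step leaves error at EIO and
 * keeps the SIM from going live.
 */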
void
mpt_cam_ready(struct mpt_softc *mpt)
{
	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		kfree(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim);
		mpt->phydisk_sim = NULL;
	}
	MPT_UNLOCK(mpt);
}

/*
 * This routine is used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
}
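/*
 * Overview of the SGL construction done by the two mpt_execute_req
 * variants below (64-bit and 32-bit SGEs): a request frame has room for
 * MPT_NSGL_FIRST() scatter/gather elements after the command.  If the
 * transfer fits, all of them are simple SGEs in the command frame;
 * otherwise the last slot becomes a chain SGE pointing at the next frame,
 * and so on:
 *
 *	cmd frame:  [hdr | SGE0 ... | CHAIN] --> [SGE ... | CHAIN] --> ...
 *
 * The final simple SGE carries the END_OF_LIST/END_OF_BUFFER flags.
 */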
/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}
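	/*
	 * The debug-only memset below poisons the unused tail of the
	 * request frame with 0xff, presumably so that anything past the
	 * final SGE is easy to recognize as garbage in a frame dump.
	 */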
	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}
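	/*
	 * Note that the PREREAD/PREWRITE choice above inverts for target
	 * mode: a CAM_DIR_IN CCB moves data out of host memory when we are
	 * acting as the target, so the map is synced as if for a write,
	 * and vice versa.
	 */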
	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High =
			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}
			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

/*
 * The 32-bit SGE variant of mpt_execute_req_a64() above, for IOCs that
 * do not use 64-bit addressing; apart from the SGE_SIMPLE32/SGE_CHAIN32
 * element types the structure is the same.
 */
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}
	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = htole32(dm_segs->ds_addr);
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

 next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}
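
/*
 * Illustrative sketch (not compiled into the driver): how the SGL code
 * above packs a simple 32-bit SGE.  Per the MPI SGE layout, FlagsLength
 * carries the element flags in bits 31:24 and the byte count in bits
 * 23:0, which is what the MPI_pSGE_SET_FLAGS/MPI_pSGE_SET_LENGTH macros
 * accumulate before the final htole32() swap.  The helper name below is
 * hypothetical.
 */
#if 0
static uint32_t
example_sge32_flagslength(uint32_t flags, uint32_t len)
{
	/* flags occupy the top byte; the length is limited to 24 bits */
	return (htole32((flags << MPI_SGE_FLAGS_SHIFT) |
	    (len & MPI_SGE_LENGTH_MASK)));
}
#endif
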
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	CAMLOCK_2_MPTLOCK(mpt);
	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		return;
	}
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
	MPTLOCK_2_CAMLOCK(mpt);

	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
	mpt_req->SenseBufferLength =
	    (csio->sense_len < MPT_SENSE_SIZE) ?
	    csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when we
	 * get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = tgt;

	/* We assume a single level LUN type */
	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
	} else {
		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
	}

	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	} else {
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
	}

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc || mpt->is_sas) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		} else {
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
		}
	}

	if (mpt->is_spi) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}
	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	} else {
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
	}

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = htole32(csio->dxfer_len);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
	 */
	if (mpt->verbose == MPT_PRT_DEBUG) {
		U32 df;
		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
			mpt_prtc(mpt, "(%s %u byte%s ",
			    (df == MPI_SCSIIO_CONTROL_READ)?
			    "read" : "write", csio->dxfer_len,
			    (csio->dxfer_len == 1)? ")" : "s)");
		}
		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
		    ccb->ccb_h.target_lun, req, req->serno);
	}

	/*
	 * If we have any data to send with this command map it into bus space.
	 */
	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
			/*
			 * We've been given a pointer to a single buffer.
			 */
			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
				/*
				 * Virtual address that needs to be translated
				 * into one or more physical address ranges.
				 */
				int error;
				crit_enter();
				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				crit_exit();
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(mpt->sim, 1);
					ccbh->status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but they are not
			 * currently generated by the CAM subsystem so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *segs;
			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, segs, csio->sglist_cnt, 0);
			}
		}
	} else {
		(*cb)(req, NULL, 0, 0);
	}
}

static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
    int sleep_ok)
{
	int error;
	uint16_t status;
	uint8_t response;

	error = mpt_scsi_send_tmf(mpt,
	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    0,	/* XXX How do I get the channel ID? */
	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
	    lun != CAM_LUN_WILDCARD ? lun : 0,
	    0, sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, 5000);

	status = le16toh(mpt->tmf_req->IOCStatus);
	response = mpt->tmf_req->ResponseCode;
	mpt->tmf_req->state = REQ_STATE_FREE;

	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
		    "Resetting controller.\n");
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
		    "Resetting controller.\n", status);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}

	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
		    "Resetting controller.\n", response);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}
	return (0);
}

static int
mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
{
	int r = 0;
	request_t *req;
	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	fc = req->req_vbuf;
	memset(fc, 0, sizeof(*fc));
	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
	fc->MsgContext = htole32(req->index | fc_els_handler_id);
	mpt_send_cmd(mpt, req);
	if (dowait) {
		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
		    REQ_STATE_DONE, FALSE, 60 * 1000);
		if (r == 0) {
			mpt_free_request(mpt, req);
		}
	}
	return (r);
}

static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	kfree(ccb, M_TEMP);
}

static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{
	uint32_t data0, data1;

	data0 = le32toh(msg->Data[0]);
	data1 = le32toh(msg->Data[1]);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
		    (data0 >> 8) & 0xff, data0 & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset */
		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
		    (data0 >> 8) & 0xff);
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset */
		mpt_prt(mpt, "External Bus Reset Detected\n");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do
		 */
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_RESCAN:
	{
		union ccb *ccb;
		uint32_t pathid;
		/*
		 * In general this means a device has been added to the loop.
		 */
		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
		if (mpt->ready == 0) {
			break;
		}
		if (mpt->phydisk_sim) {
			pathid = cam_sim_path(mpt->phydisk_sim);
		} else {
			pathid = cam_sim_path(mpt->sim);
		}
		MPTLOCK_2_CAMLOCK(mpt);
		/*
		 * Allocate a CCB, create a wildcard path for this bus,
		 * and schedule a rescan.
		 */
		ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);

		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			CAMLOCK_2_MPTLOCK(mpt);
			mpt_prt(mpt, "unable to create path for rescan\n");
			kfree(ccb, M_TEMP);
			break;
		}

		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
		ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
		ccb->crcn.flags = CAM_FLAG_NONE;
		xpt_action(ccb);

		/* scan is now in progress */

		CAMLOCK_2_MPTLOCK(mpt);
		break;
	}
	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: LinkState: %s\n",
		    (data1 >> 8) & 0xff,
		    ((data0 & 0xff) == 0)? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((data0 >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
			    "(Loop Initialization)\n",
			    (data1 >> 8) & 0xff,
			    (data0 >> 8) & 0xff,
			    (data0     ) & 0xff);
			switch ((data0 >> 8) & 0xff) {
			case 0xF7:
				if ((data0 & 0xff) == 0xF7) {
					mpt_prt(mpt, "Device needs AL_PA\n");
				} else {
					mpt_prt(mpt, "Device %02x doesn't like "
					    "FC performance\n",
					    data0 & 0xFF);
				}
				break;
			case 0xF8:
				if ((data0 & 0xff) == 0xF7) {
					mpt_prt(mpt, "Device had loop failure "
					    "at its receiver prior to acquiring"
					    " AL_PA\n");
				} else {
					mpt_prt(mpt, "Device %02x detected loop"
					    " failure at its receiver\n",
					    data0 & 0xFF);
				}
				break;
			default:
				mpt_prt(mpt, "Device %02x requests that device "
				    "%02x reset itself\n",
				    data0 & 0xFF,
				    (data0 >> 8) & 0xFF);
				break;
			}
			break;
		case 0x02:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPE(%02x,%02x) (Loop Port Enable)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		case 0x03:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		default:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
			    "FC event (%02x %02x %02x)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 16) & 0xff, /* Event */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
		    (data1 >> 8) & 0xff, data0);
		break;
	case MPI_EVENT_QUEUE_FULL:
	{
		struct cam_sim *sim;
		struct cam_path *tmppath;
		struct ccb_relsim crs;
		PTR_EVENT_DATA_QUEUE_FULL pqf;
		lun_id_t lun_id;

		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
		if (mpt->phydisk_sim) {
			sim = mpt->phydisk_sim;
		} else {
			sim = mpt->sim;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
				mpt_prt(mpt, "unable to create a path to send "
				    "XPT_REL_SIMQ");
				/*
				 * Just break out; the single lock conversion
				 * after the loop restores the MPT lock.
				 */
				break;
			}
			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
			crs.ccb_h.func_code = XPT_REL_SIMQ;
			crs.ccb_h.flags = CAM_DEV_QFREEZE;
			crs.release_flags = RELSIM_ADJUST_OPENINGS;
			crs.openings = pqf->CurrentDepth - 1;
			xpt_action((union ccb *)&crs);
			if (crs.ccb_h.status != CAM_REQ_CMP) {
				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
			}
			xpt_free_path(tmppath);
		}
		CAMLOCK_2_MPTLOCK(mpt);
		break;
	}
	case MPI_EVENT_IR_RESYNC_UPDATE:
		mpt_prt(mpt, "IR resync update %d completed\n",
		    (data0 >> 16) & 0xff);
		break;
	case MPI_EVENT_EVENT_CHANGE:
	case MPI_EVENT_INTEGRATED_RAID:
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_SES:
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
		    msg->Event & 0xFF);
		return (0);
	}
	return (1);
}
/*
 * Reply path for all SCSI I/O requests, called from our
 * interrupt handler by extracting our handler index from
 * the MsgContext field of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error.  All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
 */
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_IO_REQUEST *scsi_req;
	union ccb *ccb;

	if (req->state == REQ_STATE_FREE) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
		return (TRUE);
	}

	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
	ccb = req->ccb;
	if (ccb == NULL) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
		    req, req->serno);
		return (TRUE);
	}

	mpt_req_untimeout(req, mpt_timeout, ccb);
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}

	if (reply_frame == NULL) {
		/*
		 * Context only reply, completion without error status.
		 */
		ccb->csio.resid = 0;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		ccb->csio.scsi_status = SCSI_STATUS_OK;
	} else {
		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
	}

	if (mpt->outofbeer) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		mpt->outofbeer = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
	}
	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
		struct scsi_inquiry_data *iq =
		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
		if (scsi_req->Function ==
		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			/*
			 * Fake out the device type so that only the
			 * pass-thru device will attach.
			 */
			iq->device &= ~0x1F;
			iq->device |= T_NODEVICE;
		}
	}
	if (mpt->verbose == MPT_PRT_DEBUG) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
		    req, req->serno);
	}
	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done(ccb);
	CAMLOCK_2_MPTLOCK(mpt);
	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	} else {
		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
		    req, req->serno);
		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
	}
	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
	    ("CCB req needed wakeup"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
#endif
	mpt_free_request(mpt, req);
	return (TRUE);
}
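
/*
 * Illustrative sketch (not compiled into the driver): how a reply is
 * demultiplexed back to its handler and request.  Requests are tagged
 * with MsgContext = req->index | <handler id> (see mpt_start() above),
 * so the interrupt path can split the context back apart.  The mask
 * name below is a hypothetical stand-in for the driver's real one.
 */
#if 0
static void
example_demux_context(struct mpt_softc *mpt, uint32_t msg_context)
{
	/* hypothetical layout: low bits index, high bits handler id */
	u_int req_index  = msg_context & EXAMPLE_CONTEXT_REQ_MASK;
	u_int handler_id = msg_context & ~EXAMPLE_CONTEXT_REQ_MASK;
	request_t *req = &mpt->request_pool[req_index];

	(void)handler_id;	/* selects e.g. mpt_scsi_reply_handler */
	(void)req;
}
#endif
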
static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;

	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
#endif
	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
	/* Record IOC Status and Response Code of TMF for any waiters. */
	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
	req->ResponseCode = tmf_reply->ResponseCode;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
	    req, req->serno, le16toh(tmf_reply->IOCStatus));
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		req->state |= REQ_STATE_DONE;
		wakeup(req);
	} else {
		mpt->tmf_req->state = REQ_STATE_FREE;
	}
	return (TRUE);
}

/*
 * XXX: Move to definitions file
 */
#define	ELS	0x22
#define	FC4LS	0x32
#define	ABTS	0x81
#define	BA_ACC	0x84

#define	LS_RJT	0x01
#define	LS_ACC	0x02
#define	PLOGI	0x03
#define	LOGO	0x05
#define	SRR	0x14
#define	PRLI	0x20
#define	PRLO	0x21
#define	ADISC	0x52
#define	RSCN	0x61
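
/*
 * Illustrative sketch (not compiled into the driver): how the ELS reply
 * handler below classifies an inbound frame.  R_CTL and TYPE live in the
 * Rctl_Did and Type_Fctl words of the buffer-post reply, and the ELS
 * command code is the first byte of the (big-endian) payload.
 */
#if 0
static void
example_classify_els(PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U32 *elsbuf)
{
	U8 rctl, type, cmd;

	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
	cmd = be32toh(elsbuf[0]) >> 24;

	/* rctl == ELS && type == 1 -> extended link service (PRLI, PRLO...) */
	/* rctl == ABTS && type == 0 -> abort sequence, answered with BA_ACC */
	(void)cmd;
}
#endif
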
static void
mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
	uint32_t fl;
	MSG_LINK_SERVICE_RSP_REQUEST tmp;
	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;

	/*
	 * We are going to reuse the ELS request to send this response back.
	 */
	rsp = &tmp;
	memset(rsp, 0, sizeof(*rsp));

#ifdef	USE_IMMEDIATE_LINK_DATA
	/*
	 * Apparently the IMMEDIATE stuff doesn't seem to work.
	 */
	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
	rsp->RspLength = length;
	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
	rsp->MsgContext = htole32(req->index | fc_els_handler_id);

	/*
	 * Copy over information from the original reply frame to
	 * its correct place in the response.
	 */
	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);

	/*
	 * And now copy back the temporary area to the original frame.
	 */
	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
	rsp = req->req_vbuf;

#ifdef	USE_IMMEDIATE_LINK_DATA
	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
	{
		PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
		bus_addr_t paddr = req->req_pbuf;
		paddr += MPT_RQSL(mpt);

		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= (length);
		se->FlagsLength = htole32(fl);
		se->Address = htole32((uint32_t) paddr);
	}
#endif

	/*
	 * Send it on...
	 */
	mpt_send_cmd(mpt, req);
}

static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
	U8 rctl;
	U8 type;
	U8 cmd;
	U16 status = le16toh(reply_frame->IOCStatus);
	U32 *elsbuf;
	int ioindex;
	int do_refresh = TRUE;

#ifdef	INVARIANTS
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
	    req, req->serno, rp->Function));
	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	} else {
		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	}
#endif
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
	    req, req->serno, reply_frame, reply_frame->Function);

	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
		    status, reply_frame->Function);
		if (status == MPI_IOCSTATUS_INVALID_STATE) {
			/*
			 * XXX: to get around shutdown issue
			 */
			mpt->disabled = 1;
			return (TRUE);
		}
		return (TRUE);
	}

	/*
	 * If the function is a link service response, we recycle the
	 * response to be a refresh for a new link service request.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
		/* Freddie Uncle Charlie Katie */
		/* We don't get the IOINDEX as part of the Link Svc Rsp */
		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
			if (mpt->els_cmd_ptrs[ioindex] == req) {
				break;
			}

		KASSERT(ioindex < mpt->els_cmds_allocated,
		    ("can't find my mommie!"));

		/* remove from active list as we're going to re-post it */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
		return (TRUE);
	}

	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		if (req->state & REQ_STATE_TIMEDOUT) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Completed After Timeout\n");
			mpt_free_request(mpt, req);
		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Async Primitive Send Complete\n");
			mpt_free_request(mpt, req);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Complete- Waking Waiter\n");
			wakeup(req);
		}
		return (TRUE);
	}

	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
		    rp->MsgLength, rp->MsgFlags);
		return (TRUE);
	}

	if (rp->MsgLength <= 5) {
		/*
		 * This is just an ack of an original ELS buffer post
		 */
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
		return (TRUE);
	}

	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;

	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
	cmd = be32toh(elsbuf[0]) >> 24;

	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
		return (TRUE);
	}

	ioindex = le32toh(rp->TransactionContext);
	req = mpt->els_cmd_ptrs[ioindex];

	if (rctl == ELS && type == 1) {
		switch (cmd) {
		case PRLI:
			/*
			 * Send back a PRLI ACC
			 */
			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] |= htobe32(0x00000100);
			elsbuf[4] = htobe32(0x00000002);
			if (mpt->role & MPT_ROLE_TARGET)
				elsbuf[4] |= htobe32(0x00000010);
			if (mpt->role & MPT_ROLE_INITIATOR)
				elsbuf[4] |= htobe32(0x00000020);
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		case PRLO:
			memset(elsbuf, 0, 5 * (sizeof (U32)));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] = htobe32(0x08000100);
			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		default:
			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
			break;
		}
	} else if (rctl == ABTS && type == 0) {
		uint16_t rx_id = le16toh(rp->Rxid);
		uint16_t ox_id = le16toh(rp->Oxid);
		request_t *tgt_req = NULL;

		mpt_prt(mpt,
		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
		    le32toh(rp->Wwn.PortNameLow));
		if (rx_id >= mpt->mpt_max_tgtcmds) {
			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
		} else if (mpt->tgt_cmd_ptrs == NULL) {
			mpt_prt(mpt, "No TGT CMD PTRS\n");
		} else {
			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
		}
		if (tgt_req) {
			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
			union ccb *ccb;
			uint32_t ct_id;

			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that matches the RX_ID.
			 *
			 * It'd be nice to have OX_ID to crosscheck with
			 * as well.
			 */
			ct_id = GET_IO_INDEX(tgt->reply_desc);

			if (ct_id != rx_id) {
				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
				    rx_id, ct_id);
				goto skip;
			}

			ccb = tgt->ccb;
			if (ccb) {
				mpt_prt(mpt,
				    "CCB (%p): lun %u flags %x status %x\n",
				    ccb, ccb->ccb_h.target_lun,
				    ccb->ccb_h.flags, ccb->ccb_h.status);
			}
			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
			    "%x nxfers %x\n", tgt->state,
			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
			    tgt->nxfers);
 skip:
			if (mpt_abort_target_cmd(mpt, tgt_req)) {
				mpt_prt(mpt, "unable to start TargetAbort\n");
			}
		} else {
			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
		}
		memset(elsbuf, 0, 5 * (sizeof (U32)));
		elsbuf[0] = htobe32(0);
		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x000ffff);
		/*
		 * Dork with the reply frame so that the response to it
		 * will be correct.
		 */
		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_els_send_response(mpt, req, rp, 12);
		do_refresh = FALSE;
	} else {
		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
	}
	if (do_refresh == TRUE) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
	}
	return (TRUE);
}
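
/*
 * Worked example of the "dork with the reply frame" fixup above: R_CTL
 * sits at MPI_FC_RCTL_SHIFT inside Rctl_Did, so adding the difference
 * (BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT rewrites the field in place from
 * ABTS (0x81) to BA_ACC (0x84) without disturbing the D_ID bits, and the
 * recycled frame then describes the accept we are about to send.  An
 * equivalent mask-and-replace form (sketch only, not compiled in):
 */
#if 0
static U32
example_abts_to_ba_acc(U32 rctl_did)
{
	return ((rctl_did & ~MPI_FC_RCTL_MASK) |
	    ((U32)BA_ACC << MPI_FC_RCTL_SHIFT));
}
#endif
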
/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * The pending list is already run down by
	 * the generic handler.  Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
	    MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * XXX: We need to repost ELS and Target Command Buffers?
	 */

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}

/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
 */
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	MSG_SCSI_IO_REPLY *scsi_io_reply;
	u_int ioc_status;
	u_int sstate;

	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
	    || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
	    ("MPT SCSI I/O Handler called with incorrect reply type"));
	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
	    ("MPT SCSI I/O Handler called with continuation reply"));

	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
	ioc_status = le16toh(scsi_io_reply->IOCStatus);
	ioc_status &= MPI_IOCSTATUS_MASK;
	sstate = scsi_io_reply->SCSIState;

	ccb = req->ccb;
	ccb->csio.resid =
	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
	    && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		ccb->csio.sense_resid =
		    ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
		    min(ccb->csio.sense_len,
		    le32toh(scsi_io_reply->SenseCount)));
	}

	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
		XXXX
		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		*/
	}

	switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * XXX
		 * Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
			/*
			 * Status was never returned for this transaction.
			 */
			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;
	case MPI_IOCSTATUS_INVALID_STATE:
		/*
		 * The IOC has been reset.  Emulate a bus reset.
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/*
		 * XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		mpt_freeze_ccb(ccb);
	}

	return (TRUE);
}
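
/*
 * Design note: the IOCStatus switch above could equally be expressed as
 * a table-driven lookup, which keeps the mapping data separate from the
 * control flow.  A minimal sketch of that alternative (not the driver's
 * actual approach, and not compiled in):
 */
#if 0
static const struct {
	u_int ioc_status;
	cam_status cam_status;
} example_status_map[] = {
	{ MPI_IOCSTATUS_SCSI_DATA_OVERRUN,	CAM_DATA_RUN_ERR },
	{ MPI_IOCSTATUS_SCSI_IO_DATA_ERROR,	CAM_UNCOR_PARITY },
	{ MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE,	CAM_SEL_TIMEOUT },
	{ MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR,	CAM_SEQUENCE_FAIL },
	{ MPI_IOCSTATUS_SCSI_INVALID_BUS,	CAM_PATH_INVALID },
	{ MPI_IOCSTATUS_SCSI_INVALID_TARGETID,	CAM_TID_INVALID },
	{ MPI_IOCSTATUS_INSUFFICIENT_RESOURCES,	CAM_RESRC_UNAVAIL },
	{ MPI_IOCSTATUS_BUSY,			CAM_BUSY },
};
#endif
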
static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpt_softc *mpt;
	struct ccb_trans_settings *cts;
	target_id_t tgt;
	lun_id_t lun;
	int raid_passthru;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	raid_passthru = (sim == mpt->phydisk_sim);
	MPT_LOCK_ASSERT(mpt);

	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	if (raid_passthru &&
	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
	}
	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32byte message type */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
#ifdef	MPT_TEST_MULTIPATH
		if (mpt->failure_id == ccb->ccb_h.target_id) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
			break;
		}
#endif
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(sim, ccb);
		return;

	case XPT_RESET_BUS:
		if (raid_passthru) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
		/* FALLTHROUGH */
	case XPT_RESET_DEV:
		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "reset bus\n");
			}
		} else {
			xpt_print(ccb->ccb_h.path, "reset device\n");
		}
		CAMLOCK_2_MPTLOCK(mpt);
		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
		MPTLOCK_2_CAMLOCK(mpt);

		/*
		 * mpt_bus_reset is always successful in that it
		 * will fall back to a hard reset should a bus
		 * reset attempt fail.
		 */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;

	case XPT_ABORT:
	{
		union ccb *accb = ccb->cab.abort_ccb;
		CAMLOCK_2_MPTLOCK(mpt);
		switch (accb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		case XPT_SCSI_IO:
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		break;
	}

#ifdef	CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
	{
#ifdef	CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
#endif
		uint8_t dval;
		u_int period;
		u_int offset;
		int i, j;

		cts = &ccb->cts;

		if (mpt->is_fc || mpt->is_sas) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

#ifdef	CAM_NEW_TRAN_CODE
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		/*
		 * We can be called just to validate transport and protocol
		 * versions.
		 */
		if (scsi->valid == 0 && spi->valid == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
#endif

		/*
		 * Skip attempting settings on RAID volume disks.
		 * Other devices on the bus get the normal treatment.
		 */
		if (mpt->phydisk_sim && raid_passthru == 0 &&
		    mpt_is_raid_volume(mpt, tgt) != 0) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    "no transfer settings for RAID vols\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		i = mpt->mpt_port_page2.PortSettings &
		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
		j = mpt->mpt_port_page2.PortFlags &
		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
			mpt_lprt(mpt, MPT_PRT_ALWAYS,
			    "honoring BIOS transfer negotiations\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		dval = 0;
		period = 0;
		offset = 0;

#ifndef	CAM_NEW_TRAN_CODE
		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
			    DP_DISC_ENABLE : DP_DISC_DISABL;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
			    DP_TQING_ENABLE : DP_TQING_DISABL;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
			dval |= DP_SYNC;
			period = cts->sync_period;
			offset = cts->sync_offset;
		}
#else
		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
			    DP_DISC_ENABLE : DP_DISC_DISABL;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
			    DP_TQING_ENABLE : DP_TQING_DISABL;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
			    DP_WIDE : DP_NARROW;
		}

		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
			dval |= DP_SYNC;
			offset = spi->sync_offset;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			offset = ptr->RequestedParameters;
			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
		}
		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
			dval |= DP_SYNC;
			period = spi->sync_period;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			period = ptr->RequestedParameters;
			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
		}
#endif
		CAMLOCK_2_MPTLOCK(mpt);
		if (dval & DP_DISC_ENABLE) {
			mpt->mpt_disc_enable |= (1 << tgt);
		} else if (dval & DP_DISC_DISABL) {
			mpt->mpt_disc_enable &= ~(1 << tgt);
		}
		if (dval & DP_TQING_ENABLE) {
			mpt->mpt_tag_enable |= (1 << tgt);
		} else if (dval & DP_TQING_DISABL) {
			mpt->mpt_tag_enable &= ~(1 << tgt);
		}
		if (dval & DP_WIDTH) {
			mpt_setwidth(mpt, tgt, 1);
		}
		if (dval & DP_SYNC) {
			mpt_setsync(mpt, tgt, period, offset);
		}
		if (dval == 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "set [%d]: 0x%x period 0x%x offset %d\n",
		    tgt, dval, period, offset);
		if (mpt_update_spi_config(mpt, tgt)) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		}
		MPTLOCK_2_CAMLOCK(mpt);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
#ifdef	CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi;
		cts = &ccb->cts;
		cts->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;
			cts->protocol_version = SCSI_REV_SPC;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;
			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;
		} else if (mpt->is_sas) {
			struct ccb_trans_settings_sas *sas =
			    &cts->xport_specific.sas;
			cts->protocol_version = SCSI_REV_SPC2;
			cts->transport = XPORT_SAS;
			cts->transport_version = 0;
			sas->valid = CTS_SAS_VALID_SPEED;
			sas->bitrate = 300000;
		} else {
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;
			if (mpt_get_spi_settings(mpt, cts) != 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
				break;
			}
		}
		scsi = &cts->proto_specific.scsi;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
#else
		cts = &ccb->cts;
		if (mpt->is_fc) {
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else if (mpt->is_sas) {
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
			break;
		}
#endif
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &=
			    ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
		mpt_calc_geometry(ccg, /*extended*/1);
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
#if 0	/* XXX swildner */
		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
#endif
		/*
		 * FC cards report MAX_DEVICES of 512, but
		 * the MSG_SCSI_IO_REQUEST target id field
		 * is only 8 bits.  Until we fix the driver
		 * to support 'channels' for bus overflow,
		 * just limit it.
		 */
		if (cpi->max_target > 255) {
			cpi->max_target = 255;
		}

		/*
		 * VMware ESX reports > 16 devices and then dies when we probe.
		 */
		if (mpt->is_spi && cpi->max_target > 15) {
			cpi->max_target = 15;
		}
		if (mpt->is_spi)
			cpi->max_lun = 7;
		else
			cpi->max_lun = MPT_MAX_LUNS;
		cpi->initiator_id = mpt->mpt_ini_id;
		cpi->bus_id = cam_sim_bus(sim);

		/*
		 * The base speed is the speed of the underlying connection.
		 */
#ifdef	CAM_NEW_TRAN_CODE
		cpi->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_FC;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}
#else
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		}
#endif

		/*
		 * We give our fake RAID passthru bus a width that is
		 * MaxPhysDisks wide and restrict it to one lun.
		 */
		if (raid_passthru) {
			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->max_lun = 0;
		}

		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
			cpi->hba_misc |= PIM_NOINITIATOR;
		}
		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
			cpi->target_sprt =
			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
	{
		int result;

		CAMLOCK_2_MPTLOCK(mpt);
		if (ccb->cel.enable)
			result = mpt_enable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		else
			result = mpt_disable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		MPTLOCK_2_CAMLOCK(mpt);
		if (result == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		}
		break;
	}
	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tgt_resource_t *trtp;
		lun_id_t lun = ccb->ccb_h.target_lun;
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
		ccb->ccb_h.flags = 0;

		if (lun == CAM_LUN_WILDCARD) {
			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
			trtp = &mpt->trt_wildcard;
		} else if (lun >= MPT_MAX_LUNS) {
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		} else {
			trtp = &mpt->trt[lun];
		}
		CAMLOCK_2_MPTLOCK(mpt);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE ATIO %p lun %d\n", ccb, lun);
			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
			    sim_links.stqe);
		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE INOT lun %d\n", lun);
			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
			    sim_links.stqe);
		} else {
			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
		}
		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}
	case XPT_CONT_TARGET_IO:
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_target_start_io(mpt, ccb);
		MPTLOCK_2_CAMLOCK(mpt);
		return;

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
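
/*
 * Illustrative sketch (not compiled into the driver): the DP_* values
 * defined inside mpt_action() above form a small delta set.  Each
 * negotiable parameter gets an explicit enable and disable bit so that
 * "not mentioned" (neither bit set) can be told apart from "turn off".
 */
#if 0
static void
example_apply_deltas(struct mpt_softc *mpt, int tgt, uint8_t dval)
{
	if (dval & DP_DISC_ENABLE)
		mpt->mpt_disc_enable |= (1 << tgt);
	else if (dval & DP_DISC_DISABL)
		mpt->mpt_disc_enable &= ~(1 << tgt);
	/* dval with neither DP_DISC bit leaves disconnect policy alone */
}
#endif
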
XXX. 3769 * 3770 * For user settings, we pick the max from port page 0 3771 * 3772 * For current settings we read the current settings out from 3773 * device page 0 for that target. 3774 */ 3775 if (IS_CURRENT_SETTINGS(cts)) { 3776 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3777 dval = 0; 3778 3779 CAMLOCK_2_MPTLOCK(mpt); 3780 tmp = mpt->mpt_dev_page0[tgt]; 3781 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3782 sizeof(tmp), FALSE, 5000); 3783 if (rv) { 3784 MPTLOCK_2_CAMLOCK(mpt); 3785 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3786 return (rv); 3787 } 3788 mpt2host_config_page_scsi_device_0(&tmp); 3789 3790 MPTLOCK_2_CAMLOCK(mpt); 3791 mpt_lprt(mpt, MPT_PRT_DEBUG, 3792 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3793 tmp.NegotiatedParameters, tmp.Information); 3794 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3795 DP_WIDE : DP_NARROW; 3796 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3797 DP_DISC_ENABLE : DP_DISC_DISABL; 3798 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3799 DP_TQING_ENABLE : DP_TQING_DISABL; 3800 oval = tmp.NegotiatedParameters; 3801 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3802 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3803 pval = tmp.NegotiatedParameters; 3804 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3805 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3806 mpt->mpt_dev_page0[tgt] = tmp; 3807 } else { 3808 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3809 oval = mpt->mpt_port_page0.Capabilities; 3810 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3811 pval = mpt->mpt_port_page0.Capabilities; 3812 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3813 } 3814 3815 #ifndef CAM_NEW_TRAN_CODE 3816 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3817 cts->valid = 0; 3818 cts->sync_period = pval; 3819 cts->sync_offset = oval; 3820 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3821 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3822 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3823 if (dval & DP_WIDE) { 3824 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3825 } else { 3826 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3827 } 3828 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3829 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3830 if (dval & DP_DISC_ENABLE) { 3831 cts->flags |= CCB_TRANS_DISC_ENB; 3832 } 3833 if (dval & DP_TQING_ENABLE) { 3834 cts->flags |= CCB_TRANS_TAG_ENB; 3835 } 3836 } 3837 #else 3838 spi->valid = 0; 3839 scsi->valid = 0; 3840 spi->flags = 0; 3841 scsi->flags = 0; 3842 spi->sync_offset = oval; 3843 spi->sync_period = pval; 3844 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3845 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3846 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3847 if (dval & DP_WIDE) { 3848 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3849 } else { 3850 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3851 } 3852 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3853 scsi->valid = CTS_SCSI_VALID_TQ; 3854 if (dval & DP_TQING_ENABLE) { 3855 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3856 } 3857 spi->valid |= CTS_SPI_VALID_DISC; 3858 if (dval & DP_DISC_ENABLE) { 3859 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3860 } 3861 } 3862 #endif 3863 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3864 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3865 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); 3866 return (0); 3867 } 3868 3869 static void 3870 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3871 { 3872 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3873 3874 ptr = &mpt->mpt_dev_page1[tgt]; 3875 if (onoff) { 3876 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3877 } else { 3878 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3879 } 3880 } 3881 3882 static void 3883 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3884 { 3885 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3886 3887 ptr = &mpt->mpt_dev_page1[tgt]; 3888 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3889 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3890 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3891 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3892 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3893 if (period == 0) { 3894 return; 3895 } 3896 ptr->RequestedParameters |= 3897 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3898 ptr->RequestedParameters |= 3899 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3900 if (period < 0xa) { 3901 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3902 } 3903 if (period < 0x9) { 3904 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3905 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3906 } 3907 } 3908 3909 static int 3910 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3911 { 3912 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3913 int rv; 3914 3915 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3916 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3917 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3918 tmp = mpt->mpt_dev_page1[tgt]; 3919 host2mpt_config_page_scsi_device_1(&tmp); 3920 rv = mpt_write_cur_cfg_page(mpt, tgt, 3921 &tmp.Header, sizeof(tmp), FALSE, 5000); 3922 if (rv) { 3923 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3924 return (-1); 3925 } 3926 return (0); 3927 } 3928 3929 static void 3930 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3931 { 3932 cam_calc_geometry(ccg, extended); 3933 uint32_t size_mb; 3934 uint32_t secs_per_cylinder; 3935 3936 if (ccg->block_size == 0) { 3937 ccg->ccb_h.status = CAM_REQ_INVALID; 3938 return; 3939 } 3940 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3941 if (size_mb > 1024 && extended) { 3942 ccg->heads = 255; 3943 ccg->secs_per_track = 63; 3944 } else { 3945 ccg->heads = 64; 3946 ccg->secs_per_track = 32; 3947 } 3948 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3949 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3950 ccg->ccb_h.status = CAM_REQ_CMP; 3951 } 3952 3953 /****************************** Timeout Recovery ******************************/ 3954 static int 3955 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3956 { 3957 int error; 3958 3959 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3960 &mpt->recovery_thread, /*flags*/0, 3961 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3962 return (error); 3963 } 3964 3965 static void 3966 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3967 { 3968 if (mpt->recovery_thread == NULL) { 3969 return; 3970 } 3971 mpt->shutdwn_recovery = 1; 3972 wakeup(mpt); 3973 /* 3974 * Sleep on a slightly different location 3975 * for this interlock just for added safety. 
3976 	 */
3977 	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
3978 }
3979 
3980 static void
3981 mpt_recovery_thread(void *arg)
3982 {
3983 	struct mpt_softc *mpt;
3984 
3985 	mpt = (struct mpt_softc *)arg;
3986 	MPT_LOCK(mpt);
3987 	for (;;) {
3988 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3989 			if (mpt->shutdwn_recovery == 0) {
3990 				mpt_sleep(mpt, mpt, 0, "idle", 0);
3991 			}
3992 		}
3993 		if (mpt->shutdwn_recovery != 0) {
3994 			break;
3995 		}
3996 		mpt_recover_commands(mpt);
3997 	}
3998 	mpt->recovery_thread = NULL;
3999 	wakeup(&mpt->recovery_thread);
4000 	MPT_UNLOCK(mpt);
4001 	mpt_kthread_exit(0);
4002 }
4003 
4004 static int
4005 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4006     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4007 {
4008 	MSG_SCSI_TASK_MGMT *tmf_req;
4009 	int error;
4010 
4011 	/*
4012 	 * Wait for any current TMF request to complete.
4013 	 * We're only allowed to issue one TMF at a time.
4014 	 */
4015 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4016 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
4017 	if (error != 0) {
4018 		mpt_reset(mpt, TRUE);
4019 		return (ETIMEDOUT);
4020 	}
4021 
4022 	mpt_assign_serno(mpt, mpt->tmf_req);
4023 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4024 
4025 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4026 	memset(tmf_req, 0, sizeof(*tmf_req));
4027 	tmf_req->TargetID = target;
4028 	tmf_req->Bus = channel;
4029 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4030 	tmf_req->TaskType = type;
4031 	tmf_req->MsgFlags = flags;
4032 	tmf_req->MsgContext =
4033 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4034 	if (lun > MPT_MAX_LUNS) {
4035 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4036 		tmf_req->LUN[1] = lun & 0xff;
4037 	} else {
4038 		tmf_req->LUN[1] = lun;
4039 	}
4040 	tmf_req->TaskMsgContext = abort_ctx;
4041 
4042 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4043 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4044 	    mpt->tmf_req->serno, tmf_req->MsgContext);
4045 	if (mpt->verbose > MPT_PRT_DEBUG) {
4046 		mpt_print_request(tmf_req);
4047 	}
4048 
4049 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4050 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4051 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4052 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4053 	if (error != MPT_OK) {
4054 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4055 		mpt->tmf_req->state = REQ_STATE_FREE;
4056 		mpt_reset(mpt, TRUE);
4057 	}
4058 	return (error);
4059 }
4060 
4061 /*
4062  * When a command times out, it is placed on the request_timeout_list
4063  * and we wake our recovery thread. The MPT-Fusion architecture supports
4064  * only a single TMF operation at a time, so we serially abort/bdr, etc,
4065  * the timed-out transactions. The next TMF is issued either by the
4066  * completion handler of the current TMF waking our recovery thread,
4067  * or the TMF timeout handler causing a hard reset sequence.
4068  */
4069 static void
4070 mpt_recover_commands(struct mpt_softc *mpt)
4071 {
4072 	request_t *req;
4073 	union ccb *ccb;
4074 	int error;
4075 
4076 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4077 		/*
4078 		 * No work to do- leave.
4079 		 */
4080 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4081 		return;
4082 	}
4083 
4084 	/*
4085 	 * Flush any commands whose completion coincides with their timeout.
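	 * (mpt_intr() is called directly here to drain the reply queue;
	 * any request that has in fact completed - but whose interrupt has
	 * not yet been serviced - is pulled off the timeout list by its
	 * reply handler before we start issuing aborts.)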
4086 	 */
4087 	mpt_intr(mpt);
4088 
4089 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4090 		/*
4091 		 * The timed-out commands have already
4092 		 * completed. This typically means
4093 		 * that either the timeout value was on
4094 		 * the hairy edge of what the device
4095 		 * requires or - more likely - interrupts
4096 		 * are not happening.
4097 		 */
4098 		mpt_prt(mpt, "Timedout requests already complete. "
4099 		    "Interrupts may not be functioning.\n");
4100 		mpt_enable_ints(mpt);
4101 		return;
4102 	}
4103 
4104 	/*
4105 	 * We have no visibility into the current state of the
4106 	 * controller, so attempt to abort the commands in the
4107 	 * order they timed-out. For initiator commands, we
4108 	 * depend on the reply handler pulling requests off
4109 	 * the timeout list.
4110 	 */
4111 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4112 		uint16_t status;
4113 		uint8_t response;
4114 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4115 
4116 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4117 		    req, req->serno, hdrp->Function);
4118 		ccb = req->ccb;
4119 		if (ccb == NULL) {
4120 			mpt_prt(mpt, "null ccb in timed out request. "
4121 			    "Resetting Controller.\n");
4122 			mpt_reset(mpt, TRUE);
4123 			continue;
4124 		}
4125 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4126 
4127 		/*
4128 		 * Check to see whether this is an initiator command; if it
4129 		 * is not, it has to be handled differently below.
4130 		 */
4131 		switch (hdrp->Function) {
4132 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4133 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4134 			break;
4135 		default:
4136 			/*
4137 			 * XXX: FIX ME: need to abort target assists...
4138 			 */
4139 			mpt_prt(mpt, "just putting it back on the pend q\n");
4140 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4141 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4142 			    links);
4143 			continue;
4144 		}
4145 
4146 		error = mpt_scsi_send_tmf(mpt,
4147 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4148 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4149 		    htole32(req->index | scsi_io_handler_id), TRUE);
4150 
4151 		if (error != 0) {
4152 			/*
4153 			 * mpt_scsi_send_tmf hard resets on failure, so no
4154 			 * need to do so here. Our queue should be emptied
4155 			 * by the hard reset.
4156 			 */
4157 			continue;
4158 		}
4159 
4160 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4161 		    REQ_STATE_DONE, TRUE, 500);
4162 
4163 		status = le16toh(mpt->tmf_req->IOCStatus);
4164 		response = mpt->tmf_req->ResponseCode;
4165 		mpt->tmf_req->state = REQ_STATE_FREE;
4166 
4167 		if (error != 0) {
4168 			/*
4169 			 * If we've errored out, reset the controller.
4170 			 */
4171 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4172 			    "Resetting controller\n");
4173 			mpt_reset(mpt, TRUE);
4174 			continue;
4175 		}
4176 
4177 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4178 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4179 			    "Resetting controller.\n", status);
4180 			mpt_reset(mpt, TRUE);
4181 			continue;
4182 		}
4183 
4184 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4185 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4186 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 4187 "Resetting controller.\n", response); 4188 mpt_reset(mpt, TRUE); 4189 continue; 4190 } 4191 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4192 } 4193 } 4194 4195 /************************ Target Mode Support ****************************/ 4196 static void 4197 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4198 { 4199 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4200 PTR_SGE_TRANSACTION32 tep; 4201 PTR_SGE_SIMPLE32 se; 4202 bus_addr_t paddr; 4203 uint32_t fl; 4204 4205 paddr = req->req_pbuf; 4206 paddr += MPT_RQSL(mpt); 4207 4208 fc = req->req_vbuf; 4209 memset(fc, 0, MPT_REQUEST_AREA); 4210 fc->BufferCount = 1; 4211 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4212 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4213 4214 /* 4215 * Okay, set up ELS buffer pointers. ELS buffer pointers 4216 * consist of a TE SGL element (with details length of zero) 4217 * followed by a SIMPLE SGL element which holds the address 4218 * of the buffer. 4219 */ 4220 4221 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4222 4223 tep->ContextSize = 4; 4224 tep->Flags = 0; 4225 tep->TransactionContext[0] = htole32(ioindex); 4226 4227 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4228 fl = 4229 MPI_SGE_FLAGS_HOST_TO_IOC | 4230 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4231 MPI_SGE_FLAGS_LAST_ELEMENT | 4232 MPI_SGE_FLAGS_END_OF_LIST | 4233 MPI_SGE_FLAGS_END_OF_BUFFER; 4234 fl <<= MPI_SGE_FLAGS_SHIFT; 4235 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4236 se->FlagsLength = htole32(fl); 4237 se->Address = htole32((uint32_t) paddr); 4238 mpt_lprt(mpt, MPT_PRT_DEBUG, 4239 "add ELS index %d ioindex %d for %p:%u\n", 4240 req->index, ioindex, req, req->serno); 4241 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4242 ("mpt_fc_post_els: request not locked")); 4243 mpt_send_cmd(mpt, req); 4244 } 4245 4246 static void 4247 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4248 { 4249 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4250 PTR_CMD_BUFFER_DESCRIPTOR cb; 4251 bus_addr_t paddr; 4252 4253 paddr = req->req_pbuf; 4254 paddr += MPT_RQSL(mpt); 4255 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4256 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4257 4258 fc = req->req_vbuf; 4259 fc->BufferCount = 1; 4260 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4261 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4262 4263 cb = &fc->Buffer[0]; 4264 cb->IoIndex = htole16(ioindex); 4265 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4266 4267 mpt_check_doorbell(mpt); 4268 mpt_send_cmd(mpt, req); 4269 } 4270 4271 static int 4272 mpt_add_els_buffers(struct mpt_softc *mpt) 4273 { 4274 int i; 4275 4276 if (mpt->is_fc == 0) { 4277 return (TRUE); 4278 } 4279 4280 if (mpt->els_cmds_allocated) { 4281 return (TRUE); 4282 } 4283 4284 mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *), 4285 M_DEVBUF, M_NOWAIT | M_ZERO); 4286 4287 if (mpt->els_cmd_ptrs == NULL) { 4288 return (FALSE); 4289 } 4290 4291 /* 4292 * Feed the chip some ELS buffer resources 4293 */ 4294 for (i = 0; i < MPT_MAX_ELS; i++) { 4295 request_t *req = mpt_get_request(mpt, FALSE); 4296 if (req == NULL) { 4297 break; 4298 } 4299 req->state |= REQ_STATE_LOCKED; 4300 mpt->els_cmd_ptrs[i] = req; 4301 mpt_fc_post_els(mpt, req, i); 4302 } 4303 4304 if (i == 0) { 4305 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4306 kfree(mpt->els_cmd_ptrs, M_DEVBUF); 4307 mpt->els_cmd_ptrs = NULL; 4308 return (FALSE); 4309 } 4310 if (i != MPT_MAX_ELS) { 4311 mpt_lprt(mpt, MPT_PRT_INFO, 4312 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4313 } 4314 mpt->els_cmds_allocated = i; 4315 return(TRUE); 4316 } 4317 4318 static int 4319 mpt_add_target_commands(struct mpt_softc *mpt) 4320 { 4321 int i, max; 4322 4323 if (mpt->tgt_cmd_ptrs) { 4324 return (TRUE); 4325 } 4326 4327 max = MPT_MAX_REQUESTS(mpt) >> 1; 4328 if (max > mpt->mpt_max_tgtcmds) { 4329 max = mpt->mpt_max_tgtcmds; 4330 } 4331 mpt->tgt_cmd_ptrs = 4332 kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4333 if (mpt->tgt_cmd_ptrs == NULL) { 4334 mpt_prt(mpt, 4335 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4336 return (FALSE); 4337 } 4338 4339 for (i = 0; i < max; i++) { 4340 request_t *req; 4341 4342 req = mpt_get_request(mpt, FALSE); 4343 if (req == NULL) { 4344 break; 4345 } 4346 req->state |= REQ_STATE_LOCKED; 4347 mpt->tgt_cmd_ptrs[i] = req; 4348 mpt_post_target_command(mpt, req, i); 4349 } 4350 4351 4352 if (i == 0) { 4353 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4354 kfree(mpt->tgt_cmd_ptrs, M_DEVBUF); 4355 mpt->tgt_cmd_ptrs = NULL; 4356 return (FALSE); 4357 } 4358 4359 mpt->tgt_cmds_allocated = i; 4360 4361 if (i < max) { 4362 mpt_lprt(mpt, MPT_PRT_INFO, 4363 "added %d of %d target bufs\n", i, max); 4364 } 4365 return (i); 4366 } 4367 4368 static int 4369 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4370 { 4371 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4372 mpt->twildcard = 1; 4373 } else if (lun >= MPT_MAX_LUNS) { 4374 return (EINVAL); 4375 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4376 return (EINVAL); 4377 } 4378 if (mpt->tenabled == 0) { 4379 if (mpt->is_fc) { 4380 (void) mpt_fc_reset_link(mpt, 0); 4381 } 4382 mpt->tenabled = 1; 4383 } 4384 if (lun == CAM_LUN_WILDCARD) { 4385 mpt->trt_wildcard.enabled = 1; 4386 } else { 4387 mpt->trt[lun].enabled = 1; 4388 } 4389 return (0); 4390 } 4391 4392 static int 4393 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4394 { 4395 int i; 4396 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4397 mpt->twildcard = 0; 4398 } else if (lun >= MPT_MAX_LUNS) { 4399 return (EINVAL); 4400 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4401 return (EINVAL); 4402 } 4403 if (lun == CAM_LUN_WILDCARD) { 4404 mpt->trt_wildcard.enabled = 0; 4405 } else { 4406 mpt->trt[lun].enabled = 0; 4407 } 4408 for (i = 0; i < MPT_MAX_LUNS; i++) { 4409 if (mpt->trt[lun].enabled) { 4410 break; 4411 } 4412 } 4413 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4414 if (mpt->is_fc) { 4415 (void) mpt_fc_reset_link(mpt, 0); 4416 } 4417 mpt->tenabled = 0; 4418 } 4419 return (0); 4420 } 4421 4422 /* 4423 * Called with MPT lock held 4424 */ 4425 static void 4426 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4427 { 4428 struct ccb_scsiio *csio = &ccb->csio; 4429 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4430 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4431 4432 switch (tgt->state) { 4433 case TGT_STATE_IN_CAM: 4434 break; 4435 case TGT_STATE_MOVING_DATA: 4436 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4437 xpt_freeze_simq(mpt->sim, 1); 4438 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4439 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4440 MPTLOCK_2_CAMLOCK(mpt); 4441 xpt_done(ccb); 4442 CAMLOCK_2_MPTLOCK(mpt); 4443 return; 4444 default: 4445 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4446 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4447 mpt_tgt_dump_req_state(mpt, cmd_req); 4448 mpt_set_ccb_status(ccb, 
4449 		MPTLOCK_2_CAMLOCK(mpt);
4450 		xpt_done(ccb);
4451 		CAMLOCK_2_MPTLOCK(mpt);
4452 		return;
4453 	}
4454 
4455 	if (csio->dxfer_len) {
4456 		bus_dmamap_callback_t *cb;
4457 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4458 		request_t *req;
4459 
4460 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4461 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4462 
4463 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4464 			if (mpt->outofbeer == 0) {
4465 				mpt->outofbeer = 1;
4466 				xpt_freeze_simq(mpt->sim, 1);
4467 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4468 			}
4469 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4470 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4471 			MPTLOCK_2_CAMLOCK(mpt);
4472 			xpt_done(ccb);
4473 			CAMLOCK_2_MPTLOCK(mpt);
4474 			return;
4475 		}
4476 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4477 		if (sizeof (bus_addr_t) > 4) {
4478 			cb = mpt_execute_req_a64;
4479 		} else {
4480 			cb = mpt_execute_req;
4481 		}
4482 
4483 		req->ccb = ccb;
4484 		ccb->ccb_h.ccb_req_ptr = req;
4485 
4486 		/*
4487 		 * Record the currently active ccb and the
4488 		 * request for it in our target state area.
4489 		 */
4490 		tgt->ccb = ccb;
4491 		tgt->req = req;
4492 
4493 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4494 		ta = req->req_vbuf;
4495 
4496 		if (mpt->is_sas) {
4497 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4498 			    cmd_req->req_vbuf;
4499 			ta->QueueTag = ssp->InitiatorTag;
4500 		} else if (mpt->is_spi) {
4501 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4502 			    cmd_req->req_vbuf;
4503 			ta->QueueTag = sp->Tag;
4504 		}
4505 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4506 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4507 		ta->ReplyWord = htole32(tgt->reply_desc);
4508 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4509 			ta->LUN[0] =
4510 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4511 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4512 		} else {
4513 			ta->LUN[1] = csio->ccb_h.target_lun;
4514 		}
4515 
4516 		ta->RelativeOffset = tgt->bytes_xfered;
4517 		ta->DataLength = ccb->csio.dxfer_len;
4518 		if (ta->DataLength > tgt->resid) {
4519 			ta->DataLength = tgt->resid;
4520 		}
4521 
4522 		/*
4523 		 * XXX Should be done after data transfer completes?
4524 		 */
4525 		tgt->resid -= csio->dxfer_len;
4526 		tgt->bytes_xfered += csio->dxfer_len;
4527 
4528 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4529 			ta->TargetAssistFlags |=
4530 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4531 		}
4532 
4533 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4534 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4535 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4536 			ta->TargetAssistFlags |=
4537 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4538 		}
4539 #endif
4540 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4541 
4542 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4543 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4544 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4545 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4546 
4547 		MPTLOCK_2_CAMLOCK(mpt);
4548 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4549 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4550 				int error;
4551 				crit_enter();
4552 				error = bus_dmamap_load(mpt->buffer_dmat,
4553 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4554 				    cb, req, 0);
4555 				crit_exit();
4556 				if (error == EINPROGRESS) {
4557 					xpt_freeze_simq(mpt->sim, 1);
4558 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4559 				}
4560 			} else {
4561 				/*
4562 				 * We have been given a pointer to a single
4563 				 * physical buffer.
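				 * CAM_DATA_PHYS means csio->data_ptr is
				 * already a physical address, so rather than
				 * going through bus_dmamap_load() we build a
				 * one-entry segment list ourselves and invoke
				 * the completion callback directly.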
4564 */ 4565 struct bus_dma_segment seg; 4566 seg.ds_addr = (bus_addr_t) 4567 (vm_offset_t)csio->data_ptr; 4568 seg.ds_len = csio->dxfer_len; 4569 (*cb)(req, &seg, 1, 0); 4570 } 4571 } else { 4572 /* 4573 * We have been given a list of addresses. 4574 * This case could be easily supported but they are not 4575 * currently generated by the CAM subsystem so there 4576 * is no point in wasting the time right now. 4577 */ 4578 struct bus_dma_segment *sgs; 4579 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4580 (*cb)(req, NULL, 0, EFAULT); 4581 } else { 4582 /* Just use the segments provided */ 4583 sgs = (struct bus_dma_segment *)csio->data_ptr; 4584 (*cb)(req, sgs, csio->sglist_cnt, 0); 4585 } 4586 } 4587 CAMLOCK_2_MPTLOCK(mpt); 4588 } else { 4589 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4590 4591 /* 4592 * XXX: I don't know why this seems to happen, but 4593 * XXX: completing the CCB seems to make things happy. 4594 * XXX: This seems to happen if the initiator requests 4595 * XXX: enough data that we have to do multiple CTIOs. 4596 */ 4597 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4598 mpt_lprt(mpt, MPT_PRT_DEBUG, 4599 "Meaningless STATUS CCB (%p): flags %x status %x " 4600 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4601 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4602 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4603 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4604 MPTLOCK_2_CAMLOCK(mpt); 4605 xpt_done(ccb); 4606 CAMLOCK_2_MPTLOCK(mpt); 4607 return; 4608 } 4609 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4610 sp = sense; 4611 memcpy(sp, &csio->sense_data, 4612 min(csio->sense_len, MPT_SENSE_SIZE)); 4613 } 4614 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4615 } 4616 } 4617 4618 static void 4619 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4620 uint32_t lun, int send, uint8_t *data, size_t length) 4621 { 4622 mpt_tgt_state_t *tgt; 4623 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4624 SGE_SIMPLE32 *se; 4625 uint32_t flags; 4626 uint8_t *dptr; 4627 bus_addr_t pptr; 4628 request_t *req; 4629 4630 /* 4631 * We enter with resid set to the data load for the command. 
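	 * This path is only used for locally synthesized responses (the
	 * NOT HERE inquiry data and empty sense built for unmanaged luns
	 * in mpt_scsi_tgt_atio()); if there is nothing to move, we skip
	 * straight to sending status.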
4632 */ 4633 tgt = MPT_TGT_STATE(mpt, cmd_req); 4634 if (length == 0 || tgt->resid == 0) { 4635 tgt->resid = 0; 4636 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4637 return; 4638 } 4639 4640 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4641 mpt_prt(mpt, "out of resources- dropping local response\n"); 4642 return; 4643 } 4644 tgt->is_local = 1; 4645 4646 4647 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4648 ta = req->req_vbuf; 4649 4650 if (mpt->is_sas) { 4651 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4652 ta->QueueTag = ssp->InitiatorTag; 4653 } else if (mpt->is_spi) { 4654 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4655 ta->QueueTag = sp->Tag; 4656 } 4657 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4658 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4659 ta->ReplyWord = htole32(tgt->reply_desc); 4660 if (lun > MPT_MAX_LUNS) { 4661 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4662 ta->LUN[1] = lun & 0xff; 4663 } else { 4664 ta->LUN[1] = lun; 4665 } 4666 ta->RelativeOffset = 0; 4667 ta->DataLength = length; 4668 4669 dptr = req->req_vbuf; 4670 dptr += MPT_RQSL(mpt); 4671 pptr = req->req_pbuf; 4672 pptr += MPT_RQSL(mpt); 4673 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4674 4675 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4676 memset(se, 0,sizeof (*se)); 4677 4678 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4679 if (send) { 4680 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4681 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4682 } 4683 se->Address = pptr; 4684 MPI_pSGE_SET_LENGTH(se, length); 4685 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4686 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4687 MPI_pSGE_SET_FLAGS(se, flags); 4688 4689 tgt->ccb = NULL; 4690 tgt->req = req; 4691 tgt->resid -= length; 4692 tgt->bytes_xfered = length; 4693 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4694 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4695 #else 4696 tgt->state = TGT_STATE_MOVING_DATA; 4697 #endif 4698 mpt_send_cmd(mpt, req); 4699 } 4700 4701 /* 4702 * Abort queued up CCBs 4703 */ 4704 static cam_status 4705 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4706 { 4707 struct mpt_hdr_stailq *lp; 4708 struct ccb_hdr *srch; 4709 int found = 0; 4710 union ccb *accb = ccb->cab.abort_ccb; 4711 tgt_resource_t *trtp; 4712 4713 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4714 4715 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4716 trtp = &mpt->trt_wildcard; 4717 } else { 4718 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4719 } 4720 4721 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4722 lp = &trtp->atios; 4723 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4724 lp = &trtp->inots; 4725 } else { 4726 return (CAM_REQ_INVALID); 4727 } 4728 4729 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4730 if (srch == &accb->ccb_h) { 4731 found = 1; 4732 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4733 break; 4734 } 4735 } 4736 if (found) { 4737 accb->ccb_h.status = CAM_REQ_ABORTED; 4738 xpt_done(accb); 4739 return (CAM_REQ_CMP); 4740 } 4741 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4742 return (CAM_PATH_INVALID); 4743 } 4744 4745 /* 4746 * Ask the MPT to abort the current target command 4747 */ 4748 static int 4749 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4750 { 4751 int error; 4752 request_t *req; 4753 PTR_MSG_TARGET_MODE_ABORT abtp; 4754 4755 req = mpt_get_request(mpt, FALSE); 4756 if (req == NULL) { 4757 return (-1); 4758 } 4759 abtp = req->req_vbuf; 4760 memset(abtp, 0, sizeof (*abtp)); 
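	/*
	 * Build a TARGET_MODE_ABORT for exactly one outstanding I/O; the
	 * ReplyWord taken from the original command's state below tells
	 * the IOC which command buffer is meant.
	 */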
4761 4762 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4763 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4764 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4765 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4766 error = 0; 4767 if (mpt->is_fc || mpt->is_sas) { 4768 mpt_send_cmd(mpt, req); 4769 } else { 4770 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4771 } 4772 return (error); 4773 } 4774 4775 /* 4776 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4777 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4778 * FC929 to set bogus FC_RSP fields (nonzero residuals 4779 * but w/o RESID fields set). This causes QLogic initiators 4780 * to think maybe that a frame was lost. 4781 * 4782 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4783 * we use allocated requests to do TARGET_ASSIST and we 4784 * need to know when to release them. 4785 */ 4786 4787 static void 4788 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4789 uint8_t status, uint8_t const *sense_data) 4790 { 4791 uint8_t *cmd_vbuf; 4792 mpt_tgt_state_t *tgt; 4793 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4794 request_t *req; 4795 bus_addr_t paddr; 4796 int resplen = 0; 4797 uint32_t fl; 4798 4799 cmd_vbuf = cmd_req->req_vbuf; 4800 cmd_vbuf += MPT_RQSL(mpt); 4801 tgt = MPT_TGT_STATE(mpt, cmd_req); 4802 4803 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4804 if (mpt->outofbeer == 0) { 4805 mpt->outofbeer = 1; 4806 xpt_freeze_simq(mpt->sim, 1); 4807 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4808 } 4809 if (ccb) { 4810 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4811 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4812 MPTLOCK_2_CAMLOCK(mpt); 4813 xpt_done(ccb); 4814 CAMLOCK_2_MPTLOCK(mpt); 4815 } else { 4816 mpt_prt(mpt, 4817 "could not allocate status request- dropping\n"); 4818 } 4819 return; 4820 } 4821 req->ccb = ccb; 4822 if (ccb) { 4823 ccb->ccb_h.ccb_mpt_ptr = mpt; 4824 ccb->ccb_h.ccb_req_ptr = req; 4825 } 4826 4827 /* 4828 * Record the currently active ccb, if any, and the 4829 * request for it in our target state area. 4830 */ 4831 tgt->ccb = ccb; 4832 tgt->req = req; 4833 tgt->state = TGT_STATE_SENDING_STATUS; 4834 4835 tp = req->req_vbuf; 4836 paddr = req->req_pbuf; 4837 paddr += MPT_RQSL(mpt); 4838 4839 memset(tp, 0, sizeof (*tp)); 4840 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4841 if (mpt->is_fc) { 4842 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4843 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4844 uint8_t *sts_vbuf; 4845 uint32_t *rsp; 4846 4847 sts_vbuf = req->req_vbuf; 4848 sts_vbuf += MPT_RQSL(mpt); 4849 rsp = (uint32_t *) sts_vbuf; 4850 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4851 4852 /* 4853 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4854 * It has to be big-endian in memory and is organized 4855 * in 32 bit words, which are much easier to deal with 4856 * as words which are swizzled as needed. 4857 * 4858 * All we're filling here is the FC_RSP payload. 4859 * We may just have the chip synthesize it if 4860 * we have no residual and an OK status. 4861 * 4862 */ 4863 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4864 4865 rsp[2] = status; 4866 if (tgt->resid) { 4867 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4868 rsp[3] = htobe32(tgt->resid); 4869 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4870 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4871 #endif 4872 } 4873 if (status == SCSI_STATUS_CHECK_COND) { 4874 int i; 4875 4876 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
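			 * (0x200 is presumably FCP_SNS_LEN_VALID - the 0x02
			 * flags bit shifted up 8 bits within this big-endian
			 * word - just as the 0x800 above would be
			 * FCP_RESID_UNDER, 0x08 << 8.)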
			 */
4877 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4878 			if (sense_data) {
4879 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4880 			} else {
4881 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4882 				    "TION but no sense data?\n");
4883 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4884 			}
4885 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4886 				rsp[i] = htobe32(rsp[i]);
4887 			}
4888 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4889 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4890 #endif
4891 		}
4892 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4893 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4894 #endif
4895 		rsp[2] = htobe32(rsp[2]);
4896 	} else if (mpt->is_sas) {
4897 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4898 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4899 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4900 	} else {
4901 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4902 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4903 		tp->StatusCode = status;
4904 		tp->QueueTag = htole16(sp->Tag);
4905 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4906 	}
4907 
4908 	tp->ReplyWord = htole32(tgt->reply_desc);
4909 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4910 
4911 #ifdef	WE_CAN_USE_AUTO_REPOST
4912 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4913 #endif
4914 	if (status == SCSI_STATUS_OK && resplen == 0) {
4915 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4916 	} else {
4917 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4918 		fl =
4919 		    MPI_SGE_FLAGS_HOST_TO_IOC |
4920 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4921 		    MPI_SGE_FLAGS_LAST_ELEMENT |
4922 		    MPI_SGE_FLAGS_END_OF_LIST |
4923 		    MPI_SGE_FLAGS_END_OF_BUFFER;
4924 		fl <<= MPI_SGE_FLAGS_SHIFT;
4925 		fl |= resplen;
4926 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4927 	}
4928 
4929 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4930 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4931 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4932 	    req->serno, tgt->resid);
4933 	if (ccb) {
4934 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4935 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4936 	}
4937 	mpt_send_cmd(mpt, req);
4938 }
4939 
4940 static void
4941 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4942     tgt_resource_t *trtp, int init_id)
4943 {
4944 	struct ccb_immed_notify *inot;
4945 	mpt_tgt_state_t *tgt;
4946 
4947 	tgt = MPT_TGT_STATE(mpt, req);
4948 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4949 	if (inot == NULL) {
4950 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4951 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4952 		return;
4953 	}
4954 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4955 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4956 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4957 
4958 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4959 	inot->sense_len = 0;
4960 	memset(inot->message_args, 0, sizeof (inot->message_args));
4961 	inot->initiator_id = init_id;	/* XXX */
4962 
4963 	/*
4964 	 * This is a somewhat grotesque attempt to map from task management
4965 	 * to old style SCSI messages. God help us all.
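	 * Each task management function collapses to the single message
	 * byte CAM expects in message_args[0]; TERMINATE TASK has no exact
	 * message equivalent, so it too is reported as ABORT TAG.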
4966 */ 4967 switch (fc) { 4968 case MPT_ABORT_TASK_SET: 4969 inot->message_args[0] = MSG_ABORT_TAG; 4970 break; 4971 case MPT_CLEAR_TASK_SET: 4972 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4973 break; 4974 case MPT_TARGET_RESET: 4975 inot->message_args[0] = MSG_TARGET_RESET; 4976 break; 4977 case MPT_CLEAR_ACA: 4978 inot->message_args[0] = MSG_CLEAR_ACA; 4979 break; 4980 case MPT_TERMINATE_TASK: 4981 inot->message_args[0] = MSG_ABORT_TAG; 4982 break; 4983 default: 4984 inot->message_args[0] = MSG_NOOP; 4985 break; 4986 } 4987 tgt->ccb = (union ccb *) inot; 4988 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4989 MPTLOCK_2_CAMLOCK(mpt); 4990 xpt_done((union ccb *)inot); 4991 CAMLOCK_2_MPTLOCK(mpt); 4992 } 4993 4994 static void 4995 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4996 { 4997 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4998 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4999 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 5000 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 5001 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 5002 '0', '0', '0', '1' 5003 }; 5004 struct ccb_accept_tio *atiop; 5005 lun_id_t lun; 5006 int tag_action = 0; 5007 mpt_tgt_state_t *tgt; 5008 tgt_resource_t *trtp = NULL; 5009 U8 *lunptr; 5010 U8 *vbuf; 5011 U16 itag; 5012 U16 ioindex; 5013 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5014 uint8_t *cdbp; 5015 5016 /* 5017 * Stash info for the current command where we can get at it later. 5018 */ 5019 vbuf = req->req_vbuf; 5020 vbuf += MPT_RQSL(mpt); 5021 5022 /* 5023 * Get our state pointer set up. 5024 */ 5025 tgt = MPT_TGT_STATE(mpt, req); 5026 if (tgt->state != TGT_STATE_LOADED) { 5027 mpt_tgt_dump_req_state(mpt, req); 5028 panic("bad target state in mpt_scsi_tgt_atio"); 5029 } 5030 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5031 tgt->state = TGT_STATE_IN_CAM; 5032 tgt->reply_desc = reply_desc; 5033 ioindex = GET_IO_INDEX(reply_desc); 5034 if (mpt->verbose >= MPT_PRT_DEBUG) { 5035 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5036 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5037 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5038 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5039 } 5040 if (mpt->is_fc) { 5041 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5042 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5043 if (fc->FcpCntl[2]) { 5044 /* 5045 * Task Management Request 5046 */ 5047 switch (fc->FcpCntl[2]) { 5048 case 0x2: 5049 fct = MPT_ABORT_TASK_SET; 5050 break; 5051 case 0x4: 5052 fct = MPT_CLEAR_TASK_SET; 5053 break; 5054 case 0x20: 5055 fct = MPT_TARGET_RESET; 5056 break; 5057 case 0x40: 5058 fct = MPT_CLEAR_ACA; 5059 break; 5060 case 0x80: 5061 fct = MPT_TERMINATE_TASK; 5062 break; 5063 default: 5064 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5065 fc->FcpCntl[2]); 5066 mpt_scsi_tgt_status(mpt, 0, req, 5067 SCSI_STATUS_OK, 0); 5068 return; 5069 } 5070 } else { 5071 switch (fc->FcpCntl[1]) { 5072 case 0: 5073 tag_action = MSG_SIMPLE_Q_TAG; 5074 break; 5075 case 1: 5076 tag_action = MSG_HEAD_OF_Q_TAG; 5077 break; 5078 case 2: 5079 tag_action = MSG_ORDERED_Q_TAG; 5080 break; 5081 default: 5082 /* 5083 * Bah. 
Ignore Untagged Queueing and ACA
5084 				 */
5085 				tag_action = MSG_SIMPLE_Q_TAG;
5086 				break;
5087 			}
5088 		}
5089 		tgt->resid = be32toh(fc->FcpDl);
5090 		cdbp = fc->FcpCdb;
5091 		lunptr = fc->FcpLun;
5092 		itag = be16toh(fc->OptionalOxid);
5093 	} else if (mpt->is_sas) {
5094 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5095 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5096 		cdbp = ssp->CDB;
5097 		lunptr = ssp->LogicalUnitNumber;
5098 		itag = ssp->InitiatorTag;
5099 	} else {
5100 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5101 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5102 		cdbp = sp->CDB;
5103 		lunptr = sp->LogicalUnitNumber;
5104 		itag = sp->Tag;
5105 	}
5106 
5107 	/*
5108 	 * Generate a simple lun
5109 	 */
5110 	switch (lunptr[0] & 0xc0) {
5111 	case 0x40:
5112 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5113 		break;
5114 	case 0:
5115 		lun = lunptr[1];
5116 		break;
5117 	default:
5118 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
5119 		lun = 0xffff;
5120 		break;
5121 	}
5122 
5123 	/*
5124 	 * Deal with non-enabled or bad luns here.
5125 	 */
5126 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5127 	    mpt->trt[lun].enabled == 0) {
5128 		if (mpt->twildcard) {
5129 			trtp = &mpt->trt_wildcard;
5130 		} else if (fct == MPT_NIL_TMT_VALUE) {
5131 			/*
5132 			 * In this case, we haven't got an upstream listener
5133 			 * for either a specific lun or wildcard luns. We
5134 			 * have to make some sensible response. For regular
5135 			 * inquiry, just return some NOT HERE inquiry data.
5136 			 * For VPD inquiry, report illegal field in cdb.
5137 			 * For REQUEST SENSE, just return NO SENSE data.
5138 			 * REPORT LUNS gets illegal command.
5139 			 * All other commands get 'no such device'.
5140 			 */
5141 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5142 			size_t len;
5143 
5144 			memset(buf, 0, MPT_SENSE_SIZE);
5145 			cond = SCSI_STATUS_CHECK_COND;
5146 			buf[0] = 0xf0;
5147 			buf[2] = 0x5;
5148 			buf[7] = 0x8;
5149 			sp = buf;
5150 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5151 
5152 			switch (cdbp[0]) {
5153 			case INQUIRY:
5154 			{
5155 				if (cdbp[1] != 0) {
5156 					buf[12] = 0x26;
5157 					buf[13] = 0x01;
5158 					break;
5159 				}
5160 				len = min(tgt->resid, cdbp[4]);
5161 				len = min(len, sizeof (null_iqd));
5162 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5163 				    "local inquiry %ld bytes\n", (long) len);
5164 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5165 				    null_iqd, len);
5166 				return;
5167 			}
5168 			case REQUEST_SENSE:
5169 			{
5170 				buf[2] = 0x0;
5171 				len = min(tgt->resid, cdbp[4]);
5172 				len = min(len, sizeof (buf));
5173 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5174 				    "local reqsense %ld bytes\n", (long) len);
5175 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5176 				    buf, len);
5177 				return;
5178 			}
5179 			case REPORT_LUNS:
5180 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5181 				buf[12] = 0x26;
5182 				return;
5183 			default:
5184 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5185 				    "CMD 0x%x to unmanaged lun %u\n",
5186 				    cdbp[0], lun);
5187 				buf[12] = 0x25;
5188 				break;
5189 			}
5190 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5191 			return;
5192 		}
5193 		/* otherwise, leave trtp NULL */
5194 	} else {
5195 		trtp = &mpt->trt[lun];
5196 	}
5197 
5198 	/*
5199 	 * Deal with any task management
5200 	 */
5201 	if (fct != MPT_NIL_TMT_VALUE) {
5202 		if (trtp == NULL) {
5203 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5204 			    fct);
5205 			mpt_scsi_tgt_status(mpt, 0, req,
5206 			    SCSI_STATUS_OK, 0);
5207 		} else {
5208 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5209 			    GET_INITIATOR_INDEX(reply_desc));
5210 		}
5211 		return;
5212 	}
5213 
5214 
5215 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5216 	if (atiop == NULL) {
5217 		mpt_lprt(mpt, MPT_PRT_WARN,
5218 		    "no ATIOs for lun %u-
sending back %s\n", lun, 5219 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5220 mpt_scsi_tgt_status(mpt, NULL, req, 5221 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5222 NULL); 5223 return; 5224 } 5225 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5226 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5227 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5228 atiop->ccb_h.ccb_mpt_ptr = mpt; 5229 atiop->ccb_h.status = CAM_CDB_RECVD; 5230 atiop->ccb_h.target_lun = lun; 5231 atiop->sense_len = 0; 5232 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5233 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5234 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5235 5236 /* 5237 * The tag we construct here allows us to find the 5238 * original request that the command came in with. 5239 * 5240 * This way we don't have to depend on anything but the 5241 * tag to find things when CCBs show back up from CAM. 5242 */ 5243 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5244 tgt->tag_id = atiop->tag_id; 5245 if (tag_action) { 5246 atiop->tag_action = tag_action; 5247 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5248 } 5249 if (mpt->verbose >= MPT_PRT_DEBUG) { 5250 int i; 5251 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5252 atiop->ccb_h.target_lun); 5253 for (i = 0; i < atiop->cdb_len; i++) { 5254 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5255 (i == (atiop->cdb_len - 1))? '>' : ' '); 5256 } 5257 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5258 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5259 } 5260 5261 MPTLOCK_2_CAMLOCK(mpt); 5262 xpt_done((union ccb *)atiop); 5263 CAMLOCK_2_MPTLOCK(mpt); 5264 } 5265 5266 static void 5267 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5268 { 5269 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5270 5271 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5272 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5273 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5274 tgt->tag_id, tgt->state); 5275 } 5276 5277 static void 5278 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5279 { 5280 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5281 req->index, req->index, req->state); 5282 mpt_tgt_dump_tgt_state(mpt, req); 5283 } 5284 5285 static int 5286 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5287 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5288 { 5289 int dbg; 5290 union ccb *ccb; 5291 U16 status; 5292 5293 if (reply_frame == NULL) { 5294 /* 5295 * Figure out what the state of the command is. 5296 */ 5297 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5298 5299 #ifdef INVARIANTS 5300 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5301 if (tgt->req) { 5302 mpt_req_not_spcl(mpt, tgt->req, 5303 "turbo scsi_tgt_reply associated req", __LINE__); 5304 } 5305 #endif 5306 switch(tgt->state) { 5307 case TGT_STATE_LOADED: 5308 /* 5309 * This is a new command starting. 
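			 * A turbo reply against a request still in the
			 * LOADED state means the IOC has deposited a new
			 * command into the buffer we posted; hand it off
			 * to mpt_scsi_tgt_atio() for ATIO processing.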
5310 */ 5311 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5312 break; 5313 case TGT_STATE_MOVING_DATA: 5314 { 5315 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5316 5317 ccb = tgt->ccb; 5318 if (tgt->req == NULL) { 5319 panic("mpt: turbo target reply with null " 5320 "associated request moving data"); 5321 /* NOTREACHED */ 5322 } 5323 if (ccb == NULL) { 5324 if (tgt->is_local == 0) { 5325 panic("mpt: turbo target reply with " 5326 "null associated ccb moving data"); 5327 /* NOTREACHED */ 5328 } 5329 mpt_lprt(mpt, MPT_PRT_DEBUG, 5330 "TARGET_ASSIST local done\n"); 5331 TAILQ_REMOVE(&mpt->request_pending_list, 5332 tgt->req, links); 5333 mpt_free_request(mpt, tgt->req); 5334 tgt->req = NULL; 5335 mpt_scsi_tgt_status(mpt, NULL, req, 5336 0, NULL); 5337 return (TRUE); 5338 } 5339 tgt->ccb = NULL; 5340 tgt->nxfers++; 5341 mpt_req_untimeout(req, mpt_timeout, ccb); 5342 mpt_lprt(mpt, MPT_PRT_DEBUG, 5343 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5344 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5345 /* 5346 * Free the Target Assist Request 5347 */ 5348 KASSERT(tgt->req->ccb == ccb, 5349 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5350 tgt->req->serno, tgt->req->ccb)); 5351 TAILQ_REMOVE(&mpt->request_pending_list, 5352 tgt->req, links); 5353 mpt_free_request(mpt, tgt->req); 5354 tgt->req = NULL; 5355 5356 /* 5357 * Do we need to send status now? That is, are 5358 * we done with all our data transfers? 5359 */ 5360 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5361 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5362 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5363 KASSERT(ccb->ccb_h.status, 5364 ("zero ccb sts at %d\n", __LINE__)); 5365 tgt->state = TGT_STATE_IN_CAM; 5366 if (mpt->outofbeer) { 5367 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5368 mpt->outofbeer = 0; 5369 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5370 } 5371 MPTLOCK_2_CAMLOCK(mpt); 5372 xpt_done(ccb); 5373 CAMLOCK_2_MPTLOCK(mpt); 5374 break; 5375 } 5376 /* 5377 * Otherwise, send status (and sense) 5378 */ 5379 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5380 sp = sense; 5381 memcpy(sp, &ccb->csio.sense_data, 5382 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5383 } 5384 mpt_scsi_tgt_status(mpt, ccb, req, 5385 ccb->csio.scsi_status, sp); 5386 break; 5387 } 5388 case TGT_STATE_SENDING_STATUS: 5389 case TGT_STATE_MOVING_DATA_AND_STATUS: 5390 { 5391 int ioindex; 5392 ccb = tgt->ccb; 5393 5394 if (tgt->req == NULL) { 5395 panic("mpt: turbo target reply with null " 5396 "associated request sending status"); 5397 /* NOTREACHED */ 5398 } 5399 5400 if (ccb) { 5401 tgt->ccb = NULL; 5402 if (tgt->state == 5403 TGT_STATE_MOVING_DATA_AND_STATUS) { 5404 tgt->nxfers++; 5405 } 5406 mpt_req_untimeout(req, mpt_timeout, ccb); 5407 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5408 ccb->ccb_h.status |= CAM_SENT_SENSE; 5409 } 5410 mpt_lprt(mpt, MPT_PRT_DEBUG, 5411 "TARGET_STATUS tag %x sts %x flgs %x req " 5412 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5413 ccb->ccb_h.flags, tgt->req); 5414 /* 5415 * Free the Target Send Status Request 5416 */ 5417 KASSERT(tgt->req->ccb == ccb, 5418 ("tgt->req %p:%u tgt->req->ccb %p", 5419 tgt->req, tgt->req->serno, tgt->req->ccb)); 5420 /* 5421 * Notify CAM that we're done 5422 */ 5423 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5424 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5425 KASSERT(ccb->ccb_h.status, 5426 ("ZERO ccb sts at %d\n", __LINE__)); 5427 tgt->ccb = NULL; 5428 } else { 5429 mpt_lprt(mpt, MPT_PRT_DEBUG, 5430 "TARGET_STATUS non-CAM for req %p:%u\n", 5431 tgt->req, tgt->req->serno); 5432 } 5433 TAILQ_REMOVE(&mpt->request_pending_list, 
5434 tgt->req, links); 5435 mpt_free_request(mpt, tgt->req); 5436 tgt->req = NULL; 5437 5438 /* 5439 * And re-post the Command Buffer. 5440 * This will reset the state. 5441 */ 5442 ioindex = GET_IO_INDEX(reply_desc); 5443 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5444 tgt->is_local = 0; 5445 mpt_post_target_command(mpt, req, ioindex); 5446 5447 /* 5448 * And post a done for anyone who cares 5449 */ 5450 if (ccb) { 5451 if (mpt->outofbeer) { 5452 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5453 mpt->outofbeer = 0; 5454 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5455 } 5456 MPTLOCK_2_CAMLOCK(mpt); 5457 xpt_done(ccb); 5458 CAMLOCK_2_MPTLOCK(mpt); 5459 } 5460 break; 5461 } 5462 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5463 tgt->state = TGT_STATE_LOADED; 5464 break; 5465 default: 5466 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5467 "Reply Function\n", tgt->state); 5468 } 5469 return (TRUE); 5470 } 5471 5472 status = le16toh(reply_frame->IOCStatus); 5473 if (status != MPI_IOCSTATUS_SUCCESS) { 5474 dbg = MPT_PRT_ERROR; 5475 } else { 5476 dbg = MPT_PRT_DEBUG1; 5477 } 5478 5479 mpt_lprt(mpt, dbg, 5480 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5481 req, req->serno, reply_frame, reply_frame->Function, status); 5482 5483 switch (reply_frame->Function) { 5484 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5485 { 5486 mpt_tgt_state_t *tgt; 5487 #ifdef INVARIANTS 5488 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5489 #endif 5490 if (status != MPI_IOCSTATUS_SUCCESS) { 5491 /* 5492 * XXX What to do? 5493 */ 5494 break; 5495 } 5496 tgt = MPT_TGT_STATE(mpt, req); 5497 KASSERT(tgt->state == TGT_STATE_LOADING, 5498 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5499 mpt_assign_serno(mpt, req); 5500 tgt->state = TGT_STATE_LOADED; 5501 break; 5502 } 5503 case MPI_FUNCTION_TARGET_ASSIST: 5504 #ifdef INVARIANTS 5505 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5506 #endif 5507 mpt_prt(mpt, "target assist completion\n"); 5508 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5509 mpt_free_request(mpt, req); 5510 break; 5511 case MPI_FUNCTION_TARGET_STATUS_SEND: 5512 #ifdef INVARIANTS 5513 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5514 #endif 5515 mpt_prt(mpt, "status send completion\n"); 5516 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5517 mpt_free_request(mpt, req); 5518 break; 5519 case MPI_FUNCTION_TARGET_MODE_ABORT: 5520 { 5521 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5522 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5523 PTR_MSG_TARGET_MODE_ABORT abtp = 5524 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5525 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5526 #ifdef INVARIANTS 5527 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5528 #endif 5529 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5530 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5531 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5532 mpt_free_request(mpt, req); 5533 break; 5534 } 5535 default: 5536 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5537 "0x%x\n", reply_frame->Function); 5538 break; 5539 } 5540 return (TRUE); 5541 } 5542