/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
 */

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
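
/*
 * Illustrative note (not part of the original source): mpt_enable_sata_wc is
 * a boot-time tunable, so it would typically be set from the loader, e.g. in
 * /boot/loader.conf:
 *
 *	hw.mpt.enable_sata_wc="1"	# force SATA write cache on
 *	hw.mpt.enable_sata_wc="0"	# force SATA write cache off
 *
 * Leaving it at the default of -1 means mpt_set_initial_config_sas() below
 * does not touch the drives' write-cache setting at all.
 */
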
static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->phydisk_sim, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}
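
/*
 * Illustrative recap (not text from the original source): the attach routine
 * above follows the usual CAM SIM bring-up pattern, with the regular bus and
 * the optional RAID physical-disk bus sharing one device queue sized to the
 * IOC's credit count:
 *
 *	devq = cam_simq_alloc(maxq);
 *	sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
 *	xpt_bus_register(sim, bus_number);
 *	xpt_create_path(&path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 *
 * Every failure path funnels through the "cleanup" label, which relies on
 * mpt_cam_detach() being safe to call on a partially initialized softc.
 */
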
/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);


	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	MPT_UNLOCK(mpt);
	{
		ksnprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		ksnprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
		    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		    "World Wide Node Name");

		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
		    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		    "World Wide Port Name");

	}
	MPT_LOCK(mpt);
	return (0);
}
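
/*
 * Illustrative note (an assumption, not taken from the original source): with
 * the read-only sysctl strings registered above, the port's addresses can be
 * inspected from userland once the driver is attached, along the lines of:
 *
 *	$ sysctl dev.mpt.0.wwnn dev.mpt.0.wwpn
 *	dev.mpt.0.wwnn: 0x200000e08b123456
 *	dev.mpt.0.wwpn: 0x210000e08b123456
 *
 * The exact node names depend on where mpt_sysctl_tree is rooted, and the
 * values shown here are made up for illustration.
 */
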
/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}
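
/*
 * Illustrative note (an assumption about the MPI PageAddress encoding, not
 * text from the original source): the "form"/"form_specific" pair passed to
 * the two helpers above is simply summed into the extended config request's
 * page address, so callers compose addresses such as
 *
 *	(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
 *	    MPI_SAS_PHY_PGAD_FORM_SHIFT) + phy_number
 *
 *	(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 *	    MPI_SAS_DEVICE_PGAD_FORM_SHIFT) + dev_handle
 *
 * exactly as mpt_read_config_info_sas() does below.
 */
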
/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	pass->CommandFIS[0] = 0x27;	/* Register Host-to-Device FIS */
	pass->CommandFIS[1] = 0x80;	/* "C" bit: command register update */
	pass->CommandFIS[2] = 0xef;	/* ATA SET FEATURES */
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; /* WC enable/disable */
	pass->CommandFIS[7] = 0x40;	/* device register */
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}
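
/*
 * Illustrative arithmetic (an assumption about the constant's value, not
 * taken from the original source): with the customary initiator ID of 7 and
 * MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID == 16, the value computed in
 * mpt_set_initial_config_spi() above is
 *
 *	pp1val = ((1 << 7) << 16) | 7 = 0x00800007
 *
 * i.e. the low byte carries the port's own SCSI ID and the upper half-word
 * is a one-hot bitmask of the IDs the port responds to.
 */
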
10882545bca0SMatthew Dillon */ 10892545bca0SMatthew Dillon i = mpt->mpt_port_page2.PortSettings & 10902545bca0SMatthew Dillon MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 10912545bca0SMatthew Dillon if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { 10922545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 10932545bca0SMatthew Dillon "honoring BIOS transfer negotiations\n"); 10942545bca0SMatthew Dillon } else { 10952545bca0SMatthew Dillon for (i = 0; i < 16; i++) { 10962545bca0SMatthew Dillon mpt->mpt_dev_page1[i].RequestedParameters = 0; 10972545bca0SMatthew Dillon mpt->mpt_dev_page1[i].Configuration = 0; 10982545bca0SMatthew Dillon (void) mpt_update_spi_config(mpt, i); 10992545bca0SMatthew Dillon } 11002545bca0SMatthew Dillon } 11012545bca0SMatthew Dillon return (0); 11022545bca0SMatthew Dillon } 11032545bca0SMatthew Dillon 11044c42baf4SSascha Wildner static int 11052545bca0SMatthew Dillon mpt_cam_enable(struct mpt_softc *mpt) 11062545bca0SMatthew Dillon { 11072545bca0SMatthew Dillon int error; 11082545bca0SMatthew Dillon 11092545bca0SMatthew Dillon MPT_LOCK(mpt); 11102545bca0SMatthew Dillon 11112545bca0SMatthew Dillon error = EIO; 11122545bca0SMatthew Dillon if (mpt->is_fc) { 11132545bca0SMatthew Dillon if (mpt_read_config_info_fc(mpt)) { 11142545bca0SMatthew Dillon goto out; 11152545bca0SMatthew Dillon } 11162545bca0SMatthew Dillon if (mpt_set_initial_config_fc(mpt)) { 11172545bca0SMatthew Dillon goto out; 11182545bca0SMatthew Dillon } 11192545bca0SMatthew Dillon } else if (mpt->is_sas) { 11202545bca0SMatthew Dillon if (mpt_read_config_info_sas(mpt)) { 11212545bca0SMatthew Dillon goto out; 11222545bca0SMatthew Dillon } 11232545bca0SMatthew Dillon if (mpt_set_initial_config_sas(mpt)) { 11242545bca0SMatthew Dillon goto out; 11252545bca0SMatthew Dillon } 11262545bca0SMatthew Dillon } else if (mpt->is_spi) { 11272545bca0SMatthew Dillon if (mpt_read_config_info_spi(mpt)) { 11282545bca0SMatthew Dillon goto out; 11292545bca0SMatthew Dillon } 11302545bca0SMatthew Dillon if (mpt_set_initial_config_spi(mpt)) { 11312545bca0SMatthew Dillon goto out; 11322545bca0SMatthew Dillon } 11332545bca0SMatthew Dillon } 11342545bca0SMatthew Dillon error = 0; 11352545bca0SMatthew Dillon 11362545bca0SMatthew Dillon out: 11372545bca0SMatthew Dillon MPT_UNLOCK(mpt); 11382545bca0SMatthew Dillon return (error); 11392545bca0SMatthew Dillon } 11402545bca0SMatthew Dillon 11414c42baf4SSascha Wildner static void 11422545bca0SMatthew Dillon mpt_cam_ready(struct mpt_softc *mpt) 11432545bca0SMatthew Dillon { 11444c42baf4SSascha Wildner 11452545bca0SMatthew Dillon /* 11462545bca0SMatthew Dillon * If we're in target mode, hang out resources now 11472545bca0SMatthew Dillon * so we don't cause the world to hang talking to us. 
11482545bca0SMatthew Dillon */ 11492545bca0SMatthew Dillon if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 11502545bca0SMatthew Dillon /* 11512545bca0SMatthew Dillon * Try to add some target command resources 11522545bca0SMatthew Dillon */ 11532545bca0SMatthew Dillon MPT_LOCK(mpt); 11542545bca0SMatthew Dillon if (mpt_add_target_commands(mpt) == FALSE) { 11552545bca0SMatthew Dillon mpt_prt(mpt, "failed to add target commands\n"); 11562545bca0SMatthew Dillon } 11572545bca0SMatthew Dillon MPT_UNLOCK(mpt); 11582545bca0SMatthew Dillon } 11592545bca0SMatthew Dillon mpt->ready = 1; 11602545bca0SMatthew Dillon } 11612545bca0SMatthew Dillon 11624c42baf4SSascha Wildner static void 11632545bca0SMatthew Dillon mpt_cam_detach(struct mpt_softc *mpt) 11642545bca0SMatthew Dillon { 11652545bca0SMatthew Dillon mpt_handler_t handler; 11662545bca0SMatthew Dillon 11672545bca0SMatthew Dillon MPT_LOCK(mpt); 11682545bca0SMatthew Dillon mpt->ready = 0; 11692545bca0SMatthew Dillon mpt_terminate_recovery_thread(mpt); 11702545bca0SMatthew Dillon 11712545bca0SMatthew Dillon handler.reply_handler = mpt_scsi_reply_handler; 11722545bca0SMatthew Dillon mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 11732545bca0SMatthew Dillon scsi_io_handler_id); 11742545bca0SMatthew Dillon handler.reply_handler = mpt_scsi_tmf_reply_handler; 11752545bca0SMatthew Dillon mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 11762545bca0SMatthew Dillon scsi_tmf_handler_id); 11772545bca0SMatthew Dillon handler.reply_handler = mpt_fc_els_reply_handler; 11782545bca0SMatthew Dillon mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 11792545bca0SMatthew Dillon fc_els_handler_id); 11802545bca0SMatthew Dillon handler.reply_handler = mpt_scsi_tgt_reply_handler; 11812545bca0SMatthew Dillon mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 11822545bca0SMatthew Dillon mpt->scsi_tgt_handler_id); 11832545bca0SMatthew Dillon handler.reply_handler = mpt_sata_pass_reply_handler; 11842545bca0SMatthew Dillon mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 11852545bca0SMatthew Dillon sata_pass_handler_id); 11862545bca0SMatthew Dillon 11872545bca0SMatthew Dillon if (mpt->tmf_req != NULL) { 11882545bca0SMatthew Dillon mpt->tmf_req->state = REQ_STATE_ALLOCATED; 11892545bca0SMatthew Dillon mpt_free_request(mpt, mpt->tmf_req); 11902545bca0SMatthew Dillon mpt->tmf_req = NULL; 11912545bca0SMatthew Dillon } 11922545bca0SMatthew Dillon if (mpt->sas_portinfo != NULL) { 11932545bca0SMatthew Dillon kfree(mpt->sas_portinfo, M_DEVBUF); 11942545bca0SMatthew Dillon mpt->sas_portinfo = NULL; 11952545bca0SMatthew Dillon } 11962545bca0SMatthew Dillon 11972545bca0SMatthew Dillon if (mpt->sim != NULL) { 11982545bca0SMatthew Dillon xpt_free_path(mpt->path); 11992545bca0SMatthew Dillon xpt_bus_deregister(cam_sim_path(mpt->sim)); 12002545bca0SMatthew Dillon cam_sim_free(mpt->sim); 12012545bca0SMatthew Dillon mpt->sim = NULL; 12022545bca0SMatthew Dillon } 12032545bca0SMatthew Dillon 12042545bca0SMatthew Dillon if (mpt->phydisk_sim != NULL) { 12052545bca0SMatthew Dillon xpt_free_path(mpt->phydisk_path); 12062545bca0SMatthew Dillon xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); 12072545bca0SMatthew Dillon cam_sim_free(mpt->phydisk_sim); 12082545bca0SMatthew Dillon mpt->phydisk_sim = NULL; 12092545bca0SMatthew Dillon } 12106d259fc1SSascha Wildner MPT_UNLOCK(mpt); 12112545bca0SMatthew Dillon } 12122545bca0SMatthew Dillon 12132545bca0SMatthew Dillon /* This routine is used after a system crash to dump core onto the swap device. 
12142545bca0SMatthew Dillon */ 12152545bca0SMatthew Dillon static void 12162545bca0SMatthew Dillon mpt_poll(struct cam_sim *sim) 12172545bca0SMatthew Dillon { 12182545bca0SMatthew Dillon struct mpt_softc *mpt; 12192545bca0SMatthew Dillon 12202545bca0SMatthew Dillon mpt = (struct mpt_softc *)cam_sim_softc(sim); 12212545bca0SMatthew Dillon mpt_intr(mpt); 12222545bca0SMatthew Dillon } 12232545bca0SMatthew Dillon 12242545bca0SMatthew Dillon /* 12252545bca0SMatthew Dillon * Watchdog timeout routine for SCSI requests. 12262545bca0SMatthew Dillon */ 12272545bca0SMatthew Dillon static void 12282545bca0SMatthew Dillon mpt_timeout(void *arg) 12292545bca0SMatthew Dillon { 12302545bca0SMatthew Dillon union ccb *ccb; 12312545bca0SMatthew Dillon struct mpt_softc *mpt; 12322545bca0SMatthew Dillon request_t *req; 12332545bca0SMatthew Dillon 12342545bca0SMatthew Dillon ccb = (union ccb *)arg; 12352545bca0SMatthew Dillon mpt = ccb->ccb_h.ccb_mpt_ptr; 12362545bca0SMatthew Dillon 12372be58998SSascha Wildner MPT_LOCK(mpt); 12382545bca0SMatthew Dillon req = ccb->ccb_h.ccb_req_ptr; 12392545bca0SMatthew Dillon mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, 12402545bca0SMatthew Dillon req->serno, ccb, req->ccb); 12412545bca0SMatthew Dillon /* XXX: WHAT ARE WE TRYING TO DO HERE? */ 12422545bca0SMatthew Dillon if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { 12432545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 12442545bca0SMatthew Dillon TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); 12452545bca0SMatthew Dillon req->state |= REQ_STATE_TIMEDOUT; 12462545bca0SMatthew Dillon mpt_wakeup_recovery_thread(mpt); 12472545bca0SMatthew Dillon } 12482be58998SSascha Wildner MPT_UNLOCK(mpt); 12492545bca0SMatthew Dillon } 12502545bca0SMatthew Dillon 12512545bca0SMatthew Dillon /* 12522545bca0SMatthew Dillon * Callback routine from "bus_dmamap_load" or, in simple cases, called directly. 12532545bca0SMatthew Dillon * 12542545bca0SMatthew Dillon * Takes a list of physical segments and builds the SGL for the SCSI IO command 12552545bca0SMatthew Dillon * and forwards the command to the IOC after one last check that CAM has not 12562545bca0SMatthew Dillon * aborted the transaction.
12572545bca0SMatthew Dillon */ 12582545bca0SMatthew Dillon static void 12592545bca0SMatthew Dillon mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 12602545bca0SMatthew Dillon { 12612545bca0SMatthew Dillon request_t *req, *trq; 12622545bca0SMatthew Dillon char *mpt_off; 12632545bca0SMatthew Dillon union ccb *ccb; 12642545bca0SMatthew Dillon struct mpt_softc *mpt; 12654c42baf4SSascha Wildner bus_addr_t chain_list_addr; 12664c42baf4SSascha Wildner int first_lim, seg, this_seg_lim; 12674c42baf4SSascha Wildner uint32_t addr, cur_off, flags, nxt_off, tf; 12682545bca0SMatthew Dillon void *sglp = NULL; 12692545bca0SMatthew Dillon MSG_REQUEST_HEADER *hdrp; 12702545bca0SMatthew Dillon SGE_SIMPLE64 *se; 12712545bca0SMatthew Dillon SGE_CHAIN64 *ce; 12722545bca0SMatthew Dillon int istgt = 0; 12732545bca0SMatthew Dillon 12742545bca0SMatthew Dillon req = (request_t *)arg; 12752545bca0SMatthew Dillon ccb = req->ccb; 12762545bca0SMatthew Dillon 12772545bca0SMatthew Dillon mpt = ccb->ccb_h.ccb_mpt_ptr; 12782545bca0SMatthew Dillon req = ccb->ccb_h.ccb_req_ptr; 12792545bca0SMatthew Dillon 12802545bca0SMatthew Dillon hdrp = req->req_vbuf; 12812545bca0SMatthew Dillon mpt_off = req->req_vbuf; 12822545bca0SMatthew Dillon 12832545bca0SMatthew Dillon if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 12842545bca0SMatthew Dillon error = EFBIG; 12852545bca0SMatthew Dillon } 12862545bca0SMatthew Dillon 12872545bca0SMatthew Dillon if (error == 0) { 12882545bca0SMatthew Dillon switch (hdrp->Function) { 12892545bca0SMatthew Dillon case MPI_FUNCTION_SCSI_IO_REQUEST: 12902545bca0SMatthew Dillon case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 12912545bca0SMatthew Dillon istgt = 0; 12922545bca0SMatthew Dillon sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 12932545bca0SMatthew Dillon break; 12942545bca0SMatthew Dillon case MPI_FUNCTION_TARGET_ASSIST: 12952545bca0SMatthew Dillon istgt = 1; 12962545bca0SMatthew Dillon sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 12972545bca0SMatthew Dillon break; 12982545bca0SMatthew Dillon default: 12992545bca0SMatthew Dillon mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 13002545bca0SMatthew Dillon hdrp->Function); 13012545bca0SMatthew Dillon error = EINVAL; 13022545bca0SMatthew Dillon break; 13032545bca0SMatthew Dillon } 13042545bca0SMatthew Dillon } 13052545bca0SMatthew Dillon 13062545bca0SMatthew Dillon if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 13072545bca0SMatthew Dillon error = EFBIG; 13082545bca0SMatthew Dillon mpt_prt(mpt, "segment count %d too large (max %u)\n", 13092545bca0SMatthew Dillon nseg, mpt->max_seg_cnt); 13102545bca0SMatthew Dillon } 13112545bca0SMatthew Dillon 13122545bca0SMatthew Dillon bad: 13132545bca0SMatthew Dillon if (error != 0) { 13142545bca0SMatthew Dillon if (error != EFBIG && error != ENOMEM) { 13152545bca0SMatthew Dillon mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 13162545bca0SMatthew Dillon } 13172545bca0SMatthew Dillon if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 13182545bca0SMatthew Dillon cam_status status; 13192545bca0SMatthew Dillon mpt_freeze_ccb(ccb); 13202545bca0SMatthew Dillon if (error == EFBIG) { 13212545bca0SMatthew Dillon status = CAM_REQ_TOO_BIG; 13222545bca0SMatthew Dillon } else if (error == ENOMEM) { 13232545bca0SMatthew Dillon if (mpt->outofbeer == 0) { 13242545bca0SMatthew Dillon mpt->outofbeer = 1; 13252545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 13262545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 13272545bca0SMatthew Dillon 
"FREEZEQ\n"); 13282545bca0SMatthew Dillon } 13292545bca0SMatthew Dillon status = CAM_REQUEUE_REQ; 13302545bca0SMatthew Dillon } else { 13312545bca0SMatthew Dillon status = CAM_REQ_CMP_ERR; 13322545bca0SMatthew Dillon } 13332545bca0SMatthew Dillon mpt_set_ccb_status(ccb, status); 13342545bca0SMatthew Dillon } 13352545bca0SMatthew Dillon if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 13362545bca0SMatthew Dillon request_t *cmd_req = 13372545bca0SMatthew Dillon MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 13382545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 13392545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 13402545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 13412545bca0SMatthew Dillon } 13422545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 13434c42baf4SSascha Wildner KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 13442545bca0SMatthew Dillon xpt_done(ccb); 13452545bca0SMatthew Dillon mpt_free_request(mpt, req); 13462545bca0SMatthew Dillon return; 13472545bca0SMatthew Dillon } 13482545bca0SMatthew Dillon 13492545bca0SMatthew Dillon /* 13502545bca0SMatthew Dillon * No data to transfer? 13512545bca0SMatthew Dillon * Just make a single simple SGL with zero length. 13522545bca0SMatthew Dillon */ 13532545bca0SMatthew Dillon 13542545bca0SMatthew Dillon if (mpt->verbose >= MPT_PRT_DEBUG) { 13552545bca0SMatthew Dillon int tidx = ((char *)sglp) - mpt_off; 13562545bca0SMatthew Dillon memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 13572545bca0SMatthew Dillon } 13582545bca0SMatthew Dillon 13592545bca0SMatthew Dillon if (nseg == 0) { 13602545bca0SMatthew Dillon SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 13612545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se1, 13622545bca0SMatthew Dillon (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 13632545bca0SMatthew Dillon MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 13642545bca0SMatthew Dillon se1->FlagsLength = htole32(se1->FlagsLength); 13652545bca0SMatthew Dillon goto out; 13662545bca0SMatthew Dillon } 13672545bca0SMatthew Dillon 13682545bca0SMatthew Dillon 13692545bca0SMatthew Dillon flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 13702545bca0SMatthew Dillon if (istgt == 0) { 13712545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 13722545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 13732545bca0SMatthew Dillon } 13742545bca0SMatthew Dillon } else { 13752545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 13762545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 13772545bca0SMatthew Dillon } 13782545bca0SMatthew Dillon } 13792545bca0SMatthew Dillon 13802545bca0SMatthew Dillon if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 13812545bca0SMatthew Dillon bus_dmasync_op_t op; 13822545bca0SMatthew Dillon if (istgt == 0) { 13832545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 13842545bca0SMatthew Dillon op = BUS_DMASYNC_PREREAD; 13852545bca0SMatthew Dillon } else { 13862545bca0SMatthew Dillon op = BUS_DMASYNC_PREWRITE; 13872545bca0SMatthew Dillon } 13882545bca0SMatthew Dillon } else { 13892545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 13902545bca0SMatthew Dillon op = BUS_DMASYNC_PREWRITE; 13912545bca0SMatthew Dillon } else { 13922545bca0SMatthew Dillon op = BUS_DMASYNC_PREREAD; 13932545bca0SMatthew Dillon } 13942545bca0SMatthew Dillon } 13952545bca0SMatthew Dillon 
bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 13962545bca0SMatthew Dillon } 13972545bca0SMatthew Dillon 13982545bca0SMatthew Dillon /* 13992545bca0SMatthew Dillon * Okay, fill in what we can at the end of the command frame. 14002545bca0SMatthew Dillon * If we have up to MPT_NSGL_FIRST, we can fit them all into 14012545bca0SMatthew Dillon * the command frame. 14022545bca0SMatthew Dillon * 14032545bca0SMatthew Dillon * Otherwise, we fill up through MPT_NSGL_FIRST less one 14042545bca0SMatthew Dillon * SIMPLE64 pointers and start doing CHAIN64 entries after 14052545bca0SMatthew Dillon * that. 14062545bca0SMatthew Dillon */ 14072545bca0SMatthew Dillon 14082545bca0SMatthew Dillon if (nseg < MPT_NSGL_FIRST(mpt)) { 14092545bca0SMatthew Dillon first_lim = nseg; 14102545bca0SMatthew Dillon } else { 14112545bca0SMatthew Dillon /* 14122545bca0SMatthew Dillon * Leave room for CHAIN element 14132545bca0SMatthew Dillon */ 14142545bca0SMatthew Dillon first_lim = MPT_NSGL_FIRST(mpt) - 1; 14152545bca0SMatthew Dillon } 14162545bca0SMatthew Dillon 14172545bca0SMatthew Dillon se = (SGE_SIMPLE64 *) sglp; 14182545bca0SMatthew Dillon for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 14194c42baf4SSascha Wildner tf = flags; 14202545bca0SMatthew Dillon memset(se, 0, sizeof (*se)); 14214c42baf4SSascha Wildner MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 14222545bca0SMatthew Dillon se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 14232545bca0SMatthew Dillon if (sizeof(bus_addr_t) > 4) { 14244c42baf4SSascha Wildner addr = ((uint64_t)dm_segs->ds_addr) >> 32; 14254c42baf4SSascha Wildner /* SAS1078 36GB limitation WAR */ 14264c42baf4SSascha Wildner if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + 14274c42baf4SSascha Wildner MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { 1428*f582582cSSascha Wildner addr |= (1U << 31); 14294c42baf4SSascha Wildner tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 14302545bca0SMatthew Dillon } 14314c42baf4SSascha Wildner se->Address.High = htole32(addr); 14324c42baf4SSascha Wildner } 14332545bca0SMatthew Dillon if (seg == first_lim - 1) { 14342545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 14352545bca0SMatthew Dillon } 14362545bca0SMatthew Dillon if (seg == nseg - 1) { 14372545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_END_OF_LIST | 14382545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER; 14392545bca0SMatthew Dillon } 14402545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se, tf); 14412545bca0SMatthew Dillon se->FlagsLength = htole32(se->FlagsLength); 14422545bca0SMatthew Dillon } 14432545bca0SMatthew Dillon 14442545bca0SMatthew Dillon if (seg == nseg) { 14452545bca0SMatthew Dillon goto out; 14462545bca0SMatthew Dillon } 14472545bca0SMatthew Dillon 14482545bca0SMatthew Dillon /* 14492545bca0SMatthew Dillon * Tell the IOC where to find the first chain element. 14502545bca0SMatthew Dillon */ 14512545bca0SMatthew Dillon hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 14522545bca0SMatthew Dillon nxt_off = MPT_RQSL(mpt); 14532545bca0SMatthew Dillon trq = req; 14542545bca0SMatthew Dillon 14552545bca0SMatthew Dillon /* 14562545bca0SMatthew Dillon * Make up the rest of the data segments out of a chain element 14574c42baf4SSascha Wildner * (contained in the current request frame) which points to 14582545bca0SMatthew Dillon * SIMPLE64 elements in the next request frame, possibly ending 14592545bca0SMatthew Dillon * with *another* chain element (if there's more). 
14602545bca0SMatthew Dillon */ 14612545bca0SMatthew Dillon while (seg < nseg) { 14622545bca0SMatthew Dillon /* 14632545bca0SMatthew Dillon * Point to the chain descriptor. Note that the chain 14642545bca0SMatthew Dillon * descriptor is at the end of the *previous* list (whether 14652545bca0SMatthew Dillon * chain or simple). 14662545bca0SMatthew Dillon */ 14672545bca0SMatthew Dillon ce = (SGE_CHAIN64 *) se; 14682545bca0SMatthew Dillon 14692545bca0SMatthew Dillon /* 14702545bca0SMatthew Dillon * Before we change our current pointer, make sure we won't 14712545bca0SMatthew Dillon * overflow the request area with this frame. Note that we 14722545bca0SMatthew Dillon * test against 'greater than' here as it's okay in this case 14732545bca0SMatthew Dillon * to have next offset be just outside the request area. 14742545bca0SMatthew Dillon */ 14752545bca0SMatthew Dillon if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 14762545bca0SMatthew Dillon nxt_off = MPT_REQUEST_AREA; 14772545bca0SMatthew Dillon goto next_chain; 14782545bca0SMatthew Dillon } 14792545bca0SMatthew Dillon 14802545bca0SMatthew Dillon /* 14812545bca0SMatthew Dillon * Set our SGE element pointer to the beginning of the chain 14822545bca0SMatthew Dillon * list and update our next chain list offset. 14832545bca0SMatthew Dillon */ 14842545bca0SMatthew Dillon se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 14852545bca0SMatthew Dillon cur_off = nxt_off; 14862545bca0SMatthew Dillon nxt_off += MPT_RQSL(mpt); 14872545bca0SMatthew Dillon 14882545bca0SMatthew Dillon /* 14894c42baf4SSascha Wildner * Now initialize the chain descriptor. 14902545bca0SMatthew Dillon */ 14912545bca0SMatthew Dillon memset(ce, 0, sizeof (*ce)); 14922545bca0SMatthew Dillon 14932545bca0SMatthew Dillon /* 14942545bca0SMatthew Dillon * Get the physical address of the chain list. 14952545bca0SMatthew Dillon */ 14962545bca0SMatthew Dillon chain_list_addr = trq->req_pbuf; 14972545bca0SMatthew Dillon chain_list_addr += cur_off; 14982545bca0SMatthew Dillon if (sizeof (bus_addr_t) > 4) { 14992545bca0SMatthew Dillon ce->Address.High = 15002545bca0SMatthew Dillon htole32(((uint64_t)chain_list_addr) >> 32); 15012545bca0SMatthew Dillon } 15022545bca0SMatthew Dillon ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 15032545bca0SMatthew Dillon ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 15042545bca0SMatthew Dillon MPI_SGE_FLAGS_64_BIT_ADDRESSING; 15052545bca0SMatthew Dillon 15062545bca0SMatthew Dillon /* 15072545bca0SMatthew Dillon * If we have more than a frame's worth of segments left, 15082545bca0SMatthew Dillon * set up the chain list to have the last element be another 15092545bca0SMatthew Dillon * chain descriptor. 15102545bca0SMatthew Dillon */ 15112545bca0SMatthew Dillon if ((nseg - seg) > MPT_NSGL(mpt)) { 15122545bca0SMatthew Dillon this_seg_lim = seg + MPT_NSGL(mpt) - 1; 15132545bca0SMatthew Dillon /* 15142545bca0SMatthew Dillon * The length of the chain is the length in bytes of the 15152545bca0SMatthew Dillon * number of segments plus the next chain element. 15162545bca0SMatthew Dillon * 15172545bca0SMatthew Dillon * The next chain descriptor offset is the length, 15182545bca0SMatthew Dillon * in words, of the number of segments. 
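			 * (For instance, a chain list carrying K SIMPLE64
			 * entries ahead of a trailing chain element comes out
			 * below as NextChainOffset = (K * sizeof (SGE_SIMPLE64)) >> 2
			 * and Length = K * sizeof (SGE_SIMPLE64) +
			 * sizeof (SGE_CHAIN64) bytes.)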
15192545bca0SMatthew Dillon */ 15202545bca0SMatthew Dillon ce->Length = (this_seg_lim - seg) * 15212545bca0SMatthew Dillon sizeof (SGE_SIMPLE64); 15222545bca0SMatthew Dillon ce->NextChainOffset = ce->Length >> 2; 15232545bca0SMatthew Dillon ce->Length += sizeof (SGE_CHAIN64); 15242545bca0SMatthew Dillon } else { 15252545bca0SMatthew Dillon this_seg_lim = nseg; 15262545bca0SMatthew Dillon ce->Length = (this_seg_lim - seg) * 15272545bca0SMatthew Dillon sizeof (SGE_SIMPLE64); 15282545bca0SMatthew Dillon } 15292545bca0SMatthew Dillon ce->Length = htole16(ce->Length); 15302545bca0SMatthew Dillon 15312545bca0SMatthew Dillon /* 15322545bca0SMatthew Dillon * Fill in the chain list SGE elements with our segment data. 15332545bca0SMatthew Dillon * 15342545bca0SMatthew Dillon * If we're the last element in this chain list, set the last 15352545bca0SMatthew Dillon * element flag. If we're the completely last element period, 15362545bca0SMatthew Dillon * set the end of list and end of buffer flags. 15372545bca0SMatthew Dillon */ 15382545bca0SMatthew Dillon while (seg < this_seg_lim) { 15394c42baf4SSascha Wildner tf = flags; 15402545bca0SMatthew Dillon memset(se, 0, sizeof (*se)); 15414c42baf4SSascha Wildner MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 15422545bca0SMatthew Dillon se->Address.Low = htole32(dm_segs->ds_addr & 15432545bca0SMatthew Dillon 0xffffffff); 15442545bca0SMatthew Dillon if (sizeof (bus_addr_t) > 4) { 15454c42baf4SSascha Wildner addr = ((uint64_t)dm_segs->ds_addr) >> 32; 15464c42baf4SSascha Wildner /* SAS1078 36GB limitation WAR */ 15474c42baf4SSascha Wildner if (mpt->is_1078 && 15484c42baf4SSascha Wildner (((uint64_t)dm_segs->ds_addr + 15494c42baf4SSascha Wildner MPI_SGE_LENGTH(se->FlagsLength)) >> 15504c42baf4SSascha Wildner 32) == 9) { 1551*f582582cSSascha Wildner addr |= (1U << 31); 15524c42baf4SSascha Wildner tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 15532545bca0SMatthew Dillon } 15544c42baf4SSascha Wildner se->Address.High = htole32(addr); 15554c42baf4SSascha Wildner } 15562545bca0SMatthew Dillon if (seg == this_seg_lim - 1) { 15572545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 15582545bca0SMatthew Dillon } 15592545bca0SMatthew Dillon if (seg == nseg - 1) { 15602545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_END_OF_LIST | 15612545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER; 15622545bca0SMatthew Dillon } 15632545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se, tf); 15642545bca0SMatthew Dillon se->FlagsLength = htole32(se->FlagsLength); 15652545bca0SMatthew Dillon se++; 15662545bca0SMatthew Dillon seg++; 15672545bca0SMatthew Dillon dm_segs++; 15682545bca0SMatthew Dillon } 15692545bca0SMatthew Dillon 15702545bca0SMatthew Dillon next_chain: 15712545bca0SMatthew Dillon /* 15722545bca0SMatthew Dillon * If we have more segments to do and we've used up all of 15732545bca0SMatthew Dillon * the space in a request area, go allocate another one 15742545bca0SMatthew Dillon * and chain to that. 15752545bca0SMatthew Dillon */ 15762545bca0SMatthew Dillon if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 15772545bca0SMatthew Dillon request_t *nrq; 15782545bca0SMatthew Dillon 15792545bca0SMatthew Dillon nrq = mpt_get_request(mpt, FALSE); 15802545bca0SMatthew Dillon 15812545bca0SMatthew Dillon if (nrq == NULL) { 15822545bca0SMatthew Dillon error = ENOMEM; 15832545bca0SMatthew Dillon goto bad; 15842545bca0SMatthew Dillon } 15852545bca0SMatthew Dillon 15862545bca0SMatthew Dillon /* 15872545bca0SMatthew Dillon * Append the new request area on the tail of our list. 
15882545bca0SMatthew Dillon */ 15892545bca0SMatthew Dillon if ((trq = req->chain) == NULL) { 15902545bca0SMatthew Dillon req->chain = nrq; 15912545bca0SMatthew Dillon } else { 15922545bca0SMatthew Dillon while (trq->chain != NULL) { 15932545bca0SMatthew Dillon trq = trq->chain; 15942545bca0SMatthew Dillon } 15952545bca0SMatthew Dillon trq->chain = nrq; 15962545bca0SMatthew Dillon } 15972545bca0SMatthew Dillon trq = nrq; 15982545bca0SMatthew Dillon mpt_off = trq->req_vbuf; 15992545bca0SMatthew Dillon if (mpt->verbose >= MPT_PRT_DEBUG) { 16002545bca0SMatthew Dillon memset(mpt_off, 0xff, MPT_REQUEST_AREA); 16012545bca0SMatthew Dillon } 16022545bca0SMatthew Dillon nxt_off = 0; 16032545bca0SMatthew Dillon } 16042545bca0SMatthew Dillon } 16052545bca0SMatthew Dillon out: 16062545bca0SMatthew Dillon 16072545bca0SMatthew Dillon /* 16082545bca0SMatthew Dillon * Last time we need to check if this CCB needs to be aborted. 16092545bca0SMatthew Dillon */ 16102545bca0SMatthew Dillon if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 16112545bca0SMatthew Dillon if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 16122545bca0SMatthew Dillon request_t *cmd_req = 16132545bca0SMatthew Dillon MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 16142545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 16152545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 16162545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 16172545bca0SMatthew Dillon } 16182545bca0SMatthew Dillon mpt_prt(mpt, 16192545bca0SMatthew Dillon "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 16202545bca0SMatthew Dillon ccb->ccb_h.status & CAM_STATUS_MASK); 16212545bca0SMatthew Dillon if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 16222545bca0SMatthew Dillon bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 16232545bca0SMatthew Dillon } 16242545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 16254c42baf4SSascha Wildner KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 16262545bca0SMatthew Dillon xpt_done(ccb); 16272545bca0SMatthew Dillon mpt_free_request(mpt, req); 16282545bca0SMatthew Dillon return; 16292545bca0SMatthew Dillon } 16302545bca0SMatthew Dillon 16312545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_SIM_QUEUED; 16322545bca0SMatthew Dillon if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 16332545bca0SMatthew Dillon mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 16342545bca0SMatthew Dillon mpt_timeout, ccb); 16352545bca0SMatthew Dillon } 16362545bca0SMatthew Dillon if (mpt->verbose > MPT_PRT_DEBUG) { 16372545bca0SMatthew Dillon int nc = 0; 16382545bca0SMatthew Dillon mpt_print_request(req->req_vbuf); 16392545bca0SMatthew Dillon for (trq = req->chain; trq; trq = trq->chain) { 16402545bca0SMatthew Dillon kprintf(" Additional Chain Area %d\n", nc++); 16412545bca0SMatthew Dillon mpt_dump_sgl(trq->req_vbuf, 0); 16422545bca0SMatthew Dillon } 16432545bca0SMatthew Dillon } 16442545bca0SMatthew Dillon 16452545bca0SMatthew Dillon if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 16462545bca0SMatthew Dillon request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 16472545bca0SMatthew Dillon mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 16482545bca0SMatthew Dillon #ifdef WE_TRUST_AUTO_GOOD_STATUS 16492545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 16502545bca0SMatthew Dillon csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 16512545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 16522545bca0SMatthew Dillon 
} else { 16532545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA; 16542545bca0SMatthew Dillon } 16552545bca0SMatthew Dillon #else 16562545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA; 16572545bca0SMatthew Dillon #endif 16582545bca0SMatthew Dillon } 16592545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 16602545bca0SMatthew Dillon } 16612545bca0SMatthew Dillon 16622545bca0SMatthew Dillon static void 16632545bca0SMatthew Dillon mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 16642545bca0SMatthew Dillon { 16652545bca0SMatthew Dillon request_t *req, *trq; 16662545bca0SMatthew Dillon char *mpt_off; 16672545bca0SMatthew Dillon union ccb *ccb; 16682545bca0SMatthew Dillon struct mpt_softc *mpt; 16692545bca0SMatthew Dillon int seg, first_lim; 16702545bca0SMatthew Dillon uint32_t flags, nxt_off; 16712545bca0SMatthew Dillon void *sglp = NULL; 16722545bca0SMatthew Dillon MSG_REQUEST_HEADER *hdrp; 16732545bca0SMatthew Dillon SGE_SIMPLE32 *se; 16742545bca0SMatthew Dillon SGE_CHAIN32 *ce; 16752545bca0SMatthew Dillon int istgt = 0; 16762545bca0SMatthew Dillon 16772545bca0SMatthew Dillon req = (request_t *)arg; 16782545bca0SMatthew Dillon ccb = req->ccb; 16792545bca0SMatthew Dillon 16802545bca0SMatthew Dillon mpt = ccb->ccb_h.ccb_mpt_ptr; 16812545bca0SMatthew Dillon req = ccb->ccb_h.ccb_req_ptr; 16822545bca0SMatthew Dillon 16832545bca0SMatthew Dillon hdrp = req->req_vbuf; 16842545bca0SMatthew Dillon mpt_off = req->req_vbuf; 16852545bca0SMatthew Dillon 16862545bca0SMatthew Dillon 16872545bca0SMatthew Dillon if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 16882545bca0SMatthew Dillon error = EFBIG; 16892545bca0SMatthew Dillon } 16902545bca0SMatthew Dillon 16912545bca0SMatthew Dillon if (error == 0) { 16922545bca0SMatthew Dillon switch (hdrp->Function) { 16932545bca0SMatthew Dillon case MPI_FUNCTION_SCSI_IO_REQUEST: 16942545bca0SMatthew Dillon case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 16952545bca0SMatthew Dillon sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 16962545bca0SMatthew Dillon break; 16972545bca0SMatthew Dillon case MPI_FUNCTION_TARGET_ASSIST: 16982545bca0SMatthew Dillon istgt = 1; 16992545bca0SMatthew Dillon sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 17002545bca0SMatthew Dillon break; 17012545bca0SMatthew Dillon default: 17022545bca0SMatthew Dillon mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", 17032545bca0SMatthew Dillon hdrp->Function); 17042545bca0SMatthew Dillon error = EINVAL; 17052545bca0SMatthew Dillon break; 17062545bca0SMatthew Dillon } 17072545bca0SMatthew Dillon } 17082545bca0SMatthew Dillon 17092545bca0SMatthew Dillon if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 17102545bca0SMatthew Dillon error = EFBIG; 17112545bca0SMatthew Dillon mpt_prt(mpt, "segment count %d too large (max %u)\n", 17122545bca0SMatthew Dillon nseg, mpt->max_seg_cnt); 17132545bca0SMatthew Dillon } 17142545bca0SMatthew Dillon 17152545bca0SMatthew Dillon bad: 17162545bca0SMatthew Dillon if (error != 0) { 17172545bca0SMatthew Dillon if (error != EFBIG && error != ENOMEM) { 17182545bca0SMatthew Dillon mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 17192545bca0SMatthew Dillon } 17202545bca0SMatthew Dillon if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 17212545bca0SMatthew Dillon cam_status status; 17222545bca0SMatthew Dillon mpt_freeze_ccb(ccb); 17232545bca0SMatthew Dillon if (error == EFBIG) { 17242545bca0SMatthew Dillon status = CAM_REQ_TOO_BIG; 17252545bca0SMatthew Dillon } else if (error == ENOMEM) { 17262545bca0SMatthew 
Dillon if (mpt->outofbeer == 0) { 17272545bca0SMatthew Dillon mpt->outofbeer = 1; 17282545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 17292545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 17302545bca0SMatthew Dillon "FREEZEQ\n"); 17312545bca0SMatthew Dillon } 17322545bca0SMatthew Dillon status = CAM_REQUEUE_REQ; 17332545bca0SMatthew Dillon } else { 17342545bca0SMatthew Dillon status = CAM_REQ_CMP_ERR; 17352545bca0SMatthew Dillon } 17362545bca0SMatthew Dillon mpt_set_ccb_status(ccb, status); 17372545bca0SMatthew Dillon } 17382545bca0SMatthew Dillon if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 17392545bca0SMatthew Dillon request_t *cmd_req = 17402545bca0SMatthew Dillon MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 17412545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 17422545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 17432545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 17442545bca0SMatthew Dillon } 17452545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 17464c42baf4SSascha Wildner KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 17472545bca0SMatthew Dillon xpt_done(ccb); 17482545bca0SMatthew Dillon mpt_free_request(mpt, req); 17492545bca0SMatthew Dillon return; 17502545bca0SMatthew Dillon } 17512545bca0SMatthew Dillon 17522545bca0SMatthew Dillon /* 17532545bca0SMatthew Dillon * No data to transfer? 17542545bca0SMatthew Dillon * Just make a single simple SGL with zero length. 17552545bca0SMatthew Dillon */ 17562545bca0SMatthew Dillon 17572545bca0SMatthew Dillon if (mpt->verbose >= MPT_PRT_DEBUG) { 17582545bca0SMatthew Dillon int tidx = ((char *)sglp) - mpt_off; 17592545bca0SMatthew Dillon memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 17602545bca0SMatthew Dillon } 17612545bca0SMatthew Dillon 17622545bca0SMatthew Dillon if (nseg == 0) { 17632545bca0SMatthew Dillon SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 17642545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se1, 17652545bca0SMatthew Dillon (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 17662545bca0SMatthew Dillon MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 17672545bca0SMatthew Dillon se1->FlagsLength = htole32(se1->FlagsLength); 17682545bca0SMatthew Dillon goto out; 17692545bca0SMatthew Dillon } 17702545bca0SMatthew Dillon 17712545bca0SMatthew Dillon 17722545bca0SMatthew Dillon flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 17732545bca0SMatthew Dillon if (istgt == 0) { 17742545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 17752545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 17762545bca0SMatthew Dillon } 17772545bca0SMatthew Dillon } else { 17782545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 17792545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 17802545bca0SMatthew Dillon } 17812545bca0SMatthew Dillon } 17822545bca0SMatthew Dillon 17832545bca0SMatthew Dillon if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 17842545bca0SMatthew Dillon bus_dmasync_op_t op; 17852545bca0SMatthew Dillon if (istgt) { 17862545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 17872545bca0SMatthew Dillon op = BUS_DMASYNC_PREREAD; 17882545bca0SMatthew Dillon } else { 17892545bca0SMatthew Dillon op = BUS_DMASYNC_PREWRITE; 17902545bca0SMatthew Dillon } 17912545bca0SMatthew Dillon } else { 17922545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 17932545bca0SMatthew Dillon op = BUS_DMASYNC_PREWRITE; 
17942545bca0SMatthew Dillon } else { 17952545bca0SMatthew Dillon op = BUS_DMASYNC_PREREAD; 17962545bca0SMatthew Dillon } 17972545bca0SMatthew Dillon } 17982545bca0SMatthew Dillon bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 17992545bca0SMatthew Dillon } 18002545bca0SMatthew Dillon 18012545bca0SMatthew Dillon /* 18022545bca0SMatthew Dillon * Okay, fill in what we can at the end of the command frame. 18032545bca0SMatthew Dillon * If we have up to MPT_NSGL_FIRST, we can fit them all into 18042545bca0SMatthew Dillon * the command frame. 18052545bca0SMatthew Dillon * 18062545bca0SMatthew Dillon * Otherwise, we fill up through MPT_NSGL_FIRST less one 18072545bca0SMatthew Dillon * SIMPLE32 pointers and start doing CHAIN32 entries after 18082545bca0SMatthew Dillon * that. 18092545bca0SMatthew Dillon */ 18102545bca0SMatthew Dillon 18112545bca0SMatthew Dillon if (nseg < MPT_NSGL_FIRST(mpt)) { 18122545bca0SMatthew Dillon first_lim = nseg; 18132545bca0SMatthew Dillon } else { 18142545bca0SMatthew Dillon /* 18152545bca0SMatthew Dillon * Leave room for CHAIN element 18162545bca0SMatthew Dillon */ 18172545bca0SMatthew Dillon first_lim = MPT_NSGL_FIRST(mpt) - 1; 18182545bca0SMatthew Dillon } 18192545bca0SMatthew Dillon 18202545bca0SMatthew Dillon se = (SGE_SIMPLE32 *) sglp; 18212545bca0SMatthew Dillon for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 18222545bca0SMatthew Dillon uint32_t tf; 18232545bca0SMatthew Dillon 18242545bca0SMatthew Dillon memset(se, 0,sizeof (*se)); 18252545bca0SMatthew Dillon se->Address = htole32(dm_segs->ds_addr); 18262545bca0SMatthew Dillon 18272545bca0SMatthew Dillon MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 18282545bca0SMatthew Dillon tf = flags; 18292545bca0SMatthew Dillon if (seg == first_lim - 1) { 18302545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 18312545bca0SMatthew Dillon } 18322545bca0SMatthew Dillon if (seg == nseg - 1) { 18332545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_END_OF_LIST | 18342545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER; 18352545bca0SMatthew Dillon } 18362545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se, tf); 18372545bca0SMatthew Dillon se->FlagsLength = htole32(se->FlagsLength); 18382545bca0SMatthew Dillon } 18392545bca0SMatthew Dillon 18402545bca0SMatthew Dillon if (seg == nseg) { 18412545bca0SMatthew Dillon goto out; 18422545bca0SMatthew Dillon } 18432545bca0SMatthew Dillon 18442545bca0SMatthew Dillon /* 18452545bca0SMatthew Dillon * Tell the IOC where to find the first chain element. 18462545bca0SMatthew Dillon */ 18472545bca0SMatthew Dillon hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 18482545bca0SMatthew Dillon nxt_off = MPT_RQSL(mpt); 18492545bca0SMatthew Dillon trq = req; 18502545bca0SMatthew Dillon 18512545bca0SMatthew Dillon /* 18522545bca0SMatthew Dillon * Make up the rest of the data segments out of a chain element 18534c42baf4SSascha Wildner * (contained in the current request frame) which points to 18542545bca0SMatthew Dillon * SIMPLE32 elements in the next request frame, possibly ending 18552545bca0SMatthew Dillon * with *another* chain element (if there's more). 18562545bca0SMatthew Dillon */ 18572545bca0SMatthew Dillon while (seg < nseg) { 18582545bca0SMatthew Dillon int this_seg_lim; 18592545bca0SMatthew Dillon uint32_t tf, cur_off; 18602545bca0SMatthew Dillon bus_addr_t chain_list_addr; 18612545bca0SMatthew Dillon 18622545bca0SMatthew Dillon /* 18632545bca0SMatthew Dillon * Point to the chain descriptor. 
Note that the chain 18642545bca0SMatthew Dillon * descriptor is at the end of the *previous* list (whether 18652545bca0SMatthew Dillon * chain or simple). 18662545bca0SMatthew Dillon */ 18672545bca0SMatthew Dillon ce = (SGE_CHAIN32 *) se; 18682545bca0SMatthew Dillon 18692545bca0SMatthew Dillon /* 18702545bca0SMatthew Dillon * Before we change our current pointer, make sure we won't 18712545bca0SMatthew Dillon * overflow the request area with this frame. Note that we 18722545bca0SMatthew Dillon * test against 'greater than' here as it's okay in this case 18732545bca0SMatthew Dillon * to have next offset be just outside the request area. 18742545bca0SMatthew Dillon */ 18752545bca0SMatthew Dillon if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 18762545bca0SMatthew Dillon nxt_off = MPT_REQUEST_AREA; 18772545bca0SMatthew Dillon goto next_chain; 18782545bca0SMatthew Dillon } 18792545bca0SMatthew Dillon 18802545bca0SMatthew Dillon /* 18812545bca0SMatthew Dillon * Set our SGE element pointer to the beginning of the chain 18822545bca0SMatthew Dillon * list and update our next chain list offset. 18832545bca0SMatthew Dillon */ 18842545bca0SMatthew Dillon se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 18852545bca0SMatthew Dillon cur_off = nxt_off; 18862545bca0SMatthew Dillon nxt_off += MPT_RQSL(mpt); 18872545bca0SMatthew Dillon 18882545bca0SMatthew Dillon /* 18894c42baf4SSascha Wildner * Now initialize the chain descriptor. 18902545bca0SMatthew Dillon */ 18912545bca0SMatthew Dillon memset(ce, 0, sizeof (*ce)); 18922545bca0SMatthew Dillon 18932545bca0SMatthew Dillon /* 18942545bca0SMatthew Dillon * Get the physical address of the chain list. 18952545bca0SMatthew Dillon */ 18962545bca0SMatthew Dillon chain_list_addr = trq->req_pbuf; 18972545bca0SMatthew Dillon chain_list_addr += cur_off; 18982545bca0SMatthew Dillon 18992545bca0SMatthew Dillon 19002545bca0SMatthew Dillon 19012545bca0SMatthew Dillon ce->Address = htole32(chain_list_addr); 19022545bca0SMatthew Dillon ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 19032545bca0SMatthew Dillon 19042545bca0SMatthew Dillon 19052545bca0SMatthew Dillon /* 19062545bca0SMatthew Dillon * If we have more than a frame's worth of segments left, 19072545bca0SMatthew Dillon * set up the chain list to have the last element be another 19082545bca0SMatthew Dillon * chain descriptor. 19092545bca0SMatthew Dillon */ 19102545bca0SMatthew Dillon if ((nseg - seg) > MPT_NSGL(mpt)) { 19112545bca0SMatthew Dillon this_seg_lim = seg + MPT_NSGL(mpt) - 1; 19122545bca0SMatthew Dillon /* 19132545bca0SMatthew Dillon * The length of the chain is the length in bytes of the 19142545bca0SMatthew Dillon * number of segments plus the next chain element. 19152545bca0SMatthew Dillon * 19162545bca0SMatthew Dillon * The next chain descriptor offset is the length, 19172545bca0SMatthew Dillon * in words, of the number of segments. 
19182545bca0SMatthew Dillon */ 19192545bca0SMatthew Dillon ce->Length = (this_seg_lim - seg) * 19202545bca0SMatthew Dillon sizeof (SGE_SIMPLE32); 19212545bca0SMatthew Dillon ce->NextChainOffset = ce->Length >> 2; 19222545bca0SMatthew Dillon ce->Length += sizeof (SGE_CHAIN32); 19232545bca0SMatthew Dillon } else { 19242545bca0SMatthew Dillon this_seg_lim = nseg; 19252545bca0SMatthew Dillon ce->Length = (this_seg_lim - seg) * 19262545bca0SMatthew Dillon sizeof (SGE_SIMPLE32); 19272545bca0SMatthew Dillon } 19282545bca0SMatthew Dillon ce->Length = htole16(ce->Length); 19292545bca0SMatthew Dillon 19302545bca0SMatthew Dillon /* 19312545bca0SMatthew Dillon * Fill in the chain list SGE elements with our segment data. 19322545bca0SMatthew Dillon * 19332545bca0SMatthew Dillon * If we're the last element in this chain list, set the last 19342545bca0SMatthew Dillon * element flag. If we're the completely last element period, 19352545bca0SMatthew Dillon * set the end of list and end of buffer flags. 19362545bca0SMatthew Dillon */ 19372545bca0SMatthew Dillon while (seg < this_seg_lim) { 19382545bca0SMatthew Dillon memset(se, 0, sizeof (*se)); 19392545bca0SMatthew Dillon se->Address = htole32(dm_segs->ds_addr); 19402545bca0SMatthew Dillon 19412545bca0SMatthew Dillon MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 19422545bca0SMatthew Dillon tf = flags; 19432545bca0SMatthew Dillon if (seg == this_seg_lim - 1) { 19442545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 19452545bca0SMatthew Dillon } 19462545bca0SMatthew Dillon if (seg == nseg - 1) { 19472545bca0SMatthew Dillon tf |= MPI_SGE_FLAGS_END_OF_LIST | 19482545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER; 19492545bca0SMatthew Dillon } 19502545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se, tf); 19512545bca0SMatthew Dillon se->FlagsLength = htole32(se->FlagsLength); 19522545bca0SMatthew Dillon se++; 19532545bca0SMatthew Dillon seg++; 19542545bca0SMatthew Dillon dm_segs++; 19552545bca0SMatthew Dillon } 19562545bca0SMatthew Dillon 19572545bca0SMatthew Dillon next_chain: 19582545bca0SMatthew Dillon /* 19592545bca0SMatthew Dillon * If we have more segments to do and we've used up all of 19602545bca0SMatthew Dillon * the space in a request area, go allocate another one 19612545bca0SMatthew Dillon * and chain to that. 19622545bca0SMatthew Dillon */ 19632545bca0SMatthew Dillon if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 19642545bca0SMatthew Dillon request_t *nrq; 19652545bca0SMatthew Dillon 19662545bca0SMatthew Dillon nrq = mpt_get_request(mpt, FALSE); 19672545bca0SMatthew Dillon 19682545bca0SMatthew Dillon if (nrq == NULL) { 19692545bca0SMatthew Dillon error = ENOMEM; 19702545bca0SMatthew Dillon goto bad; 19712545bca0SMatthew Dillon } 19722545bca0SMatthew Dillon 19732545bca0SMatthew Dillon /* 19742545bca0SMatthew Dillon * Append the new request area on the tail of our list. 
19752545bca0SMatthew Dillon */ 19762545bca0SMatthew Dillon if ((trq = req->chain) == NULL) { 19772545bca0SMatthew Dillon req->chain = nrq; 19782545bca0SMatthew Dillon } else { 19792545bca0SMatthew Dillon while (trq->chain != NULL) { 19802545bca0SMatthew Dillon trq = trq->chain; 19812545bca0SMatthew Dillon } 19822545bca0SMatthew Dillon trq->chain = nrq; 19832545bca0SMatthew Dillon } 19842545bca0SMatthew Dillon trq = nrq; 19852545bca0SMatthew Dillon mpt_off = trq->req_vbuf; 19862545bca0SMatthew Dillon if (mpt->verbose >= MPT_PRT_DEBUG) { 19872545bca0SMatthew Dillon memset(mpt_off, 0xff, MPT_REQUEST_AREA); 19882545bca0SMatthew Dillon } 19892545bca0SMatthew Dillon nxt_off = 0; 19902545bca0SMatthew Dillon } 19912545bca0SMatthew Dillon } 19922545bca0SMatthew Dillon out: 19932545bca0SMatthew Dillon 19942545bca0SMatthew Dillon /* 19952545bca0SMatthew Dillon * Last time we need to check if this CCB needs to be aborted. 19962545bca0SMatthew Dillon */ 19972545bca0SMatthew Dillon if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 19982545bca0SMatthew Dillon if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 19992545bca0SMatthew Dillon request_t *cmd_req = 20002545bca0SMatthew Dillon MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 20012545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 20022545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 20032545bca0SMatthew Dillon MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 20042545bca0SMatthew Dillon } 20052545bca0SMatthew Dillon mpt_prt(mpt, 20062545bca0SMatthew Dillon "mpt_execute_req: I/O cancelled (status 0x%x)\n", 20072545bca0SMatthew Dillon ccb->ccb_h.status & CAM_STATUS_MASK); 20082545bca0SMatthew Dillon if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 20092545bca0SMatthew Dillon bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 20102545bca0SMatthew Dillon } 20112545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 20124c42baf4SSascha Wildner KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 20132545bca0SMatthew Dillon xpt_done(ccb); 20142545bca0SMatthew Dillon mpt_free_request(mpt, req); 20152545bca0SMatthew Dillon return; 20162545bca0SMatthew Dillon } 20172545bca0SMatthew Dillon 20182545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_SIM_QUEUED; 20192545bca0SMatthew Dillon if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 20202545bca0SMatthew Dillon mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 20212545bca0SMatthew Dillon mpt_timeout, ccb); 20222545bca0SMatthew Dillon } 20232545bca0SMatthew Dillon if (mpt->verbose > MPT_PRT_DEBUG) { 20242545bca0SMatthew Dillon int nc = 0; 20252545bca0SMatthew Dillon mpt_print_request(req->req_vbuf); 20262545bca0SMatthew Dillon for (trq = req->chain; trq; trq = trq->chain) { 20272545bca0SMatthew Dillon kprintf(" Additional Chain Area %d\n", nc++); 20282545bca0SMatthew Dillon mpt_dump_sgl(trq->req_vbuf, 0); 20292545bca0SMatthew Dillon } 20302545bca0SMatthew Dillon } 20312545bca0SMatthew Dillon 20322545bca0SMatthew Dillon if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 20332545bca0SMatthew Dillon request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 20342545bca0SMatthew Dillon mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 20352545bca0SMatthew Dillon #ifdef WE_TRUST_AUTO_GOOD_STATUS 20362545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 20372545bca0SMatthew Dillon csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 20382545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 20392545bca0SMatthew Dillon } 
else { 20402545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA; 20412545bca0SMatthew Dillon } 20422545bca0SMatthew Dillon #else 20432545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA; 20442545bca0SMatthew Dillon #endif 20452545bca0SMatthew Dillon } 20462545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 20472545bca0SMatthew Dillon } 20482545bca0SMatthew Dillon 20492545bca0SMatthew Dillon static void 20502545bca0SMatthew Dillon mpt_start(struct cam_sim *sim, union ccb *ccb) 20512545bca0SMatthew Dillon { 20522545bca0SMatthew Dillon request_t *req; 20532545bca0SMatthew Dillon struct mpt_softc *mpt; 20542545bca0SMatthew Dillon MSG_SCSI_IO_REQUEST *mpt_req; 20552545bca0SMatthew Dillon struct ccb_scsiio *csio = &ccb->csio; 20562545bca0SMatthew Dillon struct ccb_hdr *ccbh = &ccb->ccb_h; 20572545bca0SMatthew Dillon bus_dmamap_callback_t *cb; 20582545bca0SMatthew Dillon target_id_t tgt; 20592545bca0SMatthew Dillon int raid_passthru; 20602545bca0SMatthew Dillon 20612545bca0SMatthew Dillon /* Get the pointer for the physical adapter */ 20622545bca0SMatthew Dillon mpt = ccb->ccb_h.ccb_mpt_ptr; 20632545bca0SMatthew Dillon raid_passthru = (sim == mpt->phydisk_sim); 20642545bca0SMatthew Dillon 20652545bca0SMatthew Dillon if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 20662545bca0SMatthew Dillon if (mpt->outofbeer == 0) { 20672545bca0SMatthew Dillon mpt->outofbeer = 1; 20682545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 20692545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 20702545bca0SMatthew Dillon } 20712545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 20722545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 20732545bca0SMatthew Dillon xpt_done(ccb); 20742545bca0SMatthew Dillon return; 20752545bca0SMatthew Dillon } 20762545bca0SMatthew Dillon #ifdef INVARIANTS 20772545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 20782545bca0SMatthew Dillon #endif 20792545bca0SMatthew Dillon 20802545bca0SMatthew Dillon if (sizeof (bus_addr_t) > 4) { 20812545bca0SMatthew Dillon cb = mpt_execute_req_a64; 20822545bca0SMatthew Dillon } else { 20832545bca0SMatthew Dillon cb = mpt_execute_req; 20842545bca0SMatthew Dillon } 20852545bca0SMatthew Dillon 20862545bca0SMatthew Dillon /* 20872545bca0SMatthew Dillon * Link the ccb and the request structure so we can find 20882545bca0SMatthew Dillon * the other knowing either the request or the ccb 20892545bca0SMatthew Dillon */ 20902545bca0SMatthew Dillon req->ccb = ccb; 20912545bca0SMatthew Dillon ccb->ccb_h.ccb_req_ptr = req; 20922545bca0SMatthew Dillon 20932545bca0SMatthew Dillon /* Now we build the command for the IOC */ 20942545bca0SMatthew Dillon mpt_req = req->req_vbuf; 20952545bca0SMatthew Dillon memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 20962545bca0SMatthew Dillon 20972545bca0SMatthew Dillon mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 20982545bca0SMatthew Dillon if (raid_passthru) { 20992545bca0SMatthew Dillon mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 21002545bca0SMatthew Dillon if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 21012545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 21022545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 21032545bca0SMatthew Dillon xpt_done(ccb); 21042545bca0SMatthew Dillon return; 21052545bca0SMatthew Dillon } 21062545bca0SMatthew Dillon mpt_req->Bus = 0; /* we never set bus here */ 21072545bca0SMatthew Dillon } else { 21082545bca0SMatthew Dillon tgt = ccb->ccb_h.target_id; 21092545bca0SMatthew Dillon
mpt_req->Bus = 0; /* XXX */ 21102545bca0SMatthew Dillon 21112545bca0SMatthew Dillon } 21122545bca0SMatthew Dillon mpt_req->SenseBufferLength = 21132545bca0SMatthew Dillon (csio->sense_len < MPT_SENSE_SIZE) ? 21142545bca0SMatthew Dillon csio->sense_len : MPT_SENSE_SIZE; 21152545bca0SMatthew Dillon 21162545bca0SMatthew Dillon /* 21172545bca0SMatthew Dillon * We use the message context to find the request structure when we 21182545bca0SMatthew Dillon * Get the command completion interrupt from the IOC. 21192545bca0SMatthew Dillon */ 21202545bca0SMatthew Dillon mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 21212545bca0SMatthew Dillon 21222545bca0SMatthew Dillon /* Which physical device to do the I/O on */ 21232545bca0SMatthew Dillon mpt_req->TargetID = tgt; 21242545bca0SMatthew Dillon 21252545bca0SMatthew Dillon /* We assume a single level LUN type */ 21262545bca0SMatthew Dillon if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 21272545bca0SMatthew Dillon mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 21282545bca0SMatthew Dillon mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 21292545bca0SMatthew Dillon } else { 21302545bca0SMatthew Dillon mpt_req->LUN[1] = ccb->ccb_h.target_lun; 21312545bca0SMatthew Dillon } 21322545bca0SMatthew Dillon 21332545bca0SMatthew Dillon /* Set the direction of the transfer */ 21342545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 21352545bca0SMatthew Dillon mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 21362545bca0SMatthew Dillon } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 21372545bca0SMatthew Dillon mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 21382545bca0SMatthew Dillon } else { 21392545bca0SMatthew Dillon mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 21402545bca0SMatthew Dillon } 21412545bca0SMatthew Dillon 21422545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 21432545bca0SMatthew Dillon switch(ccb->csio.tag_action) { 21442545bca0SMatthew Dillon case MSG_HEAD_OF_Q_TAG: 21452545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 21462545bca0SMatthew Dillon break; 21472545bca0SMatthew Dillon case MSG_ACA_TASK: 21482545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 21492545bca0SMatthew Dillon break; 21502545bca0SMatthew Dillon case MSG_ORDERED_Q_TAG: 21512545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 21522545bca0SMatthew Dillon break; 21532545bca0SMatthew Dillon case MSG_SIMPLE_Q_TAG: 21542545bca0SMatthew Dillon default: 21552545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 21562545bca0SMatthew Dillon break; 21572545bca0SMatthew Dillon } 21582545bca0SMatthew Dillon } else { 21592545bca0SMatthew Dillon if (mpt->is_fc || mpt->is_sas) { 21602545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 21612545bca0SMatthew Dillon } else { 21622545bca0SMatthew Dillon /* XXX No such thing for a target doing packetized. 
*/ 21632545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 21642545bca0SMatthew Dillon } 21652545bca0SMatthew Dillon } 21662545bca0SMatthew Dillon 21672545bca0SMatthew Dillon if (mpt->is_spi) { 21682545bca0SMatthew Dillon if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 21692545bca0SMatthew Dillon mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 21702545bca0SMatthew Dillon } 21712545bca0SMatthew Dillon } 21722545bca0SMatthew Dillon mpt_req->Control = htole32(mpt_req->Control); 21732545bca0SMatthew Dillon 21742545bca0SMatthew Dillon /* Copy the scsi command block into place */ 21752545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 21762545bca0SMatthew Dillon bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 21772545bca0SMatthew Dillon } else { 21782545bca0SMatthew Dillon bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 21792545bca0SMatthew Dillon } 21802545bca0SMatthew Dillon 21812545bca0SMatthew Dillon mpt_req->CDBLength = csio->cdb_len; 21822545bca0SMatthew Dillon mpt_req->DataLength = htole32(csio->dxfer_len); 21832545bca0SMatthew Dillon mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 21842545bca0SMatthew Dillon 21852545bca0SMatthew Dillon /* 21862545bca0SMatthew Dillon * Do a *short* print here if we're set to MPT_PRT_DEBUG 21872545bca0SMatthew Dillon */ 21882545bca0SMatthew Dillon if (mpt->verbose == MPT_PRT_DEBUG) { 21892545bca0SMatthew Dillon U32 df; 21902545bca0SMatthew Dillon mpt_prt(mpt, "mpt_start: %s op 0x%x ", 21912545bca0SMatthew Dillon (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 21922545bca0SMatthew Dillon "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 21932545bca0SMatthew Dillon df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 21942545bca0SMatthew Dillon if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 21952545bca0SMatthew Dillon mpt_prtc(mpt, "(%s %u byte%s ", 21962545bca0SMatthew Dillon (df == MPI_SCSIIO_CONTROL_READ)? 21972545bca0SMatthew Dillon "read" : "write", csio->dxfer_len, 21982545bca0SMatthew Dillon (csio->dxfer_len == 1)? ")" : "s)"); 21992545bca0SMatthew Dillon } 22002545bca0SMatthew Dillon mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 22012545bca0SMatthew Dillon ccb->ccb_h.target_lun, req, req->serno); 22022545bca0SMatthew Dillon } 22032545bca0SMatthew Dillon 22042545bca0SMatthew Dillon /* 22052545bca0SMatthew Dillon * If we have any data to send with this command map it into bus space. 22062545bca0SMatthew Dillon */ 22072545bca0SMatthew Dillon if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 22082545bca0SMatthew Dillon if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 22092545bca0SMatthew Dillon /* 22102545bca0SMatthew Dillon * We've been given a pointer to a single buffer. 22112545bca0SMatthew Dillon */ 22122545bca0SMatthew Dillon if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 22132545bca0SMatthew Dillon /* 22142545bca0SMatthew Dillon * Virtual address that needs to translated into 22152545bca0SMatthew Dillon * one or more physical address ranges. 
22162545bca0SMatthew Dillon */ 22172545bca0SMatthew Dillon int error; 22186d259fc1SSascha Wildner crit_enter(); 22192545bca0SMatthew Dillon error = bus_dmamap_load(mpt->buffer_dmat, 22202545bca0SMatthew Dillon req->dmap, csio->data_ptr, csio->dxfer_len, 22212545bca0SMatthew Dillon cb, req, 0); 22226d259fc1SSascha Wildner crit_exit(); 22232545bca0SMatthew Dillon if (error == EINPROGRESS) { 22242545bca0SMatthew Dillon /* 22252545bca0SMatthew Dillon * So as to maintain ordering, 22262545bca0SMatthew Dillon * freeze the controller queue 22272545bca0SMatthew Dillon * until our mapping is 22282545bca0SMatthew Dillon * returned. 22292545bca0SMatthew Dillon */ 22302545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 22312545bca0SMatthew Dillon ccbh->status |= CAM_RELEASE_SIMQ; 22322545bca0SMatthew Dillon } 22332545bca0SMatthew Dillon } else { 22342545bca0SMatthew Dillon /* 22352545bca0SMatthew Dillon * We have been given a pointer to single 22362545bca0SMatthew Dillon * physical buffer. 22372545bca0SMatthew Dillon */ 22382545bca0SMatthew Dillon struct bus_dma_segment seg; 22392545bca0SMatthew Dillon seg.ds_addr = 22402545bca0SMatthew Dillon (bus_addr_t)(vm_offset_t)csio->data_ptr; 22412545bca0SMatthew Dillon seg.ds_len = csio->dxfer_len; 22422545bca0SMatthew Dillon (*cb)(req, &seg, 1, 0); 22432545bca0SMatthew Dillon } 22442545bca0SMatthew Dillon } else { 22452545bca0SMatthew Dillon /* 22462545bca0SMatthew Dillon * We have been given a list of addresses. 22472545bca0SMatthew Dillon * This case could be easily supported but they are not 22482545bca0SMatthew Dillon * currently generated by the CAM subsystem so there 22492545bca0SMatthew Dillon * is no point in wasting the time right now. 22502545bca0SMatthew Dillon */ 22512545bca0SMatthew Dillon struct bus_dma_segment *segs; 22522545bca0SMatthew Dillon if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 22532545bca0SMatthew Dillon (*cb)(req, NULL, 0, EFAULT); 22542545bca0SMatthew Dillon } else { 22552545bca0SMatthew Dillon /* Just use the segments provided */ 22562545bca0SMatthew Dillon segs = (struct bus_dma_segment *)csio->data_ptr; 22572545bca0SMatthew Dillon (*cb)(req, segs, csio->sglist_cnt, 0); 22582545bca0SMatthew Dillon } 22592545bca0SMatthew Dillon } 22602545bca0SMatthew Dillon } else { 22612545bca0SMatthew Dillon (*cb)(req, NULL, 0, 0); 22622545bca0SMatthew Dillon } 22632545bca0SMatthew Dillon } 22642545bca0SMatthew Dillon 22652545bca0SMatthew Dillon static int 22662545bca0SMatthew Dillon mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 22672545bca0SMatthew Dillon int sleep_ok) 22682545bca0SMatthew Dillon { 22692545bca0SMatthew Dillon int error; 22702545bca0SMatthew Dillon uint16_t status; 22712545bca0SMatthew Dillon uint8_t response; 22722545bca0SMatthew Dillon 22732545bca0SMatthew Dillon error = mpt_scsi_send_tmf(mpt, 22742545bca0SMatthew Dillon (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 22752545bca0SMatthew Dillon MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 22762545bca0SMatthew Dillon MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 22772545bca0SMatthew Dillon mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 22782545bca0SMatthew Dillon 0, /* XXX How do I get the channel ID? */ 22792545bca0SMatthew Dillon tgt != CAM_TARGET_WILDCARD ? tgt : 0, 22802545bca0SMatthew Dillon lun != CAM_LUN_WILDCARD ? 
lun : 0, 22812545bca0SMatthew Dillon 0, sleep_ok); 22822545bca0SMatthew Dillon 22832545bca0SMatthew Dillon if (error != 0) { 22842545bca0SMatthew Dillon /* 22852545bca0SMatthew Dillon * mpt_scsi_send_tmf hard resets on failure, so no 22862545bca0SMatthew Dillon * need to do so here. 22872545bca0SMatthew Dillon */ 22882545bca0SMatthew Dillon mpt_prt(mpt, 22892545bca0SMatthew Dillon "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 22902545bca0SMatthew Dillon return (EIO); 22912545bca0SMatthew Dillon } 22922545bca0SMatthew Dillon 22932545bca0SMatthew Dillon /* Wait for bus reset to be processed by the IOC. */ 22942545bca0SMatthew Dillon error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 22952545bca0SMatthew Dillon REQ_STATE_DONE, sleep_ok, 5000); 22962545bca0SMatthew Dillon 22972545bca0SMatthew Dillon status = le16toh(mpt->tmf_req->IOCStatus); 22982545bca0SMatthew Dillon response = mpt->tmf_req->ResponseCode; 22992545bca0SMatthew Dillon mpt->tmf_req->state = REQ_STATE_FREE; 23002545bca0SMatthew Dillon 23012545bca0SMatthew Dillon if (error) { 23022545bca0SMatthew Dillon mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 23032545bca0SMatthew Dillon "Resetting controller.\n"); 23042545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 23052545bca0SMatthew Dillon return (ETIMEDOUT); 23062545bca0SMatthew Dillon } 23072545bca0SMatthew Dillon 23082545bca0SMatthew Dillon if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 23092545bca0SMatthew Dillon mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 23102545bca0SMatthew Dillon "Resetting controller.\n", status); 23112545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 23122545bca0SMatthew Dillon return (EIO); 23132545bca0SMatthew Dillon } 23142545bca0SMatthew Dillon 23152545bca0SMatthew Dillon if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 23162545bca0SMatthew Dillon response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 23172545bca0SMatthew Dillon mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 23182545bca0SMatthew Dillon "Resetting controller.\n", response); 23192545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 23202545bca0SMatthew Dillon return (EIO); 23212545bca0SMatthew Dillon } 23222545bca0SMatthew Dillon return (0); 23232545bca0SMatthew Dillon } 23242545bca0SMatthew Dillon 23252545bca0SMatthew Dillon static int 23262545bca0SMatthew Dillon mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 23272545bca0SMatthew Dillon { 23282545bca0SMatthew Dillon int r = 0; 23292545bca0SMatthew Dillon request_t *req; 23302545bca0SMatthew Dillon PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 23312545bca0SMatthew Dillon 23322545bca0SMatthew Dillon req = mpt_get_request(mpt, FALSE); 23332545bca0SMatthew Dillon if (req == NULL) { 23342545bca0SMatthew Dillon return (ENOMEM); 23352545bca0SMatthew Dillon } 23362545bca0SMatthew Dillon fc = req->req_vbuf; 23372545bca0SMatthew Dillon memset(fc, 0, sizeof(*fc)); 23382545bca0SMatthew Dillon fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 23392545bca0SMatthew Dillon fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 23402545bca0SMatthew Dillon fc->MsgContext = htole32(req->index | fc_els_handler_id); 23412545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 23422545bca0SMatthew Dillon if (dowait) { 23432545bca0SMatthew Dillon r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 23442545bca0SMatthew Dillon REQ_STATE_DONE, FALSE, 60 * 1000); 23452545bca0SMatthew Dillon if (r == 0) { 23462545bca0SMatthew Dillon mpt_free_request(mpt, req); 23472545bca0SMatthew Dillon } 23482545bca0SMatthew Dillon } 23492545bca0SMatthew Dillon return (r); 23502545bca0SMatthew Dillon } 23512545bca0SMatthew Dillon 23526d259fc1SSascha Wildner static void 23536d259fc1SSascha Wildner mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb) 23546d259fc1SSascha Wildner { 23556d259fc1SSascha Wildner xpt_free_path(ccb->ccb_h.path); 23566d259fc1SSascha Wildner kfree(ccb, M_TEMP); 23576d259fc1SSascha Wildner } 23586d259fc1SSascha Wildner 23592545bca0SMatthew Dillon static int 23602545bca0SMatthew Dillon mpt_cam_event(struct mpt_softc *mpt, request_t *req, 23612545bca0SMatthew Dillon MSG_EVENT_NOTIFY_REPLY *msg) 23622545bca0SMatthew Dillon { 23632545bca0SMatthew Dillon uint32_t data0, data1; 23642545bca0SMatthew Dillon 23652545bca0SMatthew Dillon data0 = le32toh(msg->Data[0]); 23662545bca0SMatthew Dillon data1 = le32toh(msg->Data[1]); 23672545bca0SMatthew Dillon switch(msg->Event & 0xFF) { 23682545bca0SMatthew Dillon case MPI_EVENT_UNIT_ATTENTION: 23692545bca0SMatthew Dillon mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 23702545bca0SMatthew Dillon (data0 >> 8) & 0xff, data0 & 0xff); 23712545bca0SMatthew Dillon break; 23722545bca0SMatthew Dillon 23732545bca0SMatthew Dillon case MPI_EVENT_IOC_BUS_RESET: 23742545bca0SMatthew Dillon /* We generated a bus reset */ 23752545bca0SMatthew Dillon mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 23762545bca0SMatthew Dillon (data0 >> 8) & 0xff); 23772545bca0SMatthew Dillon xpt_async(AC_BUS_RESET, mpt->path, NULL); 23782545bca0SMatthew Dillon break; 23792545bca0SMatthew Dillon 23802545bca0SMatthew Dillon case MPI_EVENT_EXT_BUS_RESET: 23812545bca0SMatthew Dillon /* Someone else generated a bus reset */ 23822545bca0SMatthew Dillon mpt_prt(mpt, "External Bus Reset Detected\n"); 23832545bca0SMatthew Dillon /* 23842545bca0SMatthew Dillon * These replies don't return EventData like the MPI 23852545bca0SMatthew Dillon * spec says they do 23862545bca0SMatthew Dillon */ 23872545bca0SMatthew Dillon xpt_async(AC_BUS_RESET, mpt->path, NULL); 
23882545bca0SMatthew Dillon break; 23892545bca0SMatthew Dillon 23902545bca0SMatthew Dillon case MPI_EVENT_RESCAN: 23912545bca0SMatthew Dillon { 23922545bca0SMatthew Dillon union ccb *ccb; 23932545bca0SMatthew Dillon uint32_t pathid; 23942545bca0SMatthew Dillon /* 23952545bca0SMatthew Dillon * In general this means a device has been added to the loop. 23962545bca0SMatthew Dillon */ 23972545bca0SMatthew Dillon mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 23982545bca0SMatthew Dillon if (mpt->ready == 0) { 23992545bca0SMatthew Dillon break; 24002545bca0SMatthew Dillon } 24012545bca0SMatthew Dillon if (mpt->phydisk_sim) { 24022545bca0SMatthew Dillon pathid = cam_sim_path(mpt->phydisk_sim); 24032545bca0SMatthew Dillon } else { 24042545bca0SMatthew Dillon pathid = cam_sim_path(mpt->sim); 24052545bca0SMatthew Dillon } 24062545bca0SMatthew Dillon /* 24072545bca0SMatthew Dillon * Allocate a CCB, create a wildcard path for this bus, 24082545bca0SMatthew Dillon * and schedule a rescan. 24092545bca0SMatthew Dillon */ 24106d259fc1SSascha Wildner ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO); 24112545bca0SMatthew Dillon 24122545bca0SMatthew Dillon if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 24132545bca0SMatthew Dillon CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 24142545bca0SMatthew Dillon mpt_prt(mpt, "unable to create path for rescan\n"); 24156d259fc1SSascha Wildner kfree(ccb, M_TEMP); 24162545bca0SMatthew Dillon break; 24172545bca0SMatthew Dillon } 24186d259fc1SSascha Wildner 24196d259fc1SSascha Wildner xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/); 24206d259fc1SSascha Wildner ccb->ccb_h.func_code = XPT_SCAN_BUS; 24216d259fc1SSascha Wildner ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback; 24226d259fc1SSascha Wildner ccb->crcn.flags = CAM_FLAG_NONE; 24236d259fc1SSascha Wildner xpt_action(ccb); 24246d259fc1SSascha Wildner 24256d259fc1SSascha Wildner /* scan is now in progress */ 24266d259fc1SSascha Wildner 24272545bca0SMatthew Dillon break; 24282545bca0SMatthew Dillon } 24292545bca0SMatthew Dillon case MPI_EVENT_LINK_STATUS_CHANGE: 24302545bca0SMatthew Dillon mpt_prt(mpt, "Port %d: LinkState: %s\n", 24312545bca0SMatthew Dillon (data1 >> 8) & 0xff, 24322545bca0SMatthew Dillon ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 24332545bca0SMatthew Dillon break; 24342545bca0SMatthew Dillon 24352545bca0SMatthew Dillon case MPI_EVENT_LOOP_STATE_CHANGE: 24362545bca0SMatthew Dillon switch ((data0 >> 16) & 0xff) { 24372545bca0SMatthew Dillon case 0x01: 24382545bca0SMatthew Dillon mpt_prt(mpt, 24392545bca0SMatthew Dillon "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 24402545bca0SMatthew Dillon "(Loop Initialization)\n", 24412545bca0SMatthew Dillon (data1 >> 8) & 0xff, 24422545bca0SMatthew Dillon (data0 >> 8) & 0xff, 24432545bca0SMatthew Dillon (data0 ) & 0xff); 24442545bca0SMatthew Dillon switch ((data0 >> 8) & 0xff) { 24452545bca0SMatthew Dillon case 0xF7: 24462545bca0SMatthew Dillon if ((data0 & 0xff) == 0xF7) { 24472545bca0SMatthew Dillon mpt_prt(mpt, "Device needs AL_PA\n"); 24482545bca0SMatthew Dillon } else { 24492545bca0SMatthew Dillon mpt_prt(mpt, "Device %02x doesn't like " 24502545bca0SMatthew Dillon "FC performance\n", 24512545bca0SMatthew Dillon data0 & 0xFF); 24522545bca0SMatthew Dillon } 24532545bca0SMatthew Dillon break; 24542545bca0SMatthew Dillon case 0xF8: 24552545bca0SMatthew Dillon if ((data0 & 0xff) == 0xF7) { 24562545bca0SMatthew Dillon mpt_prt(mpt, "Device had loop failure " 24572545bca0SMatthew Dillon "at its receiver prior to acquiring" 24582545bca0SMatthew Dillon " AL_PA\n"); 24592545bca0SMatthew Dillon } else { 24602545bca0SMatthew Dillon mpt_prt(mpt, "Device %02x detected loop" 24612545bca0SMatthew Dillon " failure at its receiver\n", 24622545bca0SMatthew Dillon data0 & 0xFF); 24632545bca0SMatthew Dillon } 24642545bca0SMatthew Dillon break; 24652545bca0SMatthew Dillon default: 24662545bca0SMatthew Dillon mpt_prt(mpt, "Device %02x requests that device " 24672545bca0SMatthew Dillon "%02x reset itself\n", 24682545bca0SMatthew Dillon data0 & 0xFF, 24692545bca0SMatthew Dillon (data0 >> 8) & 0xFF); 24702545bca0SMatthew Dillon break; 24712545bca0SMatthew Dillon } 24722545bca0SMatthew Dillon break; 24732545bca0SMatthew Dillon case 0x02: 24742545bca0SMatthew Dillon mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 24752545bca0SMatthew Dillon "LPE(%02x,%02x) (Loop Port Enable)\n", 24762545bca0SMatthew Dillon (data1 >> 8) & 0xff, /* Port */ 24772545bca0SMatthew Dillon (data0 >> 8) & 0xff, /* Character 3 */ 24782545bca0SMatthew Dillon (data0 ) & 0xff /* Character 4 */); 24792545bca0SMatthew Dillon break; 24802545bca0SMatthew Dillon case 0x03: 24812545bca0SMatthew Dillon mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 24822545bca0SMatthew Dillon "LPB(%02x,%02x) (Loop Port Bypass)\n", 24832545bca0SMatthew Dillon (data1 >> 8) & 0xff, /* Port */ 24842545bca0SMatthew Dillon (data0 >> 8) & 0xff, /* Character 3 */ 24852545bca0SMatthew Dillon (data0 ) & 0xff /* Character 4 */); 24862545bca0SMatthew Dillon break; 24872545bca0SMatthew Dillon default: 24882545bca0SMatthew Dillon mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 24892545bca0SMatthew Dillon "FC event (%02x %02x %02x)\n", 24902545bca0SMatthew Dillon (data1 >> 8) & 0xff, /* Port */ 24912545bca0SMatthew Dillon (data0 >> 16) & 0xff, /* Event */ 24922545bca0SMatthew Dillon (data0 >> 8) & 0xff, /* Character 3 */ 24932545bca0SMatthew Dillon (data0 ) & 0xff /* Character 4 */); 24942545bca0SMatthew Dillon } 24952545bca0SMatthew Dillon break; 24962545bca0SMatthew Dillon 24972545bca0SMatthew Dillon case MPI_EVENT_LOGOUT: 24982545bca0SMatthew Dillon mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 24992545bca0SMatthew Dillon (data1 >> 8) & 0xff, data0); 25002545bca0SMatthew Dillon break; 25012545bca0SMatthew Dillon case MPI_EVENT_QUEUE_FULL: 
25022545bca0SMatthew Dillon { 25032545bca0SMatthew Dillon struct cam_sim *sim; 25042545bca0SMatthew Dillon struct cam_path *tmppath; 25052545bca0SMatthew Dillon struct ccb_relsim crs; 25062545bca0SMatthew Dillon PTR_EVENT_DATA_QUEUE_FULL pqf; 25072545bca0SMatthew Dillon lun_id_t lun_id; 25082545bca0SMatthew Dillon 25092545bca0SMatthew Dillon pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 25102545bca0SMatthew Dillon pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 25112545bca0SMatthew Dillon mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 25122545bca0SMatthew Dillon "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 25134c42baf4SSascha Wildner if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 25144c42baf4SSascha Wildner pqf->TargetID) != 0) { 25152545bca0SMatthew Dillon sim = mpt->phydisk_sim; 25162545bca0SMatthew Dillon } else { 25172545bca0SMatthew Dillon sim = mpt->sim; 25182545bca0SMatthew Dillon } 25192545bca0SMatthew Dillon for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 25202545bca0SMatthew Dillon if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 25212545bca0SMatthew Dillon pqf->TargetID, lun_id) != CAM_REQ_CMP) { 25222545bca0SMatthew Dillon mpt_prt(mpt, "unable to create a path to send " 25232545bca0SMatthew Dillon "XPT_REL_SIMQ"); 25242545bca0SMatthew Dillon break; 25252545bca0SMatthew Dillon } 25262545bca0SMatthew Dillon xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 25272545bca0SMatthew Dillon crs.ccb_h.func_code = XPT_REL_SIMQ; 25286d259fc1SSascha Wildner crs.ccb_h.flags = CAM_DEV_QFREEZE; 25292545bca0SMatthew Dillon crs.release_flags = RELSIM_ADJUST_OPENINGS; 25302545bca0SMatthew Dillon crs.openings = pqf->CurrentDepth - 1; 25312545bca0SMatthew Dillon xpt_action((union ccb *)&crs); 25322545bca0SMatthew Dillon if (crs.ccb_h.status != CAM_REQ_CMP) { 25332545bca0SMatthew Dillon mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 25342545bca0SMatthew Dillon } 25352545bca0SMatthew Dillon xpt_free_path(tmppath); 25362545bca0SMatthew Dillon } 25372545bca0SMatthew Dillon break; 25382545bca0SMatthew Dillon } 25396d259fc1SSascha Wildner case MPI_EVENT_IR_RESYNC_UPDATE: 25406d259fc1SSascha Wildner mpt_prt(mpt, "IR resync update %d completed\n", 25416d259fc1SSascha Wildner (data0 >> 16) & 0xff); 25426d259fc1SSascha Wildner break; 25434c42baf4SSascha Wildner case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 25444c42baf4SSascha Wildner { 25454c42baf4SSascha Wildner union ccb *ccb; 25464c42baf4SSascha Wildner struct cam_sim *sim; 25474c42baf4SSascha Wildner struct cam_path *tmppath; 25484c42baf4SSascha Wildner PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; 25494c42baf4SSascha Wildner 25504c42baf4SSascha Wildner psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; 25514c42baf4SSascha Wildner if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 25524c42baf4SSascha Wildner psdsc->TargetID) != 0) 25534c42baf4SSascha Wildner sim = mpt->phydisk_sim; 25544c42baf4SSascha Wildner else 25554c42baf4SSascha Wildner sim = mpt->sim; 25564c42baf4SSascha Wildner switch(psdsc->ReasonCode) { 25574c42baf4SSascha Wildner case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 25584c42baf4SSascha Wildner ccb = kmalloc(sizeof(union ccb), M_TEMP, 25594c42baf4SSascha Wildner M_WAITOK | M_ZERO); 25604c42baf4SSascha Wildner if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 25614c42baf4SSascha Wildner cam_sim_path(sim), psdsc->TargetID, 25624c42baf4SSascha Wildner CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 25634c42baf4SSascha Wildner mpt_prt(mpt, 25644c42baf4SSascha Wildner "unable to create path for rescan\n"); 25654c42baf4SSascha Wildner 
kfree(ccb, M_TEMP); 25664c42baf4SSascha Wildner break; 25674c42baf4SSascha Wildner } 25684c42baf4SSascha Wildner xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 25694c42baf4SSascha Wildner 5/*priority (low)*/); 25704c42baf4SSascha Wildner ccb->ccb_h.func_code = XPT_SCAN_BUS; 25714c42baf4SSascha Wildner ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback; 25724c42baf4SSascha Wildner ccb->crcn.flags = CAM_FLAG_NONE; 25734c42baf4SSascha Wildner xpt_action(ccb); 25744c42baf4SSascha Wildner break; 25754c42baf4SSascha Wildner case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 25764c42baf4SSascha Wildner if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 25774c42baf4SSascha Wildner psdsc->TargetID, CAM_LUN_WILDCARD) != 25784c42baf4SSascha Wildner CAM_REQ_CMP) { 25794c42baf4SSascha Wildner mpt_prt(mpt, 25804c42baf4SSascha Wildner "unable to create path for async event"); 25814c42baf4SSascha Wildner break; 25824c42baf4SSascha Wildner } 25834c42baf4SSascha Wildner xpt_async(AC_LOST_DEVICE, tmppath, NULL); 25844c42baf4SSascha Wildner xpt_free_path(tmppath); 25854c42baf4SSascha Wildner break; 25864c42baf4SSascha Wildner case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: 25874c42baf4SSascha Wildner case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: 25884c42baf4SSascha Wildner case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 25894c42baf4SSascha Wildner break; 25904c42baf4SSascha Wildner default: 25914c42baf4SSascha Wildner mpt_lprt(mpt, MPT_PRT_WARN, 25924c42baf4SSascha Wildner "SAS device status change: Bus: 0x%02x TargetID: " 25934c42baf4SSascha Wildner "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, 25944c42baf4SSascha Wildner psdsc->TargetID, psdsc->ReasonCode); 25954c42baf4SSascha Wildner break; 25964c42baf4SSascha Wildner } 25974c42baf4SSascha Wildner break; 25984c42baf4SSascha Wildner } 25994c42baf4SSascha Wildner case MPI_EVENT_SAS_DISCOVERY_ERROR: 26004c42baf4SSascha Wildner { 26014c42baf4SSascha Wildner PTR_EVENT_DATA_DISCOVERY_ERROR pde; 26024c42baf4SSascha Wildner 26034c42baf4SSascha Wildner pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; 26044c42baf4SSascha Wildner pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); 26054c42baf4SSascha Wildner mpt_lprt(mpt, MPT_PRT_WARN, 26064c42baf4SSascha Wildner "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", 26074c42baf4SSascha Wildner pde->Port, pde->DiscoveryStatus); 26084c42baf4SSascha Wildner break; 26094c42baf4SSascha Wildner } 26102545bca0SMatthew Dillon case MPI_EVENT_EVENT_CHANGE: 26112545bca0SMatthew Dillon case MPI_EVENT_INTEGRATED_RAID: 26124c42baf4SSascha Wildner case MPI_EVENT_IR2: 26134c42baf4SSascha Wildner case MPI_EVENT_LOG_ENTRY_ADDED: 26144c42baf4SSascha Wildner case MPI_EVENT_SAS_DISCOVERY: 26154c42baf4SSascha Wildner case MPI_EVENT_SAS_PHY_LINK_STATUS: 26162545bca0SMatthew Dillon case MPI_EVENT_SAS_SES: 26172545bca0SMatthew Dillon break; 26182545bca0SMatthew Dillon default: 26192545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 26206d259fc1SSascha Wildner msg->Event & 0xFF); 26212545bca0SMatthew Dillon return (0); 26222545bca0SMatthew Dillon } 26232545bca0SMatthew Dillon return (1); 26242545bca0SMatthew Dillon } 26252545bca0SMatthew Dillon 26262545bca0SMatthew Dillon /* 26272545bca0SMatthew Dillon * Reply path for all SCSI I/O requests, called from our 26282545bca0SMatthew Dillon * interrupt handler by extracting our handler index from 26292545bca0SMatthew Dillon * the MsgContext field of the reply from the IOC. 
26302545bca0SMatthew Dillon * 26312545bca0SMatthew Dillon * This routine is optimized for the common case of a 26322545bca0SMatthew Dillon * completion without error. All exception handling is 26332545bca0SMatthew Dillon * offloaded to non-inlined helper routines to minimize 26342545bca0SMatthew Dillon * cache footprint. 26352545bca0SMatthew Dillon */ 26362545bca0SMatthew Dillon static int 26372545bca0SMatthew Dillon mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 26382545bca0SMatthew Dillon uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 26392545bca0SMatthew Dillon { 26402545bca0SMatthew Dillon MSG_SCSI_IO_REQUEST *scsi_req; 26412545bca0SMatthew Dillon union ccb *ccb; 26422545bca0SMatthew Dillon 26432545bca0SMatthew Dillon if (req->state == REQ_STATE_FREE) { 26442545bca0SMatthew Dillon mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 26452545bca0SMatthew Dillon return (TRUE); 26462545bca0SMatthew Dillon } 26472545bca0SMatthew Dillon 26482545bca0SMatthew Dillon scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 26492545bca0SMatthew Dillon ccb = req->ccb; 26502545bca0SMatthew Dillon if (ccb == NULL) { 26512545bca0SMatthew Dillon mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 26522545bca0SMatthew Dillon req, req->serno); 26532545bca0SMatthew Dillon return (TRUE); 26542545bca0SMatthew Dillon } 26552545bca0SMatthew Dillon 26562545bca0SMatthew Dillon mpt_req_untimeout(req, mpt_timeout, ccb); 26572545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 26582545bca0SMatthew Dillon 26592545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 26602545bca0SMatthew Dillon bus_dmasync_op_t op; 26612545bca0SMatthew Dillon 26622545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 26632545bca0SMatthew Dillon op = BUS_DMASYNC_POSTREAD; 26642545bca0SMatthew Dillon else 26652545bca0SMatthew Dillon op = BUS_DMASYNC_POSTWRITE; 26662545bca0SMatthew Dillon bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 26672545bca0SMatthew Dillon bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 26682545bca0SMatthew Dillon } 26692545bca0SMatthew Dillon 26702545bca0SMatthew Dillon if (reply_frame == NULL) { 26712545bca0SMatthew Dillon /* 26722545bca0SMatthew Dillon * Context only reply, completion without error status. 26732545bca0SMatthew Dillon */ 26742545bca0SMatthew Dillon ccb->csio.resid = 0; 26752545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 26762545bca0SMatthew Dillon ccb->csio.scsi_status = SCSI_STATUS_OK; 26772545bca0SMatthew Dillon } else { 26782545bca0SMatthew Dillon mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 26792545bca0SMatthew Dillon } 26802545bca0SMatthew Dillon 26812545bca0SMatthew Dillon if (mpt->outofbeer) { 26822545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 26832545bca0SMatthew Dillon mpt->outofbeer = 0; 26842545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 26852545bca0SMatthew Dillon } 26862545bca0SMatthew Dillon if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 26872545bca0SMatthew Dillon struct scsi_inquiry_data *iq = 26882545bca0SMatthew Dillon (struct scsi_inquiry_data *)ccb->csio.data_ptr; 26892545bca0SMatthew Dillon if (scsi_req->Function == 26902545bca0SMatthew Dillon MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 26912545bca0SMatthew Dillon /* 26922545bca0SMatthew Dillon * Fake out the device type so that only the 26932545bca0SMatthew Dillon * pass-thru device will attach. 
26942545bca0SMatthew Dillon */ 26952545bca0SMatthew Dillon iq->device &= ~0x1F; 26962545bca0SMatthew Dillon iq->device |= T_NODEVICE; 26972545bca0SMatthew Dillon } 26982545bca0SMatthew Dillon } 26992545bca0SMatthew Dillon if (mpt->verbose == MPT_PRT_DEBUG) { 27002545bca0SMatthew Dillon mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 27012545bca0SMatthew Dillon req, req->serno); 27022545bca0SMatthew Dillon } 27034c42baf4SSascha Wildner KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 27042545bca0SMatthew Dillon xpt_done(ccb); 27052545bca0SMatthew Dillon if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 27062545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 27072545bca0SMatthew Dillon } else { 27082545bca0SMatthew Dillon mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 27092545bca0SMatthew Dillon req, req->serno); 27102545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 27112545bca0SMatthew Dillon } 27122545bca0SMatthew Dillon KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 27132545bca0SMatthew Dillon ("CCB req needed wakeup")); 27142545bca0SMatthew Dillon #ifdef INVARIANTS 27152545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 27162545bca0SMatthew Dillon #endif 27172545bca0SMatthew Dillon mpt_free_request(mpt, req); 27182545bca0SMatthew Dillon return (TRUE); 27192545bca0SMatthew Dillon } 27202545bca0SMatthew Dillon 27212545bca0SMatthew Dillon static int 27222545bca0SMatthew Dillon mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 27232545bca0SMatthew Dillon uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 27242545bca0SMatthew Dillon { 27252545bca0SMatthew Dillon MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 27262545bca0SMatthew Dillon 27272545bca0SMatthew Dillon KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 27282545bca0SMatthew Dillon #ifdef INVARIANTS 27292545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 27302545bca0SMatthew Dillon #endif 27312545bca0SMatthew Dillon tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 27322545bca0SMatthew Dillon /* Record IOC Status and Response Code of TMF for any waiters. 
*/ 27332545bca0SMatthew Dillon req->IOCStatus = le16toh(tmf_reply->IOCStatus); 27342545bca0SMatthew Dillon req->ResponseCode = tmf_reply->ResponseCode; 27352545bca0SMatthew Dillon 27362545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 27372545bca0SMatthew Dillon req, req->serno, le16toh(tmf_reply->IOCStatus)); 27382545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 27392545bca0SMatthew Dillon if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 27402545bca0SMatthew Dillon req->state |= REQ_STATE_DONE; 27412545bca0SMatthew Dillon wakeup(req); 27422545bca0SMatthew Dillon } else { 27432545bca0SMatthew Dillon mpt->tmf_req->state = REQ_STATE_FREE; 27442545bca0SMatthew Dillon } 27452545bca0SMatthew Dillon return (TRUE); 27462545bca0SMatthew Dillon } 27472545bca0SMatthew Dillon 27482545bca0SMatthew Dillon /* 27492545bca0SMatthew Dillon * XXX: Move to definitions file 27502545bca0SMatthew Dillon */ 27512545bca0SMatthew Dillon #define ELS 0x22 27522545bca0SMatthew Dillon #define FC4LS 0x32 27532545bca0SMatthew Dillon #define ABTS 0x81 27542545bca0SMatthew Dillon #define BA_ACC 0x84 27552545bca0SMatthew Dillon 27562545bca0SMatthew Dillon #define LS_RJT 0x01 27572545bca0SMatthew Dillon #define LS_ACC 0x02 27582545bca0SMatthew Dillon #define PLOGI 0x03 27592545bca0SMatthew Dillon #define LOGO 0x05 27602545bca0SMatthew Dillon #define SRR 0x14 27612545bca0SMatthew Dillon #define PRLI 0x20 27622545bca0SMatthew Dillon #define PRLO 0x21 27632545bca0SMatthew Dillon #define ADISC 0x52 27642545bca0SMatthew Dillon #define RSCN 0x61 27652545bca0SMatthew Dillon 27662545bca0SMatthew Dillon static void 27672545bca0SMatthew Dillon mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 27682545bca0SMatthew Dillon PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 27692545bca0SMatthew Dillon { 27702545bca0SMatthew Dillon uint32_t fl; 27712545bca0SMatthew Dillon MSG_LINK_SERVICE_RSP_REQUEST tmp; 27722545bca0SMatthew Dillon PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 27732545bca0SMatthew Dillon 27742545bca0SMatthew Dillon /* 27752545bca0SMatthew Dillon * We are going to reuse the ELS request to send this response back. 27762545bca0SMatthew Dillon */ 27772545bca0SMatthew Dillon rsp = &tmp; 27782545bca0SMatthew Dillon memset(rsp, 0, sizeof(*rsp)); 27792545bca0SMatthew Dillon 27802545bca0SMatthew Dillon #ifdef USE_IMMEDIATE_LINK_DATA 27812545bca0SMatthew Dillon /* 27822545bca0SMatthew Dillon * Apparently the IMMEDIATE stuff doesn't seem to work. 27832545bca0SMatthew Dillon */ 27842545bca0SMatthew Dillon rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 27852545bca0SMatthew Dillon #endif 27862545bca0SMatthew Dillon rsp->RspLength = length; 27872545bca0SMatthew Dillon rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 27882545bca0SMatthew Dillon rsp->MsgContext = htole32(req->index | fc_els_handler_id); 27892545bca0SMatthew Dillon 27902545bca0SMatthew Dillon /* 27912545bca0SMatthew Dillon * Copy over information from the original reply frame to 27922545bca0SMatthew Dillon * it's correct place in the response. 27932545bca0SMatthew Dillon */ 27942545bca0SMatthew Dillon memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 27952545bca0SMatthew Dillon 27962545bca0SMatthew Dillon /* 27972545bca0SMatthew Dillon * And now copy back the temporary area to the original frame. 
27982545bca0SMatthew Dillon */ 27992545bca0SMatthew Dillon memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 28002545bca0SMatthew Dillon rsp = req->req_vbuf; 28012545bca0SMatthew Dillon 28022545bca0SMatthew Dillon #ifdef USE_IMMEDIATE_LINK_DATA 28032545bca0SMatthew Dillon memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 28042545bca0SMatthew Dillon #else 28052545bca0SMatthew Dillon { 28062545bca0SMatthew Dillon PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 28072545bca0SMatthew Dillon bus_addr_t paddr = req->req_pbuf; 28082545bca0SMatthew Dillon paddr += MPT_RQSL(mpt); 28092545bca0SMatthew Dillon 28102545bca0SMatthew Dillon fl = 28112545bca0SMatthew Dillon MPI_SGE_FLAGS_HOST_TO_IOC | 28122545bca0SMatthew Dillon MPI_SGE_FLAGS_SIMPLE_ELEMENT | 28132545bca0SMatthew Dillon MPI_SGE_FLAGS_LAST_ELEMENT | 28142545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_LIST | 28152545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER; 28162545bca0SMatthew Dillon fl <<= MPI_SGE_FLAGS_SHIFT; 28172545bca0SMatthew Dillon fl |= (length); 28182545bca0SMatthew Dillon se->FlagsLength = htole32(fl); 28192545bca0SMatthew Dillon se->Address = htole32((uint32_t) paddr); 28202545bca0SMatthew Dillon } 28212545bca0SMatthew Dillon #endif 28222545bca0SMatthew Dillon 28232545bca0SMatthew Dillon /* 28242545bca0SMatthew Dillon * Send it on... 28252545bca0SMatthew Dillon */ 28262545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 28272545bca0SMatthew Dillon } 28282545bca0SMatthew Dillon 28292545bca0SMatthew Dillon static int 28302545bca0SMatthew Dillon mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 28312545bca0SMatthew Dillon uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 28322545bca0SMatthew Dillon { 28332545bca0SMatthew Dillon PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 28342545bca0SMatthew Dillon (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 28352545bca0SMatthew Dillon U8 rctl; 28362545bca0SMatthew Dillon U8 type; 28372545bca0SMatthew Dillon U8 cmd; 28382545bca0SMatthew Dillon U16 status = le16toh(reply_frame->IOCStatus); 28392545bca0SMatthew Dillon U32 *elsbuf; 28402545bca0SMatthew Dillon int ioindex; 28412545bca0SMatthew Dillon int do_refresh = TRUE; 28422545bca0SMatthew Dillon 28432545bca0SMatthew Dillon #ifdef INVARIANTS 28442545bca0SMatthew Dillon KASSERT(mpt_req_on_free_list(mpt, req) == 0, 28452545bca0SMatthew Dillon ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 28462545bca0SMatthew Dillon req, req->serno, rp->Function)); 28472545bca0SMatthew Dillon if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 28482545bca0SMatthew Dillon mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 28492545bca0SMatthew Dillon } else { 28502545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 28512545bca0SMatthew Dillon } 28522545bca0SMatthew Dillon #endif 28532545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 28542545bca0SMatthew Dillon "FC_ELS Complete: req %p:%u, reply %p function %x\n", 28552545bca0SMatthew Dillon req, req->serno, reply_frame, reply_frame->Function); 28562545bca0SMatthew Dillon 28572545bca0SMatthew Dillon if (status != MPI_IOCSTATUS_SUCCESS) { 28582545bca0SMatthew Dillon mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 28592545bca0SMatthew Dillon status, reply_frame->Function); 28602545bca0SMatthew Dillon if (status == MPI_IOCSTATUS_INVALID_STATE) { 28612545bca0SMatthew Dillon /* 28622545bca0SMatthew Dillon * XXX: to get around shutdown issue 28632545bca0SMatthew Dillon */ 
			mpt->disabled = 1;
			return (TRUE);
		}
		return (TRUE);
	}

	/*
	 * If the function is that of a link service response, we recycle
	 * the response to be a refresh for a new link service request.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
		/* Freddie Uncle Charlie Katie */
		/* We don't get the IOINDEX as part of the Link Svc Rsp */
		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
			if (mpt->els_cmd_ptrs[ioindex] == req) {
				break;
			}

		KASSERT(ioindex < mpt->els_cmds_allocated,
		    ("can't find my mommie!"));

		/* remove from active list as we're going to re-post it */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
		return (TRUE);
	}

	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		if (req->state & REQ_STATE_TIMEDOUT) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Completed After Timeout\n");
			mpt_free_request(mpt, req);
		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Async Primitive Send Complete\n");
			mpt_free_request(mpt, req);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Complete- Waking Waiter\n");
			wakeup(req);
		}
		return (TRUE);
	}

	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
		    rp->MsgLength, rp->MsgFlags);
		return (TRUE);
	}

	if (rp->MsgLength <= 5) {
		/*
		 * This is just an ack of an original ELS buffer post
29272545bca0SMatthew Dillon */ 29282545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 29292545bca0SMatthew Dillon "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 29302545bca0SMatthew Dillon return (TRUE); 29312545bca0SMatthew Dillon } 29322545bca0SMatthew Dillon 29332545bca0SMatthew Dillon 29342545bca0SMatthew Dillon rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 29352545bca0SMatthew Dillon type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 29362545bca0SMatthew Dillon 29372545bca0SMatthew Dillon elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 29382545bca0SMatthew Dillon cmd = be32toh(elsbuf[0]) >> 24; 29392545bca0SMatthew Dillon 29402545bca0SMatthew Dillon if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 29412545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 29422545bca0SMatthew Dillon return (TRUE); 29432545bca0SMatthew Dillon } 29442545bca0SMatthew Dillon 29452545bca0SMatthew Dillon ioindex = le32toh(rp->TransactionContext); 29462545bca0SMatthew Dillon req = mpt->els_cmd_ptrs[ioindex]; 29472545bca0SMatthew Dillon 29482545bca0SMatthew Dillon if (rctl == ELS && type == 1) { 29492545bca0SMatthew Dillon switch (cmd) { 29502545bca0SMatthew Dillon case PRLI: 29512545bca0SMatthew Dillon /* 29522545bca0SMatthew Dillon * Send back a PRLI ACC 29532545bca0SMatthew Dillon */ 29542545bca0SMatthew Dillon mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 29552545bca0SMatthew Dillon le32toh(rp->Wwn.PortNameHigh), 29562545bca0SMatthew Dillon le32toh(rp->Wwn.PortNameLow)); 29572545bca0SMatthew Dillon elsbuf[0] = htobe32(0x02100014); 29582545bca0SMatthew Dillon elsbuf[1] |= htobe32(0x00000100); 29592545bca0SMatthew Dillon elsbuf[4] = htobe32(0x00000002); 29602545bca0SMatthew Dillon if (mpt->role & MPT_ROLE_TARGET) 29612545bca0SMatthew Dillon elsbuf[4] |= htobe32(0x00000010); 29622545bca0SMatthew Dillon if (mpt->role & MPT_ROLE_INITIATOR) 29632545bca0SMatthew Dillon elsbuf[4] |= htobe32(0x00000020); 29642545bca0SMatthew Dillon /* remove from active list as we're done */ 29652545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 29662545bca0SMatthew Dillon req->state &= ~REQ_STATE_QUEUED; 29672545bca0SMatthew Dillon req->state |= REQ_STATE_DONE; 29682545bca0SMatthew Dillon mpt_fc_els_send_response(mpt, req, rp, 20); 29692545bca0SMatthew Dillon do_refresh = FALSE; 29702545bca0SMatthew Dillon break; 29712545bca0SMatthew Dillon case PRLO: 29722545bca0SMatthew Dillon memset(elsbuf, 0, 5 * (sizeof (U32))); 29732545bca0SMatthew Dillon elsbuf[0] = htobe32(0x02100014); 29742545bca0SMatthew Dillon elsbuf[1] = htobe32(0x08000100); 29752545bca0SMatthew Dillon mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 29762545bca0SMatthew Dillon le32toh(rp->Wwn.PortNameHigh), 29772545bca0SMatthew Dillon le32toh(rp->Wwn.PortNameLow)); 29782545bca0SMatthew Dillon /* remove from active list as we're done */ 29792545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 29802545bca0SMatthew Dillon req->state &= ~REQ_STATE_QUEUED; 29812545bca0SMatthew Dillon req->state |= REQ_STATE_DONE; 29822545bca0SMatthew Dillon mpt_fc_els_send_response(mpt, req, rp, 20); 29832545bca0SMatthew Dillon do_refresh = FALSE; 29842545bca0SMatthew Dillon break; 29852545bca0SMatthew Dillon default: 29862545bca0SMatthew Dillon mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 29872545bca0SMatthew Dillon break; 29882545bca0SMatthew Dillon } 29892545bca0SMatthew Dillon } else if (rctl == ABTS && type == 0) { 
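		/*
		 * This branch answers an inbound ABTS (abort sequence) by
		 * aborting the matching target command, if any, and then
		 * reusing the buffer to send back a BA_ACC.  As a reading
		 * aid only -- the struct below is illustrative and is not
		 * used by the driver -- the accept payload assembled in
		 * elsbuf[] near the end of this branch is, as best understood
		 * from the FC basic link-service format, laid out roughly as:
		 */
#if 0
		struct ba_acc_sketch {
			uint8_t  seq_id_valid;	/* 0x00: last SEQ_ID not valid */
			uint8_t  seq_id;
			uint16_t reserved;
			uint16_t ox_id;		/* exchange being aborted */
			uint16_t rx_id;
			uint16_t low_seq_cnt;	/* 0x0000 */
			uint16_t high_seq_cnt;	/* 0xffff: whole exchange */
		};
#endif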
		uint16_t rx_id = le16toh(rp->Rxid);
		uint16_t ox_id = le16toh(rp->Oxid);
		request_t *tgt_req = NULL;

		mpt_prt(mpt,
		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
		    le32toh(rp->Wwn.PortNameLow));
		if (rx_id >= mpt->mpt_max_tgtcmds) {
			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
		} else if (mpt->tgt_cmd_ptrs == NULL) {
			mpt_prt(mpt, "No TGT CMD PTRS\n");
		} else {
			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
		}
		if (tgt_req) {
			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
			union ccb *ccb;
			uint32_t ct_id;

			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that matches the RX_ID.
			 *
			 * It'd be nice to have OX_ID to crosscheck with
			 * as well.
			 */
			ct_id = GET_IO_INDEX(tgt->reply_desc);

			if (ct_id != rx_id) {
				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
				    rx_id, ct_id);
				goto skip;
			}

			ccb = tgt->ccb;
			if (ccb) {
				mpt_prt(mpt,
				    "CCB (%p): lun %u flags %x status %x\n",
				    ccb, ccb->ccb_h.target_lun,
				    ccb->ccb_h.flags, ccb->ccb_h.status);
			}
			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
			    "%x nxfers %x\n", tgt->state,
			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
			    tgt->nxfers);
		skip:
			if (mpt_abort_target_cmd(mpt, tgt_req)) {
				mpt_prt(mpt, "unable to start TargetAbort\n");
			}
		} else {
			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
		}
		memset(elsbuf, 0, 5 * (sizeof (U32)));
		elsbuf[0] = htobe32(0);
		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x000ffff);
		/*
		 * Dork with the reply frame so that the response to it
		 * will be correct.
30532545bca0SMatthew Dillon */ 30542545bca0SMatthew Dillon rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 30552545bca0SMatthew Dillon /* remove from active list as we're done */ 30562545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 30572545bca0SMatthew Dillon req->state &= ~REQ_STATE_QUEUED; 30582545bca0SMatthew Dillon req->state |= REQ_STATE_DONE; 30592545bca0SMatthew Dillon mpt_fc_els_send_response(mpt, req, rp, 12); 30602545bca0SMatthew Dillon do_refresh = FALSE; 30612545bca0SMatthew Dillon } else { 30622545bca0SMatthew Dillon mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 30632545bca0SMatthew Dillon } 30642545bca0SMatthew Dillon if (do_refresh == TRUE) { 30652545bca0SMatthew Dillon /* remove from active list as we're done */ 30662545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 30672545bca0SMatthew Dillon req->state &= ~REQ_STATE_QUEUED; 30682545bca0SMatthew Dillon req->state |= REQ_STATE_DONE; 30692545bca0SMatthew Dillon mpt_fc_post_els(mpt, req, ioindex); 30702545bca0SMatthew Dillon } 30712545bca0SMatthew Dillon return (TRUE); 30722545bca0SMatthew Dillon } 30732545bca0SMatthew Dillon 30742545bca0SMatthew Dillon /* 30752545bca0SMatthew Dillon * Clean up all SCSI Initiator personality state in response 30762545bca0SMatthew Dillon * to a controller reset. 30772545bca0SMatthew Dillon */ 30782545bca0SMatthew Dillon static void 30792545bca0SMatthew Dillon mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 30802545bca0SMatthew Dillon { 30814c42baf4SSascha Wildner 30822545bca0SMatthew Dillon /* 30832545bca0SMatthew Dillon * The pending list is already run down by 30842545bca0SMatthew Dillon * the generic handler. Perform the same 30852545bca0SMatthew Dillon * operation on the timed out request list. 30862545bca0SMatthew Dillon */ 30872545bca0SMatthew Dillon mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 30882545bca0SMatthew Dillon MPI_IOCSTATUS_INVALID_STATE); 30892545bca0SMatthew Dillon 30902545bca0SMatthew Dillon /* 30912545bca0SMatthew Dillon * XXX: We need to repost ELS and Target Command Buffers? 30922545bca0SMatthew Dillon */ 30932545bca0SMatthew Dillon 30942545bca0SMatthew Dillon /* 30952545bca0SMatthew Dillon * Inform the XPT that a bus reset has occurred. 30962545bca0SMatthew Dillon */ 30972545bca0SMatthew Dillon xpt_async(AC_BUS_RESET, mpt->path, NULL); 30982545bca0SMatthew Dillon } 30992545bca0SMatthew Dillon 31002545bca0SMatthew Dillon /* 31012545bca0SMatthew Dillon * Parse additional completion information in the reply 31022545bca0SMatthew Dillon * frame for SCSI I/O requests. 
31032545bca0SMatthew Dillon */ 31042545bca0SMatthew Dillon static int 31052545bca0SMatthew Dillon mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 31062545bca0SMatthew Dillon MSG_DEFAULT_REPLY *reply_frame) 31072545bca0SMatthew Dillon { 31082545bca0SMatthew Dillon union ccb *ccb; 31092545bca0SMatthew Dillon MSG_SCSI_IO_REPLY *scsi_io_reply; 31102545bca0SMatthew Dillon u_int ioc_status; 31112545bca0SMatthew Dillon u_int sstate; 31122545bca0SMatthew Dillon 31132545bca0SMatthew Dillon MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 31142545bca0SMatthew Dillon KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 31152545bca0SMatthew Dillon || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 31162545bca0SMatthew Dillon ("MPT SCSI I/O Handler called with incorrect reply type")); 31172545bca0SMatthew Dillon KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 31182545bca0SMatthew Dillon ("MPT SCSI I/O Handler called with continuation reply")); 31192545bca0SMatthew Dillon 31202545bca0SMatthew Dillon scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 31212545bca0SMatthew Dillon ioc_status = le16toh(scsi_io_reply->IOCStatus); 31222545bca0SMatthew Dillon ioc_status &= MPI_IOCSTATUS_MASK; 31232545bca0SMatthew Dillon sstate = scsi_io_reply->SCSIState; 31242545bca0SMatthew Dillon 31252545bca0SMatthew Dillon ccb = req->ccb; 31262545bca0SMatthew Dillon ccb->csio.resid = 31272545bca0SMatthew Dillon ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 31282545bca0SMatthew Dillon 31292545bca0SMatthew Dillon if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 31302545bca0SMatthew Dillon && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 31314c42baf4SSascha Wildner uint32_t sense_returned; 31324c42baf4SSascha Wildner 31332545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 31344c42baf4SSascha Wildner 31354c42baf4SSascha Wildner sense_returned = le32toh(scsi_io_reply->SenseCount); 31364c42baf4SSascha Wildner if (sense_returned < ccb->csio.sense_len) 31374c42baf4SSascha Wildner ccb->csio.sense_resid = ccb->csio.sense_len - 31384c42baf4SSascha Wildner sense_returned; 31394c42baf4SSascha Wildner else 31404c42baf4SSascha Wildner ccb->csio.sense_resid = 0; 31414c42baf4SSascha Wildner 3142bc14747bSSascha Wildner bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 31432545bca0SMatthew Dillon bcopy(req->sense_vbuf, &ccb->csio.sense_data, 31444c42baf4SSascha Wildner min(ccb->csio.sense_len, sense_returned)); 31452545bca0SMatthew Dillon } 31462545bca0SMatthew Dillon 31472545bca0SMatthew Dillon if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 31482545bca0SMatthew Dillon /* 31492545bca0SMatthew Dillon * Tag messages rejected, but non-tagged retry 31502545bca0SMatthew Dillon * was successful. 31512545bca0SMatthew Dillon XXXX 31522545bca0SMatthew Dillon mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 31532545bca0SMatthew Dillon */ 31542545bca0SMatthew Dillon } 31552545bca0SMatthew Dillon 31562545bca0SMatthew Dillon switch(ioc_status) { 31572545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 31582545bca0SMatthew Dillon /* 31592545bca0SMatthew Dillon * XXX 31602545bca0SMatthew Dillon * Linux driver indicates that a zero 31612545bca0SMatthew Dillon * transfer length with this error code 31622545bca0SMatthew Dillon * indicates a CRC error. 31632545bca0SMatthew Dillon * 31642545bca0SMatthew Dillon * No need to swap the bytes for checking 31652545bca0SMatthew Dillon * against zero. 
31662545bca0SMatthew Dillon */ 31672545bca0SMatthew Dillon if (scsi_io_reply->TransferCount == 0) { 31682545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 31692545bca0SMatthew Dillon break; 31702545bca0SMatthew Dillon } 31712545bca0SMatthew Dillon /* FALLTHROUGH */ 31722545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 31732545bca0SMatthew Dillon case MPI_IOCSTATUS_SUCCESS: 31742545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 31752545bca0SMatthew Dillon if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 31762545bca0SMatthew Dillon /* 31772545bca0SMatthew Dillon * Status was never returned for this transaction. 31782545bca0SMatthew Dillon */ 31792545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 31802545bca0SMatthew Dillon } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 31812545bca0SMatthew Dillon ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 31822545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 31832545bca0SMatthew Dillon if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 31842545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 31852545bca0SMatthew Dillon } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 31862545bca0SMatthew Dillon 31876d259fc1SSascha Wildner /* XXX Handle SPI-Packet and FCP-2 response info. */ 31882545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 31892545bca0SMatthew Dillon } else 31902545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 31912545bca0SMatthew Dillon break; 31922545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 31932545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 31942545bca0SMatthew Dillon break; 31952545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 31962545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 31972545bca0SMatthew Dillon break; 31982545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 31992545bca0SMatthew Dillon /* 32002545bca0SMatthew Dillon * Since selection timeouts and "device really not 32012545bca0SMatthew Dillon * there" are grouped into this error code, report 32022545bca0SMatthew Dillon * selection timeout. Selection timeouts are 32032545bca0SMatthew Dillon * typically retried before giving up on the device 32042545bca0SMatthew Dillon * whereas "device not there" errors are considered 32052545bca0SMatthew Dillon * unretryable. 32062545bca0SMatthew Dillon */ 32072545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 32082545bca0SMatthew Dillon break; 32092545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 32102545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 32112545bca0SMatthew Dillon break; 32122545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_INVALID_BUS: 32132545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 32142545bca0SMatthew Dillon break; 32152545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 32162545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_TID_INVALID); 32172545bca0SMatthew Dillon break; 32182545bca0SMatthew Dillon case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 32192545bca0SMatthew Dillon ccb->ccb_h.status = CAM_UA_TERMIO; 32202545bca0SMatthew Dillon break; 32212545bca0SMatthew Dillon case MPI_IOCSTATUS_INVALID_STATE: 32222545bca0SMatthew Dillon /* 32232545bca0SMatthew Dillon * The IOC has been reset. Emulate a bus reset. 
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
32572545bca0SMatthew Dillon */ 32582545bca0SMatthew Dillon ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 32592545bca0SMatthew Dillon break; 32602545bca0SMatthew Dillon } 32612545bca0SMatthew Dillon 32622545bca0SMatthew Dillon if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 32632545bca0SMatthew Dillon mpt_freeze_ccb(ccb); 32642545bca0SMatthew Dillon } 32652545bca0SMatthew Dillon 32662545bca0SMatthew Dillon return (TRUE); 32672545bca0SMatthew Dillon } 32682545bca0SMatthew Dillon 32692545bca0SMatthew Dillon static void 32702545bca0SMatthew Dillon mpt_action(struct cam_sim *sim, union ccb *ccb) 32712545bca0SMatthew Dillon { 32722545bca0SMatthew Dillon struct mpt_softc *mpt; 32732545bca0SMatthew Dillon struct ccb_trans_settings *cts; 32742545bca0SMatthew Dillon target_id_t tgt; 32752545bca0SMatthew Dillon lun_id_t lun; 32762545bca0SMatthew Dillon int raid_passthru; 32772545bca0SMatthew Dillon 32782545bca0SMatthew Dillon CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 32792545bca0SMatthew Dillon 32802545bca0SMatthew Dillon mpt = (struct mpt_softc *)cam_sim_softc(sim); 32812545bca0SMatthew Dillon raid_passthru = (sim == mpt->phydisk_sim); 32822545bca0SMatthew Dillon MPT_LOCK_ASSERT(mpt); 32832545bca0SMatthew Dillon 32842545bca0SMatthew Dillon tgt = ccb->ccb_h.target_id; 32852545bca0SMatthew Dillon lun = ccb->ccb_h.target_lun; 32862545bca0SMatthew Dillon if (raid_passthru && 32872545bca0SMatthew Dillon ccb->ccb_h.func_code != XPT_PATH_INQ && 32882545bca0SMatthew Dillon ccb->ccb_h.func_code != XPT_RESET_BUS && 32892545bca0SMatthew Dillon ccb->ccb_h.func_code != XPT_RESET_DEV) { 32902545bca0SMatthew Dillon if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 32912545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 32922545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 32932545bca0SMatthew Dillon xpt_done(ccb); 32942545bca0SMatthew Dillon return; 32952545bca0SMatthew Dillon } 32962545bca0SMatthew Dillon } 32972545bca0SMatthew Dillon ccb->ccb_h.ccb_mpt_ptr = mpt; 32982545bca0SMatthew Dillon 32992545bca0SMatthew Dillon switch (ccb->ccb_h.func_code) { 33002545bca0SMatthew Dillon case XPT_SCSI_IO: /* Execute the requested I/O operation */ 33012545bca0SMatthew Dillon /* 33022545bca0SMatthew Dillon * Do a couple of preliminary checks... 
33032545bca0SMatthew Dillon */ 33042545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 33052545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 33062545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 33072545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 33082545bca0SMatthew Dillon break; 33092545bca0SMatthew Dillon } 33102545bca0SMatthew Dillon } 33112545bca0SMatthew Dillon /* Max supported CDB length is 16 bytes */ 33122545bca0SMatthew Dillon /* XXX Unless we implement the new 32byte message type */ 33132545bca0SMatthew Dillon if (ccb->csio.cdb_len > 33142545bca0SMatthew Dillon sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 33152545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 33162545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 33172545bca0SMatthew Dillon break; 33182545bca0SMatthew Dillon } 33192545bca0SMatthew Dillon #ifdef MPT_TEST_MULTIPATH 33202545bca0SMatthew Dillon if (mpt->failure_id == ccb->ccb_h.target_id) { 33212545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 33222545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 33232545bca0SMatthew Dillon break; 33242545bca0SMatthew Dillon } 33252545bca0SMatthew Dillon #endif 33262545bca0SMatthew Dillon ccb->csio.scsi_status = SCSI_STATUS_OK; 33272545bca0SMatthew Dillon mpt_start(sim, ccb); 33282545bca0SMatthew Dillon return; 33292545bca0SMatthew Dillon 33302545bca0SMatthew Dillon case XPT_RESET_BUS: 33312545bca0SMatthew Dillon if (raid_passthru) { 33322545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 33332545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 33342545bca0SMatthew Dillon break; 33352545bca0SMatthew Dillon } 33362545bca0SMatthew Dillon case XPT_RESET_DEV: 33372545bca0SMatthew Dillon if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 33382545bca0SMatthew Dillon if (bootverbose) { 33392545bca0SMatthew Dillon xpt_print(ccb->ccb_h.path, "reset bus\n"); 33402545bca0SMatthew Dillon } 33412545bca0SMatthew Dillon } else { 33422545bca0SMatthew Dillon xpt_print(ccb->ccb_h.path, "reset device\n"); 33432545bca0SMatthew Dillon } 33442545bca0SMatthew Dillon (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 33452545bca0SMatthew Dillon 33462545bca0SMatthew Dillon /* 33472545bca0SMatthew Dillon * mpt_bus_reset is always successful in that it 33482545bca0SMatthew Dillon * will fall back to a hard reset should a bus 33492545bca0SMatthew Dillon * reset attempt fail. 
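 * Either way the CCB is completed with CAM_REQ_CMP below.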
33502545bca0SMatthew Dillon */ 33512545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 33522545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 33532545bca0SMatthew Dillon break; 33542545bca0SMatthew Dillon 33552545bca0SMatthew Dillon case XPT_ABORT: 33562545bca0SMatthew Dillon { 33572545bca0SMatthew Dillon union ccb *accb = ccb->cab.abort_ccb; 33582545bca0SMatthew Dillon switch (accb->ccb_h.func_code) { 33592545bca0SMatthew Dillon case XPT_ACCEPT_TARGET_IO: 33602545bca0SMatthew Dillon case XPT_IMMED_NOTIFY: 33612545bca0SMatthew Dillon ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 33622545bca0SMatthew Dillon break; 33632545bca0SMatthew Dillon case XPT_CONT_TARGET_IO: 33642545bca0SMatthew Dillon mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 33652545bca0SMatthew Dillon ccb->ccb_h.status = CAM_UA_ABORT; 33662545bca0SMatthew Dillon break; 33672545bca0SMatthew Dillon case XPT_SCSI_IO: 33682545bca0SMatthew Dillon ccb->ccb_h.status = CAM_UA_ABORT; 33692545bca0SMatthew Dillon break; 33702545bca0SMatthew Dillon default: 33712545bca0SMatthew Dillon ccb->ccb_h.status = CAM_REQ_INVALID; 33722545bca0SMatthew Dillon break; 33732545bca0SMatthew Dillon } 33742545bca0SMatthew Dillon break; 33752545bca0SMatthew Dillon } 33762545bca0SMatthew Dillon 33772545bca0SMatthew Dillon #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 33782545bca0SMatthew Dillon #define DP_DISC_ENABLE 0x1 33792545bca0SMatthew Dillon #define DP_DISC_DISABL 0x2 33802545bca0SMatthew Dillon #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 33812545bca0SMatthew Dillon 33822545bca0SMatthew Dillon #define DP_TQING_ENABLE 0x4 33832545bca0SMatthew Dillon #define DP_TQING_DISABL 0x8 33842545bca0SMatthew Dillon #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 33852545bca0SMatthew Dillon 33862545bca0SMatthew Dillon #define DP_WIDE 0x10 33872545bca0SMatthew Dillon #define DP_NARROW 0x20 33882545bca0SMatthew Dillon #define DP_WIDTH (DP_WIDE|DP_NARROW) 33892545bca0SMatthew Dillon 33902545bca0SMatthew Dillon #define DP_SYNC 0x40 33912545bca0SMatthew Dillon 33922545bca0SMatthew Dillon case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 33932545bca0SMatthew Dillon { 33942545bca0SMatthew Dillon struct ccb_trans_settings_scsi *scsi; 33952545bca0SMatthew Dillon struct ccb_trans_settings_spi *spi; 33962545bca0SMatthew Dillon uint8_t dval; 33972545bca0SMatthew Dillon u_int period; 33982545bca0SMatthew Dillon u_int offset; 33992545bca0SMatthew Dillon int i, j; 34002545bca0SMatthew Dillon 34012545bca0SMatthew Dillon cts = &ccb->cts; 34022545bca0SMatthew Dillon 34032545bca0SMatthew Dillon if (mpt->is_fc || mpt->is_sas) { 34042545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 34052545bca0SMatthew Dillon break; 34062545bca0SMatthew Dillon } 34072545bca0SMatthew Dillon 34082545bca0SMatthew Dillon scsi = &cts->proto_specific.scsi; 34092545bca0SMatthew Dillon spi = &cts->xport_specific.spi; 34102545bca0SMatthew Dillon 34112545bca0SMatthew Dillon /* 34122545bca0SMatthew Dillon * We can be called just to validate transport and protocol versions. 34132545bca0SMatthew Dillon */ 34142545bca0SMatthew Dillon if (scsi->valid == 0 && spi->valid == 0) { 34152545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 34162545bca0SMatthew Dillon break; 34172545bca0SMatthew Dillon } 34182545bca0SMatthew Dillon 34192545bca0SMatthew Dillon /* 34202545bca0SMatthew Dillon * Skip attempting settings on RAID volume disks. 34212545bca0SMatthew Dillon * Other devices on the bus get the normal treatment.
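 * (Member disks behind a volume can still be reached and negotiated
 * through the raid_passthru SIM.)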
34222545bca0SMatthew Dillon */ 34232545bca0SMatthew Dillon if (mpt->phydisk_sim && raid_passthru == 0 && 34242545bca0SMatthew Dillon mpt_is_raid_volume(mpt, tgt) != 0) { 34252545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 34262545bca0SMatthew Dillon "no transfer settings for RAID vols\n"); 34272545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 34282545bca0SMatthew Dillon break; 34292545bca0SMatthew Dillon } 34302545bca0SMatthew Dillon 34312545bca0SMatthew Dillon i = mpt->mpt_port_page2.PortSettings & 34322545bca0SMatthew Dillon MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 34332545bca0SMatthew Dillon j = mpt->mpt_port_page2.PortFlags & 34342545bca0SMatthew Dillon MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 34352545bca0SMatthew Dillon if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 34362545bca0SMatthew Dillon j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 34372545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_ALWAYS, 34382545bca0SMatthew Dillon "honoring BIOS transfer negotiations\n"); 34392545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 34402545bca0SMatthew Dillon break; 34412545bca0SMatthew Dillon } 34422545bca0SMatthew Dillon 34432545bca0SMatthew Dillon dval = 0; 34442545bca0SMatthew Dillon period = 0; 34452545bca0SMatthew Dillon offset = 0; 34462545bca0SMatthew Dillon 34472545bca0SMatthew Dillon if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 34482545bca0SMatthew Dillon dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 34492545bca0SMatthew Dillon DP_DISC_ENABLE : DP_DISC_DISABL; 34502545bca0SMatthew Dillon } 34512545bca0SMatthew Dillon 34522545bca0SMatthew Dillon if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 34532545bca0SMatthew Dillon dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 34542545bca0SMatthew Dillon DP_TQING_ENABLE : DP_TQING_DISABL; 34552545bca0SMatthew Dillon } 34562545bca0SMatthew Dillon 34572545bca0SMatthew Dillon if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 34582545bca0SMatthew Dillon dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 
34592545bca0SMatthew Dillon DP_WIDE : DP_NARROW; 34602545bca0SMatthew Dillon } 34612545bca0SMatthew Dillon 34622545bca0SMatthew Dillon if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 34632545bca0SMatthew Dillon dval |= DP_SYNC; 34642545bca0SMatthew Dillon offset = spi->sync_offset; 34652545bca0SMatthew Dillon } else { 34662545bca0SMatthew Dillon PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 34672545bca0SMatthew Dillon &mpt->mpt_dev_page1[tgt]; 34682545bca0SMatthew Dillon offset = ptr->RequestedParameters; 34692545bca0SMatthew Dillon offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 34702545bca0SMatthew Dillon offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 34712545bca0SMatthew Dillon } 34722545bca0SMatthew Dillon if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 34732545bca0SMatthew Dillon dval |= DP_SYNC; 34742545bca0SMatthew Dillon period = spi->sync_period; 34752545bca0SMatthew Dillon } else { 34762545bca0SMatthew Dillon PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 34772545bca0SMatthew Dillon &mpt->mpt_dev_page1[tgt]; 34782545bca0SMatthew Dillon period = ptr->RequestedParameters; 34792545bca0SMatthew Dillon period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 34802545bca0SMatthew Dillon period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 34812545bca0SMatthew Dillon } 34822545bca0SMatthew Dillon if (dval & DP_DISC_ENABLE) { 34832545bca0SMatthew Dillon mpt->mpt_disc_enable |= (1 << tgt); 34842545bca0SMatthew Dillon } else if (dval & DP_DISC_DISABL) { 34852545bca0SMatthew Dillon mpt->mpt_disc_enable &= ~(1 << tgt); 34862545bca0SMatthew Dillon } 34872545bca0SMatthew Dillon if (dval & DP_TQING_ENABLE) { 34882545bca0SMatthew Dillon mpt->mpt_tag_enable |= (1 << tgt); 34892545bca0SMatthew Dillon } else if (dval & DP_TQING_DISABL) { 34902545bca0SMatthew Dillon mpt->mpt_tag_enable &= ~(1 << tgt); 34912545bca0SMatthew Dillon } 34922545bca0SMatthew Dillon if (dval & DP_WIDTH) { 34932545bca0SMatthew Dillon mpt_setwidth(mpt, tgt, 1); 34942545bca0SMatthew Dillon } 34952545bca0SMatthew Dillon if (dval & DP_SYNC) { 34962545bca0SMatthew Dillon mpt_setsync(mpt, tgt, period, offset); 34972545bca0SMatthew Dillon } 34982545bca0SMatthew Dillon if (dval == 0) { 34992545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 35002545bca0SMatthew Dillon break; 35012545bca0SMatthew Dillon } 35022545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 35032545bca0SMatthew Dillon "set [%d]: 0x%x period 0x%x offset %d\n", 35042545bca0SMatthew Dillon tgt, dval, period, offset); 35052545bca0SMatthew Dillon if (mpt_update_spi_config(mpt, tgt)) { 35062545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 35072545bca0SMatthew Dillon } else { 35082545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 35092545bca0SMatthew Dillon } 35102545bca0SMatthew Dillon break; 35112545bca0SMatthew Dillon } 35122545bca0SMatthew Dillon case XPT_GET_TRAN_SETTINGS: 35132545bca0SMatthew Dillon { 35142545bca0SMatthew Dillon struct ccb_trans_settings_scsi *scsi; 35152545bca0SMatthew Dillon cts = &ccb->cts; 35162545bca0SMatthew Dillon cts->protocol = PROTO_SCSI; 35172545bca0SMatthew Dillon if (mpt->is_fc) { 35182545bca0SMatthew Dillon struct ccb_trans_settings_fc *fc = 35192545bca0SMatthew Dillon &cts->xport_specific.fc; 35202545bca0SMatthew Dillon cts->protocol_version = SCSI_REV_SPC; 35212545bca0SMatthew Dillon cts->transport = XPORT_FC; 35222545bca0SMatthew Dillon cts->transport_version = 0; 35232545bca0SMatthew Dillon fc->valid = CTS_FC_VALID_SPEED; 35242545bca0SMatthew Dillon fc->bitrate = 100000; 35252545bca0SMatthew Dillon } else 
if (mpt->is_sas) { 35262545bca0SMatthew Dillon struct ccb_trans_settings_sas *sas = 35272545bca0SMatthew Dillon &cts->xport_specific.sas; 35282545bca0SMatthew Dillon cts->protocol_version = SCSI_REV_SPC2; 35292545bca0SMatthew Dillon cts->transport = XPORT_SAS; 35302545bca0SMatthew Dillon cts->transport_version = 0; 35312545bca0SMatthew Dillon sas->valid = CTS_SAS_VALID_SPEED; 35322545bca0SMatthew Dillon sas->bitrate = 300000; 35332545bca0SMatthew Dillon } else { 35342545bca0SMatthew Dillon cts->protocol_version = SCSI_REV_2; 35352545bca0SMatthew Dillon cts->transport = XPORT_SPI; 35362545bca0SMatthew Dillon cts->transport_version = 2; 35372545bca0SMatthew Dillon if (mpt_get_spi_settings(mpt, cts) != 0) { 35382545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 35392545bca0SMatthew Dillon break; 35402545bca0SMatthew Dillon } 35412545bca0SMatthew Dillon } 35422545bca0SMatthew Dillon scsi = &cts->proto_specific.scsi; 35432545bca0SMatthew Dillon scsi->valid = CTS_SCSI_VALID_TQ; 35442545bca0SMatthew Dillon scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 35452545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 35462545bca0SMatthew Dillon break; 35472545bca0SMatthew Dillon } 35482545bca0SMatthew Dillon case XPT_CALC_GEOMETRY: 35492545bca0SMatthew Dillon { 35502545bca0SMatthew Dillon struct ccb_calc_geometry *ccg; 35512545bca0SMatthew Dillon 35522545bca0SMatthew Dillon ccg = &ccb->ccg; 35532545bca0SMatthew Dillon if (ccg->block_size == 0) { 35542545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 35552545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 35562545bca0SMatthew Dillon break; 35572545bca0SMatthew Dillon } 355852001f09SSascha Wildner cam_calc_geometry(ccg, /*extended*/1); 35594c42baf4SSascha Wildner KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 35602545bca0SMatthew Dillon break; 35612545bca0SMatthew Dillon } 35622545bca0SMatthew Dillon case XPT_PATH_INQ: /* Path routing inquiry */ 35632545bca0SMatthew Dillon { 35642545bca0SMatthew Dillon struct ccb_pathinq *cpi = &ccb->cpi; 35652545bca0SMatthew Dillon 35662545bca0SMatthew Dillon cpi->version_num = 1; 35672545bca0SMatthew Dillon cpi->target_sprt = 0; 35682545bca0SMatthew Dillon cpi->hba_eng_cnt = 0; 35692545bca0SMatthew Dillon cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 35706d259fc1SSascha Wildner #if 0 /* XXX swildner */ 35716d259fc1SSascha Wildner cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; 35726d259fc1SSascha Wildner #endif 35732545bca0SMatthew Dillon /* 35742545bca0SMatthew Dillon * FC cards report MAX_DEVICES of 512, but 35752545bca0SMatthew Dillon * the MSG_SCSI_IO_REQUEST target id field 35762545bca0SMatthew Dillon * is only 8 bits. Until we fix the driver 35772545bca0SMatthew Dillon * to support 'channels' for bus overflow, 35782545bca0SMatthew Dillon * just limit it. 35792545bca0SMatthew Dillon */ 35802545bca0SMatthew Dillon if (cpi->max_target > 255) { 35812545bca0SMatthew Dillon cpi->max_target = 255; 35822545bca0SMatthew Dillon } 35832545bca0SMatthew Dillon 35842545bca0SMatthew Dillon /* 35852545bca0SMatthew Dillon * VMware ESX reports > 16 devices and then dies when we probe. 
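 * Clamp SPI buses to 16 targets (0-15) to avoid that.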
35862545bca0SMatthew Dillon */ 35872545bca0SMatthew Dillon if (mpt->is_spi && cpi->max_target > 15) { 35882545bca0SMatthew Dillon cpi->max_target = 15; 35892545bca0SMatthew Dillon } 35902545bca0SMatthew Dillon if (mpt->is_spi) 35912545bca0SMatthew Dillon cpi->max_lun = 7; 35922545bca0SMatthew Dillon else 35932545bca0SMatthew Dillon cpi->max_lun = MPT_MAX_LUNS; 35942545bca0SMatthew Dillon cpi->initiator_id = mpt->mpt_ini_id; 35952545bca0SMatthew Dillon cpi->bus_id = cam_sim_bus(sim); 35962545bca0SMatthew Dillon 35972545bca0SMatthew Dillon /* 35982545bca0SMatthew Dillon * The base speed is the speed of the underlying connection. 35992545bca0SMatthew Dillon */ 36002545bca0SMatthew Dillon cpi->protocol = PROTO_SCSI; 36012545bca0SMatthew Dillon if (mpt->is_fc) { 36022545bca0SMatthew Dillon cpi->hba_misc = PIM_NOBUSRESET; 36032545bca0SMatthew Dillon cpi->base_transfer_speed = 100000; 36042545bca0SMatthew Dillon cpi->hba_inquiry = PI_TAG_ABLE; 36052545bca0SMatthew Dillon cpi->transport = XPORT_FC; 36062545bca0SMatthew Dillon cpi->transport_version = 0; 36072545bca0SMatthew Dillon cpi->protocol_version = SCSI_REV_SPC; 36082545bca0SMatthew Dillon } else if (mpt->is_sas) { 36092545bca0SMatthew Dillon cpi->hba_misc = PIM_NOBUSRESET; 36102545bca0SMatthew Dillon cpi->base_transfer_speed = 300000; 36112545bca0SMatthew Dillon cpi->hba_inquiry = PI_TAG_ABLE; 36122545bca0SMatthew Dillon cpi->transport = XPORT_SAS; 36132545bca0SMatthew Dillon cpi->transport_version = 0; 36142545bca0SMatthew Dillon cpi->protocol_version = SCSI_REV_SPC2; 36152545bca0SMatthew Dillon } else { 36162545bca0SMatthew Dillon cpi->hba_misc = PIM_SEQSCAN; 36172545bca0SMatthew Dillon cpi->base_transfer_speed = 3300; 36182545bca0SMatthew Dillon cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 36192545bca0SMatthew Dillon cpi->transport = XPORT_SPI; 36202545bca0SMatthew Dillon cpi->transport_version = 2; 36212545bca0SMatthew Dillon cpi->protocol_version = SCSI_REV_2; 36222545bca0SMatthew Dillon } 36232545bca0SMatthew Dillon 36242545bca0SMatthew Dillon /* 36252545bca0SMatthew Dillon * We give our fake RAID passthru bus a width that is MaxPhysDisks 36262545bca0SMatthew Dillon * wide and restrict it to one lun.
36272545bca0SMatthew Dillon */ 36282545bca0SMatthew Dillon if (raid_passthru) { 36292545bca0SMatthew Dillon cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 36302545bca0SMatthew Dillon cpi->initiator_id = cpi->max_target + 1; 36312545bca0SMatthew Dillon cpi->max_lun = 0; 36322545bca0SMatthew Dillon } 36332545bca0SMatthew Dillon 36342545bca0SMatthew Dillon if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 36352545bca0SMatthew Dillon cpi->hba_misc |= PIM_NOINITIATOR; 36362545bca0SMatthew Dillon } 36372545bca0SMatthew Dillon if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 36382545bca0SMatthew Dillon cpi->target_sprt = 36392545bca0SMatthew Dillon PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 36402545bca0SMatthew Dillon } else { 36412545bca0SMatthew Dillon cpi->target_sprt = 0; 36422545bca0SMatthew Dillon } 36432545bca0SMatthew Dillon strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 36442545bca0SMatthew Dillon strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 36452545bca0SMatthew Dillon strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 36462545bca0SMatthew Dillon cpi->unit_number = cam_sim_unit(sim); 36472545bca0SMatthew Dillon cpi->ccb_h.status = CAM_REQ_CMP; 36482545bca0SMatthew Dillon break; 36492545bca0SMatthew Dillon } 36502545bca0SMatthew Dillon case XPT_EN_LUN: /* Enable LUN as a target */ 36512545bca0SMatthew Dillon { 36522545bca0SMatthew Dillon int result; 36532545bca0SMatthew Dillon 36542545bca0SMatthew Dillon if (ccb->cel.enable) 36552545bca0SMatthew Dillon result = mpt_enable_lun(mpt, 36562545bca0SMatthew Dillon ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 36572545bca0SMatthew Dillon else 36582545bca0SMatthew Dillon result = mpt_disable_lun(mpt, 36592545bca0SMatthew Dillon ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 36602545bca0SMatthew Dillon if (result == 0) { 36612545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 36622545bca0SMatthew Dillon } else { 36632545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 36642545bca0SMatthew Dillon } 36652545bca0SMatthew Dillon break; 36662545bca0SMatthew Dillon } 36672545bca0SMatthew Dillon case XPT_NOTIFY_ACK: /* recycle notify ack */ 36682545bca0SMatthew Dillon case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 36692545bca0SMatthew Dillon case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 36702545bca0SMatthew Dillon { 36712545bca0SMatthew Dillon tgt_resource_t *trtp; 36722545bca0SMatthew Dillon lun_id_t lun = ccb->ccb_h.target_lun; 36732545bca0SMatthew Dillon ccb->ccb_h.sim_priv.entries[0].field = 0; 36742545bca0SMatthew Dillon ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 36752545bca0SMatthew Dillon ccb->ccb_h.flags = 0; 36762545bca0SMatthew Dillon 36772545bca0SMatthew Dillon if (lun == CAM_LUN_WILDCARD) { 36782545bca0SMatthew Dillon if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 36792545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 36802545bca0SMatthew Dillon break; 36812545bca0SMatthew Dillon } 36822545bca0SMatthew Dillon trtp = &mpt->trt_wildcard; 36832545bca0SMatthew Dillon } else if (lun >= MPT_MAX_LUNS) { 36842545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 36852545bca0SMatthew Dillon break; 36862545bca0SMatthew Dillon } else { 36872545bca0SMatthew Dillon trtp = &mpt->trt[lun]; 36882545bca0SMatthew Dillon } 36892545bca0SMatthew Dillon if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 36902545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG1, 36912545bca0SMatthew Dillon "Put FREE ATIO %p lun %d\n", ccb, lun); 36922545bca0SMatthew Dillon 
STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 36932545bca0SMatthew Dillon sim_links.stqe); 36942545bca0SMatthew Dillon } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 36952545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG1, 36962545bca0SMatthew Dillon "Put FREE INOT lun %d\n", lun); 36972545bca0SMatthew Dillon STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 36982545bca0SMatthew Dillon sim_links.stqe); 36992545bca0SMatthew Dillon } else { 37002545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 37012545bca0SMatthew Dillon } 37022545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 37032545bca0SMatthew Dillon return; 37042545bca0SMatthew Dillon } 37052545bca0SMatthew Dillon case XPT_CONT_TARGET_IO: 37062545bca0SMatthew Dillon mpt_target_start_io(mpt, ccb); 37072545bca0SMatthew Dillon return; 37082545bca0SMatthew Dillon 37092545bca0SMatthew Dillon default: 37102545bca0SMatthew Dillon ccb->ccb_h.status = CAM_REQ_INVALID; 37112545bca0SMatthew Dillon break; 37122545bca0SMatthew Dillon } 37132545bca0SMatthew Dillon xpt_done(ccb); 37142545bca0SMatthew Dillon } 37152545bca0SMatthew Dillon 37162545bca0SMatthew Dillon static int 37172545bca0SMatthew Dillon mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 37182545bca0SMatthew Dillon { 37192545bca0SMatthew Dillon struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 37202545bca0SMatthew Dillon struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 37212545bca0SMatthew Dillon target_id_t tgt; 37222545bca0SMatthew Dillon uint32_t dval, pval, oval; 37232545bca0SMatthew Dillon int rv; 37242545bca0SMatthew Dillon 37252545bca0SMatthew Dillon if (IS_CURRENT_SETTINGS(cts) == 0) { 37262545bca0SMatthew Dillon tgt = cts->ccb_h.target_id; 37272545bca0SMatthew Dillon } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 37282545bca0SMatthew Dillon if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 37292545bca0SMatthew Dillon return (-1); 37302545bca0SMatthew Dillon } 37312545bca0SMatthew Dillon } else { 37322545bca0SMatthew Dillon tgt = cts->ccb_h.target_id; 37332545bca0SMatthew Dillon } 37342545bca0SMatthew Dillon 37352545bca0SMatthew Dillon /* 37362545bca0SMatthew Dillon * We aren't looking at Port Page 2 BIOS settings here- 37372545bca0SMatthew Dillon * sometimes these have been known to be bogus XXX. 37382545bca0SMatthew Dillon * 37392545bca0SMatthew Dillon * For user settings, we pick the max from port page 0 37402545bca0SMatthew Dillon * 37412545bca0SMatthew Dillon * For current settings we read the current settings out from 37422545bca0SMatthew Dillon * device page 0 for that target. 
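 * (SCSI Device Page 0 reports what was actually negotiated; the goals
 * we request are kept in Device Page 1 and written back by
 * mpt_update_spi_config().)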
37432545bca0SMatthew Dillon */ 37442545bca0SMatthew Dillon if (IS_CURRENT_SETTINGS(cts)) { 37452545bca0SMatthew Dillon CONFIG_PAGE_SCSI_DEVICE_0 tmp; 37462545bca0SMatthew Dillon dval = 0; 37472545bca0SMatthew Dillon 37482545bca0SMatthew Dillon tmp = mpt->mpt_dev_page0[tgt]; 37492545bca0SMatthew Dillon rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 37502545bca0SMatthew Dillon sizeof(tmp), FALSE, 5000); 37512545bca0SMatthew Dillon if (rv) { 37522545bca0SMatthew Dillon mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 37532545bca0SMatthew Dillon return (rv); 37542545bca0SMatthew Dillon } 37552545bca0SMatthew Dillon mpt2host_config_page_scsi_device_0(&tmp); 37562545bca0SMatthew Dillon 37572545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 37586d259fc1SSascha Wildner "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 37596d259fc1SSascha Wildner tmp.NegotiatedParameters, tmp.Information); 37602545bca0SMatthew Dillon dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 37612545bca0SMatthew Dillon DP_WIDE : DP_NARROW; 37622545bca0SMatthew Dillon dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 37632545bca0SMatthew Dillon DP_DISC_ENABLE : DP_DISC_DISABL; 37642545bca0SMatthew Dillon dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 37652545bca0SMatthew Dillon DP_TQING_ENABLE : DP_TQING_DISABL; 37662545bca0SMatthew Dillon oval = tmp.NegotiatedParameters; 37672545bca0SMatthew Dillon oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 37682545bca0SMatthew Dillon oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 37692545bca0SMatthew Dillon pval = tmp.NegotiatedParameters; 37702545bca0SMatthew Dillon pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 37712545bca0SMatthew Dillon pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 37722545bca0SMatthew Dillon mpt->mpt_dev_page0[tgt] = tmp; 37732545bca0SMatthew Dillon } else { 37742545bca0SMatthew Dillon dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 37752545bca0SMatthew Dillon oval = mpt->mpt_port_page0.Capabilities; 37762545bca0SMatthew Dillon oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 37772545bca0SMatthew Dillon pval = mpt->mpt_port_page0.Capabilities; 37782545bca0SMatthew Dillon pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 37792545bca0SMatthew Dillon } 37802545bca0SMatthew Dillon 37812545bca0SMatthew Dillon spi->valid = 0; 37822545bca0SMatthew Dillon scsi->valid = 0; 37832545bca0SMatthew Dillon spi->flags = 0; 37842545bca0SMatthew Dillon scsi->flags = 0; 37852545bca0SMatthew Dillon spi->sync_offset = oval; 37862545bca0SMatthew Dillon spi->sync_period = pval; 37872545bca0SMatthew Dillon spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 37882545bca0SMatthew Dillon spi->valid |= CTS_SPI_VALID_SYNC_RATE; 37892545bca0SMatthew Dillon spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 37902545bca0SMatthew Dillon if (dval & DP_WIDE) { 37912545bca0SMatthew Dillon spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 37922545bca0SMatthew Dillon } else { 37932545bca0SMatthew Dillon spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 37942545bca0SMatthew Dillon } 37952545bca0SMatthew Dillon if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 37962545bca0SMatthew Dillon scsi->valid = CTS_SCSI_VALID_TQ; 37972545bca0SMatthew Dillon if (dval & DP_TQING_ENABLE) { 37982545bca0SMatthew Dillon scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 37992545bca0SMatthew Dillon } 38002545bca0SMatthew Dillon spi->valid |= CTS_SPI_VALID_DISC; 38012545bca0SMatthew Dillon if (dval & DP_DISC_ENABLE) { 38022545bca0SMatthew Dillon spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 38032545bca0SMatthew Dillon } 
38042545bca0SMatthew Dillon } 38052545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 38062545bca0SMatthew Dillon "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 38072545bca0SMatthew Dillon IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval); 38082545bca0SMatthew Dillon return (0); 38092545bca0SMatthew Dillon } 38102545bca0SMatthew Dillon 38112545bca0SMatthew Dillon static void 38122545bca0SMatthew Dillon mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 38132545bca0SMatthew Dillon { 38142545bca0SMatthew Dillon PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 38152545bca0SMatthew Dillon 38162545bca0SMatthew Dillon ptr = &mpt->mpt_dev_page1[tgt]; 38172545bca0SMatthew Dillon if (onoff) { 38182545bca0SMatthew Dillon ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 38192545bca0SMatthew Dillon } else { 38202545bca0SMatthew Dillon ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 38212545bca0SMatthew Dillon } 38222545bca0SMatthew Dillon } 38232545bca0SMatthew Dillon 38242545bca0SMatthew Dillon static void 38252545bca0SMatthew Dillon mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 38262545bca0SMatthew Dillon { 38272545bca0SMatthew Dillon PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 38282545bca0SMatthew Dillon 38292545bca0SMatthew Dillon ptr = &mpt->mpt_dev_page1[tgt]; 38302545bca0SMatthew Dillon ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 38312545bca0SMatthew Dillon ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 38322545bca0SMatthew Dillon ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 38332545bca0SMatthew Dillon ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 38342545bca0SMatthew Dillon ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 38352545bca0SMatthew Dillon if (period == 0) { 38362545bca0SMatthew Dillon return; 38372545bca0SMatthew Dillon } 38382545bca0SMatthew Dillon ptr->RequestedParameters |= 38392545bca0SMatthew Dillon period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 38402545bca0SMatthew Dillon ptr->RequestedParameters |= 38412545bca0SMatthew Dillon offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 38422545bca0SMatthew Dillon if (period < 0xa) { 38432545bca0SMatthew Dillon ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 38442545bca0SMatthew Dillon } 38452545bca0SMatthew Dillon if (period < 0x9) { 38462545bca0SMatthew Dillon ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 38472545bca0SMatthew Dillon ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 38482545bca0SMatthew Dillon } 38492545bca0SMatthew Dillon } 38502545bca0SMatthew Dillon 38512545bca0SMatthew Dillon static int 38522545bca0SMatthew Dillon mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 38532545bca0SMatthew Dillon { 38542545bca0SMatthew Dillon CONFIG_PAGE_SCSI_DEVICE_1 tmp; 38552545bca0SMatthew Dillon int rv; 38562545bca0SMatthew Dillon 38572545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 38582545bca0SMatthew Dillon "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 38596d259fc1SSascha Wildner tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 38602545bca0SMatthew Dillon tmp = mpt->mpt_dev_page1[tgt]; 38612545bca0SMatthew Dillon host2mpt_config_page_scsi_device_1(&tmp); 38622545bca0SMatthew Dillon rv = mpt_write_cur_cfg_page(mpt, tgt, 38632545bca0SMatthew Dillon &tmp.Header, sizeof(tmp), FALSE, 5000); 38642545bca0SMatthew Dillon if (rv) { 38652545bca0SMatthew Dillon mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 38662545bca0SMatthew Dillon return 
(-1); 38672545bca0SMatthew Dillon } 38682545bca0SMatthew Dillon return (0); 38692545bca0SMatthew Dillon } 38702545bca0SMatthew Dillon 38712545bca0SMatthew Dillon /****************************** Timeout Recovery ******************************/ 38722545bca0SMatthew Dillon static int 38732545bca0SMatthew Dillon mpt_spawn_recovery_thread(struct mpt_softc *mpt) 38742545bca0SMatthew Dillon { 38752545bca0SMatthew Dillon int error; 38762545bca0SMatthew Dillon 3877*f582582cSSascha Wildner error = kthread_create(mpt_recovery_thread, mpt, 3878*f582582cSSascha Wildner &mpt->recovery_thread, "mpt_recovery%d", mpt->unit); 38792545bca0SMatthew Dillon return (error); 38802545bca0SMatthew Dillon } 38812545bca0SMatthew Dillon 38822545bca0SMatthew Dillon static void 38832545bca0SMatthew Dillon mpt_terminate_recovery_thread(struct mpt_softc *mpt) 38842545bca0SMatthew Dillon { 38854c42baf4SSascha Wildner 38862545bca0SMatthew Dillon if (mpt->recovery_thread == NULL) { 38872545bca0SMatthew Dillon return; 38882545bca0SMatthew Dillon } 38892545bca0SMatthew Dillon mpt->shutdwn_recovery = 1; 38902545bca0SMatthew Dillon wakeup(mpt); 38912545bca0SMatthew Dillon /* 38922545bca0SMatthew Dillon * Sleep on a slightly different location 38932545bca0SMatthew Dillon * for this interlock just for added safety. 38942545bca0SMatthew Dillon */ 38956d259fc1SSascha Wildner mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0); 38962545bca0SMatthew Dillon } 38972545bca0SMatthew Dillon 38982545bca0SMatthew Dillon static void 38992545bca0SMatthew Dillon mpt_recovery_thread(void *arg) 39002545bca0SMatthew Dillon { 39012545bca0SMatthew Dillon struct mpt_softc *mpt; 39022545bca0SMatthew Dillon 39032545bca0SMatthew Dillon mpt = (struct mpt_softc *)arg; 39042545bca0SMatthew Dillon MPT_LOCK(mpt); 39052545bca0SMatthew Dillon for (;;) { 39062545bca0SMatthew Dillon if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 39072545bca0SMatthew Dillon if (mpt->shutdwn_recovery == 0) { 39086d259fc1SSascha Wildner mpt_sleep(mpt, mpt, 0, "idle", 0); 39092545bca0SMatthew Dillon } 39102545bca0SMatthew Dillon } 39112545bca0SMatthew Dillon if (mpt->shutdwn_recovery != 0) { 39122545bca0SMatthew Dillon break; 39132545bca0SMatthew Dillon } 39142545bca0SMatthew Dillon mpt_recover_commands(mpt); 39152545bca0SMatthew Dillon } 39162545bca0SMatthew Dillon mpt->recovery_thread = NULL; 39172545bca0SMatthew Dillon wakeup(&mpt->recovery_thread); 39182545bca0SMatthew Dillon MPT_UNLOCK(mpt); 3919*f582582cSSascha Wildner kthread_exit(); 39202545bca0SMatthew Dillon } 39212545bca0SMatthew Dillon 39222545bca0SMatthew Dillon static int 39232545bca0SMatthew Dillon mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 39242545bca0SMatthew Dillon u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 39252545bca0SMatthew Dillon { 39262545bca0SMatthew Dillon MSG_SCSI_TASK_MGMT *tmf_req; 39272545bca0SMatthew Dillon int error; 39282545bca0SMatthew Dillon 39292545bca0SMatthew Dillon /* 39302545bca0SMatthew Dillon * Wait for any current TMF request to complete. 39312545bca0SMatthew Dillon * We're only allowed to issue one TMF at a time. 
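 * A single preallocated request (mpt->tmf_req) is reused for every TMF
 * and is delivered via the doorbell handshake rather than the normal
 * request queue.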
39322545bca0SMatthew Dillon */ 39332545bca0SMatthew Dillon error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 39342545bca0SMatthew Dillon sleep_ok, MPT_TMF_MAX_TIMEOUT); 39352545bca0SMatthew Dillon if (error != 0) { 39362545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 39372545bca0SMatthew Dillon return (ETIMEDOUT); 39382545bca0SMatthew Dillon } 39392545bca0SMatthew Dillon 39402545bca0SMatthew Dillon mpt_assign_serno(mpt, mpt->tmf_req); 39412545bca0SMatthew Dillon mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 39422545bca0SMatthew Dillon 39432545bca0SMatthew Dillon tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 39442545bca0SMatthew Dillon memset(tmf_req, 0, sizeof(*tmf_req)); 39452545bca0SMatthew Dillon tmf_req->TargetID = target; 39462545bca0SMatthew Dillon tmf_req->Bus = channel; 39472545bca0SMatthew Dillon tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 39482545bca0SMatthew Dillon tmf_req->TaskType = type; 39492545bca0SMatthew Dillon tmf_req->MsgFlags = flags; 39502545bca0SMatthew Dillon tmf_req->MsgContext = 39512545bca0SMatthew Dillon htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 39522545bca0SMatthew Dillon if (lun > MPT_MAX_LUNS) { 39532545bca0SMatthew Dillon tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 39542545bca0SMatthew Dillon tmf_req->LUN[1] = lun & 0xff; 39552545bca0SMatthew Dillon } else { 39562545bca0SMatthew Dillon tmf_req->LUN[1] = lun; 39572545bca0SMatthew Dillon } 39582545bca0SMatthew Dillon tmf_req->TaskMsgContext = abort_ctx; 39592545bca0SMatthew Dillon 39602545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 39616d259fc1SSascha Wildner "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 39626d259fc1SSascha Wildner mpt->tmf_req->serno, tmf_req->MsgContext); 39632545bca0SMatthew Dillon if (mpt->verbose > MPT_PRT_DEBUG) { 39642545bca0SMatthew Dillon mpt_print_request(tmf_req); 39652545bca0SMatthew Dillon } 39662545bca0SMatthew Dillon 39672545bca0SMatthew Dillon KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 39682545bca0SMatthew Dillon ("mpt_scsi_send_tmf: tmf_req already on pending list")); 39692545bca0SMatthew Dillon TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 39702545bca0SMatthew Dillon error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 39712545bca0SMatthew Dillon if (error != MPT_OK) { 39722545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 39732545bca0SMatthew Dillon mpt->tmf_req->state = REQ_STATE_FREE; 39742545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 39752545bca0SMatthew Dillon } 39762545bca0SMatthew Dillon return (error); 39772545bca0SMatthew Dillon } 39782545bca0SMatthew Dillon 39792545bca0SMatthew Dillon /* 39802545bca0SMatthew Dillon * When a command times out, it is placed on the request_timeout_list 39812545bca0SMatthew Dillon * and we wake our recovery thread. The MPT-Fusion architecture supports 39822545bca0SMatthew Dillon * only a single TMF operation at a time, so we serially abort/bdr, etc., 39832545bca0SMatthew Dillon * the timed-out transactions. The next TMF is issued either by the 39842545bca0SMatthew Dillon * completion handler of the current TMF waking our recovery thread, 39852545bca0SMatthew Dillon * or the TMF timeout handler causing a hard reset sequence.
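 * mpt_recover_commands() below drains that list one request at a time,
 * sending an ABORT_TASK TMF for each timed-out initiator command and
 * falling back to a full controller reset if the abort itself fails or
 * times out.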
39862545bca0SMatthew Dillon */ 39872545bca0SMatthew Dillon static void 39882545bca0SMatthew Dillon mpt_recover_commands(struct mpt_softc *mpt) 39892545bca0SMatthew Dillon { 39902545bca0SMatthew Dillon request_t *req; 39912545bca0SMatthew Dillon union ccb *ccb; 39922545bca0SMatthew Dillon int error; 39932545bca0SMatthew Dillon 39942545bca0SMatthew Dillon if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 39952545bca0SMatthew Dillon /* 39962545bca0SMatthew Dillon * No work to do- leave. 39972545bca0SMatthew Dillon */ 39982545bca0SMatthew Dillon mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 39992545bca0SMatthew Dillon return; 40002545bca0SMatthew Dillon } 40012545bca0SMatthew Dillon 40022545bca0SMatthew Dillon /* 40032545bca0SMatthew Dillon * Flush any commands whose completion coincides with their timeout. 40042545bca0SMatthew Dillon */ 40052545bca0SMatthew Dillon mpt_intr(mpt); 40062545bca0SMatthew Dillon 40072545bca0SMatthew Dillon if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 40082545bca0SMatthew Dillon /* 40092545bca0SMatthew Dillon * The timedout commands have already 40102545bca0SMatthew Dillon * completed. This typically means 40112545bca0SMatthew Dillon * that either the timeout value was on 40122545bca0SMatthew Dillon * the hairy edge of what the device 40132545bca0SMatthew Dillon * requires or - more likely - interrupts 40142545bca0SMatthew Dillon * are not happening. 40152545bca0SMatthew Dillon */ 40162545bca0SMatthew Dillon mpt_prt(mpt, "Timedout requests already complete. " 40172545bca0SMatthew Dillon "Interrupts may not be functioning.\n"); 40182545bca0SMatthew Dillon mpt_enable_ints(mpt); 40192545bca0SMatthew Dillon return; 40202545bca0SMatthew Dillon } 40212545bca0SMatthew Dillon 40222545bca0SMatthew Dillon /* 40232545bca0SMatthew Dillon * We have no visibility into the current state of the 40242545bca0SMatthew Dillon * controller, so attempt to abort the commands in the 40252545bca0SMatthew Dillon * order they timed-out. For initiator commands, we 40262545bca0SMatthew Dillon * depend on the reply handler pulling requests off 40272545bca0SMatthew Dillon * the timeout list. 40282545bca0SMatthew Dillon */ 40292545bca0SMatthew Dillon while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 40302545bca0SMatthew Dillon uint16_t status; 40312545bca0SMatthew Dillon uint8_t response; 40322545bca0SMatthew Dillon MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 40332545bca0SMatthew Dillon 40342545bca0SMatthew Dillon mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 40352545bca0SMatthew Dillon req, req->serno, hdrp->Function); 40362545bca0SMatthew Dillon ccb = req->ccb; 40372545bca0SMatthew Dillon if (ccb == NULL) { 40382545bca0SMatthew Dillon mpt_prt(mpt, "null ccb in timed out request. " 40392545bca0SMatthew Dillon "Resetting Controller.\n"); 40402545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 40412545bca0SMatthew Dillon continue; 40422545bca0SMatthew Dillon } 40432545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 40442545bca0SMatthew Dillon 40452545bca0SMatthew Dillon /* 40462545bca0SMatthew Dillon * Check to see if this is not an initiator command and 40472545bca0SMatthew Dillon * deal with it differently if it is. 
40482545bca0SMatthew Dillon */ 40492545bca0SMatthew Dillon switch (hdrp->Function) { 40502545bca0SMatthew Dillon case MPI_FUNCTION_SCSI_IO_REQUEST: 40512545bca0SMatthew Dillon case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 40522545bca0SMatthew Dillon break; 40532545bca0SMatthew Dillon default: 40542545bca0SMatthew Dillon /* 40552545bca0SMatthew Dillon * XXX: FIX ME: need to abort target assists... 40562545bca0SMatthew Dillon */ 40572545bca0SMatthew Dillon mpt_prt(mpt, "just putting it back on the pend q\n"); 40582545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 40592545bca0SMatthew Dillon TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 40602545bca0SMatthew Dillon links); 40612545bca0SMatthew Dillon continue; 40622545bca0SMatthew Dillon } 40632545bca0SMatthew Dillon 40642545bca0SMatthew Dillon error = mpt_scsi_send_tmf(mpt, 40652545bca0SMatthew Dillon MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 40662545bca0SMatthew Dillon 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 40672545bca0SMatthew Dillon htole32(req->index | scsi_io_handler_id), TRUE); 40682545bca0SMatthew Dillon 40692545bca0SMatthew Dillon if (error != 0) { 40702545bca0SMatthew Dillon /* 40712545bca0SMatthew Dillon * mpt_scsi_send_tmf hard resets on failure, so no 40722545bca0SMatthew Dillon * need to do so here. Our queue should be emptied 40732545bca0SMatthew Dillon * by the hard reset. 40742545bca0SMatthew Dillon */ 40752545bca0SMatthew Dillon continue; 40762545bca0SMatthew Dillon } 40772545bca0SMatthew Dillon 40782545bca0SMatthew Dillon error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 40792545bca0SMatthew Dillon REQ_STATE_DONE, TRUE, 500); 40802545bca0SMatthew Dillon 40812545bca0SMatthew Dillon status = le16toh(mpt->tmf_req->IOCStatus); 40822545bca0SMatthew Dillon response = mpt->tmf_req->ResponseCode; 40832545bca0SMatthew Dillon mpt->tmf_req->state = REQ_STATE_FREE; 40842545bca0SMatthew Dillon 40852545bca0SMatthew Dillon if (error != 0) { 40862545bca0SMatthew Dillon /* 40872545bca0SMatthew Dillon * If we've errored out,, reset the controller. 40882545bca0SMatthew Dillon */ 40892545bca0SMatthew Dillon mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 40902545bca0SMatthew Dillon "Resetting controller\n"); 40912545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 40922545bca0SMatthew Dillon continue; 40932545bca0SMatthew Dillon } 40942545bca0SMatthew Dillon 40952545bca0SMatthew Dillon if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 40962545bca0SMatthew Dillon mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 40972545bca0SMatthew Dillon "Resetting controller.\n", status); 40982545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 40992545bca0SMatthew Dillon continue; 41002545bca0SMatthew Dillon } 41012545bca0SMatthew Dillon 41022545bca0SMatthew Dillon if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 41032545bca0SMatthew Dillon response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 41042545bca0SMatthew Dillon mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 41052545bca0SMatthew Dillon "Resetting controller.\n", response); 41062545bca0SMatthew Dillon mpt_reset(mpt, TRUE); 41072545bca0SMatthew Dillon continue; 41082545bca0SMatthew Dillon } 41092545bca0SMatthew Dillon mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 41102545bca0SMatthew Dillon } 41112545bca0SMatthew Dillon } 41122545bca0SMatthew Dillon 41132545bca0SMatthew Dillon /************************ Target Mode Support ****************************/ 41142545bca0SMatthew Dillon static void 41152545bca0SMatthew Dillon mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 41162545bca0SMatthew Dillon { 41172545bca0SMatthew Dillon MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 41182545bca0SMatthew Dillon PTR_SGE_TRANSACTION32 tep; 41192545bca0SMatthew Dillon PTR_SGE_SIMPLE32 se; 41202545bca0SMatthew Dillon bus_addr_t paddr; 41212545bca0SMatthew Dillon uint32_t fl; 41222545bca0SMatthew Dillon 41232545bca0SMatthew Dillon paddr = req->req_pbuf; 41242545bca0SMatthew Dillon paddr += MPT_RQSL(mpt); 41252545bca0SMatthew Dillon 41262545bca0SMatthew Dillon fc = req->req_vbuf; 41272545bca0SMatthew Dillon memset(fc, 0, MPT_REQUEST_AREA); 41282545bca0SMatthew Dillon fc->BufferCount = 1; 41292545bca0SMatthew Dillon fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 41302545bca0SMatthew Dillon fc->MsgContext = htole32(req->index | fc_els_handler_id); 41312545bca0SMatthew Dillon 41322545bca0SMatthew Dillon /* 41332545bca0SMatthew Dillon * Okay, set up ELS buffer pointers. ELS buffer pointers 41342545bca0SMatthew Dillon * consist of a TE SGL element (with details length of zero) 41356d259fc1SSascha Wildner * followed by a SIMPLE SGL element which holds the address 41362545bca0SMatthew Dillon * of the buffer. 41372545bca0SMatthew Dillon */ 41382545bca0SMatthew Dillon 41392545bca0SMatthew Dillon tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 41402545bca0SMatthew Dillon 41412545bca0SMatthew Dillon tep->ContextSize = 4; 41422545bca0SMatthew Dillon tep->Flags = 0; 41432545bca0SMatthew Dillon tep->TransactionContext[0] = htole32(ioindex); 41442545bca0SMatthew Dillon 41452545bca0SMatthew Dillon se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 41462545bca0SMatthew Dillon fl = 41472545bca0SMatthew Dillon MPI_SGE_FLAGS_HOST_TO_IOC | 41482545bca0SMatthew Dillon MPI_SGE_FLAGS_SIMPLE_ELEMENT | 41492545bca0SMatthew Dillon MPI_SGE_FLAGS_LAST_ELEMENT | 41502545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_LIST | 41512545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER; 41522545bca0SMatthew Dillon fl <<= MPI_SGE_FLAGS_SHIFT; 41532545bca0SMatthew Dillon fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 41542545bca0SMatthew Dillon se->FlagsLength = htole32(fl); 41552545bca0SMatthew Dillon se->Address = htole32((uint32_t) paddr); 41562545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 41572545bca0SMatthew Dillon "add ELS index %d ioindex %d for %p:%u\n", 41582545bca0SMatthew Dillon req->index, ioindex, req, req->serno); 41592545bca0SMatthew Dillon KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 41602545bca0SMatthew Dillon ("mpt_fc_post_els: request not locked")); 41612545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 41622545bca0SMatthew Dillon } 41632545bca0SMatthew Dillon 41642545bca0SMatthew Dillon static void 41652545bca0SMatthew Dillon mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 41662545bca0SMatthew Dillon { 41672545bca0SMatthew Dillon PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 41682545bca0SMatthew Dillon PTR_CMD_BUFFER_DESCRIPTOR cb; 41692545bca0SMatthew Dillon bus_addr_t paddr; 
41702545bca0SMatthew Dillon 41712545bca0SMatthew Dillon paddr = req->req_pbuf; 41722545bca0SMatthew Dillon paddr += MPT_RQSL(mpt); 41732545bca0SMatthew Dillon memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 41742545bca0SMatthew Dillon MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 41752545bca0SMatthew Dillon 41762545bca0SMatthew Dillon fc = req->req_vbuf; 41772545bca0SMatthew Dillon fc->BufferCount = 1; 41782545bca0SMatthew Dillon fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 41792545bca0SMatthew Dillon fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 41802545bca0SMatthew Dillon 41812545bca0SMatthew Dillon cb = &fc->Buffer[0]; 41822545bca0SMatthew Dillon cb->IoIndex = htole16(ioindex); 41832545bca0SMatthew Dillon cb->u.PhysicalAddress32 = htole32((U32) paddr); 41842545bca0SMatthew Dillon 41852545bca0SMatthew Dillon mpt_check_doorbell(mpt); 41862545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 41872545bca0SMatthew Dillon } 41882545bca0SMatthew Dillon 41892545bca0SMatthew Dillon static int 41902545bca0SMatthew Dillon mpt_add_els_buffers(struct mpt_softc *mpt) 41912545bca0SMatthew Dillon { 41922545bca0SMatthew Dillon int i; 41932545bca0SMatthew Dillon 41942545bca0SMatthew Dillon if (mpt->is_fc == 0) { 41952545bca0SMatthew Dillon return (TRUE); 41962545bca0SMatthew Dillon } 41972545bca0SMatthew Dillon 41982545bca0SMatthew Dillon if (mpt->els_cmds_allocated) { 41992545bca0SMatthew Dillon return (TRUE); 42002545bca0SMatthew Dillon } 42012545bca0SMatthew Dillon 42022545bca0SMatthew Dillon mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *), 42032545bca0SMatthew Dillon M_DEVBUF, M_NOWAIT | M_ZERO); 42042545bca0SMatthew Dillon 42052545bca0SMatthew Dillon if (mpt->els_cmd_ptrs == NULL) { 42062545bca0SMatthew Dillon return (FALSE); 42072545bca0SMatthew Dillon } 42082545bca0SMatthew Dillon 42092545bca0SMatthew Dillon /* 42102545bca0SMatthew Dillon * Feed the chip some ELS buffer resources 42112545bca0SMatthew Dillon */ 42122545bca0SMatthew Dillon for (i = 0; i < MPT_MAX_ELS; i++) { 42132545bca0SMatthew Dillon request_t *req = mpt_get_request(mpt, FALSE); 42142545bca0SMatthew Dillon if (req == NULL) { 42152545bca0SMatthew Dillon break; 42162545bca0SMatthew Dillon } 42172545bca0SMatthew Dillon req->state |= REQ_STATE_LOCKED; 42182545bca0SMatthew Dillon mpt->els_cmd_ptrs[i] = req; 42192545bca0SMatthew Dillon mpt_fc_post_els(mpt, req, i); 42202545bca0SMatthew Dillon } 42212545bca0SMatthew Dillon 42222545bca0SMatthew Dillon if (i == 0) { 42232545bca0SMatthew Dillon mpt_prt(mpt, "unable to add ELS buffer resources\n"); 42242545bca0SMatthew Dillon kfree(mpt->els_cmd_ptrs, M_DEVBUF); 42252545bca0SMatthew Dillon mpt->els_cmd_ptrs = NULL; 42262545bca0SMatthew Dillon return (FALSE); 42272545bca0SMatthew Dillon } 42282545bca0SMatthew Dillon if (i != MPT_MAX_ELS) { 42292545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_INFO, 42302545bca0SMatthew Dillon "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 42312545bca0SMatthew Dillon } 42322545bca0SMatthew Dillon mpt->els_cmds_allocated = i; 42332545bca0SMatthew Dillon return(TRUE); 42342545bca0SMatthew Dillon } 42352545bca0SMatthew Dillon 42362545bca0SMatthew Dillon static int 42372545bca0SMatthew Dillon mpt_add_target_commands(struct mpt_softc *mpt) 42382545bca0SMatthew Dillon { 42392545bca0SMatthew Dillon int i, max; 42402545bca0SMatthew Dillon 42412545bca0SMatthew Dillon if (mpt->tgt_cmd_ptrs) { 42422545bca0SMatthew Dillon return (TRUE); 42432545bca0SMatthew Dillon } 42442545bca0SMatthew Dillon 42452545bca0SMatthew Dillon max = 
MPT_MAX_REQUESTS(mpt) >> 1; 42462545bca0SMatthew Dillon if (max > mpt->mpt_max_tgtcmds) { 42472545bca0SMatthew Dillon max = mpt->mpt_max_tgtcmds; 42482545bca0SMatthew Dillon } 42492545bca0SMatthew Dillon mpt->tgt_cmd_ptrs = 42502545bca0SMatthew Dillon kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 42512545bca0SMatthew Dillon if (mpt->tgt_cmd_ptrs == NULL) { 42522545bca0SMatthew Dillon mpt_prt(mpt, 42532545bca0SMatthew Dillon "mpt_add_target_commands: could not allocate cmd ptrs\n"); 42542545bca0SMatthew Dillon return (FALSE); 42552545bca0SMatthew Dillon } 42562545bca0SMatthew Dillon 42572545bca0SMatthew Dillon for (i = 0; i < max; i++) { 42582545bca0SMatthew Dillon request_t *req; 42592545bca0SMatthew Dillon 42602545bca0SMatthew Dillon req = mpt_get_request(mpt, FALSE); 42612545bca0SMatthew Dillon if (req == NULL) { 42622545bca0SMatthew Dillon break; 42632545bca0SMatthew Dillon } 42642545bca0SMatthew Dillon req->state |= REQ_STATE_LOCKED; 42652545bca0SMatthew Dillon mpt->tgt_cmd_ptrs[i] = req; 42662545bca0SMatthew Dillon mpt_post_target_command(mpt, req, i); 42672545bca0SMatthew Dillon } 42682545bca0SMatthew Dillon 42692545bca0SMatthew Dillon 42702545bca0SMatthew Dillon if (i == 0) { 42712545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 42722545bca0SMatthew Dillon kfree(mpt->tgt_cmd_ptrs, M_DEVBUF); 42732545bca0SMatthew Dillon mpt->tgt_cmd_ptrs = NULL; 42742545bca0SMatthew Dillon return (FALSE); 42752545bca0SMatthew Dillon } 42762545bca0SMatthew Dillon 42772545bca0SMatthew Dillon mpt->tgt_cmds_allocated = i; 42782545bca0SMatthew Dillon 42792545bca0SMatthew Dillon if (i < max) { 42802545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_INFO, 42812545bca0SMatthew Dillon "added %d of %d target bufs\n", i, max); 42822545bca0SMatthew Dillon } 42832545bca0SMatthew Dillon return (i); 42842545bca0SMatthew Dillon } 42852545bca0SMatthew Dillon 42862545bca0SMatthew Dillon static int 42872545bca0SMatthew Dillon mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 42882545bca0SMatthew Dillon { 42894c42baf4SSascha Wildner 42902545bca0SMatthew Dillon if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 42912545bca0SMatthew Dillon mpt->twildcard = 1; 42922545bca0SMatthew Dillon } else if (lun >= MPT_MAX_LUNS) { 42932545bca0SMatthew Dillon return (EINVAL); 42942545bca0SMatthew Dillon } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 42952545bca0SMatthew Dillon return (EINVAL); 42962545bca0SMatthew Dillon } 42972545bca0SMatthew Dillon if (mpt->tenabled == 0) { 42982545bca0SMatthew Dillon if (mpt->is_fc) { 42992545bca0SMatthew Dillon (void) mpt_fc_reset_link(mpt, 0); 43002545bca0SMatthew Dillon } 43012545bca0SMatthew Dillon mpt->tenabled = 1; 43022545bca0SMatthew Dillon } 43032545bca0SMatthew Dillon if (lun == CAM_LUN_WILDCARD) { 43042545bca0SMatthew Dillon mpt->trt_wildcard.enabled = 1; 43052545bca0SMatthew Dillon } else { 43062545bca0SMatthew Dillon mpt->trt[lun].enabled = 1; 43072545bca0SMatthew Dillon } 43082545bca0SMatthew Dillon return (0); 43092545bca0SMatthew Dillon } 43102545bca0SMatthew Dillon 43112545bca0SMatthew Dillon static int 43122545bca0SMatthew Dillon mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 43132545bca0SMatthew Dillon { 43142545bca0SMatthew Dillon int i; 43154c42baf4SSascha Wildner 43162545bca0SMatthew Dillon if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 43172545bca0SMatthew Dillon mpt->twildcard = 0; 43182545bca0SMatthew Dillon } else if (lun >= MPT_MAX_LUNS) { 
43192545bca0SMatthew Dillon return (EINVAL); 43202545bca0SMatthew Dillon } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 43212545bca0SMatthew Dillon return (EINVAL); 43222545bca0SMatthew Dillon } 43232545bca0SMatthew Dillon if (lun == CAM_LUN_WILDCARD) { 43242545bca0SMatthew Dillon mpt->trt_wildcard.enabled = 0; 43252545bca0SMatthew Dillon } else { 43262545bca0SMatthew Dillon mpt->trt[lun].enabled = 0; 43272545bca0SMatthew Dillon } 43282545bca0SMatthew Dillon for (i = 0; i < MPT_MAX_LUNS; i++) { 43292545bca0SMatthew Dillon if (mpt->trt[i].enabled) { 43302545bca0SMatthew Dillon break; 43312545bca0SMatthew Dillon } 43322545bca0SMatthew Dillon } 43332545bca0SMatthew Dillon if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 43342545bca0SMatthew Dillon if (mpt->is_fc) { 43352545bca0SMatthew Dillon (void) mpt_fc_reset_link(mpt, 0); 43362545bca0SMatthew Dillon } 43372545bca0SMatthew Dillon mpt->tenabled = 0; 43382545bca0SMatthew Dillon } 43392545bca0SMatthew Dillon return (0); 43402545bca0SMatthew Dillon } 43412545bca0SMatthew Dillon 43422545bca0SMatthew Dillon /* 43432545bca0SMatthew Dillon * Called with MPT lock held 43442545bca0SMatthew Dillon */ 43452545bca0SMatthew Dillon static void 43462545bca0SMatthew Dillon mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 43472545bca0SMatthew Dillon { 43482545bca0SMatthew Dillon struct ccb_scsiio *csio = &ccb->csio; 43492545bca0SMatthew Dillon request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 43502545bca0SMatthew Dillon mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 43512545bca0SMatthew Dillon 43522545bca0SMatthew Dillon switch (tgt->state) { 43532545bca0SMatthew Dillon case TGT_STATE_IN_CAM: 43542545bca0SMatthew Dillon break; 43552545bca0SMatthew Dillon case TGT_STATE_MOVING_DATA: 43562545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 43572545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 43582545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 43592545bca0SMatthew Dillon tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 43602545bca0SMatthew Dillon xpt_done(ccb); 43612545bca0SMatthew Dillon return; 43622545bca0SMatthew Dillon default: 43632545bca0SMatthew Dillon mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 43642545bca0SMatthew Dillon "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 43652545bca0SMatthew Dillon mpt_tgt_dump_req_state(mpt, cmd_req); 43662545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 43672545bca0SMatthew Dillon xpt_done(ccb); 43682545bca0SMatthew Dillon return; 43692545bca0SMatthew Dillon } 43702545bca0SMatthew Dillon 43712545bca0SMatthew Dillon if (csio->dxfer_len) { 43722545bca0SMatthew Dillon bus_dmamap_callback_t *cb; 43732545bca0SMatthew Dillon PTR_MSG_TARGET_ASSIST_REQUEST ta; 43742545bca0SMatthew Dillon request_t *req; 43752545bca0SMatthew Dillon 43762545bca0SMatthew Dillon KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 43774c42baf4SSascha Wildner ("dxfer_len %u but direction is NONE", csio->dxfer_len)); 43782545bca0SMatthew Dillon 43792545bca0SMatthew Dillon if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 43802545bca0SMatthew Dillon if (mpt->outofbeer == 0) { 43812545bca0SMatthew Dillon mpt->outofbeer = 1; 43822545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 43832545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 43842545bca0SMatthew Dillon } 43852545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 43862545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 43872545bca0SMatthew Dillon
xpt_done(ccb); 43882545bca0SMatthew Dillon return; 43892545bca0SMatthew Dillon } 43902545bca0SMatthew Dillon ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 43912545bca0SMatthew Dillon if (sizeof (bus_addr_t) > 4) { 43922545bca0SMatthew Dillon cb = mpt_execute_req_a64; 43932545bca0SMatthew Dillon } else { 43942545bca0SMatthew Dillon cb = mpt_execute_req; 43952545bca0SMatthew Dillon } 43962545bca0SMatthew Dillon 43972545bca0SMatthew Dillon req->ccb = ccb; 43982545bca0SMatthew Dillon ccb->ccb_h.ccb_req_ptr = req; 43992545bca0SMatthew Dillon 44002545bca0SMatthew Dillon /* 44012545bca0SMatthew Dillon * Record the currently active ccb and the 44022545bca0SMatthew Dillon * request for it in our target state area. 44032545bca0SMatthew Dillon */ 44042545bca0SMatthew Dillon tgt->ccb = ccb; 44052545bca0SMatthew Dillon tgt->req = req; 44062545bca0SMatthew Dillon 44072545bca0SMatthew Dillon memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 44082545bca0SMatthew Dillon ta = req->req_vbuf; 44092545bca0SMatthew Dillon 44102545bca0SMatthew Dillon if (mpt->is_sas) { 44112545bca0SMatthew Dillon PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 44122545bca0SMatthew Dillon cmd_req->req_vbuf; 44132545bca0SMatthew Dillon ta->QueueTag = ssp->InitiatorTag; 44142545bca0SMatthew Dillon } else if (mpt->is_spi) { 44152545bca0SMatthew Dillon PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 44162545bca0SMatthew Dillon cmd_req->req_vbuf; 44172545bca0SMatthew Dillon ta->QueueTag = sp->Tag; 44182545bca0SMatthew Dillon } 44192545bca0SMatthew Dillon ta->Function = MPI_FUNCTION_TARGET_ASSIST; 44202545bca0SMatthew Dillon ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 44212545bca0SMatthew Dillon ta->ReplyWord = htole32(tgt->reply_desc); 44222545bca0SMatthew Dillon if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 44232545bca0SMatthew Dillon ta->LUN[0] = 44242545bca0SMatthew Dillon 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 44252545bca0SMatthew Dillon ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 44262545bca0SMatthew Dillon } else { 44272545bca0SMatthew Dillon ta->LUN[1] = csio->ccb_h.target_lun; 44282545bca0SMatthew Dillon } 44292545bca0SMatthew Dillon 44302545bca0SMatthew Dillon ta->RelativeOffset = tgt->bytes_xfered; 44312545bca0SMatthew Dillon ta->DataLength = ccb->csio.dxfer_len; 44322545bca0SMatthew Dillon if (ta->DataLength > tgt->resid) { 44332545bca0SMatthew Dillon ta->DataLength = tgt->resid; 44342545bca0SMatthew Dillon } 44352545bca0SMatthew Dillon 44362545bca0SMatthew Dillon /* 44372545bca0SMatthew Dillon * XXX Should be done after data transfer completes? 
44382545bca0SMatthew Dillon */ 44392545bca0SMatthew Dillon tgt->resid -= csio->dxfer_len; 44402545bca0SMatthew Dillon tgt->bytes_xfered += csio->dxfer_len; 44412545bca0SMatthew Dillon 44422545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 44432545bca0SMatthew Dillon ta->TargetAssistFlags |= 44442545bca0SMatthew Dillon TARGET_ASSIST_FLAGS_DATA_DIRECTION; 44452545bca0SMatthew Dillon } 44462545bca0SMatthew Dillon 44472545bca0SMatthew Dillon #ifdef WE_TRUST_AUTO_GOOD_STATUS 44482545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 44492545bca0SMatthew Dillon csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 44502545bca0SMatthew Dillon ta->TargetAssistFlags |= 44512545bca0SMatthew Dillon TARGET_ASSIST_FLAGS_AUTO_STATUS; 44522545bca0SMatthew Dillon } 44532545bca0SMatthew Dillon #endif 44542545bca0SMatthew Dillon tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 44552545bca0SMatthew Dillon 44562545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 44572545bca0SMatthew Dillon "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 44582545bca0SMatthew Dillon "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 44592545bca0SMatthew Dillon tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 44602545bca0SMatthew Dillon 44612545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 44622545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 44632545bca0SMatthew Dillon int error; 44646d259fc1SSascha Wildner crit_enter(); 44652545bca0SMatthew Dillon error = bus_dmamap_load(mpt->buffer_dmat, 44662545bca0SMatthew Dillon req->dmap, csio->data_ptr, csio->dxfer_len, 44672545bca0SMatthew Dillon cb, req, 0); 44686d259fc1SSascha Wildner crit_exit(); 44692545bca0SMatthew Dillon if (error == EINPROGRESS) { 44702545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 44712545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 44722545bca0SMatthew Dillon } 44732545bca0SMatthew Dillon } else { 44742545bca0SMatthew Dillon /* 44752545bca0SMatthew Dillon * We have been given a pointer to single 44762545bca0SMatthew Dillon * physical buffer. 44772545bca0SMatthew Dillon */ 44782545bca0SMatthew Dillon struct bus_dma_segment seg; 44792545bca0SMatthew Dillon seg.ds_addr = (bus_addr_t) 44802545bca0SMatthew Dillon (vm_offset_t)csio->data_ptr; 44812545bca0SMatthew Dillon seg.ds_len = csio->dxfer_len; 44822545bca0SMatthew Dillon (*cb)(req, &seg, 1, 0); 44832545bca0SMatthew Dillon } 44842545bca0SMatthew Dillon } else { 44852545bca0SMatthew Dillon /* 44862545bca0SMatthew Dillon * We have been given a list of addresses. 44872545bca0SMatthew Dillon * This case could be easily supported but they are not 44882545bca0SMatthew Dillon * currently generated by the CAM subsystem so there 44892545bca0SMatthew Dillon * is no point in wasting the time right now. 
44902545bca0SMatthew Dillon */ 44912545bca0SMatthew Dillon struct bus_dma_segment *sgs; 44922545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 44932545bca0SMatthew Dillon (*cb)(req, NULL, 0, EFAULT); 44942545bca0SMatthew Dillon } else { 44952545bca0SMatthew Dillon /* Just use the segments provided */ 44962545bca0SMatthew Dillon sgs = (struct bus_dma_segment *)csio->data_ptr; 44972545bca0SMatthew Dillon (*cb)(req, sgs, csio->sglist_cnt, 0); 44982545bca0SMatthew Dillon } 44992545bca0SMatthew Dillon } 45002545bca0SMatthew Dillon } else { 45012545bca0SMatthew Dillon uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 45022545bca0SMatthew Dillon 45032545bca0SMatthew Dillon /* 45042545bca0SMatthew Dillon * XXX: I don't know why this seems to happen, but 45052545bca0SMatthew Dillon * XXX: completing the CCB seems to make things happy. 45062545bca0SMatthew Dillon * XXX: This seems to happen if the initiator requests 45072545bca0SMatthew Dillon * XXX: enough data that we have to do multiple CTIOs. 45082545bca0SMatthew Dillon */ 45092545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 45102545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 45112545bca0SMatthew Dillon "Meaningless STATUS CCB (%p): flags %x status %x " 45122545bca0SMatthew Dillon "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 45132545bca0SMatthew Dillon ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 45142545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 45152545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 45162545bca0SMatthew Dillon xpt_done(ccb); 45172545bca0SMatthew Dillon return; 45182545bca0SMatthew Dillon } 45192545bca0SMatthew Dillon if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 45202545bca0SMatthew Dillon sp = sense; 45212545bca0SMatthew Dillon memcpy(sp, &csio->sense_data, 45222545bca0SMatthew Dillon min(csio->sense_len, MPT_SENSE_SIZE)); 45232545bca0SMatthew Dillon } 45242545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 45252545bca0SMatthew Dillon } 45262545bca0SMatthew Dillon } 45272545bca0SMatthew Dillon 45282545bca0SMatthew Dillon static void 45292545bca0SMatthew Dillon mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 45302545bca0SMatthew Dillon uint32_t lun, int send, uint8_t *data, size_t length) 45312545bca0SMatthew Dillon { 45322545bca0SMatthew Dillon mpt_tgt_state_t *tgt; 45332545bca0SMatthew Dillon PTR_MSG_TARGET_ASSIST_REQUEST ta; 45342545bca0SMatthew Dillon SGE_SIMPLE32 *se; 45352545bca0SMatthew Dillon uint32_t flags; 45362545bca0SMatthew Dillon uint8_t *dptr; 45372545bca0SMatthew Dillon bus_addr_t pptr; 45382545bca0SMatthew Dillon request_t *req; 45392545bca0SMatthew Dillon 45402545bca0SMatthew Dillon /* 45412545bca0SMatthew Dillon * We enter with resid set to the data load for the command. 
45422545bca0SMatthew Dillon */ 45432545bca0SMatthew Dillon tgt = MPT_TGT_STATE(mpt, cmd_req); 45442545bca0SMatthew Dillon if (length == 0 || tgt->resid == 0) { 45452545bca0SMatthew Dillon tgt->resid = 0; 45462545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 45472545bca0SMatthew Dillon return; 45482545bca0SMatthew Dillon } 45492545bca0SMatthew Dillon 45502545bca0SMatthew Dillon if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 45512545bca0SMatthew Dillon mpt_prt(mpt, "out of resources- dropping local response\n"); 45522545bca0SMatthew Dillon return; 45532545bca0SMatthew Dillon } 45542545bca0SMatthew Dillon tgt->is_local = 1; 45552545bca0SMatthew Dillon 45562545bca0SMatthew Dillon 45572545bca0SMatthew Dillon memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 45582545bca0SMatthew Dillon ta = req->req_vbuf; 45592545bca0SMatthew Dillon 45602545bca0SMatthew Dillon if (mpt->is_sas) { 45612545bca0SMatthew Dillon PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 45622545bca0SMatthew Dillon ta->QueueTag = ssp->InitiatorTag; 45632545bca0SMatthew Dillon } else if (mpt->is_spi) { 45642545bca0SMatthew Dillon PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 45652545bca0SMatthew Dillon ta->QueueTag = sp->Tag; 45662545bca0SMatthew Dillon } 45672545bca0SMatthew Dillon ta->Function = MPI_FUNCTION_TARGET_ASSIST; 45682545bca0SMatthew Dillon ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 45692545bca0SMatthew Dillon ta->ReplyWord = htole32(tgt->reply_desc); 45702545bca0SMatthew Dillon if (lun > MPT_MAX_LUNS) { 45712545bca0SMatthew Dillon ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 45722545bca0SMatthew Dillon ta->LUN[1] = lun & 0xff; 45732545bca0SMatthew Dillon } else { 45742545bca0SMatthew Dillon ta->LUN[1] = lun; 45752545bca0SMatthew Dillon } 45762545bca0SMatthew Dillon ta->RelativeOffset = 0; 45772545bca0SMatthew Dillon ta->DataLength = length; 45782545bca0SMatthew Dillon 45792545bca0SMatthew Dillon dptr = req->req_vbuf; 45802545bca0SMatthew Dillon dptr += MPT_RQSL(mpt); 45812545bca0SMatthew Dillon pptr = req->req_pbuf; 45822545bca0SMatthew Dillon pptr += MPT_RQSL(mpt); 45832545bca0SMatthew Dillon memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 45842545bca0SMatthew Dillon 45852545bca0SMatthew Dillon se = (SGE_SIMPLE32 *) &ta->SGL[0]; 45862545bca0SMatthew Dillon memset(se, 0,sizeof (*se)); 45872545bca0SMatthew Dillon 45882545bca0SMatthew Dillon flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 45892545bca0SMatthew Dillon if (send) { 45902545bca0SMatthew Dillon ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 45912545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 45922545bca0SMatthew Dillon } 45932545bca0SMatthew Dillon se->Address = pptr; 45942545bca0SMatthew Dillon MPI_pSGE_SET_LENGTH(se, length); 45952545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 45962545bca0SMatthew Dillon flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 45972545bca0SMatthew Dillon MPI_pSGE_SET_FLAGS(se, flags); 45982545bca0SMatthew Dillon 45992545bca0SMatthew Dillon tgt->ccb = NULL; 46002545bca0SMatthew Dillon tgt->req = req; 46012545bca0SMatthew Dillon tgt->resid -= length; 46022545bca0SMatthew Dillon tgt->bytes_xfered = length; 46032545bca0SMatthew Dillon #ifdef WE_TRUST_AUTO_GOOD_STATUS 46042545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 46052545bca0SMatthew Dillon #else 46062545bca0SMatthew Dillon tgt->state = TGT_STATE_MOVING_DATA; 46072545bca0SMatthew Dillon #endif 46082545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 
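/*
 * Note: completion of this locally generated TARGET_ASSIST comes back
 * through mpt_scsi_tgt_reply_handler(), which recognizes the ccb-less
 * (is_local) request, frees it, and sends the final status if the chip
 * did not auto-generate it.
 */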
46092545bca0SMatthew Dillon } 46102545bca0SMatthew Dillon 46112545bca0SMatthew Dillon /* 46122545bca0SMatthew Dillon * Abort queued up CCBs 46132545bca0SMatthew Dillon */ 46142545bca0SMatthew Dillon static cam_status 46152545bca0SMatthew Dillon mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 46162545bca0SMatthew Dillon { 46172545bca0SMatthew Dillon struct mpt_hdr_stailq *lp; 46182545bca0SMatthew Dillon struct ccb_hdr *srch; 46192545bca0SMatthew Dillon int found = 0; 46202545bca0SMatthew Dillon union ccb *accb = ccb->cab.abort_ccb; 46212545bca0SMatthew Dillon tgt_resource_t *trtp; 46222545bca0SMatthew Dillon 46232545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 46242545bca0SMatthew Dillon 46252545bca0SMatthew Dillon if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 46262545bca0SMatthew Dillon trtp = &mpt->trt_wildcard; 46272545bca0SMatthew Dillon } else { 46282545bca0SMatthew Dillon trtp = &mpt->trt[ccb->ccb_h.target_lun]; 46292545bca0SMatthew Dillon } 46302545bca0SMatthew Dillon 46312545bca0SMatthew Dillon if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 46322545bca0SMatthew Dillon lp = &trtp->atios; 46332545bca0SMatthew Dillon } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 46342545bca0SMatthew Dillon lp = &trtp->inots; 46352545bca0SMatthew Dillon } else { 46362545bca0SMatthew Dillon return (CAM_REQ_INVALID); 46372545bca0SMatthew Dillon } 46382545bca0SMatthew Dillon 46392545bca0SMatthew Dillon STAILQ_FOREACH(srch, lp, sim_links.stqe) { 46402545bca0SMatthew Dillon if (srch == &accb->ccb_h) { 46412545bca0SMatthew Dillon found = 1; 46422545bca0SMatthew Dillon STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 46432545bca0SMatthew Dillon break; 46442545bca0SMatthew Dillon } 46452545bca0SMatthew Dillon } 46462545bca0SMatthew Dillon if (found) { 46472545bca0SMatthew Dillon accb->ccb_h.status = CAM_REQ_ABORTED; 46482545bca0SMatthew Dillon xpt_done(accb); 46492545bca0SMatthew Dillon return (CAM_REQ_CMP); 46502545bca0SMatthew Dillon } 46512545bca0SMatthew Dillon mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 46522545bca0SMatthew Dillon return (CAM_PATH_INVALID); 46532545bca0SMatthew Dillon } 46542545bca0SMatthew Dillon 46552545bca0SMatthew Dillon /* 46562545bca0SMatthew Dillon * Ask the MPT to abort the current target command 46572545bca0SMatthew Dillon */ 46582545bca0SMatthew Dillon static int 46592545bca0SMatthew Dillon mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 46602545bca0SMatthew Dillon { 46612545bca0SMatthew Dillon int error; 46622545bca0SMatthew Dillon request_t *req; 46632545bca0SMatthew Dillon PTR_MSG_TARGET_MODE_ABORT abtp; 46642545bca0SMatthew Dillon 46652545bca0SMatthew Dillon req = mpt_get_request(mpt, FALSE); 46662545bca0SMatthew Dillon if (req == NULL) { 46672545bca0SMatthew Dillon return (-1); 46682545bca0SMatthew Dillon } 46692545bca0SMatthew Dillon abtp = req->req_vbuf; 46702545bca0SMatthew Dillon memset(abtp, 0, sizeof (*abtp)); 46712545bca0SMatthew Dillon 46722545bca0SMatthew Dillon abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 46732545bca0SMatthew Dillon abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 46742545bca0SMatthew Dillon abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 46752545bca0SMatthew Dillon abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 46762545bca0SMatthew Dillon error = 0; 46772545bca0SMatthew Dillon if (mpt->is_fc || mpt->is_sas) { 46782545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 46792545bca0SMatthew Dillon } else { 
46802545bca0SMatthew Dillon error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 46812545bca0SMatthew Dillon } 46822545bca0SMatthew Dillon return (error); 46832545bca0SMatthew Dillon } 46842545bca0SMatthew Dillon 46852545bca0SMatthew Dillon /* 46862545bca0SMatthew Dillon * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 46872545bca0SMatthew Dillon * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 46882545bca0SMatthew Dillon * FC929 to set bogus FC_RSP fields (nonzero residuals 46892545bca0SMatthew Dillon * but w/o RESID fields set). This causes QLogic initiators 46902545bca0SMatthew Dillon * to think maybe that a frame was lost. 46912545bca0SMatthew Dillon * 46922545bca0SMatthew Dillon * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 46932545bca0SMatthew Dillon * we use allocated requests to do TARGET_ASSIST and we 46942545bca0SMatthew Dillon * need to know when to release them. 46952545bca0SMatthew Dillon */ 46962545bca0SMatthew Dillon 46972545bca0SMatthew Dillon static void 46982545bca0SMatthew Dillon mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 46992545bca0SMatthew Dillon uint8_t status, uint8_t const *sense_data) 47002545bca0SMatthew Dillon { 47012545bca0SMatthew Dillon uint8_t *cmd_vbuf; 47022545bca0SMatthew Dillon mpt_tgt_state_t *tgt; 47032545bca0SMatthew Dillon PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 47042545bca0SMatthew Dillon request_t *req; 47052545bca0SMatthew Dillon bus_addr_t paddr; 47062545bca0SMatthew Dillon int resplen = 0; 47072545bca0SMatthew Dillon uint32_t fl; 47082545bca0SMatthew Dillon 47092545bca0SMatthew Dillon cmd_vbuf = cmd_req->req_vbuf; 47102545bca0SMatthew Dillon cmd_vbuf += MPT_RQSL(mpt); 47112545bca0SMatthew Dillon tgt = MPT_TGT_STATE(mpt, cmd_req); 47122545bca0SMatthew Dillon 47132545bca0SMatthew Dillon if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 47142545bca0SMatthew Dillon if (mpt->outofbeer == 0) { 47152545bca0SMatthew Dillon mpt->outofbeer = 1; 47162545bca0SMatthew Dillon xpt_freeze_simq(mpt->sim, 1); 47172545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 47182545bca0SMatthew Dillon } 47192545bca0SMatthew Dillon if (ccb) { 47202545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 47212545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 47222545bca0SMatthew Dillon xpt_done(ccb); 47232545bca0SMatthew Dillon } else { 47242545bca0SMatthew Dillon mpt_prt(mpt, 47252545bca0SMatthew Dillon "could not allocate status request- dropping\n"); 47262545bca0SMatthew Dillon } 47272545bca0SMatthew Dillon return; 47282545bca0SMatthew Dillon } 47292545bca0SMatthew Dillon req->ccb = ccb; 47302545bca0SMatthew Dillon if (ccb) { 47312545bca0SMatthew Dillon ccb->ccb_h.ccb_mpt_ptr = mpt; 47322545bca0SMatthew Dillon ccb->ccb_h.ccb_req_ptr = req; 47332545bca0SMatthew Dillon } 47342545bca0SMatthew Dillon 47352545bca0SMatthew Dillon /* 47362545bca0SMatthew Dillon * Record the currently active ccb, if any, and the 47372545bca0SMatthew Dillon * request for it in our target state area. 
47382545bca0SMatthew Dillon */ 47392545bca0SMatthew Dillon tgt->ccb = ccb; 47402545bca0SMatthew Dillon tgt->req = req; 47412545bca0SMatthew Dillon tgt->state = TGT_STATE_SENDING_STATUS; 47422545bca0SMatthew Dillon 47432545bca0SMatthew Dillon tp = req->req_vbuf; 47442545bca0SMatthew Dillon paddr = req->req_pbuf; 47452545bca0SMatthew Dillon paddr += MPT_RQSL(mpt); 47462545bca0SMatthew Dillon 47472545bca0SMatthew Dillon memset(tp, 0, sizeof (*tp)); 47482545bca0SMatthew Dillon tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 47492545bca0SMatthew Dillon if (mpt->is_fc) { 47502545bca0SMatthew Dillon PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 47512545bca0SMatthew Dillon (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 47522545bca0SMatthew Dillon uint8_t *sts_vbuf; 47532545bca0SMatthew Dillon uint32_t *rsp; 47542545bca0SMatthew Dillon 47552545bca0SMatthew Dillon sts_vbuf = req->req_vbuf; 47562545bca0SMatthew Dillon sts_vbuf += MPT_RQSL(mpt); 47572545bca0SMatthew Dillon rsp = (uint32_t *) sts_vbuf; 47582545bca0SMatthew Dillon memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 47592545bca0SMatthew Dillon 47602545bca0SMatthew Dillon /* 47612545bca0SMatthew Dillon * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 47622545bca0SMatthew Dillon * It has to be big-endian in memory and is organized 47632545bca0SMatthew Dillon * in 32 bit words, which are much easier to deal with 47642545bca0SMatthew Dillon * as words which are swizzled as needed. 47652545bca0SMatthew Dillon * 47662545bca0SMatthew Dillon * All we're filling here is the FC_RSP payload. 47672545bca0SMatthew Dillon * We may just have the chip synthesize it if 47682545bca0SMatthew Dillon * we have no residual and an OK status. 47692545bca0SMatthew Dillon * 47702545bca0SMatthew Dillon */ 47712545bca0SMatthew Dillon memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 47722545bca0SMatthew Dillon 47732545bca0SMatthew Dillon rsp[2] = status; 47742545bca0SMatthew Dillon if (tgt->resid) { 47752545bca0SMatthew Dillon rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 47762545bca0SMatthew Dillon rsp[3] = htobe32(tgt->resid); 47772545bca0SMatthew Dillon #ifdef WE_TRUST_AUTO_GOOD_STATUS 47782545bca0SMatthew Dillon resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 47792545bca0SMatthew Dillon #endif 47802545bca0SMatthew Dillon } 47812545bca0SMatthew Dillon if (status == SCSI_STATUS_CHECK_COND) { 47822545bca0SMatthew Dillon int i; 47832545bca0SMatthew Dillon 47842545bca0SMatthew Dillon rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
*/
47852545bca0SMatthew Dillon rsp[4] = htobe32(MPT_SENSE_SIZE);
47862545bca0SMatthew Dillon if (sense_data) {
47872545bca0SMatthew Dillon memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
47882545bca0SMatthew Dillon } else {
47892545bca0SMatthew Dillon mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
47902545bca0SMatthew Dillon "TION but no sense data?\n");
47912545bca0SMatthew Dillon memset(&rsp[8], 0, MPT_SENSE_SIZE);
47922545bca0SMatthew Dillon }
47932545bca0SMatthew Dillon for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
47942545bca0SMatthew Dillon rsp[i] = htobe32(rsp[i]);
47952545bca0SMatthew Dillon }
47962545bca0SMatthew Dillon #ifdef WE_TRUST_AUTO_GOOD_STATUS
47972545bca0SMatthew Dillon resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
47982545bca0SMatthew Dillon #endif
47992545bca0SMatthew Dillon }
48002545bca0SMatthew Dillon #ifndef WE_TRUST_AUTO_GOOD_STATUS
48012545bca0SMatthew Dillon resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
48022545bca0SMatthew Dillon #endif
48032545bca0SMatthew Dillon rsp[2] = htobe32(rsp[2]);
48042545bca0SMatthew Dillon } else if (mpt->is_sas) {
48052545bca0SMatthew Dillon PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
48062545bca0SMatthew Dillon (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
48072545bca0SMatthew Dillon memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
48082545bca0SMatthew Dillon } else {
48092545bca0SMatthew Dillon PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
48102545bca0SMatthew Dillon (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
48112545bca0SMatthew Dillon tp->StatusCode = status;
48122545bca0SMatthew Dillon tp->QueueTag = htole16(sp->Tag);
48132545bca0SMatthew Dillon memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
48142545bca0SMatthew Dillon }
48152545bca0SMatthew Dillon
48162545bca0SMatthew Dillon tp->ReplyWord = htole32(tgt->reply_desc);
48172545bca0SMatthew Dillon tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
48182545bca0SMatthew Dillon
48192545bca0SMatthew Dillon #ifdef WE_CAN_USE_AUTO_REPOST
48202545bca0SMatthew Dillon tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
48212545bca0SMatthew Dillon #endif
48222545bca0SMatthew Dillon if (status == SCSI_STATUS_OK && resplen == 0) {
48232545bca0SMatthew Dillon tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
48242545bca0SMatthew Dillon } else {
48252545bca0SMatthew Dillon tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
48262545bca0SMatthew Dillon fl =
48272545bca0SMatthew Dillon MPI_SGE_FLAGS_HOST_TO_IOC |
48282545bca0SMatthew Dillon MPI_SGE_FLAGS_SIMPLE_ELEMENT |
48292545bca0SMatthew Dillon MPI_SGE_FLAGS_LAST_ELEMENT |
48302545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_LIST |
48312545bca0SMatthew Dillon MPI_SGE_FLAGS_END_OF_BUFFER;
48322545bca0SMatthew Dillon fl <<= MPI_SGE_FLAGS_SHIFT;
48332545bca0SMatthew Dillon fl |= resplen;
48342545bca0SMatthew Dillon tp->StatusDataSGE.FlagsLength = htole32(fl);
48352545bca0SMatthew Dillon }
48362545bca0SMatthew Dillon
48372545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG,
48382545bca0SMatthew Dillon "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
48392545bca0SMatthew Dillon ccb, sense_data?"h" : "hout", ccb?
ccb->csio.tag_id : -1, req, 48402545bca0SMatthew Dillon req->serno, tgt->resid); 48412545bca0SMatthew Dillon if (ccb) { 48422545bca0SMatthew Dillon ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 48432545bca0SMatthew Dillon mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 48442545bca0SMatthew Dillon } 48452545bca0SMatthew Dillon mpt_send_cmd(mpt, req); 48462545bca0SMatthew Dillon } 48472545bca0SMatthew Dillon 48482545bca0SMatthew Dillon static void 48492545bca0SMatthew Dillon mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 48502545bca0SMatthew Dillon tgt_resource_t *trtp, int init_id) 48512545bca0SMatthew Dillon { 48522545bca0SMatthew Dillon struct ccb_immed_notify *inot; 48532545bca0SMatthew Dillon mpt_tgt_state_t *tgt; 48542545bca0SMatthew Dillon 48552545bca0SMatthew Dillon tgt = MPT_TGT_STATE(mpt, req); 48562545bca0SMatthew Dillon inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 48572545bca0SMatthew Dillon if (inot == NULL) { 48582545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); 48592545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 48602545bca0SMatthew Dillon return; 48612545bca0SMatthew Dillon } 48622545bca0SMatthew Dillon STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 48632545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG1, 48642545bca0SMatthew Dillon "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 48652545bca0SMatthew Dillon 48662545bca0SMatthew Dillon memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 48672545bca0SMatthew Dillon inot->sense_len = 0; 48682545bca0SMatthew Dillon memset(inot->message_args, 0, sizeof (inot->message_args)); 48692545bca0SMatthew Dillon inot->initiator_id = init_id; /* XXX */ 48702545bca0SMatthew Dillon 48712545bca0SMatthew Dillon /* 48722545bca0SMatthew Dillon * This is a somewhat grotesque attempt to map from task management 48732545bca0SMatthew Dillon * to old style SCSI messages. God help us all. 
48742545bca0SMatthew Dillon */ 48752545bca0SMatthew Dillon switch (fc) { 48762545bca0SMatthew Dillon case MPT_ABORT_TASK_SET: 48772545bca0SMatthew Dillon inot->message_args[0] = MSG_ABORT_TAG; 48782545bca0SMatthew Dillon break; 48792545bca0SMatthew Dillon case MPT_CLEAR_TASK_SET: 48802545bca0SMatthew Dillon inot->message_args[0] = MSG_CLEAR_TASK_SET; 48812545bca0SMatthew Dillon break; 48822545bca0SMatthew Dillon case MPT_TARGET_RESET: 48832545bca0SMatthew Dillon inot->message_args[0] = MSG_TARGET_RESET; 48842545bca0SMatthew Dillon break; 48852545bca0SMatthew Dillon case MPT_CLEAR_ACA: 48862545bca0SMatthew Dillon inot->message_args[0] = MSG_CLEAR_ACA; 48872545bca0SMatthew Dillon break; 48882545bca0SMatthew Dillon case MPT_TERMINATE_TASK: 48892545bca0SMatthew Dillon inot->message_args[0] = MSG_ABORT_TAG; 48902545bca0SMatthew Dillon break; 48912545bca0SMatthew Dillon default: 48922545bca0SMatthew Dillon inot->message_args[0] = MSG_NOOP; 48932545bca0SMatthew Dillon break; 48942545bca0SMatthew Dillon } 48952545bca0SMatthew Dillon tgt->ccb = (union ccb *) inot; 48962545bca0SMatthew Dillon inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 48972545bca0SMatthew Dillon xpt_done((union ccb *)inot); 48982545bca0SMatthew Dillon } 48992545bca0SMatthew Dillon 49002545bca0SMatthew Dillon static void 49012545bca0SMatthew Dillon mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 49022545bca0SMatthew Dillon { 49032545bca0SMatthew Dillon static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 49042545bca0SMatthew Dillon 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 49052545bca0SMatthew Dillon 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 49062545bca0SMatthew Dillon 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 49072545bca0SMatthew Dillon 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 49082545bca0SMatthew Dillon '0', '0', '0', '1' 49092545bca0SMatthew Dillon }; 49102545bca0SMatthew Dillon struct ccb_accept_tio *atiop; 49112545bca0SMatthew Dillon lun_id_t lun; 49122545bca0SMatthew Dillon int tag_action = 0; 49132545bca0SMatthew Dillon mpt_tgt_state_t *tgt; 49142545bca0SMatthew Dillon tgt_resource_t *trtp = NULL; 49152545bca0SMatthew Dillon U8 *lunptr; 49162545bca0SMatthew Dillon U8 *vbuf; 49172545bca0SMatthew Dillon U16 itag; 49182545bca0SMatthew Dillon U16 ioindex; 49192545bca0SMatthew Dillon mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 49202545bca0SMatthew Dillon uint8_t *cdbp; 49212545bca0SMatthew Dillon 49222545bca0SMatthew Dillon /* 49232545bca0SMatthew Dillon * Stash info for the current command where we can get at it later. 49242545bca0SMatthew Dillon */ 49252545bca0SMatthew Dillon vbuf = req->req_vbuf; 49262545bca0SMatthew Dillon vbuf += MPT_RQSL(mpt); 49272545bca0SMatthew Dillon 49282545bca0SMatthew Dillon /* 49292545bca0SMatthew Dillon * Get our state pointer set up. 
49302545bca0SMatthew Dillon */ 49312545bca0SMatthew Dillon tgt = MPT_TGT_STATE(mpt, req); 49322545bca0SMatthew Dillon if (tgt->state != TGT_STATE_LOADED) { 49332545bca0SMatthew Dillon mpt_tgt_dump_req_state(mpt, req); 49342545bca0SMatthew Dillon panic("bad target state in mpt_scsi_tgt_atio"); 49352545bca0SMatthew Dillon } 49362545bca0SMatthew Dillon memset(tgt, 0, sizeof (mpt_tgt_state_t)); 49372545bca0SMatthew Dillon tgt->state = TGT_STATE_IN_CAM; 49382545bca0SMatthew Dillon tgt->reply_desc = reply_desc; 49392545bca0SMatthew Dillon ioindex = GET_IO_INDEX(reply_desc); 49402545bca0SMatthew Dillon if (mpt->verbose >= MPT_PRT_DEBUG) { 49412545bca0SMatthew Dillon mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 49422545bca0SMatthew Dillon max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 49432545bca0SMatthew Dillon max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 49442545bca0SMatthew Dillon sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 49452545bca0SMatthew Dillon } 49462545bca0SMatthew Dillon if (mpt->is_fc) { 49472545bca0SMatthew Dillon PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 49482545bca0SMatthew Dillon fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 49492545bca0SMatthew Dillon if (fc->FcpCntl[2]) { 49502545bca0SMatthew Dillon /* 49512545bca0SMatthew Dillon * Task Management Request 49522545bca0SMatthew Dillon */ 49532545bca0SMatthew Dillon switch (fc->FcpCntl[2]) { 49542545bca0SMatthew Dillon case 0x2: 49552545bca0SMatthew Dillon fct = MPT_ABORT_TASK_SET; 49562545bca0SMatthew Dillon break; 49572545bca0SMatthew Dillon case 0x4: 49582545bca0SMatthew Dillon fct = MPT_CLEAR_TASK_SET; 49592545bca0SMatthew Dillon break; 49602545bca0SMatthew Dillon case 0x20: 49612545bca0SMatthew Dillon fct = MPT_TARGET_RESET; 49622545bca0SMatthew Dillon break; 49632545bca0SMatthew Dillon case 0x40: 49642545bca0SMatthew Dillon fct = MPT_CLEAR_ACA; 49652545bca0SMatthew Dillon break; 49662545bca0SMatthew Dillon case 0x80: 49672545bca0SMatthew Dillon fct = MPT_TERMINATE_TASK; 49682545bca0SMatthew Dillon break; 49692545bca0SMatthew Dillon default: 49702545bca0SMatthew Dillon mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 49712545bca0SMatthew Dillon fc->FcpCntl[2]); 49722545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, 0, req, 49732545bca0SMatthew Dillon SCSI_STATUS_OK, 0); 49742545bca0SMatthew Dillon return; 49752545bca0SMatthew Dillon } 49762545bca0SMatthew Dillon } else { 49772545bca0SMatthew Dillon switch (fc->FcpCntl[1]) { 49782545bca0SMatthew Dillon case 0: 49792545bca0SMatthew Dillon tag_action = MSG_SIMPLE_Q_TAG; 49802545bca0SMatthew Dillon break; 49812545bca0SMatthew Dillon case 1: 49822545bca0SMatthew Dillon tag_action = MSG_HEAD_OF_Q_TAG; 49832545bca0SMatthew Dillon break; 49842545bca0SMatthew Dillon case 2: 49852545bca0SMatthew Dillon tag_action = MSG_ORDERED_Q_TAG; 49862545bca0SMatthew Dillon break; 49872545bca0SMatthew Dillon default: 49882545bca0SMatthew Dillon /* 49892545bca0SMatthew Dillon * Bah. 
Ignore Untagged Queing and ACA 49902545bca0SMatthew Dillon */ 49912545bca0SMatthew Dillon tag_action = MSG_SIMPLE_Q_TAG; 49922545bca0SMatthew Dillon break; 49932545bca0SMatthew Dillon } 49942545bca0SMatthew Dillon } 49952545bca0SMatthew Dillon tgt->resid = be32toh(fc->FcpDl); 49962545bca0SMatthew Dillon cdbp = fc->FcpCdb; 49972545bca0SMatthew Dillon lunptr = fc->FcpLun; 49982545bca0SMatthew Dillon itag = be16toh(fc->OptionalOxid); 49992545bca0SMatthew Dillon } else if (mpt->is_sas) { 50002545bca0SMatthew Dillon PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 50012545bca0SMatthew Dillon ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 50022545bca0SMatthew Dillon cdbp = ssp->CDB; 50032545bca0SMatthew Dillon lunptr = ssp->LogicalUnitNumber; 50042545bca0SMatthew Dillon itag = ssp->InitiatorTag; 50052545bca0SMatthew Dillon } else { 50062545bca0SMatthew Dillon PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 50072545bca0SMatthew Dillon sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 50082545bca0SMatthew Dillon cdbp = sp->CDB; 50092545bca0SMatthew Dillon lunptr = sp->LogicalUnitNumber; 50102545bca0SMatthew Dillon itag = sp->Tag; 50112545bca0SMatthew Dillon } 50122545bca0SMatthew Dillon 50132545bca0SMatthew Dillon /* 50142545bca0SMatthew Dillon * Generate a simple lun 50152545bca0SMatthew Dillon */ 50162545bca0SMatthew Dillon switch (lunptr[0] & 0xc0) { 50172545bca0SMatthew Dillon case 0x40: 50182545bca0SMatthew Dillon lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 50192545bca0SMatthew Dillon break; 50202545bca0SMatthew Dillon case 0: 50212545bca0SMatthew Dillon lun = lunptr[1]; 50222545bca0SMatthew Dillon break; 50232545bca0SMatthew Dillon default: 50242545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 50252545bca0SMatthew Dillon lun = 0xffff; 50262545bca0SMatthew Dillon break; 50272545bca0SMatthew Dillon } 50282545bca0SMatthew Dillon 50292545bca0SMatthew Dillon /* 50302545bca0SMatthew Dillon * Deal with non-enabled or bad luns here. 50312545bca0SMatthew Dillon */ 50322545bca0SMatthew Dillon if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 50332545bca0SMatthew Dillon mpt->trt[lun].enabled == 0) { 50342545bca0SMatthew Dillon if (mpt->twildcard) { 50352545bca0SMatthew Dillon trtp = &mpt->trt_wildcard; 50362545bca0SMatthew Dillon } else if (fct == MPT_NIL_TMT_VALUE) { 50372545bca0SMatthew Dillon /* 50382545bca0SMatthew Dillon * In this case, we haven't got an upstream listener 50392545bca0SMatthew Dillon * for either a specific lun or wildcard luns. We 50402545bca0SMatthew Dillon * have to make some sensible response. For regular 50412545bca0SMatthew Dillon * inquiry, just return some NOT HERE inquiry data. 50422545bca0SMatthew Dillon * For VPD inquiry, report illegal field in cdb. 50432545bca0SMatthew Dillon * For REQUEST SENSE, just return NO SENSE data. 50442545bca0SMatthew Dillon * REPORT LUNS gets illegal command. 50452545bca0SMatthew Dillon * All other commands get 'no such device'. 
50462545bca0SMatthew Dillon */ 50472545bca0SMatthew Dillon uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 50482545bca0SMatthew Dillon size_t len; 50492545bca0SMatthew Dillon 50502545bca0SMatthew Dillon memset(buf, 0, MPT_SENSE_SIZE); 50512545bca0SMatthew Dillon cond = SCSI_STATUS_CHECK_COND; 50522545bca0SMatthew Dillon buf[0] = 0xf0; 50532545bca0SMatthew Dillon buf[2] = 0x5; 50542545bca0SMatthew Dillon buf[7] = 0x8; 50552545bca0SMatthew Dillon sp = buf; 50562545bca0SMatthew Dillon tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 50572545bca0SMatthew Dillon 50582545bca0SMatthew Dillon switch (cdbp[0]) { 50592545bca0SMatthew Dillon case INQUIRY: 50602545bca0SMatthew Dillon { 50612545bca0SMatthew Dillon if (cdbp[1] != 0) { 50622545bca0SMatthew Dillon buf[12] = 0x26; 50632545bca0SMatthew Dillon buf[13] = 0x01; 50642545bca0SMatthew Dillon break; 50652545bca0SMatthew Dillon } 50662545bca0SMatthew Dillon len = min(tgt->resid, cdbp[4]); 50672545bca0SMatthew Dillon len = min(len, sizeof (null_iqd)); 50682545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 50692545bca0SMatthew Dillon "local inquiry %ld bytes\n", (long) len); 50702545bca0SMatthew Dillon mpt_scsi_tgt_local(mpt, req, lun, 1, 50712545bca0SMatthew Dillon null_iqd, len); 50722545bca0SMatthew Dillon return; 50732545bca0SMatthew Dillon } 50742545bca0SMatthew Dillon case REQUEST_SENSE: 50752545bca0SMatthew Dillon { 50762545bca0SMatthew Dillon buf[2] = 0x0; 50772545bca0SMatthew Dillon len = min(tgt->resid, cdbp[4]); 50782545bca0SMatthew Dillon len = min(len, sizeof (buf)); 50792545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 50802545bca0SMatthew Dillon "local reqsense %ld bytes\n", (long) len); 50812545bca0SMatthew Dillon mpt_scsi_tgt_local(mpt, req, lun, 1, 50822545bca0SMatthew Dillon buf, len); 50832545bca0SMatthew Dillon return; 50842545bca0SMatthew Dillon } 50852545bca0SMatthew Dillon case REPORT_LUNS: 50862545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 50872545bca0SMatthew Dillon buf[12] = 0x26; 50882545bca0SMatthew Dillon return; 50892545bca0SMatthew Dillon default: 50902545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 50912545bca0SMatthew Dillon "CMD 0x%x to unmanaged lun %u\n", 50922545bca0SMatthew Dillon cdbp[0], lun); 50932545bca0SMatthew Dillon buf[12] = 0x25; 50942545bca0SMatthew Dillon break; 50952545bca0SMatthew Dillon } 50962545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 50972545bca0SMatthew Dillon return; 50982545bca0SMatthew Dillon } 50992545bca0SMatthew Dillon /* otherwise, leave trtp NULL */ 51002545bca0SMatthew Dillon } else { 51012545bca0SMatthew Dillon trtp = &mpt->trt[lun]; 51022545bca0SMatthew Dillon } 51032545bca0SMatthew Dillon 51042545bca0SMatthew Dillon /* 51052545bca0SMatthew Dillon * Deal with any task management 51062545bca0SMatthew Dillon */ 51072545bca0SMatthew Dillon if (fct != MPT_NIL_TMT_VALUE) { 51082545bca0SMatthew Dillon if (trtp == NULL) { 51092545bca0SMatthew Dillon mpt_prt(mpt, "task mgmt function %x but no listener\n", 51102545bca0SMatthew Dillon fct); 51112545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, 0, req, 51122545bca0SMatthew Dillon SCSI_STATUS_OK, 0); 51132545bca0SMatthew Dillon } else { 51142545bca0SMatthew Dillon mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 51152545bca0SMatthew Dillon GET_INITIATOR_INDEX(reply_desc)); 51162545bca0SMatthew Dillon } 51172545bca0SMatthew Dillon return; 51182545bca0SMatthew Dillon } 51192545bca0SMatthew Dillon 51202545bca0SMatthew Dillon 51212545bca0SMatthew Dillon atiop = (struct ccb_accept_tio *) 
STAILQ_FIRST(&trtp->atios); 51222545bca0SMatthew Dillon if (atiop == NULL) { 51232545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_WARN, 51242545bca0SMatthew Dillon "no ATIOs for lun %u- sending back %s\n", lun, 51252545bca0SMatthew Dillon mpt->tenabled? "QUEUE FULL" : "BUSY"); 51262545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, NULL, req, 51272545bca0SMatthew Dillon mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 51282545bca0SMatthew Dillon NULL); 51292545bca0SMatthew Dillon return; 51302545bca0SMatthew Dillon } 51312545bca0SMatthew Dillon STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 51322545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG1, 51332545bca0SMatthew Dillon "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 51342545bca0SMatthew Dillon atiop->ccb_h.ccb_mpt_ptr = mpt; 51352545bca0SMatthew Dillon atiop->ccb_h.status = CAM_CDB_RECVD; 51362545bca0SMatthew Dillon atiop->ccb_h.target_lun = lun; 51372545bca0SMatthew Dillon atiop->sense_len = 0; 51382545bca0SMatthew Dillon atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 51392545bca0SMatthew Dillon atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 51402545bca0SMatthew Dillon memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 51412545bca0SMatthew Dillon 51422545bca0SMatthew Dillon /* 51432545bca0SMatthew Dillon * The tag we construct here allows us to find the 51442545bca0SMatthew Dillon * original request that the command came in with. 51452545bca0SMatthew Dillon * 51462545bca0SMatthew Dillon * This way we don't have to depend on anything but the 51472545bca0SMatthew Dillon * tag to find things when CCBs show back up from CAM. 51482545bca0SMatthew Dillon */ 51492545bca0SMatthew Dillon atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 51502545bca0SMatthew Dillon tgt->tag_id = atiop->tag_id; 51512545bca0SMatthew Dillon if (tag_action) { 51522545bca0SMatthew Dillon atiop->tag_action = tag_action; 51532545bca0SMatthew Dillon atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 51542545bca0SMatthew Dillon } 51552545bca0SMatthew Dillon if (mpt->verbose >= MPT_PRT_DEBUG) { 51562545bca0SMatthew Dillon int i; 51572545bca0SMatthew Dillon mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 51582545bca0SMatthew Dillon atiop->ccb_h.target_lun); 51592545bca0SMatthew Dillon for (i = 0; i < atiop->cdb_len; i++) { 51602545bca0SMatthew Dillon mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 51612545bca0SMatthew Dillon (i == (atiop->cdb_len - 1))? 
'>' : ' '); 51622545bca0SMatthew Dillon } 51632545bca0SMatthew Dillon mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 51642545bca0SMatthew Dillon itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 51652545bca0SMatthew Dillon } 51662545bca0SMatthew Dillon 51672545bca0SMatthew Dillon xpt_done((union ccb *)atiop); 51682545bca0SMatthew Dillon } 51692545bca0SMatthew Dillon 51702545bca0SMatthew Dillon static void 51712545bca0SMatthew Dillon mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 51722545bca0SMatthew Dillon { 51732545bca0SMatthew Dillon mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 51742545bca0SMatthew Dillon 51752545bca0SMatthew Dillon mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 51762545bca0SMatthew Dillon "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 51772545bca0SMatthew Dillon tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 51782545bca0SMatthew Dillon tgt->tag_id, tgt->state); 51792545bca0SMatthew Dillon } 51802545bca0SMatthew Dillon 51812545bca0SMatthew Dillon static void 51822545bca0SMatthew Dillon mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 51832545bca0SMatthew Dillon { 51844c42baf4SSascha Wildner 51852545bca0SMatthew Dillon mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 51862545bca0SMatthew Dillon req->index, req->index, req->state); 51872545bca0SMatthew Dillon mpt_tgt_dump_tgt_state(mpt, req); 51882545bca0SMatthew Dillon } 51892545bca0SMatthew Dillon 51902545bca0SMatthew Dillon static int 51912545bca0SMatthew Dillon mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 51922545bca0SMatthew Dillon uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 51932545bca0SMatthew Dillon { 51942545bca0SMatthew Dillon int dbg; 51952545bca0SMatthew Dillon union ccb *ccb; 51962545bca0SMatthew Dillon U16 status; 51972545bca0SMatthew Dillon 51982545bca0SMatthew Dillon if (reply_frame == NULL) { 51992545bca0SMatthew Dillon /* 52002545bca0SMatthew Dillon * Figure out what the state of the command is. 52012545bca0SMatthew Dillon */ 52022545bca0SMatthew Dillon mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 52032545bca0SMatthew Dillon 52042545bca0SMatthew Dillon #ifdef INVARIANTS 52052545bca0SMatthew Dillon mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 52062545bca0SMatthew Dillon if (tgt->req) { 52072545bca0SMatthew Dillon mpt_req_not_spcl(mpt, tgt->req, 52082545bca0SMatthew Dillon "turbo scsi_tgt_reply associated req", __LINE__); 52092545bca0SMatthew Dillon } 52102545bca0SMatthew Dillon #endif 52112545bca0SMatthew Dillon switch(tgt->state) { 52122545bca0SMatthew Dillon case TGT_STATE_LOADED: 52132545bca0SMatthew Dillon /* 52142545bca0SMatthew Dillon * This is a new command starting. 
52152545bca0SMatthew Dillon */ 52162545bca0SMatthew Dillon mpt_scsi_tgt_atio(mpt, req, reply_desc); 52172545bca0SMatthew Dillon break; 52182545bca0SMatthew Dillon case TGT_STATE_MOVING_DATA: 52192545bca0SMatthew Dillon { 52202545bca0SMatthew Dillon uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 52212545bca0SMatthew Dillon 52222545bca0SMatthew Dillon ccb = tgt->ccb; 52232545bca0SMatthew Dillon if (tgt->req == NULL) { 52242545bca0SMatthew Dillon panic("mpt: turbo target reply with null " 52252545bca0SMatthew Dillon "associated request moving data"); 52262545bca0SMatthew Dillon /* NOTREACHED */ 52272545bca0SMatthew Dillon } 52282545bca0SMatthew Dillon if (ccb == NULL) { 52292545bca0SMatthew Dillon if (tgt->is_local == 0) { 52302545bca0SMatthew Dillon panic("mpt: turbo target reply with " 52312545bca0SMatthew Dillon "null associated ccb moving data"); 52322545bca0SMatthew Dillon /* NOTREACHED */ 52332545bca0SMatthew Dillon } 52342545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 52352545bca0SMatthew Dillon "TARGET_ASSIST local done\n"); 52362545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, 52372545bca0SMatthew Dillon tgt->req, links); 52382545bca0SMatthew Dillon mpt_free_request(mpt, tgt->req); 52392545bca0SMatthew Dillon tgt->req = NULL; 52402545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, NULL, req, 52412545bca0SMatthew Dillon 0, NULL); 52422545bca0SMatthew Dillon return (TRUE); 52432545bca0SMatthew Dillon } 52442545bca0SMatthew Dillon tgt->ccb = NULL; 52452545bca0SMatthew Dillon tgt->nxfers++; 52462545bca0SMatthew Dillon mpt_req_untimeout(req, mpt_timeout, ccb); 52472545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 52482545bca0SMatthew Dillon "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 52492545bca0SMatthew Dillon ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 52502545bca0SMatthew Dillon /* 52512545bca0SMatthew Dillon * Free the Target Assist Request 52522545bca0SMatthew Dillon */ 52532545bca0SMatthew Dillon KASSERT(tgt->req->ccb == ccb, 52542545bca0SMatthew Dillon ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 52552545bca0SMatthew Dillon tgt->req->serno, tgt->req->ccb)); 52562545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, 52572545bca0SMatthew Dillon tgt->req, links); 52582545bca0SMatthew Dillon mpt_free_request(mpt, tgt->req); 52592545bca0SMatthew Dillon tgt->req = NULL; 52602545bca0SMatthew Dillon 52612545bca0SMatthew Dillon /* 52622545bca0SMatthew Dillon * Do we need to send status now? That is, are 52632545bca0SMatthew Dillon * we done with all our data transfers? 
52642545bca0SMatthew Dillon */ 52652545bca0SMatthew Dillon if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 52662545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 52672545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 52682545bca0SMatthew Dillon KASSERT(ccb->ccb_h.status, 52694c42baf4SSascha Wildner ("zero ccb sts at %d", __LINE__)); 52702545bca0SMatthew Dillon tgt->state = TGT_STATE_IN_CAM; 52712545bca0SMatthew Dillon if (mpt->outofbeer) { 52722545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 52732545bca0SMatthew Dillon mpt->outofbeer = 0; 52742545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 52752545bca0SMatthew Dillon } 52762545bca0SMatthew Dillon xpt_done(ccb); 52772545bca0SMatthew Dillon break; 52782545bca0SMatthew Dillon } 52792545bca0SMatthew Dillon /* 52802545bca0SMatthew Dillon * Otherwise, send status (and sense) 52812545bca0SMatthew Dillon */ 52822545bca0SMatthew Dillon if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 52832545bca0SMatthew Dillon sp = sense; 52842545bca0SMatthew Dillon memcpy(sp, &ccb->csio.sense_data, 52852545bca0SMatthew Dillon min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 52862545bca0SMatthew Dillon } 52872545bca0SMatthew Dillon mpt_scsi_tgt_status(mpt, ccb, req, 52882545bca0SMatthew Dillon ccb->csio.scsi_status, sp); 52892545bca0SMatthew Dillon break; 52902545bca0SMatthew Dillon } 52912545bca0SMatthew Dillon case TGT_STATE_SENDING_STATUS: 52922545bca0SMatthew Dillon case TGT_STATE_MOVING_DATA_AND_STATUS: 52932545bca0SMatthew Dillon { 52942545bca0SMatthew Dillon int ioindex; 52952545bca0SMatthew Dillon ccb = tgt->ccb; 52962545bca0SMatthew Dillon 52972545bca0SMatthew Dillon if (tgt->req == NULL) { 52982545bca0SMatthew Dillon panic("mpt: turbo target reply with null " 52992545bca0SMatthew Dillon "associated request sending status"); 53002545bca0SMatthew Dillon /* NOTREACHED */ 53012545bca0SMatthew Dillon } 53022545bca0SMatthew Dillon 53032545bca0SMatthew Dillon if (ccb) { 53042545bca0SMatthew Dillon tgt->ccb = NULL; 53052545bca0SMatthew Dillon if (tgt->state == 53062545bca0SMatthew Dillon TGT_STATE_MOVING_DATA_AND_STATUS) { 53072545bca0SMatthew Dillon tgt->nxfers++; 53082545bca0SMatthew Dillon } 53092545bca0SMatthew Dillon mpt_req_untimeout(req, mpt_timeout, ccb); 53102545bca0SMatthew Dillon if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 53112545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_SENT_SENSE; 53122545bca0SMatthew Dillon } 53132545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 53142545bca0SMatthew Dillon "TARGET_STATUS tag %x sts %x flgs %x req " 53152545bca0SMatthew Dillon "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 53162545bca0SMatthew Dillon ccb->ccb_h.flags, tgt->req); 53172545bca0SMatthew Dillon /* 53182545bca0SMatthew Dillon * Free the Target Send Status Request 53192545bca0SMatthew Dillon */ 53202545bca0SMatthew Dillon KASSERT(tgt->req->ccb == ccb, 53212545bca0SMatthew Dillon ("tgt->req %p:%u tgt->req->ccb %p", 53222545bca0SMatthew Dillon tgt->req, tgt->req->serno, tgt->req->ccb)); 53232545bca0SMatthew Dillon /* 53242545bca0SMatthew Dillon * Notify CAM that we're done 53252545bca0SMatthew Dillon */ 53262545bca0SMatthew Dillon mpt_set_ccb_status(ccb, CAM_REQ_CMP); 53272545bca0SMatthew Dillon ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 53282545bca0SMatthew Dillon KASSERT(ccb->ccb_h.status, 53294c42baf4SSascha Wildner ("ZERO ccb sts at %d", __LINE__)); 53302545bca0SMatthew Dillon tgt->ccb = NULL; 53312545bca0SMatthew Dillon } else { 53322545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, 53332545bca0SMatthew Dillon 
"TARGET_STATUS non-CAM for req %p:%u\n", 53342545bca0SMatthew Dillon tgt->req, tgt->req->serno); 53352545bca0SMatthew Dillon } 53362545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, 53372545bca0SMatthew Dillon tgt->req, links); 53382545bca0SMatthew Dillon mpt_free_request(mpt, tgt->req); 53392545bca0SMatthew Dillon tgt->req = NULL; 53402545bca0SMatthew Dillon 53412545bca0SMatthew Dillon /* 53422545bca0SMatthew Dillon * And re-post the Command Buffer. 53432545bca0SMatthew Dillon * This will reset the state. 53442545bca0SMatthew Dillon */ 53452545bca0SMatthew Dillon ioindex = GET_IO_INDEX(reply_desc); 53462545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 53472545bca0SMatthew Dillon tgt->is_local = 0; 53482545bca0SMatthew Dillon mpt_post_target_command(mpt, req, ioindex); 53492545bca0SMatthew Dillon 53502545bca0SMatthew Dillon /* 53512545bca0SMatthew Dillon * And post a done for anyone who cares 53522545bca0SMatthew Dillon */ 53532545bca0SMatthew Dillon if (ccb) { 53542545bca0SMatthew Dillon if (mpt->outofbeer) { 53552545bca0SMatthew Dillon ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 53562545bca0SMatthew Dillon mpt->outofbeer = 0; 53572545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 53582545bca0SMatthew Dillon } 53592545bca0SMatthew Dillon xpt_done(ccb); 53602545bca0SMatthew Dillon } 53612545bca0SMatthew Dillon break; 53622545bca0SMatthew Dillon } 53632545bca0SMatthew Dillon case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 53642545bca0SMatthew Dillon tgt->state = TGT_STATE_LOADED; 53652545bca0SMatthew Dillon break; 53662545bca0SMatthew Dillon default: 53672545bca0SMatthew Dillon mpt_prt(mpt, "Unknown Target State 0x%x in Context " 53682545bca0SMatthew Dillon "Reply Function\n", tgt->state); 53692545bca0SMatthew Dillon } 53702545bca0SMatthew Dillon return (TRUE); 53712545bca0SMatthew Dillon } 53722545bca0SMatthew Dillon 53732545bca0SMatthew Dillon status = le16toh(reply_frame->IOCStatus); 53742545bca0SMatthew Dillon if (status != MPI_IOCSTATUS_SUCCESS) { 53752545bca0SMatthew Dillon dbg = MPT_PRT_ERROR; 53762545bca0SMatthew Dillon } else { 53772545bca0SMatthew Dillon dbg = MPT_PRT_DEBUG1; 53782545bca0SMatthew Dillon } 53792545bca0SMatthew Dillon 53802545bca0SMatthew Dillon mpt_lprt(mpt, dbg, 53812545bca0SMatthew Dillon "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 53822545bca0SMatthew Dillon req, req->serno, reply_frame, reply_frame->Function, status); 53832545bca0SMatthew Dillon 53842545bca0SMatthew Dillon switch (reply_frame->Function) { 53852545bca0SMatthew Dillon case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 53862545bca0SMatthew Dillon { 53872545bca0SMatthew Dillon mpt_tgt_state_t *tgt; 53882545bca0SMatthew Dillon #ifdef INVARIANTS 53892545bca0SMatthew Dillon mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 53902545bca0SMatthew Dillon #endif 53912545bca0SMatthew Dillon if (status != MPI_IOCSTATUS_SUCCESS) { 53922545bca0SMatthew Dillon /* 53932545bca0SMatthew Dillon * XXX What to do? 
53942545bca0SMatthew Dillon */ 53952545bca0SMatthew Dillon break; 53962545bca0SMatthew Dillon } 53972545bca0SMatthew Dillon tgt = MPT_TGT_STATE(mpt, req); 53982545bca0SMatthew Dillon KASSERT(tgt->state == TGT_STATE_LOADING, 53994c42baf4SSascha Wildner ("bad state 0x%x on reply to buffer post", tgt->state)); 54002545bca0SMatthew Dillon mpt_assign_serno(mpt, req); 54012545bca0SMatthew Dillon tgt->state = TGT_STATE_LOADED; 54022545bca0SMatthew Dillon break; 54032545bca0SMatthew Dillon } 54042545bca0SMatthew Dillon case MPI_FUNCTION_TARGET_ASSIST: 54052545bca0SMatthew Dillon #ifdef INVARIANTS 54062545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 54072545bca0SMatthew Dillon #endif 54082545bca0SMatthew Dillon mpt_prt(mpt, "target assist completion\n"); 54092545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 54102545bca0SMatthew Dillon mpt_free_request(mpt, req); 54112545bca0SMatthew Dillon break; 54122545bca0SMatthew Dillon case MPI_FUNCTION_TARGET_STATUS_SEND: 54132545bca0SMatthew Dillon #ifdef INVARIANTS 54142545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 54152545bca0SMatthew Dillon #endif 54162545bca0SMatthew Dillon mpt_prt(mpt, "status send completion\n"); 54172545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 54182545bca0SMatthew Dillon mpt_free_request(mpt, req); 54192545bca0SMatthew Dillon break; 54202545bca0SMatthew Dillon case MPI_FUNCTION_TARGET_MODE_ABORT: 54212545bca0SMatthew Dillon { 54222545bca0SMatthew Dillon PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 54232545bca0SMatthew Dillon (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 54242545bca0SMatthew Dillon PTR_MSG_TARGET_MODE_ABORT abtp = 54252545bca0SMatthew Dillon (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 54262545bca0SMatthew Dillon uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 54272545bca0SMatthew Dillon #ifdef INVARIANTS 54282545bca0SMatthew Dillon mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 54292545bca0SMatthew Dillon #endif 54302545bca0SMatthew Dillon mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 54312545bca0SMatthew Dillon cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 54322545bca0SMatthew Dillon TAILQ_REMOVE(&mpt->request_pending_list, req, links); 54332545bca0SMatthew Dillon mpt_free_request(mpt, req); 54342545bca0SMatthew Dillon break; 54352545bca0SMatthew Dillon } 54362545bca0SMatthew Dillon default: 54372545bca0SMatthew Dillon mpt_prt(mpt, "Unknown Target Address Reply Function code: " 54382545bca0SMatthew Dillon "0x%x\n", reply_frame->Function); 54392545bca0SMatthew Dillon break; 54402545bca0SMatthew Dillon } 54412545bca0SMatthew Dillon return (TRUE); 54422545bca0SMatthew Dillon } 5443