/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/refcount.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <dev/nvmf/host/nvmf_var.h>

/*
 * The I/O completion may trigger after the received CQE if the I/O
 * used a zero-copy mbuf that isn't harvested until after the NIC
 * driver processes TX completions.  Use spriv_field0 as a refcount.
 *
 * Store any I/O error returned in spriv_field1.
 */
static __inline u_int *
ccb_refs(union ccb *ccb)
{
        return ((u_int *)&ccb->ccb_h.spriv_field0);
}

#define spriv_ioerror   spriv_field1

static void
nvmf_ccb_done(union ccb *ccb)
{
        if (!refcount_release(ccb_refs(ccb)))
                return;

        if (nvmf_cqe_aborted(&ccb->nvmeio.cpl)) {
                struct cam_sim *sim = xpt_path_sim(ccb->ccb_h.path);
                struct nvmf_softc *sc = cam_sim_softc(sim);

                if (nvmf_fail_disconnect || sc->sim_shutdown)
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                else
                        ccb->ccb_h.status = CAM_REQUEUE_REQ;
                xpt_done(ccb);
        } else if (ccb->nvmeio.cpl.status != 0) {
                ccb->ccb_h.status = CAM_NVME_STATUS_ERROR;
                xpt_done(ccb);
        } else if (ccb->ccb_h.spriv_ioerror != 0) {
                KASSERT(ccb->ccb_h.spriv_ioerror != EJUSTRETURN,
                    ("%s: zero sized transfer without CQE error", __func__));
                ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                xpt_done(ccb);
        } else {
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
        }
}
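
/*
 * Data-transfer completion callback passed to
 * nvmf_capsule_append_data().  Records any transfer error in the CCB
 * and drops one of the CCB's references.
 */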
static void
nvmf_ccb_io_complete(void *arg, size_t xfered, int error)
{
        union ccb *ccb = arg;

        /*
         * TODO: Reporting partial completions requires extending
         * nvmeio to support resid and updating nda to handle partial
         * reads, either by returning partial success (or an error) to
         * the caller, or retrying all or part of the request.
         */
        ccb->ccb_h.spriv_ioerror = error;
        if (error == 0) {
                if (xfered == 0) {
#ifdef INVARIANTS
                        /*
                         * If the request fails with an error in the CQE
                         * there will be no data transferred but also no
                         * I/O error.
                         */
                        ccb->ccb_h.spriv_ioerror = EJUSTRETURN;
#endif
                } else
                        KASSERT(xfered == ccb->nvmeio.dxfer_len,
                            ("%s: partial CCB completion", __func__));
        }

        nvmf_ccb_done(ccb);
}

static void
nvmf_ccb_complete(void *arg, const struct nvme_completion *cqe)
{
        union ccb *ccb = arg;

        ccb->nvmeio.cpl = *cqe;
        nvmf_ccb_done(ccb);
}
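
/*
 * Queue an XPT_NVME_IO or XPT_NVME_ADMIN CCB as an NVMe-oF request.
 * A CCB with a data transfer holds two references: one released by
 * the capsule completion (nvmf_ccb_complete) and one by the data
 * transfer completion (nvmf_ccb_io_complete).
 */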
static void
nvmf_sim_io(struct nvmf_softc *sc, union ccb *ccb)
{
        struct ccb_nvmeio *nvmeio = &ccb->nvmeio;
        struct memdesc mem;
        struct nvmf_request *req;
        struct nvmf_host_qpair *qp;

        mtx_lock(&sc->sim_mtx);
        if (sc->sim_disconnected) {
                mtx_unlock(&sc->sim_mtx);
                if (nvmf_fail_disconnect || sc->sim_shutdown)
                        nvmeio->ccb_h.status = CAM_DEV_NOT_THERE;
                else
                        nvmeio->ccb_h.status = CAM_REQUEUE_REQ;
                xpt_done(ccb);
                return;
        }
        if (nvmeio->ccb_h.func_code == XPT_NVME_IO)
                qp = nvmf_select_io_queue(sc);
        else
                qp = sc->admin;
        req = nvmf_allocate_request(qp, &nvmeio->cmd, nvmf_ccb_complete,
            ccb, M_NOWAIT);
        mtx_unlock(&sc->sim_mtx);
        if (req == NULL) {
                nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
                xpt_done(ccb);
                return;
        }

        if (nvmeio->dxfer_len != 0) {
                refcount_init(ccb_refs(ccb), 2);
                mem = memdesc_ccb(ccb);
                nvmf_capsule_append_data(req->nc, &mem, nvmeio->dxfer_len,
                    (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT,
                    nvmf_ccb_io_complete, ccb);
        } else
                refcount_init(ccb_refs(ccb), 1);

        /*
         * Clear spriv_ioerror as it can hold an earlier error if this
         * CCB was aborted and has been retried.
         */
        ccb->ccb_h.spriv_ioerror = 0;
        KASSERT(ccb->ccb_h.status == CAM_REQ_INPROG,
            ("%s: incoming CCB is not in-progress", __func__));
        ccb->ccb_h.status |= CAM_SIM_QUEUED;
        nvmf_submit_request(req);
}
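
/*
 * CAM SIM action handler: answer path and transport-setting
 * inquiries directly and hand NVMe I/O and admin CCBs to
 * nvmf_sim_io().
 */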
static void
nvmf_sim_action(struct cam_sim *sim, union ccb *ccb)
{
        struct nvmf_softc *sc = cam_sim_softc(sim);

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
            ("nvmf_sim_action: func= %#x\n",
            ccb->ccb_h.func_code));

        switch (ccb->ccb_h.func_code) {
        case XPT_PATH_INQ:      /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;
                cpi->hba_inquiry = 0;
                cpi->target_sprt = 0;
                cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 0;
                cpi->max_lun = sc->cdata->nn;
                cpi->async_flags = 0;
                cpi->hpath_id = 0;
                cpi->initiator_id = 0;
                strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strlcpy(cpi->hba_vid, "NVMeoF", HBA_IDLEN);
                strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->bus_id = 0;

                /* XXX: Same as iSCSI. */
                cpi->base_transfer_speed = 150000;
                cpi->protocol = PROTO_NVME;
                cpi->protocol_version = sc->vs;
                cpi->transport = XPORT_NVMF;
                cpi->transport_version = sc->vs;
                cpi->xport_specific.nvmf.nsid =
                    xpt_path_lun_id(ccb->ccb_h.path);
                cpi->xport_specific.nvmf.trtype = sc->trtype;
                strlcpy(cpi->xport_specific.nvmf.dev_name,
                    device_get_nameunit(sc->dev),
                    sizeof(cpi->xport_specific.nvmf.dev_name));
                cpi->maxio = sc->max_xfer_size;
                cpi->hba_vendor = 0;
                cpi->hba_device = 0;
                cpi->hba_subvendor = 0;
                cpi->hba_subdevice = 0;
                cpi->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_GET_TRAN_SETTINGS:     /* Get transport settings */
        {
                struct ccb_trans_settings *cts = &ccb->cts;
                struct ccb_trans_settings_nvme *nvme;
                struct ccb_trans_settings_nvmf *nvmf;

                cts->protocol = PROTO_NVME;
                cts->protocol_version = sc->vs;
                cts->transport = XPORT_NVMF;
                cts->transport_version = sc->vs;

                nvme = &cts->proto_specific.nvme;
                nvme->valid = CTS_NVME_VALID_SPEC;
                nvme->spec = sc->vs;

                nvmf = &cts->xport_specific.nvmf;
                nvmf->valid = CTS_NVMF_VALID_TRTYPE;
                nvmf->trtype = sc->trtype;
                cts->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_SET_TRAN_SETTINGS:     /* Set transport settings */
                /*
                 * No transfer settings can be set, but nvme_xpt sends
                 * this anyway.
                 */
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        case XPT_NVME_IO:       /* Execute the requested I/O */
        case XPT_NVME_ADMIN:    /* or Admin operation */
                nvmf_sim_io(sc, ccb);
                return;
        default:
                /* XXX */
                device_printf(sc->dev, "unhandled sim function %#x\n",
                    ccb->ccb_h.func_code);
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        }
        xpt_done(ccb);
}
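
/*
 * Create the CAM devq, SIM, bus, and wildcard path for this host.
 * The simq depth is capped at three quarters of the maximum number
 * of pending I/O requests.
 */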
int
nvmf_init_sim(struct nvmf_softc *sc)
{
        struct cam_devq *devq;
        int max_trans;

        max_trans = sc->max_pending_io * 3 / 4;
        devq = cam_simq_alloc(max_trans);
        if (devq == NULL) {
                device_printf(sc->dev, "Failed to allocate CAM simq\n");
                return (ENOMEM);
        }

        mtx_init(&sc->sim_mtx, "nvmf sim", NULL, MTX_DEF);
        sc->sim = cam_sim_alloc(nvmf_sim_action, NULL, "nvme", sc,
            device_get_unit(sc->dev), NULL, max_trans, max_trans, devq);
        if (sc->sim == NULL) {
                device_printf(sc->dev, "Failed to allocate CAM sim\n");
                cam_simq_free(devq);
                mtx_destroy(&sc->sim_mtx);
                return (ENXIO);
        }
        if (xpt_bus_register(sc->sim, sc->dev, 0) != CAM_SUCCESS) {
                device_printf(sc->dev, "Failed to create CAM bus\n");
                cam_sim_free(sc->sim, TRUE);
                mtx_destroy(&sc->sim_mtx);
                return (ENXIO);
        }
        if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                device_printf(sc->dev, "Failed to create CAM path\n");
                xpt_bus_deregister(cam_sim_path(sc->sim));
                cam_sim_free(sc->sim, TRUE);
                mtx_destroy(&sc->sim_mtx);
                return (ENXIO);
        }
        return (0);
}

void
nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id)
{
        union ccb *ccb;

        ccb = xpt_alloc_ccb_nowait();
        if (ccb == NULL) {
                device_printf(sc->dev,
                    "unable to alloc CCB for rescan of namespace %u\n", id);
                return;
        }

        /*
         * As with nvme_sim, map NVMe namespace IDs onto CAM unit
         * LUNs.
         */
        if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim), 0,
            id) != CAM_REQ_CMP) {
                device_printf(sc->dev,
                    "Unable to create path for rescan of namespace %u\n", id);
                xpt_free_ccb(ccb);
                return;
        }
        xpt_rescan(ccb);
}
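
/*
 * The routines below manage the SIM queue across association state
 * changes: nvmf_disconnect_sim() freezes the queue when the
 * association is lost, while nvmf_reconnect_sim() and
 * nvmf_shutdown_sim() release it so queued CCBs can be resubmitted
 * or failed.
 */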
void
nvmf_disconnect_sim(struct nvmf_softc *sc)
{
        mtx_lock(&sc->sim_mtx);
        sc->sim_disconnected = true;
        xpt_freeze_simq(sc->sim, 1);
        mtx_unlock(&sc->sim_mtx);
}

void
nvmf_reconnect_sim(struct nvmf_softc *sc)
{
        mtx_lock(&sc->sim_mtx);
        sc->sim_disconnected = false;
        mtx_unlock(&sc->sim_mtx);
        xpt_release_simq(sc->sim, 1);
}

void
nvmf_shutdown_sim(struct nvmf_softc *sc)
{
        mtx_lock(&sc->sim_mtx);
        sc->sim_shutdown = true;
        mtx_unlock(&sc->sim_mtx);
        xpt_release_simq(sc->sim, 1);
}

void
nvmf_destroy_sim(struct nvmf_softc *sc)
{
        xpt_async(AC_LOST_DEVICE, sc->path, NULL);
        if (sc->sim_disconnected)
                xpt_release_simq(sc->sim, 1);
        xpt_free_path(sc->path);
        xpt_bus_deregister(cam_sim_path(sc->sim));
        cam_sim_free(sc->sim, TRUE);
        mtx_destroy(&sc->sim_mtx);
}