/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation. All rights reserved.
 * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/config.h"
#include "spdk/nvmf_spec.h"
#include "spdk/string.h"
#include "spdk/env.h"
#include "nvme_internal.h"
#include "nvme_io_msg.h"

#define SPDK_NVME_DRIVER_NAME "spdk_nvme_driver"

struct nvme_driver *g_spdk_nvme_driver;
pid_t g_spdk_nvme_pid;

/* gross timeout of 180 seconds in milliseconds */
static int g_nvme_driver_timeout_ms = 3 * 60 * 1000;

/* Per-process attached controller list */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_nvme_attached_ctrlrs =
	TAILQ_HEAD_INITIALIZER(g_nvme_attached_ctrlrs);

/* Returns true if ctrlr should be stored on the multi-process shared_attached_ctrlrs list */
static bool
nvme_ctrlr_shared(const struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE;
}

void
nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
		     struct spdk_nvme_ctrlr *ctrlr)
{
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
}

static void
nvme_ctrlr_detach_async_finish(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
	if (nvme_ctrlr_shared(ctrlr)) {
		TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
	} else {
		TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
	}
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
}

static int
nvme_ctrlr_detach_async(struct spdk_nvme_ctrlr *ctrlr,
			struct nvme_ctrlr_detach_ctx **_ctx)
{
	struct nvme_ctrlr_detach_ctx *ctx;
	int ref_count;

	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	ref_count = nvme_ctrlr_get_ref_count(ctrlr);
	assert(ref_count > 0);

	if (ref_count == 1) {
		/* This is the last reference to the controller, so we need to
		 * allocate a context to destruct it.
		 */
		ctx = calloc(1, sizeof(*ctx));
		if (ctx == NULL) {
			nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

			return -ENOMEM;
		}
		ctx->ctrlr = ctrlr;
		ctx->cb_fn = nvme_ctrlr_detach_async_finish;

		nvme_ctrlr_proc_put_ref(ctrlr);

		nvme_io_msg_ctrlr_detach(ctrlr);

		nvme_ctrlr_destruct_async(ctrlr, ctx);

		*_ctx = ctx;
	} else {
		nvme_ctrlr_proc_put_ref(ctrlr);
	}

	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	return 0;
}

static int
nvme_ctrlr_detach_poll_async(struct nvme_ctrlr_detach_ctx *ctx)
{
	int rc;

	rc = nvme_ctrlr_destruct_poll_async(ctx->ctrlr, ctx);
	if (rc == -EAGAIN) {
		return -EAGAIN;
	}

	free(ctx);

	return rc;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_ctrlr_detach_ctx *ctx = NULL;
	int rc;

	rc = nvme_ctrlr_detach_async(ctrlr, &ctx);
	if (rc != 0) {
		return rc;
	} else if (ctx == NULL) {
		/* ctrlr was detached from the caller process, but other
		 * processes are still attached to it.
		 */
		return 0;
	}

	while (1) {
		rc = nvme_ctrlr_detach_poll_async(ctx);
		if (rc != -EAGAIN) {
			break;
		}
		nvme_delay(1000);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr,
		       struct spdk_nvme_detach_ctx **_detach_ctx)
{
	struct spdk_nvme_detach_ctx *detach_ctx;
	struct nvme_ctrlr_detach_ctx *ctx = NULL;
	int rc;

	if (ctrlr == NULL || _detach_ctx == NULL) {
		return -EINVAL;
	}

	/* Use a context header to poll detachment for multiple controllers.
	 * Allocate a new one if not allocated yet, or use the passed one otherwise.
	 */
	detach_ctx = *_detach_ctx;
	if (detach_ctx == NULL) {
		detach_ctx = calloc(1, sizeof(*detach_ctx));
		if (detach_ctx == NULL) {
			return -ENOMEM;
		}
		TAILQ_INIT(&detach_ctx->head);
	}

	rc = nvme_ctrlr_detach_async(ctrlr, &ctx);
	if (rc != 0 || ctx == NULL) {
		/* If this detach failed and the context header is empty, it means we just
		 * allocated the header and need to free it before returning.
		 */
		if (TAILQ_EMPTY(&detach_ctx->head)) {
			free(detach_ctx);
		}
		return rc;
	}

	/* Append a context for this detachment to the context header. */
	TAILQ_INSERT_TAIL(&detach_ctx->head, ctx, link);

	*_detach_ctx = detach_ctx;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *detach_ctx)
{
	struct nvme_ctrlr_detach_ctx *ctx, *tmp_ctx;
	int rc;

	if (detach_ctx == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(ctx, &detach_ctx->head, link, tmp_ctx) {
		TAILQ_REMOVE(&detach_ctx->head, ctx, link);

		rc = nvme_ctrlr_detach_poll_async(ctx);
		if (rc == -EAGAIN) {
			/* If not -EAGAIN, ctx was freed by nvme_ctrlr_detach_poll_async(). */
			TAILQ_INSERT_HEAD(&detach_ctx->head, ctx, link);
		}
	}

	if (!TAILQ_EMPTY(&detach_ctx->head)) {
		return -EAGAIN;
	}

	free(detach_ctx);
	return 0;
}

void
spdk_nvme_detach_poll(struct spdk_nvme_detach_ctx *detach_ctx)
{
	while (detach_ctx && spdk_nvme_detach_poll_async(detach_ctx) == -EAGAIN) {
		;
	}
}
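
/*
 * Illustrative sketch (not part of the original file): a caller can batch
 * several controllers under one detach context and poll them together.
 * "ctrlr1" and "ctrlr2" below are hypothetical controllers attached earlier
 * via spdk_nvme_probe() or spdk_nvme_connect():
 *
 *	struct spdk_nvme_detach_ctx *dctx = NULL;
 *
 *	spdk_nvme_detach_async(ctrlr1, &dctx);
 *	spdk_nvme_detach_async(ctrlr2, &dctx);
 *	spdk_nvme_detach_poll(dctx);
 *
 * spdk_nvme_detach_poll() busy-waits; an event-driven caller would instead
 * call spdk_nvme_detach_poll_async() from its poller until it stops
 * returning -EAGAIN.
 */
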
void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	if (status->timed_out) {
		/* There is no routine waiting for the completion of this request, free allocated memory */
		spdk_free(status->dma_data);
		free(status);
		return;
	}

	/*
	 * Copy status into the argument passed by the caller, so that
	 * the caller can check the status to determine if the
	 * request passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	status->done = true;
}

static void
dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
}

int
nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
		pthread_mutex_t *robust_mutex)
{
	int rc;

	if (robust_mutex) {
		nvme_robust_mutex_lock(robust_mutex);
	}

	if (qpair->poll_group) {
		rc = (int)spdk_nvme_poll_group_process_completions(qpair->poll_group->group, 0,
				dummy_disconnected_qpair_cb);
	} else {
		rc = spdk_nvme_qpair_process_completions(qpair, 0);
	}

	if (robust_mutex) {
		nvme_robust_mutex_unlock(robust_mutex);
	}

	if (rc < 0) {
		status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		status->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
		goto error;
	}

	if (!status->done && status->timeout_tsc && spdk_get_ticks() > status->timeout_tsc) {
		goto error;
	}

	if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		union spdk_nvme_csts_register csts = spdk_nvme_ctrlr_get_regs_csts(qpair->ctrlr);
		if (csts.raw == SPDK_NVME_INVALID_REGISTER_VALUE) {
			status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			status->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			goto error;
		}
	}

	if (!status->done) {
		return -EAGAIN;
	} else if (spdk_nvme_cpl_is_error(&status->cpl)) {
		return -EIO;
	} else {
		return 0;
	}
error:
	/* Either a transport error occurred or we've timed out. Either way, if the response hasn't
	 * been received yet, mark the command as timed out, so the status gets freed when the
	 * command is completed or aborted.
	 */
	if (!status->done) {
		status->timed_out = true;
	}

	return -ECANCELED;
}
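
/*
 * Illustrative note (not part of the original file): the wrapper below turns a
 * microsecond timeout into a tick deadline as
 *
 *	timeout_tsc = spdk_get_ticks() +
 *		      timeout_in_usecs * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
 *
 * For example, with a hypothetical 2 GHz tick rate, a 500000 us (0.5 s)
 * timeout adds 500000 * 2000000000 / 1000000 = 1000000000 ticks to the
 * current tick count.
 */
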
/**
 * Poll qpair for completions until a command completes.
 *
 * \param qpair queue to poll
 * \param status completion status. The user must fill this structure with zeroes before calling
 * this function
 * \param robust_mutex optional robust mutex to lock while polling qpair
 * \param timeout_in_usecs optional timeout
 *
 * \return 0 if command completed without error,
 * -EIO if command completed with error,
 * -ECANCELED if command is not completed due to transport/device error or time expired
 *
 * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
 * and status as the callback argument.
 */
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	int rc;

	if (timeout_in_usecs) {
		status->timeout_tsc = spdk_get_ticks() + timeout_in_usecs *
				      spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
	} else {
		status->timeout_tsc = 0;
	}

	status->cpl.status_raw = 0;
	do {
		rc = nvme_wait_for_completion_robust_lock_timeout_poll(qpair, status, robust_mutex);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * Poll qpair for completions until a command completes.
 *
 * \param qpair queue to poll
 * \param status completion status. The user must fill this structure with zeroes before calling
 * this function
 * \param robust_mutex optional robust mutex to lock while polling qpair
 *
 * \return 0 if command completed without error,
 * -EIO if command completed with error,
 * -ECANCELED if command is not completed due to transport/device error
 *
 * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
 * and status as the callback argument.
 */
int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}
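
/*
 * Illustrative sketch (not part of the original file): internal callers pair
 * these helpers with nvme_completion_poll_cb as the doc comments above
 * describe. A hypothetical admin command submission might look like:
 *
 *	struct nvme_completion_poll_status *status;
 *
 *	status = calloc(1, sizeof(*status));
 *	if (status == NULL) {
 *		return -ENOMEM;
 *	}
 *	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0,
 *					   nvme_completion_poll_cb, status);
 *	if (rc == 0) {
 *		rc = nvme_wait_for_completion(ctrlr->adminq, status);
 *	}
 *	if (!status->timed_out) {
 *		free(status);
 *	}
 *
 * Note that on timeout the status is intentionally leaked to
 * nvme_completion_poll_cb(), which frees it when the command finally
 * completes or is aborted.
 */
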
/**
 * Poll qpair for completions until a command completes.
 *
 * \param qpair queue to poll
 * \param status completion status. The user must fill this structure with zeroes before calling
 * this function
 * \param timeout_in_usecs optional timeout
 *
 * \return 0 if command completed without error,
 * -EIO if command completed with error,
 * -ECANCELED if command is not completed due to transport/device error or time expired
 *
 * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
 * and status as the callback argument.
 */
int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

static void
nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = arg;
	spdk_nvme_cmd_cb user_cb_fn;
	void *user_cb_arg;
	enum spdk_nvme_data_transfer xfer;

	if (req->user_buffer && req->payload_size) {
		/* Copy back to the user buffer */
		assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
		if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST ||
		    xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
			assert(req->pid == getpid());
			memcpy(req->user_buffer, req->payload.contig_or_cb_arg, req->payload_size);
		}
	}

	user_cb_fn = req->user_cb_fn;
	user_cb_arg = req->user_cb_arg;
	nvme_cleanup_user_req(req);

	/* Call the user's original callback now that the buffer has been copied */
	user_cb_fn(user_cb_arg, cpl);
}

/**
 * Allocate a request as well as a DMA-capable buffer to copy to/from the user's buffer.
 *
 * This is intended for use in non-fast-path functions (admin commands, reservations, etc.)
 * where the overhead of a copy is not a problem.
 */
struct nvme_request *
nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
				void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				void *cb_arg, bool host_to_controller)
{
	struct nvme_request *req;
	void *dma_buffer = NULL;

	if (buffer && payload_size) {
		dma_buffer = spdk_zmalloc(payload_size, 4096, NULL,
					  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (!dma_buffer) {
			return NULL;
		}

		if (host_to_controller) {
			memcpy(dma_buffer, buffer, payload_size);
		}
	}

	req = nvme_allocate_request_contig(qpair, dma_buffer, payload_size, nvme_user_copy_cmd_complete,
					   NULL);
	if (!req) {
		spdk_free(dma_buffer);
		return NULL;
	}

	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;
	req->user_buffer = buffer;
	req->cb_arg = req;

	return req;
}

/**
 * Check if a request has exceeded the controller timeout.
 *
 * \param req request to check for timeout.
 * \param cid command ID for command submitted by req (will be passed to timeout_cb_fn)
 * \param active_proc per-process data for the controller associated with req
 * \param now_tick current time from spdk_get_ticks()
 * \return 0 if requests submitted more recently than req should still be checked for timeouts, or
 * 1 if requests newer than req need not be checked.
 *
 * The request's timeout callback will be called if needed; the caller is only responsible for
 * calling this function on each outstanding request.
 */
int
nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
			   struct spdk_nvme_ctrlr_process *active_proc,
			   uint64_t now_tick)
{
	struct spdk_nvme_qpair *qpair = req->qpair;
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	uint64_t timeout_ticks = nvme_qpair_is_admin_queue(qpair) ?
				 active_proc->timeout_admin_ticks : active_proc->timeout_io_ticks;

	assert(active_proc->timeout_cb_fn != NULL);

	if (req->timed_out || req->submit_tick == 0) {
		return 0;
	}

	if (req->pid != g_spdk_nvme_pid) {
		return 0;
	}

	if (nvme_qpair_is_admin_queue(qpair) &&
	    req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
		return 0;
	}

	if (req->submit_tick + timeout_ticks > now_tick) {
		return 1;
	}

	req->timed_out = true;

	/*
	 * We don't want to expose the admin queue to the user,
	 * so when we're timing out admin commands set the
	 * qpair to NULL.
	 */
	active_proc->timeout_cb_fn(active_proc->timeout_cb_arg, ctrlr,
				   nvme_qpair_is_admin_queue(qpair) ? NULL : qpair,
				   cid);
	return 0;
}

int
nvme_robust_mutex_init_shared(pthread_mutex_t *mtx)
{
	int rc = 0;

#ifdef __FreeBSD__
	pthread_mutex_init(mtx, NULL);
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr)) {
		return -1;
	}
	if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
	    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
	    pthread_mutex_init(mtx, &attr)) {
		rc = -1;
	}
	pthread_mutexattr_destroy(&attr);
#endif

	return rc;
}
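
/*
 * Illustrative note (not part of the original file): a robust, process-shared
 * mutex lets a surviving process recover the driver lock if its owner dies
 * while holding it. A hypothetical locker would handle EOWNERDEAD like:
 *
 *	rc = pthread_mutex_lock(mtx);
 *	if (rc == EOWNERDEAD) {
 *		pthread_mutex_consistent(mtx);  (previous owner died; repair shared state)
 *	}
 *
 * This matters here because g_spdk_nvme_driver->lock lives in shared memory
 * and is taken by multiple processes.
 */
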
int
nvme_driver_init(void)
{
	static pthread_mutex_t g_init_mutex = PTHREAD_MUTEX_INITIALIZER;
	int ret = 0;

	/* Use a special process-private mutex to ensure the global
	 * nvme driver object (g_spdk_nvme_driver) gets initialized by
	 * only one thread. Once that object is established and its
	 * mutex is initialized, we can unlock this mutex and use that
	 * one instead.
	 */
	pthread_mutex_lock(&g_init_mutex);

	/* Each process needs its own pid. */
	g_spdk_nvme_pid = getpid();

	/*
	 * Only one thread from one process will do this driver init work.
	 * The primary process will reserve the shared memory and do the
	 * initialization.
	 * The secondary process will look up the existing reserved memory.
	 */
	if (spdk_process_is_primary()) {
		/* The unique named memzone has already been reserved. */
		if (g_spdk_nvme_driver != NULL) {
			pthread_mutex_unlock(&g_init_mutex);
			return 0;
		} else {
			g_spdk_nvme_driver = spdk_memzone_reserve(SPDK_NVME_DRIVER_NAME,
					     sizeof(struct nvme_driver), SPDK_ENV_NUMA_ID_ANY,
					     SPDK_MEMZONE_NO_IOVA_CONTIG);
		}

		if (g_spdk_nvme_driver == NULL) {
			SPDK_ERRLOG("primary process failed to reserve memory\n");
			pthread_mutex_unlock(&g_init_mutex);
			return -1;
		}
	} else {
		g_spdk_nvme_driver = spdk_memzone_lookup(SPDK_NVME_DRIVER_NAME);

		/* The unique named memzone was already reserved by the primary process. */
		if (g_spdk_nvme_driver != NULL) {
			int ms_waited = 0;

			/* Wait for the nvme driver to get initialized. */
			while ((g_spdk_nvme_driver->initialized == false) &&
			       (ms_waited < g_nvme_driver_timeout_ms)) {
				ms_waited++;
				nvme_delay(1000);	/* delay 1ms */
			}
			if (g_spdk_nvme_driver->initialized == false) {
				SPDK_ERRLOG("timeout waiting for primary process to init\n");
				pthread_mutex_unlock(&g_init_mutex);
				return -1;
			}
		} else {
			SPDK_ERRLOG("primary process is not started yet\n");
			pthread_mutex_unlock(&g_init_mutex);
			return -1;
		}

		pthread_mutex_unlock(&g_init_mutex);
		return 0;
	}

	/*
	 * At this moment, only one thread from the primary process will do
	 * the g_spdk_nvme_driver initialization
	 */
	assert(spdk_process_is_primary());

	ret = nvme_robust_mutex_init_shared(&g_spdk_nvme_driver->lock);
	if (ret != 0) {
		SPDK_ERRLOG("failed to initialize mutex\n");
		spdk_memzone_free(SPDK_NVME_DRIVER_NAME);
		pthread_mutex_unlock(&g_init_mutex);
		return ret;
	}

	/* The lock in the shared g_spdk_nvme_driver object is now ready to
	 * be used - so we can unlock the g_init_mutex here.
	 */
	pthread_mutex_unlock(&g_init_mutex);
	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	g_spdk_nvme_driver->initialized = false;
	g_spdk_nvme_driver->hotplug_fd = spdk_pci_event_listen();
	if (g_spdk_nvme_driver->hotplug_fd < 0) {
		SPDK_DEBUGLOG(nvme, "Failed to open uevent netlink socket\n");
	}

	TAILQ_INIT(&g_spdk_nvme_driver->shared_attached_ctrlrs);

	spdk_uuid_generate(&g_spdk_nvme_driver->default_extended_host_id);

	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	return ret;
}

/* This function must only be called while holding g_spdk_nvme_driver->lock */
int
nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
		 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts;

	assert(trid != NULL);

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));

	if (!probe_ctx->probe_cb || probe_ctx->probe_cb(probe_ctx->cb_ctx, trid, &opts)) {
		ctrlr = nvme_get_ctrlr_by_trid_unsafe(trid, opts.hostnqn);
		if (ctrlr) {
			/* This ctrlr already exists. */

			if (ctrlr->is_destructed) {
				/* This ctrlr is being destructed asynchronously. */
				SPDK_ERRLOG("NVMe controller for SSD: %s is being destructed\n",
					    trid->traddr);
				probe_ctx->attach_fail_cb(probe_ctx->cb_ctx, trid, -EBUSY);
				return -EBUSY;
			}

			/* Increase the ref count before calling attach_cb() as the user may
			 * call nvme_detach() immediately. */
			nvme_ctrlr_proc_get_ref(ctrlr);

			if (probe_ctx->attach_cb) {
				nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
				probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
				nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
			}
			return 0;
		}

		ctrlr = nvme_transport_ctrlr_construct(trid, &opts, devhandle);
		if (ctrlr == NULL) {
			SPDK_ERRLOG("Failed to construct NVMe controller for SSD: %s\n", trid->traddr);
			probe_ctx->attach_fail_cb(probe_ctx->cb_ctx, trid, -ENODEV);
			return -1;
		}
		ctrlr->remove_cb = probe_ctx->remove_cb;
		ctrlr->cb_ctx = probe_ctx->cb_ctx;

		nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
		TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
		return 0;
	}

	return 1;
}
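
/*
 * Illustrative sketch (not part of the original file): the probe_cb checked
 * above is supplied through spdk_nvme_probe() and gates attachment; returning
 * true claims the controller. A hypothetical callback that claims every PCIe
 * controller and tweaks its queue count could look like:
 *
 *	static bool
 *	example_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 *			 struct spdk_nvme_ctrlr_opts *opts)
 *	{
 *		opts->num_io_queues = 4;
 *		return trid->trtype == SPDK_NVME_TRANSPORT_PCIE;
 *	}
 */
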
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr,
			 struct spdk_nvme_probe_ctx *probe_ctx)
{
	int rc = 0;
	struct nvme_ctrlr_detach_ctx *detach_ctx;

	rc = nvme_ctrlr_process_init(ctrlr);

	if (rc) {
		/* Controller failed to initialize. */
		TAILQ_REMOVE(&probe_ctx->init_ctrlrs, ctrlr, tailq);
		SPDK_ERRLOG("Failed to initialize SSD: %s\n", ctrlr->trid.traddr);
		probe_ctx->attach_fail_cb(probe_ctx->cb_ctx, &ctrlr->trid, rc);
		nvme_ctrlr_lock(ctrlr);
		nvme_ctrlr_fail(ctrlr, false);
		nvme_ctrlr_unlock(ctrlr);

		/* allocate a context to detach this controller asynchronously */
		detach_ctx = calloc(1, sizeof(*detach_ctx));
		if (detach_ctx == NULL) {
			SPDK_WARNLOG("Failed to allocate asynchronous detach context. Performing synchronous destruct.\n");
			nvme_ctrlr_destruct(ctrlr);
			return;
		}
		detach_ctx->ctrlr = ctrlr;
		TAILQ_INSERT_TAIL(&probe_ctx->failed_ctxs.head, detach_ctx, link);
		nvme_ctrlr_destruct_async(ctrlr, detach_ctx);
		return;
	}

	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
		return;
	}

	STAILQ_INIT(&ctrlr->io_producers);

	/*
	 * Controller has been initialized.
	 * Move it to the attached_ctrlrs list.
	 */
	TAILQ_REMOVE(&probe_ctx->init_ctrlrs, ctrlr, tailq);

	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
	if (nvme_ctrlr_shared(ctrlr)) {
		TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
	} else {
		TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, ctrlr, tailq);
	}

	/*
	 * Increase the ref count before calling attach_cb() as the user may
	 * call nvme_detach() immediately.
	 */
	nvme_ctrlr_proc_get_ref(ctrlr);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

static int
nvme_init_controllers(struct spdk_nvme_probe_ctx *probe_ctx)
{
	int rc = 0;

	while (true) {
		rc = spdk_nvme_probe_poll_async(probe_ctx);
		if (rc != -EAGAIN) {
			return rc;
		}
	}

	return rc;
}

/* This function must not be called while holding g_spdk_nvme_driver->lock */
static struct spdk_nvme_ctrlr *
nvme_get_ctrlr_by_trid(const struct spdk_nvme_transport_id *trid, const char *hostnqn)
{
	struct spdk_nvme_ctrlr *ctrlr;

	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
	ctrlr = nvme_get_ctrlr_by_trid_unsafe(trid, hostnqn);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	return ctrlr;
}

/* This function must be called while holding g_spdk_nvme_driver->lock */
struct spdk_nvme_ctrlr *
nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid, const char *hostnqn)
{
	struct spdk_nvme_ctrlr *ctrlr;

	/* Search per-process list */
	TAILQ_FOREACH(ctrlr, &g_nvme_attached_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) != 0) {
			continue;
		}
		if (hostnqn && strcmp(ctrlr->opts.hostnqn, hostnqn) != 0) {
			continue;
		}
		return ctrlr;
	}

	/* Search multi-process shared list */
	TAILQ_FOREACH(ctrlr, &g_spdk_nvme_driver->shared_attached_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) != 0) {
			continue;
		}
		if (hostnqn && strcmp(ctrlr->opts.hostnqn, hostnqn) != 0) {
			continue;
		}
		return ctrlr;
	}

	return NULL;
}

/* This function must only be called while holding g_spdk_nvme_driver->lock */
static int
nvme_probe_internal(struct spdk_nvme_probe_ctx *probe_ctx,
		    bool direct_connect)
{
	int rc;
	struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;
	const struct spdk_nvme_ctrlr_opts *opts = probe_ctx->opts;

	if (strlen(probe_ctx->trid.trstring) == 0) {
		/* If user didn't provide trstring, derive it from trtype */
		spdk_nvme_trid_populate_transport(&probe_ctx->trid, probe_ctx->trid.trtype);
	}

	if (!spdk_nvme_transport_available_by_name(probe_ctx->trid.trstring)) {
		SPDK_ERRLOG("NVMe trtype %u (%s) not available\n",
			    probe_ctx->trid.trtype, probe_ctx->trid.trstring);
		return -1;
	}

	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	rc = nvme_transport_ctrlr_scan(probe_ctx, direct_connect);
	if (rc != 0) {
		SPDK_ERRLOG("NVMe ctrlr scan failed\n");
		TAILQ_FOREACH_SAFE(ctrlr, &probe_ctx->init_ctrlrs, tailq, ctrlr_tmp) {
			TAILQ_REMOVE(&probe_ctx->init_ctrlrs, ctrlr, tailq);
			probe_ctx->attach_fail_cb(probe_ctx->cb_ctx, &ctrlr->trid, -EFAULT);
			nvme_transport_ctrlr_destruct(ctrlr);
		}
		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		return -1;
	}

	/*
	 * Probe controllers on the shared_attached_ctrlrs list
	 */
	if (!spdk_process_is_primary() && (probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE)) {
		TAILQ_FOREACH(ctrlr, &g_spdk_nvme_driver->shared_attached_ctrlrs, tailq) {
			/* Do not attach other ctrlrs if user specifies a valid trid */
			if ((strlen(probe_ctx->trid.traddr) != 0) &&
			    (spdk_nvme_transport_id_compare(&probe_ctx->trid, &ctrlr->trid))) {
				continue;
			}

			if (opts && strcmp(opts->hostnqn, ctrlr->opts.hostnqn) != 0) {
				continue;
			}

			/* Do not attach if we failed to initialize it in this process */
			if (nvme_ctrlr_get_current_process(ctrlr) == NULL) {
				continue;
			}

			nvme_ctrlr_proc_get_ref(ctrlr);

			/*
			 * Unlock while calling attach_cb() so the user can call other functions
			 * that may take the driver lock, like nvme_detach().
			 */
			if (probe_ctx->attach_cb) {
				nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
				probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
				nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
			}
		}
	}

	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	return 0;
}

static void
nvme_dummy_attach_fail_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
			  int rc)
{
	SPDK_ERRLOG("Failed to attach nvme ctrlr: trtype=%s adrfam=%s traddr=%s trsvcid=%s "
		    "subnqn=%s, %s\n", spdk_nvme_transport_id_trtype_str(trid->trtype),
		    spdk_nvme_transport_id_adrfam_str(trid->adrfam), trid->traddr, trid->trsvcid,
		    trid->subnqn, spdk_strerror(-rc));
}

static void
nvme_probe_ctx_init(struct spdk_nvme_probe_ctx *probe_ctx,
		    const struct spdk_nvme_transport_id *trid,
		    const struct spdk_nvme_ctrlr_opts *opts,
		    void *cb_ctx,
		    spdk_nvme_probe_cb probe_cb,
		    spdk_nvme_attach_cb attach_cb,
		    spdk_nvme_attach_fail_cb attach_fail_cb,
		    spdk_nvme_remove_cb remove_cb)
{
	probe_ctx->trid = *trid;
	probe_ctx->opts = opts;
	probe_ctx->cb_ctx = cb_ctx;
	probe_ctx->probe_cb = probe_cb;
	probe_ctx->attach_cb = attach_cb;
	if (attach_fail_cb != NULL) {
		probe_ctx->attach_fail_cb = attach_fail_cb;
	} else {
		probe_ctx->attach_fail_cb = nvme_dummy_attach_fail_cb;
	}
	probe_ctx->remove_cb = remove_cb;
	TAILQ_INIT(&probe_ctx->init_ctrlrs);
	TAILQ_INIT(&probe_ctx->failed_ctxs.head);
}

int
spdk_nvme_probe(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
		spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
		spdk_nvme_remove_cb remove_cb)
{
	return spdk_nvme_probe_ext(trid, cb_ctx, probe_cb, attach_cb, NULL, remove_cb);
}
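
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * enumerating local PCIe controllers with this API. "example_probe_cb" is the
 * hypothetical callback sketched earlier; passing NULL for trid defaults to
 * PCIe, as spdk_nvme_probe_ext() below shows:
 *
 *	static void
 *	example_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 *			  struct spdk_nvme_ctrlr *ctrlr,
 *			  const struct spdk_nvme_ctrlr_opts *opts)
 *	{
 *		printf("attached %s\n", trid->traddr);
 *	}
 *
 *	if (spdk_nvme_probe(NULL, NULL, example_probe_cb, example_attach_cb, NULL) != 0) {
 *		fprintf(stderr, "probe failed\n");
 *	}
 */
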
int
spdk_nvme_probe_ext(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
		    spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
		    spdk_nvme_attach_fail_cb attach_fail_cb, spdk_nvme_remove_cb remove_cb)
{
	struct spdk_nvme_transport_id trid_pcie;
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		memset(&trid_pcie, 0, sizeof(trid_pcie));
		spdk_nvme_trid_populate_transport(&trid_pcie, SPDK_NVME_TRANSPORT_PCIE);
		trid = &trid_pcie;
	}

	probe_ctx = spdk_nvme_probe_async_ext(trid, cb_ctx, probe_cb,
					      attach_cb, attach_fail_cb, remove_cb);
	if (!probe_ctx) {
		SPDK_ERRLOG("Create probe context failed\n");
		return -1;
	}

	/*
	 * Keep going even if one or more nvme_attach() calls failed,
	 * but maintain the value of rc to signal errors when we return.
	 */
	return nvme_init_controllers(probe_ctx);
}

static bool
nvme_connect_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
		      struct spdk_nvme_ctrlr_opts *opts)
{
	struct spdk_nvme_ctrlr_opts *requested_opts = cb_ctx;

	assert(requested_opts);
	memcpy(opts, requested_opts, sizeof(*opts));

	return true;
}
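
/*
 * Illustrative note (not part of the original file): the copier below only
 * applies fields that fall inside the caller-declared opts_size, which is how
 * callers built against an older, smaller struct spdk_nvme_ctrlr_opts stay
 * ABI compatible. For example, a hypothetical caller whose struct definition
 * ended at io_queue_size would have every later field (hostnqn, src_addr, ...)
 * left at the defaults filled in by spdk_nvme_ctrlr_get_default_ctrlr_opts().
 */
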
static void
nvme_ctrlr_opts_init(struct spdk_nvme_ctrlr_opts *opts,
		     const struct spdk_nvme_ctrlr_opts *opts_user,
		     size_t opts_size_user)
{
	assert(opts);
	assert(opts_user);

	spdk_nvme_ctrlr_get_default_ctrlr_opts(opts, opts_size_user);

#define FIELD_OK(field) \
	offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= (opts->opts_size)

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		opts->field = opts_user->field; \
	}

#define SET_FIELD_ARRAY(field) \
	if (FIELD_OK(field)) { \
		memcpy(opts->field, opts_user->field, sizeof(opts_user->field)); \
	}

	SET_FIELD(num_io_queues);
	SET_FIELD(use_cmb_sqs);
	SET_FIELD(no_shn_notification);
	SET_FIELD(enable_interrupts);
	SET_FIELD(arb_mechanism);
	SET_FIELD(arbitration_burst);
	SET_FIELD(low_priority_weight);
	SET_FIELD(medium_priority_weight);
	SET_FIELD(high_priority_weight);
	SET_FIELD(keep_alive_timeout_ms);
	SET_FIELD(transport_retry_count);
	SET_FIELD(io_queue_size);
	SET_FIELD_ARRAY(hostnqn);
	SET_FIELD(io_queue_requests);
	SET_FIELD_ARRAY(src_addr);
	SET_FIELD_ARRAY(src_svcid);
	SET_FIELD_ARRAY(host_id);
	SET_FIELD_ARRAY(extended_host_id);
	SET_FIELD(command_set);
	SET_FIELD(admin_timeout_ms);
	SET_FIELD(header_digest);
	SET_FIELD(data_digest);
	SET_FIELD(disable_error_logging);
	SET_FIELD(transport_ack_timeout);
	SET_FIELD(admin_queue_size);
	SET_FIELD(fabrics_connect_timeout_us);
	SET_FIELD(disable_read_ana_log_page);
	SET_FIELD(disable_read_changed_ns_list_log_page);
	SET_FIELD(tls_psk);
	SET_FIELD(dhchap_key);
	SET_FIELD(dhchap_ctrlr_key);
	SET_FIELD(dhchap_digests);
	SET_FIELD(dhchap_dhgroups);

#undef FIELD_OK
#undef SET_FIELD
#undef SET_FIELD_ARRAY
}

struct spdk_nvme_ctrlr *
spdk_nvme_connect(const struct spdk_nvme_transport_id *trid,
		  const struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	int rc;
	struct spdk_nvme_ctrlr *ctrlr = NULL;
	struct spdk_nvme_probe_ctx *probe_ctx;
	struct spdk_nvme_ctrlr_opts *opts_local_p = NULL;
	struct spdk_nvme_ctrlr_opts opts_local;
	char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	if (trid == NULL) {
		SPDK_ERRLOG("No transport ID specified\n");
		return NULL;
	}

	rc = nvme_driver_init();
	if (rc != 0) {
		return NULL;
	}

	nvme_get_default_hostnqn(hostnqn, sizeof(hostnqn));
	if (opts) {
		opts_local_p = &opts_local;
		nvme_ctrlr_opts_init(opts_local_p, opts, opts_size);
		memcpy(hostnqn, opts_local.hostnqn, sizeof(hostnqn));
	}

	probe_ctx = spdk_nvme_connect_async(trid, opts_local_p, NULL);
	if (!probe_ctx) {
		SPDK_ERRLOG("Create probe context failed\n");
		return NULL;
	}

	rc = nvme_init_controllers(probe_ctx);
	if (rc != 0) {
		return NULL;
	}

	ctrlr = nvme_get_ctrlr_by_trid(trid, hostnqn);

	return ctrlr;
}
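
/*
 * Illustrative sketch (not part of the original file): directly connecting one
 * NVMe-oF TCP controller with spdk_nvme_connect(); the address values are
 * hypothetical:
 *
 *	struct spdk_nvme_transport_id trid = {};
 *	struct spdk_nvme_ctrlr_opts opts;
 *	struct spdk_nvme_ctrlr *ctrlr;
 *
 *	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *	spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_TCP);
 *	snprintf(trid.traddr, sizeof(trid.traddr), "192.168.0.10");
 *	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "4420");
 *	snprintf(trid.subnqn, sizeof(trid.subnqn), "nqn.2016-06.io.spdk:cnode1");
 *	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *
 *	ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
 *	if (ctrlr == NULL) {
 *		fprintf(stderr, "connect failed\n");
 *	}
 */
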
void
spdk_nvme_trid_populate_transport(struct spdk_nvme_transport_id *trid,
				  enum spdk_nvme_transport_type trtype)
{
	const char *trstring;

	trid->trtype = trtype;
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_FC:
		trstring = SPDK_NVME_TRANSPORT_NAME_FC;
		break;
	case SPDK_NVME_TRANSPORT_PCIE:
		trstring = SPDK_NVME_TRANSPORT_NAME_PCIE;
		break;
	case SPDK_NVME_TRANSPORT_RDMA:
		trstring = SPDK_NVME_TRANSPORT_NAME_RDMA;
		break;
	case SPDK_NVME_TRANSPORT_TCP:
		trstring = SPDK_NVME_TRANSPORT_NAME_TCP;
		break;
	case SPDK_NVME_TRANSPORT_VFIOUSER:
		trstring = SPDK_NVME_TRANSPORT_NAME_VFIOUSER;
		break;
	case SPDK_NVME_TRANSPORT_CUSTOM:
		trstring = SPDK_NVME_TRANSPORT_NAME_CUSTOM;
		break;
	default:
		SPDK_ERRLOG("no available transports\n");
		assert(0);
		return;
	}
	snprintf(trid->trstring, SPDK_NVMF_TRSTRING_MAX_LEN, "%s", trstring);
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int i = 0;

	if (trid == NULL || trstring == NULL) {
		return -EINVAL;
	}

	/* Note: gcc-11 has some false positive -Wstringop-overread warnings with LTO builds if we
	 * use strnlen here.  So do the trstring copy manually instead.  See GitHub issue #2391.
	 */

	/* Copy the input to the official trstring, converting it to uppercase. */
	while (i < SPDK_NVMF_TRSTRING_MAX_LEN && trstring[i] != 0) {
		trid->trstring[i] = toupper(trstring[i]);
		i++;
	}

	if (trstring[i] != 0) {
		return -EINVAL;
	} else {
		trid->trstring[i] = 0;
		return 0;
	}
}

int
spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
{
	if (trtype == NULL || str == NULL) {
		return -EINVAL;
	}

	if (strcasecmp(str, "PCIe") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_PCIE;
	} else if (strcasecmp(str, "RDMA") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_RDMA;
	} else if (strcasecmp(str, "FC") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_FC;
	} else if (strcasecmp(str, "TCP") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_TCP;
	} else if (strcasecmp(str, "VFIOUSER") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_VFIOUSER;
	} else {
		*trtype = SPDK_NVME_TRANSPORT_CUSTOM;
	}
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	case SPDK_NVME_TRANSPORT_TCP:
		return "TCP";
	case SPDK_NVME_TRANSPORT_VFIOUSER:
		return "VFIOUSER";
	case SPDK_NVME_TRANSPORT_CUSTOM:
		return "CUSTOM";
	default:
		return NULL;
	}
}
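/*
 * Illustrative sketch: the parse/str pair round-trips the known transport
 * names case-insensitively, and any unrecognized name falls back to
 * SPDK_NVME_TRANSPORT_CUSTOM instead of failing.
 *
 *	enum spdk_nvme_transport_type trtype;
 *
 *	spdk_nvme_transport_id_parse_trtype(&trtype, "tcp");
 *	(spdk_nvme_transport_id_trtype_str(trtype) returns "TCP")
 */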
int
spdk_nvme_transport_id_parse_adrfam(enum spdk_nvmf_adrfam *adrfam, const char *str)
{
	if (adrfam == NULL || str == NULL) {
		return -EINVAL;
	}

	if (strcasecmp(str, "IPv4") == 0) {
		*adrfam = SPDK_NVMF_ADRFAM_IPV4;
	} else if (strcasecmp(str, "IPv6") == 0) {
		*adrfam = SPDK_NVMF_ADRFAM_IPV6;
	} else if (strcasecmp(str, "IB") == 0) {
		*adrfam = SPDK_NVMF_ADRFAM_IB;
	} else if (strcasecmp(str, "FC") == 0) {
		*adrfam = SPDK_NVMF_ADRFAM_FC;
	} else {
		return -ENOENT;
	}
	return 0;
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
	switch (adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		return "IPv4";
	case SPDK_NVMF_ADRFAM_IPV6:
		return "IPv6";
	case SPDK_NVMF_ADRFAM_IB:
		return "IB";
	case SPDK_NVMF_ADRFAM_FC:
		return "FC";
	default:
		return NULL;
	}
}

static size_t
parse_next_key(const char **str, char *key, char *val, size_t key_buf_size, size_t val_buf_size)
{
	const char *sep, *sep1;
	const char *whitespace = " \t\n";
	size_t key_len, val_len;

	*str += strspn(*str, whitespace);

	sep = strchr(*str, ':');
	if (!sep) {
		sep = strchr(*str, '=');
		if (!sep) {
			SPDK_ERRLOG("Key without ':' or '=' separator\n");
			return 0;
		}
	} else {
		sep1 = strchr(*str, '=');
		if ((sep1 != NULL) && (sep1 < sep)) {
			sep = sep1;
		}
	}

	key_len = sep - *str;
	if (key_len >= key_buf_size) {
		SPDK_ERRLOG("Key length %zu greater than maximum allowed %zu\n",
			    key_len, key_buf_size - 1);
		return 0;
	}

	memcpy(key, *str, key_len);
	key[key_len] = '\0';

	*str += key_len + 1; /* Skip key and separator */
	val_len = strcspn(*str, whitespace);
	if (val_len == 0) {
		SPDK_ERRLOG("Key without value\n");
		return 0;
	}

	if (val_len >= val_buf_size) {
		SPDK_ERRLOG("Value length %zu greater than maximum allowed %zu\n",
			    val_len, val_buf_size - 1);
		return 0;
	}

	memcpy(val, *str, val_len);
	val[val_len] = '\0';

	*str += val_len;

	return val_len;
}
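/*
 * Illustrative sketch of the grammar parse_next_key() consumes: a
 * whitespace-separated sequence of "key:value" or "key=value" pairs,
 * for example
 *
 *	trtype:TCP adrfam:IPv4 traddr:192.168.100.8 trsvcid:4420
 *
 * Each call extracts one pair into key/val and advances *str past it;
 * a return value of 0 signals a malformed pair.
 */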
int
spdk_nvme_transport_id_parse(struct spdk_nvme_transport_id *trid, const char *str)
{
	size_t val_len;
	char key[32];
	char val[1024];

	if (trid == NULL || str == NULL) {
		return -EINVAL;
	}

	while (*str != '\0') {

		val_len = parse_next_key(&str, key, val, sizeof(key), sizeof(val));

		if (val_len == 0) {
			SPDK_ERRLOG("Failed to parse transport ID\n");
			return -EINVAL;
		}

		if (strcasecmp(key, "trtype") == 0) {
			if (spdk_nvme_transport_id_populate_trstring(trid, val) != 0) {
				SPDK_ERRLOG("invalid transport '%s'\n", val);
				return -EINVAL;
			}
			if (spdk_nvme_transport_id_parse_trtype(&trid->trtype, val) != 0) {
				SPDK_ERRLOG("Unknown trtype '%s'\n", val);
				return -EINVAL;
			}
		} else if (strcasecmp(key, "adrfam") == 0) {
			if (spdk_nvme_transport_id_parse_adrfam(&trid->adrfam, val) != 0) {
				SPDK_ERRLOG("Unknown adrfam '%s'\n", val);
				return -EINVAL;
			}
		} else if (strcasecmp(key, "traddr") == 0) {
			if (val_len > SPDK_NVMF_TRADDR_MAX_LEN) {
				SPDK_ERRLOG("traddr length %zu greater than maximum allowed %u\n",
					    val_len, SPDK_NVMF_TRADDR_MAX_LEN);
				return -EINVAL;
			}
			memcpy(trid->traddr, val, val_len + 1);
		} else if (strcasecmp(key, "trsvcid") == 0) {
			if (val_len > SPDK_NVMF_TRSVCID_MAX_LEN) {
				SPDK_ERRLOG("trsvcid length %zu greater than maximum allowed %u\n",
					    val_len, SPDK_NVMF_TRSVCID_MAX_LEN);
				return -EINVAL;
			}
			memcpy(trid->trsvcid, val, val_len + 1);
		} else if (strcasecmp(key, "priority") == 0) {
			if (val_len > SPDK_NVMF_PRIORITY_MAX_LEN) {
				SPDK_ERRLOG("priority length %zu greater than maximum allowed %u\n",
					    val_len, SPDK_NVMF_PRIORITY_MAX_LEN);
				return -EINVAL;
			}
			trid->priority = spdk_strtol(val, 10);
		} else if (strcasecmp(key, "subnqn") == 0) {
			if (val_len > SPDK_NVMF_NQN_MAX_LEN) {
				SPDK_ERRLOG("subnqn length %zu greater than maximum allowed %u\n",
					    val_len, SPDK_NVMF_NQN_MAX_LEN);
				return -EINVAL;
			}
			memcpy(trid->subnqn, val, val_len + 1);
		} else if (strcasecmp(key, "hostaddr") == 0) {
			continue;
		} else if (strcasecmp(key, "hostsvcid") == 0) {
			continue;
		} else if (strcasecmp(key, "hostnqn") == 0) {
			continue;
		} else if (strcasecmp(key, "ns") == 0) {
			/*
			 * Special case.  The namespace id parameter may
			 * optionally be passed in the transport id string
			 * for an SPDK application (e.g. spdk_nvme_perf)
			 * and additionally parsed therein to limit
			 * targeting a specific namespace.  For this
			 * scenario, just silently ignore this key
			 * rather than letting it default to logging
			 * it as an invalid key.
			 */
			continue;
		} else if (strcasecmp(key, "alt_traddr") == 0) {
			/*
			 * Used by applications for enabling transport ID failover.
			 * Please see the case above for more information on custom parameters.
			 */
			continue;
		} else {
			SPDK_ERRLOG("Unknown transport ID key '%s'\n", key);
		}
	}

	return 0;
}
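/*
 * Illustrative sketch: parsing a fabrics transport ID.  The address and
 * subsystem NQN are assumed example values.
 *
 *	struct spdk_nvme_transport_id trid = {};
 *
 *	spdk_nvme_transport_id_parse(&trid,
 *			"trtype:RDMA adrfam:IPv4 traddr:192.168.100.8 "
 *			"trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1");
 */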
int
spdk_nvme_host_id_parse(struct spdk_nvme_host_id *hostid, const char *str)
{
	size_t key_size = 32;
	size_t val_size = 1024;
	size_t val_len;
	char key[key_size];
	char val[val_size];

	if (hostid == NULL || str == NULL) {
		return -EINVAL;
	}

	while (*str != '\0') {

		val_len = parse_next_key(&str, key, val, key_size, val_size);

		if (val_len == 0) {
			SPDK_ERRLOG("Failed to parse host ID\n");
			return -EINVAL;
		}

		/* Ignore the rest of the options from the transport ID. */
		if (strcasecmp(key, "trtype") == 0) {
			continue;
		} else if (strcasecmp(key, "adrfam") == 0) {
			continue;
		} else if (strcasecmp(key, "traddr") == 0) {
			continue;
		} else if (strcasecmp(key, "trsvcid") == 0) {
			continue;
		} else if (strcasecmp(key, "subnqn") == 0) {
			continue;
		} else if (strcasecmp(key, "priority") == 0) {
			continue;
		} else if (strcasecmp(key, "ns") == 0) {
			continue;
		} else if (strcasecmp(key, "hostaddr") == 0) {
			if (val_len > SPDK_NVMF_TRADDR_MAX_LEN) {
				SPDK_ERRLOG("hostaddr length %zu greater than maximum allowed %u\n",
					    val_len, SPDK_NVMF_TRADDR_MAX_LEN);
				return -EINVAL;
			}
			memcpy(hostid->hostaddr, val, val_len + 1);
		} else if (strcasecmp(key, "hostsvcid") == 0) {
			if (val_len > SPDK_NVMF_TRSVCID_MAX_LEN) {
				SPDK_ERRLOG("hostsvcid length %zu greater than maximum allowed %u\n",
					    val_len, SPDK_NVMF_TRSVCID_MAX_LEN);
				return -EINVAL;
			}
			memcpy(hostid->hostsvcid, val, val_len + 1);
		} else {
			SPDK_ERRLOG("Unknown host ID key '%s'\n", key);
		}
	}

	return 0;
}
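/*
 * Illustrative sketch: the host ID string uses the same key/value syntax,
 * and transport ID keys are deliberately skipped, so one combined string
 * can be handed to both spdk_nvme_transport_id_parse() and this function.
 *
 *	struct spdk_nvme_host_id hostid = {};
 *
 *	spdk_nvme_host_id_parse(&hostid, "hostaddr:192.168.100.9 hostsvcid:4421");
 */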
static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	if (trid1->trtype == SPDK_NVME_TRANSPORT_CUSTOM) {
		cmp = strcasecmp(trid1->trstring, trid2->trstring);
	} else {
		cmp = cmp_int(trid1->trtype, trid2->trtype);
	}

	if (cmp) {
		return cmp;
	}

	if (trid1->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		struct spdk_pci_addr pci_addr1 = {};
		struct spdk_pci_addr pci_addr2 = {};

		/* Normalize PCI addresses before comparing */
		if (spdk_pci_addr_parse(&pci_addr1, trid1->traddr) < 0 ||
		    spdk_pci_addr_parse(&pci_addr2, trid2->traddr) < 0) {
			return -1;
		}

		/* PCIe transport ID only uses trtype and traddr */
		return spdk_pci_addr_compare(&pci_addr1, &pci_addr2);
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}
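/*
 * Illustrative sketch: transport IDs compare with strcmp()-like semantics,
 * so the result can be used for ordering as well as equality testing.
 * Because PCIe addresses are normalized before comparison, spellings such
 * as "0000:04:00.0" and "04:00.0" are expected to compare equal (assuming
 * both parse successfully).
 *
 *	if (spdk_nvme_transport_id_compare(&trid1, &trid2) == 0) {
 *		(both IDs refer to the same controller)
 *	}
 */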
int
spdk_nvme_prchk_flags_parse(uint32_t *prchk_flags, const char *str)
{
	size_t val_len;
	char key[32];
	char val[1024];

	if (prchk_flags == NULL || str == NULL) {
		return -EINVAL;
	}

	while (*str != '\0') {
		val_len = parse_next_key(&str, key, val, sizeof(key), sizeof(val));

		if (val_len == 0) {
			SPDK_ERRLOG("Failed to parse prchk\n");
			return -EINVAL;
		}

		if (strcasecmp(key, "prchk") == 0) {
			if (strcasestr(val, "reftag") != NULL) {
				*prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
			}
			if (strcasestr(val, "guard") != NULL) {
				*prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
			}
		} else {
			SPDK_ERRLOG("Unknown key '%s'\n", key);
			return -EINVAL;
		}
	}

	return 0;
}

const char *
spdk_nvme_prchk_flags_str(uint32_t prchk_flags)
{
	if (prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG) {
		if (prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD) {
			return "prchk:reftag|guard";
		} else {
			return "prchk:reftag";
		}
	} else {
		if (prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD) {
			return "prchk:guard";
		} else {
			return NULL;
		}
	}
}
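/*
 * Illustrative sketch: the protection-check flags round-trip through the
 * parse/str pair.
 *
 *	uint32_t flags = 0;
 *
 *	spdk_nvme_prchk_flags_parse(&flags, "prchk:reftag|guard");
 *	(flags now has both PRCHK bits set, and spdk_nvme_prchk_flags_str(flags)
 *	returns "prchk:reftag|guard")
 */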
int
spdk_nvme_scan_attached(const struct spdk_nvme_transport_id *trid)
{
	int rc;
	struct spdk_nvme_probe_ctx *probe_ctx;

	rc = nvme_driver_init();
	if (rc != 0) {
		return rc;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (!probe_ctx) {
		return -ENOMEM;
	}

	nvme_probe_ctx_init(probe_ctx, trid, NULL, NULL, NULL, NULL, NULL, NULL);

	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
	rc = nvme_transport_ctrlr_scan_attached(probe_ctx);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
	free(probe_ctx);

	return rc < 0 ? rc : 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_probe_async(const struct spdk_nvme_transport_id *trid,
		      void *cb_ctx,
		      spdk_nvme_probe_cb probe_cb,
		      spdk_nvme_attach_cb attach_cb,
		      spdk_nvme_remove_cb remove_cb)
{
	return spdk_nvme_probe_async_ext(trid, cb_ctx, probe_cb, attach_cb, NULL, remove_cb);
}

struct spdk_nvme_probe_ctx *
spdk_nvme_probe_async_ext(const struct spdk_nvme_transport_id *trid,
			  void *cb_ctx,
			  spdk_nvme_probe_cb probe_cb,
			  spdk_nvme_attach_cb attach_cb,
			  spdk_nvme_attach_fail_cb attach_fail_cb,
			  spdk_nvme_remove_cb remove_cb)
{
	int rc;
	struct spdk_nvme_probe_ctx *probe_ctx;

	rc = nvme_driver_init();
	if (rc != 0) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (!probe_ctx) {
		return NULL;
	}

	nvme_probe_ctx_init(probe_ctx, trid, NULL, cb_ctx, probe_cb, attach_cb, attach_fail_cb,
			    remove_cb);
	rc = nvme_probe_internal(probe_ctx, false);
	if (rc != 0) {
		free(probe_ctx);
		return NULL;
	}

	return probe_ctx;
}
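/*
 * Illustrative sketch: asynchronous probe of all PCIe controllers,
 * driven to completion with spdk_nvme_probe_poll_async() (defined below).
 * my_probe_cb and my_attach_cb are assumed application callbacks.
 *
 *	struct spdk_nvme_transport_id trid = {};
 *	struct spdk_nvme_probe_ctx *probe_ctx;
 *
 *	spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);
 *	probe_ctx = spdk_nvme_probe_async(&trid, NULL, my_probe_cb, my_attach_cb, NULL);
 *	if (probe_ctx != NULL) {
 *		while (spdk_nvme_probe_poll_async(probe_ctx) == -EAGAIN) {
 *			(do other work between polls)
 *		}
 *	}
 */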
int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;
	struct nvme_ctrlr_detach_ctx *detach_ctx, *detach_ctx_tmp;
	int rc;

	if (!spdk_process_is_primary() && probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		free(probe_ctx);
		return 0;
	}

	TAILQ_FOREACH_SAFE(ctrlr, &probe_ctx->init_ctrlrs, tailq, ctrlr_tmp) {
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	/* Poll the destruction of any controllers that failed to initialize. */
	TAILQ_FOREACH_SAFE(detach_ctx, &probe_ctx->failed_ctxs.head, link, detach_ctx_tmp) {
		rc = nvme_ctrlr_destruct_poll_async(detach_ctx->ctrlr, detach_ctx);
		if (rc == -EAGAIN) {
			continue;
		}

		if (rc != 0) {
			SPDK_ERRLOG("Failure while polling the controller destruction (rc = %d)\n", rc);
		}

		TAILQ_REMOVE(&probe_ctx->failed_ctxs.head, detach_ctx, link);
		free(detach_ctx);
	}

	if (TAILQ_EMPTY(&probe_ctx->init_ctrlrs) && TAILQ_EMPTY(&probe_ctx->failed_ctxs.head)) {
		nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
		g_spdk_nvme_driver->initialized = true;
		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		free(probe_ctx);
		return 0;
	}

	return -EAGAIN;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	int rc;
	spdk_nvme_probe_cb probe_cb = NULL;
	struct spdk_nvme_probe_ctx *probe_ctx;

	rc = nvme_driver_init();
	if (rc != 0) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (!probe_ctx) {
		return NULL;
	}

	if (opts) {
		probe_cb = nvme_connect_probe_cb;
	}

	nvme_probe_ctx_init(probe_ctx, trid, opts, (void *)opts, probe_cb, attach_cb, NULL, NULL);
	rc = nvme_probe_internal(probe_ctx, true);
	if (rc != 0) {
		free(probe_ctx);
		return NULL;
	}

	return probe_ctx;
}
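/*
 * Illustrative sketch: spdk_nvme_connect_async() plus
 * spdk_nvme_probe_poll_async() form the non-blocking counterpart of
 * spdk_nvme_connect().  my_attach_cb is an assumed application callback.
 *
 *	probe_ctx = spdk_nvme_connect_async(&trid, &opts, my_attach_cb);
 *	while (probe_ctx != NULL &&
 *	       spdk_nvme_probe_poll_async(probe_ctx) == -EAGAIN) {
 *		(poll again later)
 *	}
 *
 * Note that spdk_nvme_probe_poll_async() frees probe_ctx on any return
 * value other than -EAGAIN.
 */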
int
nvme_parse_addr(struct sockaddr_storage *sa, int family, const char *addr, const char *service,
		long int *port)
{
	struct addrinfo *res;
	struct addrinfo hints;
	int ret;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = family;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = 0;

	if (service != NULL) {
		*port = spdk_strtol(service, 10);
		if (*port <= 0 || *port >= 65536) {
			SPDK_ERRLOG("Invalid port: %s\n", service);
			return -EINVAL;
		}
	}

	ret = getaddrinfo(addr, service, &hints, &res);
	if (ret) {
		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(ret), ret);
		return -(abs(ret));
	}

	if (res->ai_addrlen > sizeof(*sa)) {
		SPDK_ERRLOG("getaddrinfo() ai_addrlen %zu too large\n", (size_t)res->ai_addrlen);
		ret = -EINVAL;
	} else {
		memcpy(sa, res->ai_addr, res->ai_addrlen);
	}

	freeaddrinfo(res);
	return ret;
}

int
nvme_get_default_hostnqn(char *buf, int len)
{
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &g_spdk_nvme_driver->default_extended_host_id);
	rc = snprintf(buf, len, "nqn.2014-08.org.nvmexpress:uuid:%s", uuid);
	if (rc < 0 || rc >= len) {
		return -EINVAL;
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(nvme)