/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "bdev_nvme.h"

#include "spdk/config.h"

#include "spdk/string.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/env.h"
#include "spdk/nvme.h"
#include "spdk/nvme_spec.h"

#include "spdk/log.h"
#include "spdk/bdev_module.h"

struct open_descriptors {
	void *desc;
	struct spdk_bdev *bdev;
	TAILQ_ENTRY(open_descriptors) tqlst;
	struct spdk_thread *thread;
};
typedef TAILQ_HEAD(, open_descriptors) open_descriptors_t;

static int
rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out)
{
	enum spdk_bdev_timeout_action *action = out;

	if (spdk_json_strequal(val, "none") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE;
	} else if (spdk_json_strequal(val, "abort") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT;
	} else if (spdk_json_strequal(val, "reset") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET;
	} else {
		SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n");
		return -EINVAL;
	}

	return 0;
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = {
	{"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true},
	{"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true},
	{"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true},
	{"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true},
	{"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true},
	{"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true},
	{"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true},
	{"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true},
	{"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true},
	{"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true},
	{"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true},
	{"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true},
	{"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true},
	{"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true},
	{"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true},
};

static void
rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request,
			  const struct spdk_json_val *params)
{
	struct spdk_bdev_nvme_opts opts;
	int rc;

	bdev_nvme_get_opts(&opts);
	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders,
					      SPDK_COUNTOF(rpc_bdev_nvme_options_decoders),
					      &opts)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		return;
	}

	rc = bdev_nvme_set_opts(&opts);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		return;
	}

	spdk_jsonrpc_send_bool_response(request, true);

	return;
}
SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options,
		  SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_options, set_bdev_nvme_options)

struct rpc_bdev_nvme_hotplug {
	bool enabled;
	uint64_t period_us;
};

static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = {
	{"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false},
	{"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true},
};

static void
rpc_bdev_nvme_set_hotplug_done(void *ctx)
{
	struct spdk_jsonrpc_request *request = ctx;

	spdk_jsonrpc_send_bool_response(request, true);
}

static void
rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request,
			  const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_hotplug req = {false, 0};
	int rc;

	if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		rc = -EINVAL;
		goto invalid;
	}

	rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done,
				   request);
	if (rc) {
		goto invalid;
	}

	return;
invalid:
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc));
}
SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_hotplug, set_bdev_nvme_hotplug)

struct rpc_bdev_nvme_attach_controller {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *priority;
	char *subnqn;
	char *hostnqn;
	char *hostaddr;
	char *hostsvcid;
	bool prchk_reftag;
	bool prchk_guard;
	uint64_t fabrics_connect_timeout_us;
	char *multipath;
	int32_t ctrlr_loss_timeout_sec;
	uint32_t reconnect_delay_sec;
	uint32_t fast_io_fail_timeout_sec;
	struct spdk_nvme_ctrlr_opts opts;
};

static void
free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->priority);
	free(req->subnqn);
	free(req->hostnqn);
	free(req->hostaddr);
	free(req->hostsvcid);
	free(req->multipath);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string},
	{"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string},

	{"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true},
	{"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true},
	{"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true},
	{"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true},
	{"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true},
	{"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true},

	{"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_reftag), spdk_json_decode_bool, true},
	{"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_guard), spdk_json_decode_bool, true},
	{"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, opts.header_digest), spdk_json_decode_bool, true},
	{"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, opts.data_digest), spdk_json_decode_bool, true},
	{"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true},
	{"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), spdk_json_decode_string, true},
	{"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, opts.num_io_queues), spdk_json_decode_uint32, true},
	{"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true},
	{"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, reconnect_delay_sec), spdk_json_decode_uint32, true},
	{"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true},
};

#define NVME_MAX_BDEVS_PER_RPC 128

struct rpc_bdev_nvme_attach_controller_ctx {
	struct rpc_bdev_nvme_attach_controller req;
	uint32_t count;
	size_t bdev_count;
	const char *names[NVME_MAX_BDEVS_PER_RPC];
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_attach_controller_examined(void *cb_ctx)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;
	struct spdk_json_write_ctx *w;
	size_t i;

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_array_begin(w);
	for (i = 0; i < ctx->bdev_count; i++) {
		spdk_json_write_string(w, ctx->names[i]);
	}
	spdk_json_write_array_end(w);
	spdk_jsonrpc_end_result(request, w);

	free_rpc_bdev_nvme_attach_controller(&ctx->req);
	free(ctx);
}

static void
rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;

	if (rc < 0) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		free_rpc_bdev_nvme_attach_controller(&ctx->req);
		free(ctx);
		return;
	}

	ctx->bdev_count = bdev_count;
	spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx);
}

static void
rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx;
	struct spdk_nvme_transport_id trid = {};
	const struct spdk_nvme_ctrlr_opts *opts;
	const struct spdk_nvme_transport_id *ctrlr_trid;
	uint32_t prchk_flags = 0;
	struct nvme_ctrlr *ctrlr = NULL;
	size_t len, maxlen;
	bool multipath = false;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts));

	if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	/* Parse trstring */
	rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype);
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
						     ctx->req.trtype);
		goto cleanup;
	}

	/* Parse trtype */
	rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
	assert(rc == 0);

	/* Parse traddr */
	maxlen = sizeof(trid.traddr);
	len = strnlen(ctx->req.traddr, maxlen);
	if (len == maxlen) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
						     ctx->req.traddr);
		goto cleanup;
	}
	memcpy(trid.traddr, ctx->req.traddr, len + 1);

	/* Parse adrfam */
	if (ctx->req.adrfam) {
		rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     ctx->req.adrfam);
			goto cleanup;
		}
	}

	/* Parse trsvcid */
	if (ctx->req.trsvcid) {
		maxlen = sizeof(trid.trsvcid);
		len = strnlen(ctx->req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     ctx->req.trsvcid);
			goto cleanup;
		}
		memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1);
	}

	/* Parse priority for the NVMe-oF transport connection */
	if (ctx->req.priority) {
		trid.priority = spdk_strtol(ctx->req.priority, 10);
	}

	/* Parse subnqn */
	if (ctx->req.subnqn) {
		maxlen = sizeof(trid.subnqn);
		len = strnlen(ctx->req.subnqn, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
							     ctx->req.subnqn);
			goto cleanup;
		}
		memcpy(trid.subnqn, ctx->req.subnqn, len + 1);
	}

	if (ctx->req.hostnqn) {
		snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s",
			 ctx->req.hostnqn);
	}

	if (ctx->req.hostaddr) {
		maxlen = sizeof(ctx->req.opts.src_addr);
		len = strnlen(ctx->req.hostaddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s",
							     ctx->req.hostaddr);
			goto cleanup;
		}
		snprintf(ctx->req.opts.src_addr, maxlen, "%s", ctx->req.hostaddr);
	}

	if (ctx->req.hostsvcid) {
		maxlen = sizeof(ctx->req.opts.src_svcid);
		len = strnlen(ctx->req.hostsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s",
							     ctx->req.hostsvcid);
			goto cleanup;
		}
		snprintf(ctx->req.opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid);
	}

	ctrlr = nvme_ctrlr_get_by_name(ctx->req.name);

	if (ctrlr) {
		if (ctx->req.multipath == NULL) {
			/* For now, this means add a failover path. This maintains backward compatibility
			 * with past behavior. In the future, this behavior will change to "disable". */
			SPDK_ERRLOG("The multipath parameter was not specified to bdev_nvme_attach_controller but "
				    "it was used to add a failover path. This behavior will default to rejecting "
				    "the request in the future. Specify the 'multipath' parameter to control the behavior.\n");
			ctx->req.multipath = strdup("failover");
			if (ctx->req.multipath == NULL) {
				SPDK_ERRLOG("cannot allocate multipath failover string\n");
				spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
				goto cleanup;
			}
		}

		opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr);
		ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr);

		/* This controller already exists. Check what the user wants to do. */
		if (strcasecmp(ctx->req.multipath, "disable") == 0) {
			/* The user does not want to do any form of multipathing. */
			spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
							     "A controller named %s already exists and multipath is disabled\n",
							     ctx->req.name);
			goto cleanup;
		} else if (strcasecmp(ctx->req.multipath, "failover") == 0 ||
			   strcasecmp(ctx->req.multipath, "multipath") == 0) {
			/* The user wants to add this as a failover path or add this to create multipath. */

			if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 &&
			    strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 &&
			    strncmp(ctx->req.opts.src_addr, opts->src_addr, sizeof(opts->src_addr)) == 0 &&
			    strncmp(ctx->req.opts.src_svcid, opts->src_svcid, sizeof(opts->src_svcid)) == 0) {
				/* The exact same network path can't be added a second time. */
				spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
								     "A controller named %s already exists with the specified network path\n",
								     ctx->req.name);
				goto cleanup;
			}
		} else {
			/* Invalid multipath option */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "Invalid multipath parameter: %s\n",
							     ctx->req.multipath);
			goto cleanup;
		}

		if (strncmp(trid.subnqn,
			    ctrlr_trid->subnqn,
			    SPDK_NVMF_NQN_MAX_LEN) != 0) {
			/* A different SUBNQN is not allowed when specifying the same controller name. */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists, but uses a different subnqn (%s)\n",
							     ctx->req.name, ctrlr_trid->subnqn);
			goto cleanup;
		}

		if (strncmp(ctx->req.opts.hostnqn, opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) {
			/* A different HOSTNQN is not allowed when specifying the same controller name. */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists, but uses a different hostnqn (%s)\n",
							     ctx->req.name, opts->hostnqn);
			goto cleanup;
		}

		if (ctx->req.prchk_guard || ctx->req.prchk_reftag) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists. To add a path, do not specify PI options.\n",
							     ctx->req.name);
			goto cleanup;
		}
	}

	if (ctx->req.prchk_reftag) {
		prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
	}

	if (ctx->req.prchk_guard) {
		prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
	}

	if (ctx->req.multipath != NULL && strcasecmp(ctx->req.multipath, "multipath") == 0) {
		multipath = true;
	}

	if (ctx->req.opts.num_io_queues == 0 || ctx->req.opts.num_io_queues > UINT16_MAX + 1) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
						     "num_io_queues out of bounds, min: %u max: %u\n",
						     1, UINT16_MAX + 1);
		goto cleanup;
	}

	ctx->request = request;
	ctx->count = NVME_MAX_BDEVS_PER_RPC;
	rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count, prchk_flags,
			      rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.opts,
			      multipath, ctx->req.ctrlr_loss_timeout_sec,
			      ctx->req.reconnect_delay_sec, ctx->req.fast_io_fail_timeout_sec);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	return;

cleanup:
	free_rpc_bdev_nvme_attach_controller(&ctx->req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller,
		  SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_bdev)

static void
rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx)
{
	struct spdk_json_write_ctx *w = ctx;
	struct spdk_nvme_transport_id *trid;
	struct nvme_ctrlr *nvme_ctrlr;
	const struct spdk_nvme_ctrlr_opts *opts;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", nbdev_ctrlr->name);

	spdk_json_write_named_array_begin(w, "ctrlrs");
	TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
		spdk_json_write_object_begin(w);
#ifdef SPDK_CONFIG_NVME_CUSE
		size_t cuse_name_size = 128;
		char cuse_name[cuse_name_size];

		int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_ctrlr->ctrlr, cuse_name, &cuse_name_size);
		if (rc == 0) {
			spdk_json_write_named_string(w, "cuse_device", cuse_name);
		}
#endif
		trid = &nvme_ctrlr->active_path_id->trid;
		spdk_json_write_named_object_begin(w, "trid");
		nvme_bdev_dump_trid_json(trid, w);
		spdk_json_write_object_end(w);

		opts = spdk_nvme_ctrlr_get_opts(nvme_ctrlr->ctrlr);
		spdk_json_write_named_object_begin(w, "host");
		spdk_json_write_named_string(w, "nqn", opts->hostnqn);
		spdk_json_write_named_string(w, "addr", opts->src_addr);
		spdk_json_write_named_string(w, "svcid", opts->src_svcid);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);
}

struct rpc_bdev_nvme_get_controllers {
	char *name;
};

static void
free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true},
};

static void
rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request,
			      const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_get_controllers req = {};
	struct spdk_json_write_ctx *w;
	struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL;

	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders,
					      SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	if (req.name) {
		nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name);
		if (nbdev_ctrlr == NULL) {
			SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name);
			goto cleanup;
		}
	}

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_array_begin(w);

	if (nbdev_ctrlr != NULL) {
		rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w);
	} else {
		nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w);
	}

	spdk_json_write_array_end(w);

	spdk_jsonrpc_end_result(request, w);

cleanup:
	free_rpc_bdev_nvme_get_controllers(&req);
}
SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_get_controllers, get_nvme_controllers)

struct rpc_bdev_nvme_detach_controller {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *subnqn;
	char *hostaddr;
	char *hostsvcid;
};

static void
free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->subnqn);
	free(req->hostaddr);
	free(req->hostsvcid);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true},
	{"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true},
	{"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true},
	{"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true},
	{"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true},
	{"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true},
};

static void
rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_detach_controller req = {NULL};
	struct nvme_path_id path = {};
	size_t len, maxlen;
	int rc = 0;

	if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders),
				    &req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	if (req.trtype != NULL) {
		rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
							     req.trtype);
			goto cleanup;
		}

		rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
							     req.trtype);
			goto cleanup;
		}
	}

	if (req.traddr != NULL) {
		maxlen = sizeof(path.trid.traddr);
		len = strnlen(req.traddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
							     req.traddr);
			goto cleanup;
		}
		memcpy(path.trid.traddr, req.traddr, len + 1);
	}

	if (req.adrfam != NULL) {
		rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     req.adrfam);
			goto cleanup;
		}
	}

	if (req.trsvcid != NULL) {
		maxlen = sizeof(path.trid.trsvcid);
		len = strnlen(req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     req.trsvcid);
			goto cleanup;
		}
		memcpy(path.trid.trsvcid, req.trsvcid, len + 1);
	}

	/* Parse subnqn */
	if (req.subnqn != NULL) {
		maxlen = sizeof(path.trid.subnqn);
		len = strnlen(req.subnqn, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
							     req.subnqn);
			goto cleanup;
		}
		memcpy(path.trid.subnqn, req.subnqn, len + 1);
	}

	if (req.hostaddr) {
		maxlen = sizeof(path.hostid.hostaddr);
		len = strnlen(req.hostaddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s",
							     req.hostaddr);
			goto cleanup;
		}
		snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr);
	}

	if (req.hostsvcid) {
		maxlen = sizeof(path.hostid.hostsvcid);
		len = strnlen(req.hostsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s",
							     req.hostsvcid);
			goto cleanup;
		}
		snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid);
	}

	rc = bdev_nvme_delete(req.name, &path);

	if (rc != 0) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	spdk_jsonrpc_send_bool_response(request, true);

cleanup:
	free_rpc_bdev_nvme_detach_controller(&req);
}
SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller,
		  SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_detach_controller, delete_nvme_controller)

struct rpc_apply_firmware {
	char *filename;
	char *bdev_name;
};

static void
free_rpc_apply_firmware(struct rpc_apply_firmware *req)
{
	free(req->filename);
	free(req->bdev_name);
}

static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = {
	{"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string},
	{"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string},
};

struct firmware_update_info {
	void *fw_image;
	void *p;
	unsigned int size;
	unsigned int size_remaining;
	unsigned int offset;
	unsigned int transfer;

	void *desc;
	struct spdk_io_channel *ch;
	struct spdk_jsonrpc_request *request;
	struct spdk_nvme_ctrlr *ctrlr;
	open_descriptors_t desc_head;
	struct rpc_apply_firmware *req;
};

static void
_apply_firmware_cleanup(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

static void
apply_firmware_cleanup(void *cb_arg)
{
	struct open_descriptors *opt, *tmp;
	struct firmware_update_info *firm_ctx = cb_arg;

	if (!firm_ctx) {
		return;
	}

	if (firm_ctx->fw_image) {
		spdk_free(firm_ctx->fw_image);
	}

	if (firm_ctx->req) {
		free_rpc_apply_firmware(firm_ctx->req);
		free(firm_ctx->req);
	}

	if (firm_ctx->ch) {
		spdk_put_io_channel(firm_ctx->ch);
	}

	TAILQ_FOREACH_SAFE(opt, &firm_ctx->desc_head, tqlst, tmp) {
		TAILQ_REMOVE(&firm_ctx->desc_head, opt, tqlst);
		/* Close the underlying bdev on its same opened thread. */
		if (opt->thread && opt->thread != spdk_get_thread()) {
			spdk_thread_send_msg(opt->thread, _apply_firmware_cleanup, opt->desc);
		} else {
			spdk_bdev_close(opt->desc);
		}
		free(opt);
	}
	free(firm_ctx);
}

static void
apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_json_write_ctx *w;
	struct firmware_update_info *firm_ctx = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "firmware commit failed.");
		apply_firmware_cleanup(firm_ctx);
		return;
	}

	if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) {
		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Controller reset failed.");
		apply_firmware_cleanup(firm_ctx);
		return;
	}

	w = spdk_jsonrpc_begin_result(firm_ctx->request);
	spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress.");
	spdk_jsonrpc_end_result(firm_ctx->request, w);
	apply_firmware_cleanup(firm_ctx);
}

static void
apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_fw_commit fw_commit;
	int slot = 0;
	int rc;
	struct firmware_update_info *firm_ctx = cb_arg;
	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;

	if (!success) {
		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "firmware download failed.");
		spdk_bdev_free_io(bdev_io);
		apply_firmware_cleanup(firm_ctx);
		return;
	}

	firm_ctx->p += firm_ctx->transfer;
	firm_ctx->offset += firm_ctx->transfer;
	firm_ctx->size_remaining -= firm_ctx->transfer;

	switch (firm_ctx->size_remaining) {
	case 0:
		/* Firmware download completed. Commit firmware. */
		memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
		fw_commit.fs = slot;
		fw_commit.ca = commit_action;

		cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
		memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t));
		rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0,
						   apply_firmware_complete_reset, firm_ctx);
		if (rc) {
			spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							 "firmware commit failed.");
			spdk_bdev_free_io(bdev_io);
			apply_firmware_cleanup(firm_ctx);
			return;
		}
		break;
	default:
		firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);
		cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;

		cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
		cmd.cdw11 = firm_ctx->offset >> 2;
		rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p,
						   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
		if (rc) {
			spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							 "firmware download failed.");
			spdk_bdev_free_io(bdev_io);
			apply_firmware_cleanup(firm_ctx);
			return;
		}
		break;
	}
}

static void
apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request,
			     const struct spdk_json_val *params)
{
	int rc;
	int fd = -1;
	struct stat fw_stat;
	struct spdk_nvme_ctrlr *ctrlr;
	char msg[1024];
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev2;
	struct open_descriptors *opt;
	struct spdk_bdev_desc *desc;
	struct spdk_nvme_cmd *cmd;
	struct firmware_update_info *firm_ctx;

	firm_ctx = calloc(1, sizeof(struct firmware_update_info));
	if (!firm_ctx) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error.");
		return;
	}
	firm_ctx->fw_image = NULL;
	TAILQ_INIT(&firm_ctx->desc_head);
	firm_ctx->request = request;

	firm_ctx->req = calloc(1, sizeof(struct rpc_apply_firmware));
	if (!firm_ctx->req) {
		snprintf(msg, sizeof(msg), "Memory allocation error.");
		goto err;
	}

	if (spdk_json_decode_object(params, rpc_apply_firmware_decoders,
				    SPDK_COUNTOF(rpc_apply_firmware_decoders), firm_ctx->req)) {
		snprintf(msg, sizeof(msg), "spdk_json_decode_object failed.");
		goto err;
	}

	if ((bdev = spdk_bdev_get_by_name(firm_ctx->req->bdev_name)) == NULL) {
		snprintf(msg, sizeof(msg), "bdev %s was not found", firm_ctx->req->bdev_name);
		goto err;
	}

	if ((ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) {
		snprintf(msg, sizeof(msg), "Controller information for %s was not found.",
			 firm_ctx->req->bdev_name);
		goto err;
	}
	firm_ctx->ctrlr = ctrlr;

	for (bdev2 = spdk_bdev_first(); bdev2; bdev2 = spdk_bdev_next(bdev2)) {

		if (bdev_nvme_get_ctrlr(bdev2) != ctrlr) {
			continue;
		}

		if (!(opt = malloc(sizeof(struct open_descriptors)))) {
			snprintf(msg, sizeof(msg), "Memory allocation error.");
			goto err;
		}

		if (spdk_bdev_open_ext(spdk_bdev_get_name(bdev2), true, apply_firmware_open_cb, NULL, &desc) != 0) {
			snprintf(msg, sizeof(msg), "Device %s is in use.", firm_ctx->req->bdev_name);
			free(opt);
			goto err;
		}

		/* Save the thread where the base device is opened */
		opt->thread = spdk_get_thread();

		opt->desc = desc;
		opt->bdev = bdev;
		TAILQ_INSERT_TAIL(&firm_ctx->desc_head, opt, tqlst);
	}

	/*
	 * Find a descriptor associated with our bdev.
	 */
	firm_ctx->desc = NULL;
	TAILQ_FOREACH(opt, &firm_ctx->desc_head, tqlst) {
		if (opt->bdev == bdev) {
			firm_ctx->desc = opt->desc;
			break;
		}
	}

	if (!firm_ctx->desc) {
		snprintf(msg, sizeof(msg), "No descriptor was found.");
		goto err;
	}

	firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc);
	if (!firm_ctx->ch) {
		snprintf(msg, sizeof(msg), "No channels were found.");
		goto err;
	}

	fd = open(firm_ctx->req->filename, O_RDONLY);
	if (fd < 0) {
		snprintf(msg, sizeof(msg), "open file failed.");
		goto err;
	}

	rc = fstat(fd, &fw_stat);
	if (rc < 0) {
		close(fd);
		snprintf(msg, sizeof(msg), "fstat failed.");
		goto err;
	}

	firm_ctx->size = fw_stat.st_size;
	if (fw_stat.st_size % 4) {
		close(fd);
		snprintf(msg, sizeof(msg), "Firmware image size is not a multiple of 4.");
		goto err;
	}

	firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL,
					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!firm_ctx->fw_image) {
		close(fd);
		snprintf(msg, sizeof(msg), "Memory allocation error.");
		goto err;
	}
	firm_ctx->p = firm_ctx->fw_image;

	if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) {
		close(fd);
		snprintf(msg, sizeof(msg), "Read firmware image failed!");
		goto err;
	}
	close(fd);

	firm_ctx->offset = 0;
	firm_ctx->size_remaining = firm_ctx->size;
	firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);

	cmd = malloc(sizeof(struct spdk_nvme_cmd));
	if (!cmd) {
		snprintf(msg, sizeof(msg), "Memory allocation error.");
		goto err;
	}
	memset(cmd, 0, sizeof(struct spdk_nvme_cmd));
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;

	cmd->cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
	cmd->cdw11 = firm_ctx->offset >> 2;

	rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, cmd, firm_ctx->p,
					   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
	if (rc == 0) {
		/* Normal return here. */
		return;
	}

	free(cmd);
	snprintf(msg, sizeof(msg), "firmware download failed.");
err:
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, msg);
	apply_firmware_cleanup(firm_ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_apply_firmware, apply_nvme_firmware)

struct rpc_bdev_nvme_transport_stat_ctx {
	struct spdk_jsonrpc_request *request;
	struct spdk_json_write_ctx *w;
};

static void
rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w,
			 struct spdk_nvme_transport_poll_group_stat *stat)
{
	struct spdk_nvme_rdma_device_stat *device_stats;
	uint32_t i;

	spdk_json_write_named_array_begin(w, "devices");

	for (i = 0; i < stat->rdma.num_devices; i++) {
		device_stats = &stat->rdma.device_stats[i];
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "dev_name", device_stats->name);
		spdk_json_write_named_uint64(w, "polls", device_stats->polls);
		spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls);
		spdk_json_write_named_uint64(w, "completions", device_stats->completions);
		spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests);
		spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs);
		spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates);
		spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs);
		spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
}

static void
rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w,
			 struct spdk_nvme_transport_poll_group_stat *stat)
{
	spdk_json_write_named_uint64(w, "polls", stat->pcie.polls);
	spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls);
	spdk_json_write_named_uint64(w, "completions", stat->pcie.completions);
	spdk_json_write_named_uint64(w, "cq_doorbell_updates", stat->pcie.cq_doorbell_updates);
	spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests);
	spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests);
	spdk_json_write_named_uint64(w, "sq_doobell_updates", stat->pcie.sq_doobell_updates);
}

static void
rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w,
			struct spdk_nvme_transport_poll_group_stat *stat)
{
	spdk_json_write_named_uint64(w, "polls", stat->tcp.polls);
	spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls);
	spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions);
	spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions);
	spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests);
	spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests);
}

static void
rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx;
	struct spdk_io_channel *ch;
	struct nvme_poll_group *group;
	struct spdk_nvme_poll_group_stat *stat;
	struct spdk_nvme_transport_poll_group_stat *tr_stat;
	uint32_t j;
	int rc;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	rc = spdk_nvme_poll_group_get_stats(group->group, &stat);
	if (rc) {
		spdk_for_each_channel_continue(i, rc);
		return;
	}

	spdk_json_write_object_begin(ctx->w);
	spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread()));
	spdk_json_write_named_array_begin(ctx->w, "transports");

	for (j = 0; j < stat->num_transports; j++) {
		tr_stat = stat->transport_stat[j];
		spdk_json_write_object_begin(ctx->w);
		spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype));

		switch (stat->transport_stat[j]->trtype) {
		case SPDK_NVME_TRANSPORT_RDMA:
			rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat);
			break;
		case SPDK_NVME_TRANSPORT_PCIE:
			rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat);
			break;
		case SPDK_NVME_TRANSPORT_TCP:
			rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat);
			break;
		default:
			SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype,
				     spdk_nvme_transport_id_trtype_str(tr_stat->trtype));
		}
		spdk_json_write_object_end(ctx->w);
	}
	/* transports array */
	spdk_json_write_array_end(ctx->w);
	spdk_json_write_object_end(ctx->w);

	spdk_nvme_poll_group_free_stats(group->group, stat);
	spdk_for_each_channel_continue(i, 0);
}

static void
rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	spdk_json_write_array_end(ctx->w);
	spdk_json_write_object_end(ctx->w);
	spdk_jsonrpc_end_result(ctx->request, ctx->w);
	free(ctx);
}

static void
rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request,
				       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx;

	if (params) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "'bdev_nvme_get_transport_statistics' requires no arguments");
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error");
		return;
	}
	ctx->request = request;
	ctx->w = spdk_jsonrpc_begin_result(ctx->request);
	spdk_json_write_object_begin(ctx->w);
	spdk_json_write_named_array_begin(ctx->w, "poll_groups");

	spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
			      rpc_bdev_nvme_stats_per_channel,
			      ctx,
			      rpc_bdev_nvme_stats_done);
}
SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics,
		  SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_reset_controller_req {
	char *name;
};

static void
free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string},
};

struct rpc_bdev_nvme_reset_controller_ctx {
	struct spdk_jsonrpc_request *request;
	bool success;
	struct spdk_thread *orig_thread;
};

static void
_rpc_bdev_nvme_reset_controller_cb(void *_ctx)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx;

	spdk_jsonrpc_send_bool_response(ctx->request, ctx->success);

	free(ctx);
}

static void
rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg;

	ctx->success = success;

	spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx);
}

static void
rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request,
			       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_reset_controller_req req = {NULL};
	struct rpc_bdev_nvme_reset_controller_ctx *ctx;
	struct nvme_ctrlr *nvme_ctrlr;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation failed");
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders),
				    &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL));
		goto err;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("Failed at device lookup\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto err;
	}

	ctx->request = request;
	ctx->orig_thread = spdk_get_thread();

	rc = bdev_nvme_reset_rpc(nvme_ctrlr, rpc_bdev_nvme_reset_controller_cb, ctx);
	if (rc != 0) {
		SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc));
		goto err;
	}

	free_rpc_bdev_nvme_reset_controller_req(&req);
	return;

err:
	free_rpc_bdev_nvme_reset_controller_req(&req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME)

struct rpc_get_controller_health_info {
	char *name;
};

struct spdk_nvme_health_info_context {
	struct spdk_jsonrpc_request *request;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_health_information_page health_page;
};

static void
free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = {
	{"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true},
};

static void
nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response)
{
	if (response == true) {
		spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Internal error.");
	}

	free(context);
}

static void
get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	int i;
	char buf[128];
	struct spdk_nvme_health_info_context *context = cb_arg;
	struct spdk_jsonrpc_request *request = context->request;
	struct spdk_json_write_ctx *w;
	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;
	const struct spdk_nvme_transport_id *trid = NULL;
	const struct spdk_nvme_ctrlr_data *cdata = NULL;
	struct spdk_nvme_health_information_page *health_page = NULL;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("get log page failed\n");
		return;
	}

	if (ctrlr == NULL) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("ctrlr is NULL\n");
		return;
	} else {
		trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);
		cdata = spdk_nvme_ctrlr_get_data(ctrlr);
		health_page = &(context->health_page);
	}

	w = spdk_jsonrpc_begin_result(request);

	spdk_json_write_object_begin(w);
	snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "model_number", buf);
	snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "serial_number", buf);
	snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "firmware_revision", buf);
	spdk_json_write_named_string(w, "traddr", trid->traddr);
	spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273);
	spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare);
	spdk_json_write_named_uint64(w, "available_spare_threshold_percentage",
				     health_page->available_spare_threshold);
	spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used);
	spdk_json_write_named_uint128(w, "data_units_read",
				      health_page->data_units_read[0], health_page->data_units_read[1]);
	spdk_json_write_named_uint128(w, "data_units_written",
				      health_page->data_units_written[0], health_page->data_units_written[1]);
	spdk_json_write_named_uint128(w, "host_read_commands",
				      health_page->host_read_commands[0], health_page->host_read_commands[1]);
	spdk_json_write_named_uint128(w, "host_write_commands",
				      health_page->host_write_commands[0], health_page->host_write_commands[1]);
	spdk_json_write_named_uint128(w, "controller_busy_time",
				      health_page->controller_busy_time[0], health_page->controller_busy_time[1]);
	spdk_json_write_named_uint128(w, "power_cycles",
				      health_page->power_cycles[0], health_page->power_cycles[1]);
	spdk_json_write_named_uint128(w, "power_on_hours",
				      health_page->power_on_hours[0], health_page->power_on_hours[1]);
	spdk_json_write_named_uint128(w, "unsafe_shutdowns",
				      health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]);
	spdk_json_write_named_uint128(w, "media_errors",
				      health_page->media_errors[0], health_page->media_errors[1]);
	spdk_json_write_named_uint128(w, "num_err_log_entries",
				      health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]);
	spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time);
	spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes",
				     health_page->critical_temp_time);
	for (i = 0; i < 8; i++) {
		if (health_page->temp_sensor[i] != 0) {
			spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273);
		}
	}
	spdk_json_write_object_end(w);

	spdk_jsonrpc_end_result(request, w);
	nvme_health_info_cleanup(context, false);
}

static void
get_health_log_page(struct spdk_nvme_health_info_context *context)
{
	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;

	if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
					     SPDK_NVME_GLOBAL_NS_TAG,
					     &(context->health_page), sizeof(context->health_page), 0,
					     get_health_log_page_completion, context)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
	}
}

static void
get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_health_info_context *context = cb_arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n");
	} else {
		get_health_log_page(context);
	}
}

static int
get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context)
{
	struct spdk_nvme_cmd cmd = {};

	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0,
					     get_temperature_threshold_feature_completion, context);
}

static void
get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_health_info_context *context;

	context = calloc(1, sizeof(struct spdk_nvme_health_info_context));
	if (!context) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error.");
		return;
	}

	context->request = request;
	context->ctrlr = ctrlr;

	if (get_temperature_threshold_feature(context)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n");
	}

	return;
}

static void
rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request,
		const struct spdk_json_val *params)
{
	struct rpc_get_controller_health_info req = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;

	if (!params) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Missing device name");

		return;
	}
	if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders,
				    SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		free_rpc_get_controller_health_info(&req);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Invalid parameters");

		return;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);

	if (!nvme_ctrlr) {
		SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name);
		free_rpc_get_controller_health_info(&req);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Device not found");
		return;
	}

	get_controller_health_info(request, nvme_ctrlr->ctrlr);
	free_rpc_get_controller_health_info(&req);

	return;
}
SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info",
		  rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_start_discovery {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *hostnqn;
	struct spdk_nvme_ctrlr_opts opts;
};

static void
free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->hostnqn);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string},
	{"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string},
	{"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true},
	{"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true},
};

struct rpc_bdev_nvme_start_discovery_ctx {
	struct rpc_bdev_nvme_start_discovery req;
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_start_discovery_done(void *cb_ctx, int rc)
{
	struct rpc_bdev_nvme_start_discovery_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;

	if (rc < 0) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		free_rpc_bdev_nvme_start_discovery(&ctx->req);
		free(ctx);
		return;
	}

	spdk_jsonrpc_send_bool_response(ctx->request, rc == 0);

	free_rpc_bdev_nvme_start_discovery(&ctx->req);
	free(ctx);
}

static void
rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request,
			      const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_start_discovery_ctx *ctx;
	struct spdk_nvme_transport_id trid = {};
	size_t len, maxlen;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts));

	if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	/* Parse trstring */
	rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype);
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
						     ctx->req.trtype);
		goto cleanup;
	}

	/* Parse trtype */
	rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
	assert(rc == 0);

	/* Parse traddr */
	maxlen = sizeof(trid.traddr);
	len = strnlen(ctx->req.traddr, maxlen);
	if (len == maxlen) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
						     ctx->req.traddr);
		goto cleanup;
	}
	memcpy(trid.traddr, ctx->req.traddr, len + 1);

	/* Parse adrfam */
	if (ctx->req.adrfam) {
		rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     ctx->req.adrfam);
			goto cleanup;
		}
	}

	/* Parse trsvcid */
	if (ctx->req.trsvcid) {
		maxlen = sizeof(trid.trsvcid);
		len = strnlen(ctx->req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     ctx->req.trsvcid);
			goto cleanup;
		}
		memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1);
	}

	if (ctx->req.hostnqn) {
		snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s",
			 ctx->req.hostnqn);
	}

	ctx->request = request;
	rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts,
				       rpc_bdev_nvme_start_discovery_done, ctx);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	return;

cleanup:
	free_rpc_bdev_nvme_start_discovery(&ctx->req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery,
		  SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_stop_discovery {
	char *name;
};

static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string},
};

struct rpc_bdev_nvme_stop_discovery_ctx {
	struct rpc_bdev_nvme_stop_discovery req;
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_stop_discovery_done(void *cb_ctx)
{
	struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx;

	spdk_jsonrpc_send_bool_response(ctx->request, true);
	free(ctx->req.name);
	free(ctx);
}

static void
rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request,
			     const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_stop_discovery_ctx *ctx;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	ctx->request = request;
	rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	return;

cleanup:
	free(ctx->req.name);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery,
		  SPDK_RPC_RUNTIME)