1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "bdev_nvme.h" 37 38 #include "spdk/config.h" 39 40 #include "spdk/string.h" 41 #include "spdk/rpc.h" 42 #include "spdk/util.h" 43 #include "spdk/env.h" 44 #include "spdk/nvme.h" 45 #include "spdk/nvme_spec.h" 46 47 #include "spdk/log.h" 48 #include "spdk/bdev_module.h" 49 50 struct open_descriptors { 51 void *desc; 52 struct spdk_bdev *bdev; 53 TAILQ_ENTRY(open_descriptors) tqlst; 54 struct spdk_thread *thread; 55 }; 56 typedef TAILQ_HEAD(, open_descriptors) open_descriptors_t; 57 58 static int 59 rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out) 60 { 61 enum spdk_bdev_timeout_action *action = out; 62 63 if (spdk_json_strequal(val, "none") == true) { 64 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE; 65 } else if (spdk_json_strequal(val, "abort") == true) { 66 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT; 67 } else if (spdk_json_strequal(val, "reset") == true) { 68 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET; 69 } else { 70 SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n"); 71 return -EINVAL; 72 } 73 74 return 0; 75 } 76 77 static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = { 78 {"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true}, 79 {"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true}, 80 {"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true}, 81 {"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true}, 82 {"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 83 {"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true}, 84 {"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true}, 85 {"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true}, 86 {"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true}, 87 {"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true}, 88 {"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true}, 89 {"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true}, 90 {"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true}, 91 {"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 92 {"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true}, 93 }; 94 95 static void 96 rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request, 97 const struct spdk_json_val *params) 98 { 99 struct spdk_bdev_nvme_opts opts; 100 int rc; 101 102 bdev_nvme_get_opts(&opts); 103 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders, 104 SPDK_COUNTOF(rpc_bdev_nvme_options_decoders), 105 &opts)) { 106 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 107 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 108 "spdk_json_decode_object failed"); 109 return; 110 } 111 112 rc = bdev_nvme_set_opts(&opts); 113 if (rc) { 114 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 115 return; 116 } 117 118 spdk_jsonrpc_send_bool_response(request, true); 119 120 return; 121 } 122 SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options, 123 SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME) 124 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_options, set_bdev_nvme_options) 125 126 struct rpc_bdev_nvme_hotplug { 127 bool enabled; 128 uint64_t period_us; 129 }; 130 131 static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = { 132 {"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false}, 133 {"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true}, 134 }; 135 136 static void 137 rpc_bdev_nvme_set_hotplug_done(void *ctx) 138 { 139 struct spdk_jsonrpc_request *request = ctx; 140 141 spdk_jsonrpc_send_bool_response(request, true); 142 } 143 144 static void 145 rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request, 146 const struct spdk_json_val *params) 147 { 148 struct rpc_bdev_nvme_hotplug req = {false, 0}; 149 int rc; 150 151 if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders, 152 SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) { 153 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 154 rc = -EINVAL; 155 goto invalid; 156 } 157 158 rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done, 159 request); 160 if (rc) { 161 goto invalid; 162 } 163 164 return; 165 invalid: 166 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc)); 167 } 168 SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME) 169 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_hotplug, set_bdev_nvme_hotplug) 170 171 struct rpc_bdev_nvme_attach_controller { 172 char *name; 173 char *trtype; 174 char *adrfam; 175 char *traddr; 176 char *trsvcid; 177 char *priority; 178 char *subnqn; 179 char *hostnqn; 180 char *hostaddr; 181 char *hostsvcid; 182 bool prchk_reftag; 183 bool prchk_guard; 184 uint64_t fabrics_connect_timeout_us; 185 char *multipath; 186 struct spdk_nvme_ctrlr_opts opts; 187 }; 188 189 static void 190 free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req) 191 { 192 free(req->name); 193 free(req->trtype); 194 free(req->adrfam); 195 free(req->traddr); 196 free(req->trsvcid); 197 free(req->priority); 198 free(req->subnqn); 199 free(req->hostnqn); 200 free(req->hostaddr); 201 free(req->hostsvcid); 202 free(req->multipath); 203 } 204 205 static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = { 206 {"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string}, 207 {"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string}, 208 {"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string}, 209 210 {"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true}, 211 {"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true}, 212 {"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true}, 213 {"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true}, 214 {"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true}, 215 {"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true}, 216 {"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true}, 217 218 {"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_reftag), spdk_json_decode_bool, true}, 219 {"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_guard), spdk_json_decode_bool, true}, 220 {"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, opts.header_digest), spdk_json_decode_bool, true}, 221 {"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, opts.data_digest), spdk_json_decode_bool, true}, 222 {"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true}, 223 {"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), spdk_json_decode_string, true}, 224 {"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, opts.num_io_queues), spdk_json_decode_uint32, true}, 225 }; 226 227 #define NVME_MAX_BDEVS_PER_RPC 128 228 229 struct rpc_bdev_nvme_attach_controller_ctx { 230 struct rpc_bdev_nvme_attach_controller req; 231 uint32_t count; 232 size_t bdev_count; 233 const char *names[NVME_MAX_BDEVS_PER_RPC]; 234 struct spdk_jsonrpc_request *request; 235 }; 236 237 static void 238 rpc_bdev_nvme_attach_controller_examined(void *cb_ctx) 239 { 240 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 241 struct spdk_jsonrpc_request *request = ctx->request; 242 struct spdk_json_write_ctx *w; 243 size_t i; 244 245 w = spdk_jsonrpc_begin_result(request); 246 spdk_json_write_array_begin(w); 247 for (i = 0; i < ctx->bdev_count; i++) { 248 spdk_json_write_string(w, ctx->names[i]); 249 } 250 spdk_json_write_array_end(w); 251 spdk_jsonrpc_end_result(request, w); 252 253 free_rpc_bdev_nvme_attach_controller(&ctx->req); 254 free(ctx); 255 } 256 257 static void 258 rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc) 259 { 260 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 261 struct spdk_jsonrpc_request *request = ctx->request; 262 263 if (rc < 0) { 264 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); 265 free_rpc_bdev_nvme_attach_controller(&ctx->req); 266 free(ctx); 267 return; 268 } 269 270 ctx->bdev_count = bdev_count; 271 spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx); 272 } 273 274 static void 275 rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, 276 const struct spdk_json_val *params) 277 { 278 struct rpc_bdev_nvme_attach_controller_ctx *ctx; 279 struct spdk_nvme_transport_id trid = {}; 280 const struct spdk_nvme_ctrlr_opts *opts; 281 const struct spdk_nvme_transport_id *ctrlr_trid; 282 uint32_t prchk_flags = 0; 283 struct nvme_ctrlr *ctrlr = NULL; 284 size_t len, maxlen; 285 bool multipath = false; 286 int rc; 287 288 ctx = calloc(1, sizeof(*ctx)); 289 if (!ctx) { 290 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 291 return; 292 } 293 294 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 295 296 if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders, 297 SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders), 298 &ctx->req)) { 299 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 300 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 301 "spdk_json_decode_object failed"); 302 goto cleanup; 303 } 304 305 /* Parse trstring */ 306 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 307 if (rc < 0) { 308 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 309 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 310 ctx->req.trtype); 311 goto cleanup; 312 } 313 314 /* Parse trtype */ 315 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 316 assert(rc == 0); 317 318 /* Parse traddr */ 319 maxlen = sizeof(trid.traddr); 320 len = strnlen(ctx->req.traddr, maxlen); 321 if (len == maxlen) { 322 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 323 ctx->req.traddr); 324 goto cleanup; 325 } 326 memcpy(trid.traddr, ctx->req.traddr, len + 1); 327 328 /* Parse adrfam */ 329 if (ctx->req.adrfam) { 330 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 331 if (rc < 0) { 332 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 333 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 334 ctx->req.adrfam); 335 goto cleanup; 336 } 337 } 338 339 /* Parse trsvcid */ 340 if (ctx->req.trsvcid) { 341 maxlen = sizeof(trid.trsvcid); 342 len = strnlen(ctx->req.trsvcid, maxlen); 343 if (len == maxlen) { 344 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 345 ctx->req.trsvcid); 346 goto cleanup; 347 } 348 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 349 } 350 351 /* Parse priority for the NVMe-oF transport connection */ 352 if (ctx->req.priority) { 353 trid.priority = spdk_strtol(ctx->req.priority, 10); 354 } 355 356 /* Parse subnqn */ 357 if (ctx->req.subnqn) { 358 maxlen = sizeof(trid.subnqn); 359 len = strnlen(ctx->req.subnqn, maxlen); 360 if (len == maxlen) { 361 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 362 ctx->req.subnqn); 363 goto cleanup; 364 } 365 memcpy(trid.subnqn, ctx->req.subnqn, len + 1); 366 } 367 368 if (ctx->req.hostnqn) { 369 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 370 ctx->req.hostnqn); 371 } 372 373 if (ctx->req.hostaddr) { 374 maxlen = sizeof(ctx->req.opts.src_addr); 375 len = strnlen(ctx->req.hostaddr, maxlen); 376 if (len == maxlen) { 377 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 378 ctx->req.hostaddr); 379 goto cleanup; 380 } 381 snprintf(ctx->req.opts.src_addr, maxlen, "%s", ctx->req.hostaddr); 382 } 383 384 if (ctx->req.hostsvcid) { 385 maxlen = sizeof(ctx->req.opts.src_svcid); 386 len = strnlen(ctx->req.hostsvcid, maxlen); 387 if (len == maxlen) { 388 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 389 ctx->req.hostsvcid); 390 goto cleanup; 391 } 392 snprintf(ctx->req.opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid); 393 } 394 395 ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); 396 397 if (ctrlr) { 398 if (ctx->req.multipath == NULL) { 399 /* For now, this means add a failover path. This maintains backward compatibility 400 * with past behavior. In the future, this behavior will change to "disable". */ 401 SPDK_ERRLOG("The multipath parameter was not specified to bdev_nvme_attach_controller but " 402 "it was used to add a failover path. This behavior will default to rejecting " 403 "the request in the future. Specify the 'multipath' parameter to control the behavior"); 404 ctx->req.multipath = strdup("failover"); 405 if (ctx->req.multipath == NULL) { 406 SPDK_ERRLOG("cannot allocate multipath failover string\n"); 407 goto cleanup; 408 } 409 } 410 411 opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr); 412 ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr); 413 414 /* This controller already exists. Check what the user wants to do. */ 415 if (strcasecmp(ctx->req.multipath, "disable") == 0) { 416 /* The user does not want to do any form of multipathing. */ 417 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 418 "A controller named %s already exists and multipath is disabled\n", 419 ctx->req.name); 420 goto cleanup; 421 } else if (strcasecmp(ctx->req.multipath, "failover") == 0 || 422 strcasecmp(ctx->req.multipath, "multipath") == 0) { 423 /* The user wants to add this as a failover path or add this to create multipath. */ 424 425 if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 && 426 strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 && 427 strncmp(ctx->req.opts.src_addr, opts->src_addr, sizeof(opts->src_addr)) == 0 && 428 strncmp(ctx->req.opts.src_svcid, opts->src_svcid, sizeof(opts->src_svcid)) == 0) { 429 /* Exactly same network path can't be added a second time */ 430 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 431 "A controller named %s already exists with the specified network path\n", 432 ctx->req.name); 433 goto cleanup; 434 } 435 } else { 436 /* Invalid multipath option */ 437 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 438 "Invalid multipath parameter: %s\n", 439 ctx->req.multipath); 440 goto cleanup; 441 } 442 443 if (strncmp(trid.subnqn, 444 ctrlr_trid->subnqn, 445 SPDK_NVMF_NQN_MAX_LEN) != 0) { 446 /* Different SUBNQN is not allowed when specifying the same controller name. */ 447 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 448 "A controller named %s already exists, but uses a different subnqn (%s)\n", 449 ctx->req.name, ctrlr_trid->subnqn); 450 goto cleanup; 451 } 452 453 454 455 if (strncmp(ctx->req.opts.hostnqn, opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) { 456 /* Different HOSTNQN is not allowed when specifying the same controller name. */ 457 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 458 "A controller named %s already exists, but uses a different hostnqn (%s)\n", 459 ctx->req.name, opts->hostnqn); 460 goto cleanup; 461 } 462 463 if (ctx->req.prchk_guard || ctx->req.prchk_reftag) { 464 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 465 "A controller named %s already exists. To add a path, do not specify PI options.\n", 466 ctx->req.name); 467 goto cleanup; 468 } 469 } 470 471 if (ctx->req.prchk_reftag) { 472 prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; 473 } 474 475 if (ctx->req.prchk_guard) { 476 prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD; 477 } 478 479 if (ctx->req.multipath != NULL && strcasecmp(ctx->req.multipath, "multipath") == 0) { 480 multipath = true; 481 } 482 483 if (ctx->req.opts.num_io_queues == 0 || ctx->req.opts.num_io_queues > UINT16_MAX + 1) { 484 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 485 "num_io_queues out of bounds, min: %u max: %u\n", 486 1, UINT16_MAX + 1); 487 goto cleanup; 488 } 489 490 ctx->request = request; 491 ctx->count = NVME_MAX_BDEVS_PER_RPC; 492 rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count, prchk_flags, 493 rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.opts, 494 multipath); 495 if (rc) { 496 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 497 goto cleanup; 498 } 499 500 return; 501 502 cleanup: 503 free_rpc_bdev_nvme_attach_controller(&ctx->req); 504 free(ctx); 505 } 506 SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller, 507 SPDK_RPC_RUNTIME) 508 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_bdev) 509 510 static void 511 rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx) 512 { 513 struct spdk_json_write_ctx *w = ctx; 514 struct spdk_nvme_transport_id *trid; 515 struct nvme_ctrlr *nvme_ctrlr; 516 const struct spdk_nvme_ctrlr_opts *opts; 517 518 spdk_json_write_object_begin(w); 519 spdk_json_write_named_string(w, "name", nbdev_ctrlr->name); 520 521 spdk_json_write_named_array_begin(w, "ctrlrs"); 522 TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) { 523 spdk_json_write_object_begin(w); 524 #ifdef SPDK_CONFIG_NVME_CUSE 525 size_t cuse_name_size = 128; 526 char cuse_name[cuse_name_size]; 527 528 int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_ctrlr->ctrlr, cuse_name, &cuse_name_size); 529 if (rc == 0) { 530 spdk_json_write_named_string(w, "cuse_device", cuse_name); 531 } 532 #endif 533 trid = &nvme_ctrlr->active_path_id->trid; 534 spdk_json_write_named_object_begin(w, "trid"); 535 nvme_bdev_dump_trid_json(trid, w); 536 spdk_json_write_object_end(w); 537 538 opts = spdk_nvme_ctrlr_get_opts(nvme_ctrlr->ctrlr); 539 spdk_json_write_named_object_begin(w, "host"); 540 spdk_json_write_named_string(w, "nqn", opts->hostnqn); 541 spdk_json_write_named_string(w, "addr", opts->src_addr); 542 spdk_json_write_named_string(w, "svcid", opts->src_svcid); 543 spdk_json_write_object_end(w); 544 spdk_json_write_object_end(w); 545 } 546 spdk_json_write_array_end(w); 547 spdk_json_write_object_end(w); 548 } 549 550 struct rpc_bdev_nvme_get_controllers { 551 char *name; 552 }; 553 554 static void 555 free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r) 556 { 557 free(r->name); 558 } 559 560 static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = { 561 {"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true}, 562 }; 563 564 static void 565 rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, 566 const struct spdk_json_val *params) 567 { 568 struct rpc_bdev_nvme_get_controllers req = {}; 569 struct spdk_json_write_ctx *w; 570 struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL; 571 572 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders, 573 SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders), 574 &req)) { 575 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 576 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 577 "spdk_json_decode_object failed"); 578 goto cleanup; 579 } 580 581 if (req.name) { 582 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 583 if (nbdev_ctrlr == NULL) { 584 SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name); 585 spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name); 586 goto cleanup; 587 } 588 } 589 590 w = spdk_jsonrpc_begin_result(request); 591 spdk_json_write_array_begin(w); 592 593 if (nbdev_ctrlr != NULL) { 594 rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w); 595 } else { 596 nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w); 597 } 598 599 spdk_json_write_array_end(w); 600 601 spdk_jsonrpc_end_result(request, w); 602 603 cleanup: 604 free_rpc_bdev_nvme_get_controllers(&req); 605 } 606 SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME) 607 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_get_controllers, get_nvme_controllers) 608 609 struct rpc_bdev_nvme_detach_controller { 610 char *name; 611 char *trtype; 612 char *adrfam; 613 char *traddr; 614 char *trsvcid; 615 char *subnqn; 616 char *hostaddr; 617 char *hostsvcid; 618 }; 619 620 static void 621 free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req) 622 { 623 free(req->name); 624 free(req->trtype); 625 free(req->adrfam); 626 free(req->traddr); 627 free(req->trsvcid); 628 free(req->subnqn); 629 free(req->hostaddr); 630 free(req->hostsvcid); 631 } 632 633 static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = { 634 {"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string}, 635 {"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true}, 636 {"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true}, 637 {"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true}, 638 {"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true}, 639 {"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true}, 640 {"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true}, 641 {"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true}, 642 }; 643 644 static void 645 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request, 646 const struct spdk_json_val *params) 647 { 648 struct rpc_bdev_nvme_detach_controller req = {NULL}; 649 struct nvme_path_id path = {}; 650 size_t len, maxlen; 651 int rc = 0; 652 653 if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders, 654 SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders), 655 &req)) { 656 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 657 "spdk_json_decode_object failed"); 658 goto cleanup; 659 } 660 661 if (req.trtype != NULL) { 662 rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype); 663 if (rc < 0) { 664 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 665 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 666 req.trtype); 667 goto cleanup; 668 } 669 670 rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype); 671 if (rc < 0) { 672 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 673 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 674 req.trtype); 675 goto cleanup; 676 } 677 } 678 679 if (req.traddr != NULL) { 680 maxlen = sizeof(path.trid.traddr); 681 len = strnlen(req.traddr, maxlen); 682 if (len == maxlen) { 683 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 684 req.traddr); 685 goto cleanup; 686 } 687 memcpy(path.trid.traddr, req.traddr, len + 1); 688 } 689 690 if (req.adrfam != NULL) { 691 rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam); 692 if (rc < 0) { 693 SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam); 694 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 695 req.adrfam); 696 goto cleanup; 697 } 698 } 699 700 if (req.trsvcid != NULL) { 701 maxlen = sizeof(path.trid.trsvcid); 702 len = strnlen(req.trsvcid, maxlen); 703 if (len == maxlen) { 704 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 705 req.trsvcid); 706 goto cleanup; 707 } 708 memcpy(path.trid.trsvcid, req.trsvcid, len + 1); 709 } 710 711 /* Parse subnqn */ 712 if (req.subnqn != NULL) { 713 maxlen = sizeof(path.trid.subnqn); 714 len = strnlen(req.subnqn, maxlen); 715 if (len == maxlen) { 716 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 717 req.subnqn); 718 goto cleanup; 719 } 720 memcpy(path.trid.subnqn, req.subnqn, len + 1); 721 } 722 723 if (req.hostaddr) { 724 maxlen = sizeof(path.hostid.hostaddr); 725 len = strnlen(req.hostaddr, maxlen); 726 if (len == maxlen) { 727 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 728 req.hostaddr); 729 goto cleanup; 730 } 731 snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr); 732 } 733 734 if (req.hostsvcid) { 735 maxlen = sizeof(path.hostid.hostsvcid); 736 len = strnlen(req.hostsvcid, maxlen); 737 if (len == maxlen) { 738 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 739 req.hostsvcid); 740 goto cleanup; 741 } 742 snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid); 743 } 744 745 rc = bdev_nvme_delete(req.name, &path); 746 747 if (rc != 0) { 748 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 749 goto cleanup; 750 } 751 752 spdk_jsonrpc_send_bool_response(request, true); 753 754 cleanup: 755 free_rpc_bdev_nvme_detach_controller(&req); 756 } 757 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller, 758 SPDK_RPC_RUNTIME) 759 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_detach_controller, delete_nvme_controller) 760 761 struct rpc_apply_firmware { 762 char *filename; 763 char *bdev_name; 764 }; 765 766 static void 767 free_rpc_apply_firmware(struct rpc_apply_firmware *req) 768 { 769 free(req->filename); 770 free(req->bdev_name); 771 } 772 773 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = { 774 {"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string}, 775 {"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string}, 776 }; 777 778 struct firmware_update_info { 779 void *fw_image; 780 void *p; 781 unsigned int size; 782 unsigned int size_remaining; 783 unsigned int offset; 784 unsigned int transfer; 785 786 void *desc; 787 struct spdk_io_channel *ch; 788 struct spdk_jsonrpc_request *request; 789 struct spdk_nvme_ctrlr *ctrlr; 790 open_descriptors_t desc_head; 791 struct rpc_apply_firmware *req; 792 }; 793 794 static void 795 _apply_firmware_cleanup(void *ctx) 796 { 797 struct spdk_bdev_desc *desc = ctx; 798 799 spdk_bdev_close(desc); 800 } 801 802 static void 803 apply_firmware_cleanup(void *cb_arg) 804 { 805 struct open_descriptors *opt, *tmp; 806 struct firmware_update_info *firm_ctx = cb_arg; 807 808 if (!firm_ctx) { 809 return; 810 } 811 812 if (firm_ctx->fw_image) { 813 spdk_free(firm_ctx->fw_image); 814 } 815 816 if (firm_ctx->req) { 817 free_rpc_apply_firmware(firm_ctx->req); 818 free(firm_ctx->req); 819 } 820 821 if (firm_ctx->ch) { 822 spdk_put_io_channel(firm_ctx->ch); 823 } 824 825 TAILQ_FOREACH_SAFE(opt, &firm_ctx->desc_head, tqlst, tmp) { 826 TAILQ_REMOVE(&firm_ctx->desc_head, opt, tqlst); 827 /* Close the underlying bdev on its same opened thread. */ 828 if (opt->thread && opt->thread != spdk_get_thread()) { 829 spdk_thread_send_msg(opt->thread, _apply_firmware_cleanup, opt->desc); 830 } else { 831 spdk_bdev_close(opt->desc); 832 } 833 free(opt); 834 } 835 free(firm_ctx); 836 } 837 838 static void 839 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 840 { 841 struct spdk_json_write_ctx *w; 842 struct firmware_update_info *firm_ctx = cb_arg; 843 844 spdk_bdev_free_io(bdev_io); 845 846 if (!success) { 847 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 848 "firmware commit failed."); 849 apply_firmware_cleanup(firm_ctx); 850 return; 851 } 852 853 if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) { 854 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 855 "Controller reset failed."); 856 apply_firmware_cleanup(firm_ctx); 857 return; 858 } 859 860 w = spdk_jsonrpc_begin_result(firm_ctx->request); 861 spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress."); 862 spdk_jsonrpc_end_result(firm_ctx->request, w); 863 apply_firmware_cleanup(firm_ctx); 864 } 865 866 static void 867 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 868 { 869 struct spdk_nvme_cmd cmd = {}; 870 struct spdk_nvme_fw_commit fw_commit; 871 int slot = 0; 872 int rc; 873 struct firmware_update_info *firm_ctx = cb_arg; 874 enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG; 875 876 if (!success) { 877 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 878 "firmware download failed ."); 879 spdk_bdev_free_io(bdev_io); 880 apply_firmware_cleanup(firm_ctx); 881 return; 882 } 883 884 firm_ctx->p += firm_ctx->transfer; 885 firm_ctx->offset += firm_ctx->transfer; 886 firm_ctx->size_remaining -= firm_ctx->transfer; 887 888 switch (firm_ctx->size_remaining) { 889 case 0: 890 /* firmware download completed. Commit firmware */ 891 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 892 fw_commit.fs = slot; 893 fw_commit.ca = commit_action; 894 895 cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT; 896 memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t)); 897 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0, 898 apply_firmware_complete_reset, firm_ctx); 899 if (rc) { 900 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 901 "firmware commit failed."); 902 spdk_bdev_free_io(bdev_io); 903 apply_firmware_cleanup(firm_ctx); 904 return; 905 } 906 break; 907 default: 908 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 909 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 910 911 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 912 cmd.cdw11 = firm_ctx->offset >> 2; 913 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 914 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 915 if (rc) { 916 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 917 "firmware download failed."); 918 spdk_bdev_free_io(bdev_io); 919 apply_firmware_cleanup(firm_ctx); 920 return; 921 } 922 break; 923 } 924 } 925 926 static void 927 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 928 { 929 } 930 931 static void 932 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request, 933 const struct spdk_json_val *params) 934 { 935 int rc; 936 int fd = -1; 937 struct stat fw_stat; 938 struct spdk_nvme_ctrlr *ctrlr; 939 char msg[1024]; 940 struct spdk_bdev *bdev; 941 struct spdk_bdev *bdev2; 942 struct open_descriptors *opt; 943 struct spdk_bdev_desc *desc; 944 struct spdk_nvme_cmd *cmd; 945 struct firmware_update_info *firm_ctx; 946 947 firm_ctx = calloc(1, sizeof(struct firmware_update_info)); 948 if (!firm_ctx) { 949 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 950 "Memory allocation error."); 951 return; 952 } 953 firm_ctx->fw_image = NULL; 954 TAILQ_INIT(&firm_ctx->desc_head); 955 firm_ctx->request = request; 956 957 firm_ctx->req = calloc(1, sizeof(struct rpc_apply_firmware)); 958 if (!firm_ctx->req) { 959 snprintf(msg, sizeof(msg), "Memory allocation error."); 960 goto err; 961 } 962 963 if (spdk_json_decode_object(params, rpc_apply_firmware_decoders, 964 SPDK_COUNTOF(rpc_apply_firmware_decoders), firm_ctx->req)) { 965 snprintf(msg, sizeof(msg), "spdk_json_decode_object failed."); 966 goto err; 967 } 968 969 if ((bdev = spdk_bdev_get_by_name(firm_ctx->req->bdev_name)) == NULL) { 970 snprintf(msg, sizeof(msg), "bdev %s were not found", firm_ctx->req->bdev_name); 971 goto err; 972 } 973 974 if ((ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) { 975 snprintf(msg, sizeof(msg), "Controller information for %s were not found.", 976 firm_ctx->req->bdev_name); 977 goto err; 978 } 979 firm_ctx->ctrlr = ctrlr; 980 981 for (bdev2 = spdk_bdev_first(); bdev2; bdev2 = spdk_bdev_next(bdev2)) { 982 983 if (bdev_nvme_get_ctrlr(bdev2) != ctrlr) { 984 continue; 985 } 986 987 if (!(opt = malloc(sizeof(struct open_descriptors)))) { 988 snprintf(msg, sizeof(msg), "Memory allocation error."); 989 goto err; 990 } 991 992 if (spdk_bdev_open_ext(spdk_bdev_get_name(bdev2), true, apply_firmware_open_cb, NULL, &desc) != 0) { 993 snprintf(msg, sizeof(msg), "Device %s is in use.", firm_ctx->req->bdev_name); 994 free(opt); 995 goto err; 996 } 997 998 /* Save the thread where the base device is opened */ 999 opt->thread = spdk_get_thread(); 1000 1001 opt->desc = desc; 1002 opt->bdev = bdev; 1003 TAILQ_INSERT_TAIL(&firm_ctx->desc_head, opt, tqlst); 1004 } 1005 1006 /* 1007 * find a descriptor associated with our bdev 1008 */ 1009 firm_ctx->desc = NULL; 1010 TAILQ_FOREACH(opt, &firm_ctx->desc_head, tqlst) { 1011 if (opt->bdev == bdev) { 1012 firm_ctx->desc = opt->desc; 1013 break; 1014 } 1015 } 1016 1017 if (!firm_ctx->desc) { 1018 snprintf(msg, sizeof(msg), "No descriptor were found."); 1019 goto err; 1020 } 1021 1022 firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc); 1023 if (!firm_ctx->ch) { 1024 snprintf(msg, sizeof(msg), "No channels were found."); 1025 goto err; 1026 } 1027 1028 fd = open(firm_ctx->req->filename, O_RDONLY); 1029 if (fd < 0) { 1030 snprintf(msg, sizeof(msg), "open file failed."); 1031 goto err; 1032 } 1033 1034 rc = fstat(fd, &fw_stat); 1035 if (rc < 0) { 1036 close(fd); 1037 snprintf(msg, sizeof(msg), "fstat failed."); 1038 goto err; 1039 } 1040 1041 firm_ctx->size = fw_stat.st_size; 1042 if (fw_stat.st_size % 4) { 1043 close(fd); 1044 snprintf(msg, sizeof(msg), "Firmware image size is not multiple of 4."); 1045 goto err; 1046 } 1047 1048 firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL, 1049 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1050 if (!firm_ctx->fw_image) { 1051 close(fd); 1052 snprintf(msg, sizeof(msg), "Memory allocation error."); 1053 goto err; 1054 } 1055 firm_ctx->p = firm_ctx->fw_image; 1056 1057 if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) { 1058 close(fd); 1059 snprintf(msg, sizeof(msg), "Read firmware image failed!"); 1060 goto err; 1061 } 1062 close(fd); 1063 1064 firm_ctx->offset = 0; 1065 firm_ctx->size_remaining = firm_ctx->size; 1066 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 1067 1068 cmd = malloc(sizeof(struct spdk_nvme_cmd)); 1069 if (!cmd) { 1070 snprintf(msg, sizeof(msg), "Memory allocation error."); 1071 goto err; 1072 } 1073 memset(cmd, 0, sizeof(struct spdk_nvme_cmd)); 1074 cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 1075 1076 cmd->cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 1077 cmd->cdw11 = firm_ctx->offset >> 2; 1078 1079 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, cmd, firm_ctx->p, 1080 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 1081 if (rc == 0) { 1082 /* normal return here. */ 1083 return; 1084 } 1085 1086 free(cmd); 1087 snprintf(msg, sizeof(msg), "Read firmware image failed!"); 1088 err: 1089 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, msg); 1090 apply_firmware_cleanup(firm_ctx); 1091 } 1092 SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME) 1093 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_apply_firmware, apply_nvme_firmware) 1094 1095 struct rpc_bdev_nvme_transport_stat_ctx { 1096 struct spdk_jsonrpc_request *request; 1097 struct spdk_json_write_ctx *w; 1098 }; 1099 1100 static void 1101 rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w, 1102 struct spdk_nvme_transport_poll_group_stat *stat) 1103 { 1104 struct spdk_nvme_rdma_device_stat *device_stats; 1105 uint32_t i; 1106 1107 spdk_json_write_named_array_begin(w, "devices"); 1108 1109 for (i = 0; i < stat->rdma.num_devices; i++) { 1110 device_stats = &stat->rdma.device_stats[i]; 1111 spdk_json_write_object_begin(w); 1112 spdk_json_write_named_string(w, "dev_name", device_stats->name); 1113 spdk_json_write_named_uint64(w, "polls", device_stats->polls); 1114 spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls); 1115 spdk_json_write_named_uint64(w, "completions", device_stats->completions); 1116 spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests); 1117 spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs); 1118 spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates); 1119 spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs); 1120 spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates); 1121 spdk_json_write_object_end(w); 1122 } 1123 spdk_json_write_array_end(w); 1124 } 1125 1126 static void 1127 rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w, 1128 struct spdk_nvme_transport_poll_group_stat *stat) 1129 { 1130 spdk_json_write_named_uint64(w, "polls", stat->pcie.polls); 1131 spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls); 1132 spdk_json_write_named_uint64(w, "completions", stat->pcie.completions); 1133 spdk_json_write_named_uint64(w, "cq_doorbell_updates", stat->pcie.cq_doorbell_updates); 1134 spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests); 1135 spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests); 1136 spdk_json_write_named_uint64(w, "sq_doobell_updates", stat->pcie.sq_doobell_updates); 1137 } 1138 1139 static void 1140 rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w, 1141 struct spdk_nvme_transport_poll_group_stat *stat) 1142 { 1143 spdk_json_write_named_uint64(w, "polls", stat->tcp.polls); 1144 spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls); 1145 spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions); 1146 spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions); 1147 spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests); 1148 spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests); 1149 } 1150 1151 static void 1152 rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i) 1153 { 1154 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1155 struct spdk_io_channel *ch; 1156 struct nvme_poll_group *group; 1157 struct spdk_nvme_poll_group_stat *stat; 1158 struct spdk_nvme_transport_poll_group_stat *tr_stat; 1159 uint32_t j; 1160 int rc; 1161 1162 ctx = spdk_io_channel_iter_get_ctx(i); 1163 ch = spdk_io_channel_iter_get_channel(i); 1164 group = spdk_io_channel_get_ctx(ch); 1165 1166 rc = spdk_nvme_poll_group_get_stats(group->group, &stat); 1167 if (rc) { 1168 spdk_for_each_channel_continue(i, rc); 1169 return; 1170 } 1171 1172 spdk_json_write_object_begin(ctx->w); 1173 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 1174 spdk_json_write_named_array_begin(ctx->w, "transports"); 1175 1176 for (j = 0; j < stat->num_transports; j++) { 1177 tr_stat = stat->transport_stat[j]; 1178 spdk_json_write_object_begin(ctx->w); 1179 spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1180 1181 switch (stat->transport_stat[j]->trtype) { 1182 case SPDK_NVME_TRANSPORT_RDMA: 1183 rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat); 1184 break; 1185 case SPDK_NVME_TRANSPORT_PCIE: 1186 rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat); 1187 break; 1188 case SPDK_NVME_TRANSPORT_TCP: 1189 rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat); 1190 break; 1191 default: 1192 SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype, 1193 spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1194 } 1195 spdk_json_write_object_end(ctx->w); 1196 } 1197 /* transports array */ 1198 spdk_json_write_array_end(ctx->w); 1199 spdk_json_write_object_end(ctx->w); 1200 1201 spdk_nvme_poll_group_free_stats(group->group, stat); 1202 spdk_for_each_channel_continue(i, 0); 1203 } 1204 1205 static void 1206 rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status) 1207 { 1208 struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1209 1210 spdk_json_write_array_end(ctx->w); 1211 spdk_json_write_object_end(ctx->w); 1212 spdk_jsonrpc_end_result(ctx->request, ctx->w); 1213 free(ctx); 1214 } 1215 1216 static void 1217 rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request, 1218 const struct spdk_json_val *params) 1219 { 1220 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1221 1222 if (params) { 1223 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 1224 "'bdev_nvme_get_transport_statistics' requires no arguments"); 1225 return; 1226 } 1227 1228 ctx = calloc(1, sizeof(*ctx)); 1229 if (!ctx) { 1230 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1231 "Memory allocation error"); 1232 return; 1233 } 1234 ctx->request = request; 1235 ctx->w = spdk_jsonrpc_begin_result(ctx->request); 1236 spdk_json_write_object_begin(ctx->w); 1237 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 1238 1239 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 1240 rpc_bdev_nvme_stats_per_channel, 1241 ctx, 1242 rpc_bdev_nvme_stats_done); 1243 } 1244 SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics, 1245 SPDK_RPC_RUNTIME) 1246 1247 struct rpc_bdev_nvme_reset_controller_req { 1248 char *name; 1249 }; 1250 1251 static void 1252 free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r) 1253 { 1254 free(r->name); 1255 } 1256 1257 static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = { 1258 {"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string}, 1259 }; 1260 1261 struct rpc_bdev_nvme_reset_controller_ctx { 1262 struct spdk_jsonrpc_request *request; 1263 bool success; 1264 struct spdk_thread *orig_thread; 1265 }; 1266 1267 static void 1268 _rpc_bdev_nvme_reset_controller_cb(void *_ctx) 1269 { 1270 struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx; 1271 1272 spdk_jsonrpc_send_bool_response(ctx->request, ctx->success); 1273 1274 free(ctx); 1275 } 1276 1277 static void 1278 rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success) 1279 { 1280 struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg; 1281 1282 ctx->success = success; 1283 1284 spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx); 1285 } 1286 1287 static void 1288 rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request, 1289 const struct spdk_json_val *params) 1290 { 1291 struct rpc_bdev_nvme_reset_controller_req req = {NULL}; 1292 struct rpc_bdev_nvme_reset_controller_ctx *ctx; 1293 struct nvme_ctrlr *nvme_ctrlr; 1294 int rc; 1295 1296 ctx = calloc(1, sizeof(*ctx)); 1297 if (ctx == NULL) { 1298 SPDK_ERRLOG("Memory allocation failed\n"); 1299 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1300 "Memory allocation failed"); 1301 return; 1302 } 1303 1304 if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders, 1305 SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders), 1306 &req)) { 1307 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1308 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL)); 1309 goto err; 1310 } 1311 1312 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1313 if (nvme_ctrlr == NULL) { 1314 SPDK_ERRLOG("Failed at device lookup\n"); 1315 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1316 goto err; 1317 } 1318 1319 ctx->request = request; 1320 ctx->orig_thread = spdk_get_thread(); 1321 1322 rc = bdev_nvme_reset_rpc(nvme_ctrlr, rpc_bdev_nvme_reset_controller_cb, ctx); 1323 if (rc != 0) { 1324 SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n"); 1325 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc)); 1326 goto err; 1327 } 1328 1329 free_rpc_bdev_nvme_reset_controller_req(&req); 1330 return; 1331 1332 err: 1333 free_rpc_bdev_nvme_reset_controller_req(&req); 1334 free(ctx); 1335 } 1336 SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME) 1337 1338 struct rpc_get_controller_health_info { 1339 char *name; 1340 }; 1341 1342 struct spdk_nvme_health_info_context { 1343 struct spdk_jsonrpc_request *request; 1344 struct spdk_nvme_ctrlr *ctrlr; 1345 struct spdk_nvme_health_information_page health_page; 1346 }; 1347 1348 static void 1349 free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r) 1350 { 1351 free(r->name); 1352 } 1353 1354 static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = { 1355 {"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true}, 1356 }; 1357 1358 static void nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response) 1359 { 1360 if (response == true) { 1361 spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1362 "Internal error."); 1363 } 1364 1365 free(context); 1366 } 1367 1368 static void 1369 get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1370 { 1371 int i; 1372 char buf[128]; 1373 struct spdk_nvme_health_info_context *context = cb_arg; 1374 struct spdk_jsonrpc_request *request = context->request; 1375 struct spdk_json_write_ctx *w; 1376 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1377 const struct spdk_nvme_transport_id *trid = NULL; 1378 const struct spdk_nvme_ctrlr_data *cdata = NULL; 1379 struct spdk_nvme_health_information_page *health_page = NULL; 1380 1381 if (spdk_nvme_cpl_is_error(cpl)) { 1382 nvme_health_info_cleanup(context, true); 1383 SPDK_ERRLOG("get log page failed\n"); 1384 return; 1385 } 1386 1387 if (ctrlr == NULL) { 1388 nvme_health_info_cleanup(context, true); 1389 SPDK_ERRLOG("ctrlr is NULL\n"); 1390 return; 1391 } else { 1392 trid = spdk_nvme_ctrlr_get_transport_id(ctrlr); 1393 cdata = spdk_nvme_ctrlr_get_data(ctrlr); 1394 health_page = &(context->health_page); 1395 } 1396 1397 w = spdk_jsonrpc_begin_result(request); 1398 1399 spdk_json_write_object_begin(w); 1400 snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn); 1401 spdk_str_trim(buf); 1402 spdk_json_write_named_string(w, "model_number", buf); 1403 snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn); 1404 spdk_str_trim(buf); 1405 spdk_json_write_named_string(w, "serial_number", buf); 1406 snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr); 1407 spdk_str_trim(buf); 1408 spdk_json_write_named_string(w, "firmware_revision", buf); 1409 spdk_json_write_named_string(w, "traddr", trid->traddr); 1410 spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273); 1411 spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare); 1412 spdk_json_write_named_uint64(w, "available_spare_threshold_percentage", 1413 health_page->available_spare_threshold); 1414 spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used); 1415 spdk_json_write_named_uint128(w, "data_units_read", 1416 health_page->data_units_read[0], health_page->data_units_read[1]); 1417 spdk_json_write_named_uint128(w, "data_units_written", 1418 health_page->data_units_written[0], health_page->data_units_written[1]); 1419 spdk_json_write_named_uint128(w, "host_read_commands", 1420 health_page->host_read_commands[0], health_page->host_read_commands[1]); 1421 spdk_json_write_named_uint128(w, "host_write_commands", 1422 health_page->host_write_commands[0], health_page->host_write_commands[1]); 1423 spdk_json_write_named_uint128(w, "controller_busy_time", 1424 health_page->controller_busy_time[0], health_page->controller_busy_time[1]); 1425 spdk_json_write_named_uint128(w, "power_cycles", 1426 health_page->power_cycles[0], health_page->power_cycles[1]); 1427 spdk_json_write_named_uint128(w, "power_on_hours", 1428 health_page->power_on_hours[0], health_page->power_on_hours[1]); 1429 spdk_json_write_named_uint128(w, "unsafe_shutdowns", 1430 health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]); 1431 spdk_json_write_named_uint128(w, "media_errors", 1432 health_page->media_errors[0], health_page->media_errors[1]); 1433 spdk_json_write_named_uint128(w, "num_err_log_entries", 1434 health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]); 1435 spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time); 1436 spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes", 1437 health_page->critical_temp_time); 1438 for (i = 0; i < 8; i++) { 1439 if (health_page->temp_sensor[i] != 0) { 1440 spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273); 1441 } 1442 } 1443 spdk_json_write_object_end(w); 1444 1445 spdk_jsonrpc_end_result(request, w); 1446 nvme_health_info_cleanup(context, false); 1447 } 1448 1449 static void 1450 get_health_log_page(struct spdk_nvme_health_info_context *context) 1451 { 1452 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1453 1454 if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, 1455 SPDK_NVME_GLOBAL_NS_TAG, 1456 &(context->health_page), sizeof(context->health_page), 0, 1457 get_health_log_page_completion, context)) { 1458 nvme_health_info_cleanup(context, true); 1459 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n"); 1460 } 1461 } 1462 1463 static void 1464 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1465 { 1466 struct spdk_nvme_health_info_context *context = cb_arg; 1467 1468 if (spdk_nvme_cpl_is_error(cpl)) { 1469 nvme_health_info_cleanup(context, true); 1470 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n"); 1471 } else { 1472 get_health_log_page(context); 1473 } 1474 } 1475 1476 static int 1477 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context) 1478 { 1479 struct spdk_nvme_cmd cmd = {}; 1480 1481 cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1482 cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1483 1484 return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0, 1485 get_temperature_threshold_feature_completion, context); 1486 } 1487 1488 static void 1489 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr) 1490 { 1491 struct spdk_nvme_health_info_context *context; 1492 1493 context = calloc(1, sizeof(struct spdk_nvme_health_info_context)); 1494 if (!context) { 1495 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1496 "Memory allocation error."); 1497 return; 1498 } 1499 1500 context->request = request; 1501 context->ctrlr = ctrlr; 1502 1503 if (get_temperature_threshold_feature(context)) { 1504 nvme_health_info_cleanup(context, true); 1505 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n"); 1506 } 1507 1508 return; 1509 } 1510 1511 static void 1512 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request, 1513 const struct spdk_json_val *params) 1514 { 1515 struct rpc_get_controller_health_info req = {}; 1516 struct nvme_ctrlr *nvme_ctrlr = NULL; 1517 1518 if (!params) { 1519 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1520 "Missing device name"); 1521 1522 return; 1523 } 1524 if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders, 1525 SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) { 1526 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1527 free_rpc_get_controller_health_info(&req); 1528 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1529 "Invalid parameters"); 1530 1531 return; 1532 } 1533 1534 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1535 1536 if (!nvme_ctrlr) { 1537 SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name); 1538 free_rpc_get_controller_health_info(&req); 1539 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1540 "Device not found"); 1541 return; 1542 } 1543 1544 get_controller_health_info(request, nvme_ctrlr->ctrlr); 1545 free_rpc_get_controller_health_info(&req); 1546 1547 return; 1548 } 1549 SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info", 1550 rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME) 1551