/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "bdev_nvme.h"

#include "spdk/config.h"

#include "spdk/string.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/env.h"
#include "spdk/nvme.h"
#include "spdk/nvme_spec.h"

#include "spdk/log.h"
#include "spdk/bdev_module.h"

struct open_descriptors {
	void *desc;
	struct spdk_bdev *bdev;
	TAILQ_ENTRY(open_descriptors) tqlst;
	struct spdk_thread *thread;
};
typedef TAILQ_HEAD(, open_descriptors) open_descriptors_t;

static int
rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out)
{
	enum spdk_bdev_timeout_action *action = out;

	if (spdk_json_strequal(val, "none") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE;
	} else if (spdk_json_strequal(val, "abort") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT;
	} else if (spdk_json_strequal(val, "reset") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET;
	} else {
		SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n");
		return -EINVAL;
	}

	return 0;
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = {
	{"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true},
	{"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true},
	{"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true},
	{"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true},
	{"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true},
	{"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true},
	{"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true},
	{"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true},
	{"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true},
	{"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true},
	{"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true},
	{"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true},
	{"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true},
	{"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true},
	{"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true},
	{"transport_ack_timeout", offsetof(struct spdk_bdev_nvme_opts, transport_ack_timeout), spdk_json_decode_uint8, true},
	{"ctrlr_loss_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true},
	{"reconnect_delay_sec", offsetof(struct spdk_bdev_nvme_opts, reconnect_delay_sec), spdk_json_decode_uint32, true},
	{"fast_io_fail_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true},
	{"disable_auto_failback", offsetof(struct spdk_bdev_nvme_opts, disable_auto_failback), spdk_json_decode_bool, true},
};
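
/*
 * bdev_nvme_set_options: apply global NVMe bdev driver options.
 *
 * Every parameter is optional; values that are not supplied keep whatever
 * bdev_nvme_get_opts() currently reports.  An illustrative JSON-RPC request
 * (the parameter values here are examples only, not defaults taken from this
 * file) might look like:
 *
 *   { "jsonrpc": "2.0", "method": "bdev_nvme_set_options", "id": 1,
 *     "params": { "action_on_timeout": "reset", "timeout_us": 30000000,
 *                 "keep_alive_timeout_ms": 10000 } }
 */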
static void
rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request,
			  const struct spdk_json_val *params)
{
	struct spdk_bdev_nvme_opts opts;
	int rc;

	bdev_nvme_get_opts(&opts);
	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders,
					      SPDK_COUNTOF(rpc_bdev_nvme_options_decoders),
					      &opts)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		return;
	}

	rc = bdev_nvme_set_opts(&opts);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		return;
	}

	spdk_jsonrpc_send_bool_response(request, true);

	return;
}
SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options,
		  SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_options, set_bdev_nvme_options)

struct rpc_bdev_nvme_hotplug {
	bool enabled;
	uint64_t period_us;
};

static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = {
	{"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false},
	{"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true},
};

static void
rpc_bdev_nvme_set_hotplug_done(void *ctx)
{
	struct spdk_jsonrpc_request *request = ctx;

	spdk_jsonrpc_send_bool_response(request, true);
}

static void
rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request,
			  const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_hotplug req = {false, 0};
	int rc;

	if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		rc = -EINVAL;
		goto invalid;
	}

	rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done,
				   request);
	if (rc) {
		goto invalid;
	}

	return;
invalid:
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc));
}
SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_hotplug, set_bdev_nvme_hotplug)

struct rpc_bdev_nvme_attach_controller {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *priority;
	char *subnqn;
	char *hostnqn;
	char *hostaddr;
	char *hostsvcid;
	char *multipath;
	struct nvme_ctrlr_opts bdev_opts;
	struct spdk_nvme_ctrlr_opts drv_opts;
};

static void
free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->priority);
	free(req->subnqn);
	free(req->hostnqn);
	free(req->hostaddr);
	free(req->hostsvcid);
	free(req->multipath);
}

static int
bdev_nvme_decode_reftag(const struct spdk_json_val *val, void *out)
{
	uint32_t *flag = out;
	bool reftag;
	int rc;

	rc = spdk_json_decode_bool(val, &reftag);
	if (rc == 0 && reftag == true) {
		*flag |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
	}

	return rc;
}
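
/*
 * Like bdev_nvme_decode_reftag() above: translate a JSON boolean into the
 * corresponding end-to-end protection check flag on the shared prchk_flags
 * bitmask, so "prchk_reftag" and "prchk_guard" can both target the same
 * decoder output field.
 */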
static int
bdev_nvme_decode_guard(const struct spdk_json_val *val, void *out)
{
	uint32_t *flag = out;
	bool guard;
	int rc;

	rc = spdk_json_decode_bool(val, &guard);
	if (rc == 0 && guard == true) {
		*flag |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
	}

	return rc;
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string},
	{"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string},

	{"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true},
	{"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true},
	{"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true},
	{"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true},
	{"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true},
	{"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true},

	{"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_reftag, true},
	{"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_guard, true},
	{"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.header_digest), spdk_json_decode_bool, true},
	{"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.data_digest), spdk_json_decode_bool, true},
	{"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true},
	{"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), spdk_json_decode_string, true},
	{"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.num_io_queues), spdk_json_decode_uint32, true},
	{"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true},
	{"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true},
	{"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true},
};

#define NVME_MAX_BDEVS_PER_RPC 128

struct rpc_bdev_nvme_attach_controller_ctx {
	struct rpc_bdev_nvme_attach_controller req;
	uint32_t count;
	size_t bdev_count;
	const char *names[NVME_MAX_BDEVS_PER_RPC];
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_attach_controller_examined(void *cb_ctx)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;
	struct spdk_json_write_ctx *w;
	size_t i;

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_array_begin(w);
	for (i = 0; i < ctx->bdev_count; i++) {
		spdk_json_write_string(w, ctx->names[i]);
	}
	spdk_json_write_array_end(w);
	spdk_jsonrpc_end_result(request, w);

	free_rpc_bdev_nvme_attach_controller(&ctx->req);
	free(ctx);
}

static void
rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;

	if (rc < 0) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		free_rpc_bdev_nvme_attach_controller(&ctx->req);
		free(ctx);
		return;
	}

	ctx->bdev_count = bdev_count;
	spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx);
}
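
/*
 * bdev_nvme_attach_controller: validate the transport ID pieces supplied by
 * the caller, then either create a brand new controller or, when a controller
 * with the same name already exists, add another path to it according to the
 * 'multipath' parameter ("disable", "failover" or "multipath").
 *
 * An illustrative JSON-RPC request for an NVMe-oF TCP target (the addresses
 * and NQN below are placeholders, not values taken from this file):
 *
 *   { "jsonrpc": "2.0", "method": "bdev_nvme_attach_controller", "id": 1,
 *     "params": { "name": "Nvme0", "trtype": "tcp", "traddr": "192.168.0.10",
 *                 "trsvcid": "4420", "subnqn": "nqn.2016-06.io.spdk:cnode1" } }
 */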
static void
rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx;
	struct spdk_nvme_transport_id trid = {};
	const struct spdk_nvme_ctrlr_opts *drv_opts;
	const struct spdk_nvme_transport_id *ctrlr_trid;
	struct nvme_ctrlr *ctrlr = NULL;
	size_t len, maxlen;
	bool multipath = false;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.drv_opts, sizeof(ctx->req.drv_opts));
	bdev_nvme_get_default_ctrlr_opts(&ctx->req.bdev_opts);

	if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	/* Parse trstring */
	rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype);
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
						     ctx->req.trtype);
		goto cleanup;
	}

	/* Parse trtype */
	rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
	assert(rc == 0);

	/* Parse traddr */
	maxlen = sizeof(trid.traddr);
	len = strnlen(ctx->req.traddr, maxlen);
	if (len == maxlen) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
						     ctx->req.traddr);
		goto cleanup;
	}
	memcpy(trid.traddr, ctx->req.traddr, len + 1);

	/* Parse adrfam */
	if (ctx->req.adrfam) {
		rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     ctx->req.adrfam);
			goto cleanup;
		}
	}

	/* Parse trsvcid */
	if (ctx->req.trsvcid) {
		maxlen = sizeof(trid.trsvcid);
		len = strnlen(ctx->req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     ctx->req.trsvcid);
			goto cleanup;
		}
		memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1);
	}

	/* Parse priority for the NVMe-oF transport connection */
	if (ctx->req.priority) {
		trid.priority = spdk_strtol(ctx->req.priority, 10);
	}

	/* Parse subnqn */
	if (ctx->req.subnqn) {
		maxlen = sizeof(trid.subnqn);
		len = strnlen(ctx->req.subnqn, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
							     ctx->req.subnqn);
			goto cleanup;
		}
		memcpy(trid.subnqn, ctx->req.subnqn, len + 1);
	}

	if (ctx->req.hostnqn) {
		snprintf(ctx->req.drv_opts.hostnqn, sizeof(ctx->req.drv_opts.hostnqn), "%s",
			 ctx->req.hostnqn);
	}

	if (ctx->req.hostaddr) {
		maxlen = sizeof(ctx->req.drv_opts.src_addr);
		len = strnlen(ctx->req.hostaddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s",
							     ctx->req.hostaddr);
			goto cleanup;
		}
		snprintf(ctx->req.drv_opts.src_addr, maxlen, "%s", ctx->req.hostaddr);
	}

	if (ctx->req.hostsvcid) {
		maxlen = sizeof(ctx->req.drv_opts.src_svcid);
		len = strnlen(ctx->req.hostsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s",
							     ctx->req.hostsvcid);
			goto cleanup;
		}
		snprintf(ctx->req.drv_opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid);
	}

	ctrlr = nvme_ctrlr_get_by_name(ctx->req.name);

	if (ctrlr) {
		if (ctx->req.multipath == NULL) {
			/* For now, this means add a failover path. This maintains backward compatibility
			 * with past behavior. In the future, this behavior will change to "disable". */
			SPDK_ERRLOG("The multipath parameter was not specified to bdev_nvme_attach_controller but "
				    "it was used to add a failover path. This behavior will default to rejecting "
				    "the request in the future. Specify the 'multipath' parameter to control the behavior\n");
			ctx->req.multipath = strdup("failover");
			if (ctx->req.multipath == NULL) {
				SPDK_ERRLOG("cannot allocate multipath failover string\n");
				spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
				goto cleanup;
			}
		}

		/* This controller already exists. Check what the user wants to do. */
		if (strcasecmp(ctx->req.multipath, "disable") == 0) {
			/* The user does not want to do any form of multipathing. */
			spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
							     "A controller named %s already exists and multipath is disabled\n",
							     ctx->req.name);
			goto cleanup;

		} else if (strcasecmp(ctx->req.multipath, "failover") != 0 &&
			   strcasecmp(ctx->req.multipath, "multipath") != 0) {
			/* Invalid multipath option */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "Invalid multipath parameter: %s\n",
							     ctx->req.multipath);
			goto cleanup;
		}

		/* The user wants to add this as a failover path or add this to create multipath. */
		drv_opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr);
		ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr);

		if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 &&
		    strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 &&
		    strncmp(ctx->req.drv_opts.src_addr, drv_opts->src_addr, sizeof(drv_opts->src_addr)) == 0 &&
		    strncmp(ctx->req.drv_opts.src_svcid, drv_opts->src_svcid, sizeof(drv_opts->src_svcid)) == 0) {
			/* The exact same network path can't be added a second time. */
			spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
							     "A controller named %s already exists with the specified network path\n",
							     ctx->req.name);
			goto cleanup;
		}

		if (strncmp(trid.subnqn,
			    ctrlr_trid->subnqn,
			    SPDK_NVMF_NQN_MAX_LEN) != 0) {
			/* A different SUBNQN is not allowed when specifying the same controller name. */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists, but uses a different subnqn (%s)\n",
							     ctx->req.name, ctrlr_trid->subnqn);
			goto cleanup;
		}

		if (strncmp(ctx->req.drv_opts.hostnqn, drv_opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) {
			/* A different HOSTNQN is not allowed when specifying the same controller name. */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists, but uses a different hostnqn (%s)\n",
							     ctx->req.name, drv_opts->hostnqn);
			goto cleanup;
		}

		if (ctx->req.bdev_opts.prchk_flags) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists. To add a path, do not specify PI options.\n",
							     ctx->req.name);
			goto cleanup;
		}

		ctx->req.bdev_opts.prchk_flags = ctrlr->opts.prchk_flags;
	}

	if (ctx->req.multipath != NULL && strcasecmp(ctx->req.multipath, "multipath") == 0) {
		multipath = true;
	}

	if (ctx->req.drv_opts.num_io_queues == 0 || ctx->req.drv_opts.num_io_queues > UINT16_MAX + 1) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
						     "num_io_queues out of bounds, min: %u max: %u\n",
						     1, UINT16_MAX + 1);
		goto cleanup;
	}

	ctx->request = request;
	ctx->count = NVME_MAX_BDEVS_PER_RPC;
	/* Should already be zero due to the calloc(), but set explicitly for clarity. */
	ctx->req.bdev_opts.from_discovery_service = false;
	rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count,
			      rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.drv_opts,
			      &ctx->req.bdev_opts, multipath);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	return;

cleanup:
	free_rpc_bdev_nvme_attach_controller(&ctx->req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller,
		  SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_bdev)

static void
rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx)
{
	struct spdk_json_write_ctx *w = ctx;
	struct nvme_ctrlr *nvme_ctrlr;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", nbdev_ctrlr->name);

	spdk_json_write_named_array_begin(w, "ctrlrs");
	TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
		nvme_ctrlr_info_json(w, nvme_ctrlr);
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);
}

struct rpc_bdev_nvme_get_controllers {
	char *name;
};

static void
free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true},
};
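
/*
 * bdev_nvme_get_controllers: dump every registered NVMe bdev controller (or
 * only the one matching the optional "name" parameter) as a JSON array, with
 * one entry per controller and a nested "ctrlrs" array describing each path.
 */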
static void
rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request,
			      const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_get_controllers req = {};
	struct spdk_json_write_ctx *w;
	struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL;

	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders,
					      SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	if (req.name) {
		nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name);
		if (nbdev_ctrlr == NULL) {
			SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Controller %s does not exist", req.name);
			goto cleanup;
		}
	}

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_array_begin(w);

	if (nbdev_ctrlr != NULL) {
		rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w);
	} else {
		nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w);
	}

	spdk_json_write_array_end(w);

	spdk_jsonrpc_end_result(request, w);

cleanup:
	free_rpc_bdev_nvme_get_controllers(&req);
}
SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_get_controllers, get_nvme_controllers)

struct rpc_bdev_nvme_detach_controller {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *subnqn;
	char *hostaddr;
	char *hostsvcid;
};

static void
free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->subnqn);
	free(req->hostaddr);
	free(req->hostsvcid);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true},
	{"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true},
	{"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true},
	{"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true},
	{"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true},
	{"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true},
};
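
/*
 * bdev_nvme_detach_controller: remove an entire controller by name, or only a
 * single path when transport ID / host ID fields are supplied.  Every field
 * besides "name" is optional and only narrows down which path of a multipath
 * controller should be deleted; the heavy lifting is done by
 * bdev_nvme_delete().
 */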
static void
rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_detach_controller req = {NULL};
	struct nvme_path_id path = {};
	size_t len, maxlen;
	int rc = 0;

	if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders),
				    &req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	if (req.trtype != NULL) {
		rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
							     req.trtype);
			goto cleanup;
		}

		rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
							     req.trtype);
			goto cleanup;
		}
	}

	if (req.traddr != NULL) {
		maxlen = sizeof(path.trid.traddr);
		len = strnlen(req.traddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
							     req.traddr);
			goto cleanup;
		}
		memcpy(path.trid.traddr, req.traddr, len + 1);
	}

	if (req.adrfam != NULL) {
		rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     req.adrfam);
			goto cleanup;
		}
	}

	if (req.trsvcid != NULL) {
		maxlen = sizeof(path.trid.trsvcid);
		len = strnlen(req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     req.trsvcid);
			goto cleanup;
		}
		memcpy(path.trid.trsvcid, req.trsvcid, len + 1);
	}

	/* Parse subnqn */
	if (req.subnqn != NULL) {
		maxlen = sizeof(path.trid.subnqn);
		len = strnlen(req.subnqn, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
							     req.subnqn);
			goto cleanup;
		}
		memcpy(path.trid.subnqn, req.subnqn, len + 1);
	}

	if (req.hostaddr) {
		maxlen = sizeof(path.hostid.hostaddr);
		len = strnlen(req.hostaddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s",
							     req.hostaddr);
			goto cleanup;
		}
		snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr);
	}

	if (req.hostsvcid) {
		maxlen = sizeof(path.hostid.hostsvcid);
		len = strnlen(req.hostsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s",
							     req.hostsvcid);
			goto cleanup;
		}
		snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid);
	}

	rc = bdev_nvme_delete(req.name, &path);

	if (rc != 0) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	spdk_jsonrpc_send_bool_response(request, true);

cleanup:
	free_rpc_bdev_nvme_detach_controller(&req);
}
SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller,
		  SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_detach_controller, delete_nvme_controller)

struct rpc_apply_firmware {
	char *filename;
	char *bdev_name;
};

static void
free_rpc_apply_firmware(struct rpc_apply_firmware *req)
{
	free(req->filename);
	free(req->bdev_name);
}

static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = {
	{"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string},
	{"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string},
};

struct firmware_update_info {
	void *fw_image;
	void *p;
	unsigned int size;
	unsigned int size_remaining;
	unsigned int offset;
	unsigned int transfer;

	void *desc;
	struct spdk_io_channel *ch;
	struct spdk_jsonrpc_request *request;
	struct spdk_nvme_ctrlr *ctrlr;
	open_descriptors_t desc_head;
	struct rpc_apply_firmware *req;
};

static void
_apply_firmware_cleanup(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

static void
apply_firmware_cleanup(void *cb_arg)
{
	struct open_descriptors *opt, *tmp;
	struct firmware_update_info *firm_ctx = cb_arg;

	if (!firm_ctx) {
		return;
	}

	if (firm_ctx->fw_image) {
		spdk_free(firm_ctx->fw_image);
	}

	if (firm_ctx->req) {
		free_rpc_apply_firmware(firm_ctx->req);
		free(firm_ctx->req);
	}

	if (firm_ctx->ch) {
		spdk_put_io_channel(firm_ctx->ch);
	}

	TAILQ_FOREACH_SAFE(opt, &firm_ctx->desc_head, tqlst, tmp) {
		TAILQ_REMOVE(&firm_ctx->desc_head, opt, tqlst);
		/* Close the underlying bdev on the same thread that opened it. */
		if (opt->thread && opt->thread != spdk_get_thread()) {
			spdk_thread_send_msg(opt->thread, _apply_firmware_cleanup, opt->desc);
		} else {
			spdk_bdev_close(opt->desc);
		}
		free(opt);
	}
	free(firm_ctx);
}

static void
apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_json_write_ctx *w;
	struct firmware_update_info *firm_ctx = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "firmware commit failed.");
		apply_firmware_cleanup(firm_ctx);
		return;
	}

	if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) {
		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Controller reset failed.");
		apply_firmware_cleanup(firm_ctx);
		return;
	}

	w = spdk_jsonrpc_begin_result(firm_ctx->request);
	spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress.");
	spdk_jsonrpc_end_result(firm_ctx->request, w);
	apply_firmware_cleanup(firm_ctx);
}
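
/*
 * Completion callback for each FIRMWARE_IMAGE_DOWNLOAD chunk.  The image is
 * pushed to the controller in 4096-byte admin passthru transfers; once
 * size_remaining reaches zero, a FIRMWARE_COMMIT (replace and enable image)
 * command is issued, and its completion triggers a controller reset in
 * apply_firmware_complete_reset() above.
 */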
static void
apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_fw_commit fw_commit;
	int slot = 0;
	int rc;
	struct firmware_update_info *firm_ctx = cb_arg;
	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "firmware download failed.");
		apply_firmware_cleanup(firm_ctx);
		return;
	}

	firm_ctx->p += firm_ctx->transfer;
	firm_ctx->offset += firm_ctx->transfer;
	firm_ctx->size_remaining -= firm_ctx->transfer;

	switch (firm_ctx->size_remaining) {
	case 0:
		/* Firmware download completed. Commit the firmware. */
		memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
		fw_commit.fs = slot;
		fw_commit.ca = commit_action;

		cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
		memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t));
		rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0,
						   apply_firmware_complete_reset, firm_ctx);
		if (rc) {
			spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							 "firmware commit failed.");
			apply_firmware_cleanup(firm_ctx);
			return;
		}
		break;
	default:
		firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);
		cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;

		cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
		cmd.cdw11 = firm_ctx->offset >> 2;
		rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p,
						   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
		if (rc) {
			spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							 "firmware download failed.");
			apply_firmware_cleanup(firm_ctx);
			return;
		}
		break;
	}
}

static void
apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request,
			     const struct spdk_json_val *params)
{
	int rc;
	int fd = -1;
	struct stat fw_stat;
	struct spdk_nvme_ctrlr *ctrlr;
	char msg[1024];
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev2;
	struct open_descriptors *opt;
	struct spdk_bdev_desc *desc;
	struct spdk_nvme_cmd *cmd;
	struct firmware_update_info *firm_ctx;

	firm_ctx = calloc(1, sizeof(struct firmware_update_info));
	if (!firm_ctx) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error.");
		return;
	}
	firm_ctx->fw_image = NULL;
	TAILQ_INIT(&firm_ctx->desc_head);
	firm_ctx->request = request;

	firm_ctx->req = calloc(1, sizeof(struct rpc_apply_firmware));
	if (!firm_ctx->req) {
		snprintf(msg, sizeof(msg), "Memory allocation error.");
		goto err;
	}

	if (spdk_json_decode_object(params, rpc_apply_firmware_decoders,
				    SPDK_COUNTOF(rpc_apply_firmware_decoders), firm_ctx->req)) {
		snprintf(msg, sizeof(msg), "spdk_json_decode_object failed.");
		goto err;
	}

	if ((bdev = spdk_bdev_get_by_name(firm_ctx->req->bdev_name)) == NULL) {
		snprintf(msg, sizeof(msg), "bdev %s was not found", firm_ctx->req->bdev_name);
		goto err;
	}

	if ((ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) {
		snprintf(msg, sizeof(msg), "Controller information for %s was not found.",
			 firm_ctx->req->bdev_name);
		goto err;
	}
	firm_ctx->ctrlr = ctrlr;

	for (bdev2 = spdk_bdev_first(); bdev2; bdev2 = spdk_bdev_next(bdev2)) {

		if (bdev_nvme_get_ctrlr(bdev2) != ctrlr) {
			continue;
		}

		if (!(opt = malloc(sizeof(struct open_descriptors)))) {
			snprintf(msg, sizeof(msg), "Memory allocation error.");
			goto err;
		}

		if (spdk_bdev_open_ext(spdk_bdev_get_name(bdev2), true, apply_firmware_open_cb, NULL, &desc) != 0) {
			snprintf(msg, sizeof(msg), "Device %s is in use.", spdk_bdev_get_name(bdev2));
			free(opt);
			goto err;
		}

		/* Save the thread where the base device is opened */
		opt->thread = spdk_get_thread();

		opt->desc = desc;
		opt->bdev = bdev2;
		TAILQ_INSERT_TAIL(&firm_ctx->desc_head, opt, tqlst);
	}

	/*
	 * Find the descriptor associated with our bdev.
	 */
	firm_ctx->desc = NULL;
	TAILQ_FOREACH(opt, &firm_ctx->desc_head, tqlst) {
		if (opt->bdev == bdev) {
			firm_ctx->desc = opt->desc;
			break;
		}
	}

	if (!firm_ctx->desc) {
		snprintf(msg, sizeof(msg), "No descriptor was found.");
		goto err;
	}

	firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc);
	if (!firm_ctx->ch) {
		snprintf(msg, sizeof(msg), "No channels were found.");
		goto err;
	}

	fd = open(firm_ctx->req->filename, O_RDONLY);
	if (fd < 0) {
		snprintf(msg, sizeof(msg), "open file failed.");
		goto err;
	}

	rc = fstat(fd, &fw_stat);
	if (rc < 0) {
		close(fd);
		snprintf(msg, sizeof(msg), "fstat failed.");
		goto err;
	}

	firm_ctx->size = fw_stat.st_size;
	if (fw_stat.st_size % 4) {
		close(fd);
		snprintf(msg, sizeof(msg), "Firmware image size is not a multiple of 4.");
		goto err;
	}

	firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL,
					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!firm_ctx->fw_image) {
		close(fd);
		snprintf(msg, sizeof(msg), "Memory allocation error.");
		goto err;
	}
	firm_ctx->p = firm_ctx->fw_image;

	if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) {
		close(fd);
		snprintf(msg, sizeof(msg), "Read firmware image failed!");
		goto err;
	}
	close(fd);

	firm_ctx->offset = 0;
	firm_ctx->size_remaining = firm_ctx->size;
	firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);

	cmd = malloc(sizeof(struct spdk_nvme_cmd));
	if (!cmd) {
		snprintf(msg, sizeof(msg), "Memory allocation error.");
		goto err;
	}
	memset(cmd, 0, sizeof(struct spdk_nvme_cmd));
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;

	cmd->cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
	cmd->cdw11 = firm_ctx->offset >> 2;

	rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, cmd, firm_ctx->p,
					   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
	if (rc == 0) {
		/* Normal return here. */
		return;
	}

	free(cmd);
	snprintf(msg, sizeof(msg), "Firmware image download failed.");
err:
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, msg);
	apply_firmware_cleanup(firm_ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_apply_firmware, apply_nvme_firmware)

struct rpc_bdev_nvme_transport_stat_ctx {
	struct spdk_jsonrpc_request *request;
	struct spdk_json_write_ctx *w;
};

static void
rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w,
			 struct spdk_nvme_transport_poll_group_stat *stat)
{
	struct spdk_nvme_rdma_device_stat *device_stats;
	uint32_t i;

	spdk_json_write_named_array_begin(w, "devices");

	for (i = 0; i < stat->rdma.num_devices; i++) {
		device_stats = &stat->rdma.device_stats[i];
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "dev_name", device_stats->name);
		spdk_json_write_named_uint64(w, "polls", device_stats->polls);
		spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls);
		spdk_json_write_named_uint64(w, "completions", device_stats->completions);
		spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests);
		spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs);
		spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates);
		spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs);
		spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
}

static void
rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w,
			 struct spdk_nvme_transport_poll_group_stat *stat)
{
	spdk_json_write_named_uint64(w, "polls", stat->pcie.polls);
	spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls);
	spdk_json_write_named_uint64(w, "completions", stat->pcie.completions);
	spdk_json_write_named_uint64(w, "cq_mmio_doorbell_updates", stat->pcie.cq_mmio_doorbell_updates);
	spdk_json_write_named_uint64(w, "cq_shadow_doorbell_updates",
				     stat->pcie.cq_shadow_doorbell_updates);
	spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests);
	spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests);
	spdk_json_write_named_uint64(w, "sq_mmio_doorbell_updates", stat->pcie.sq_mmio_doorbell_updates);
	spdk_json_write_named_uint64(w, "sq_shadow_doorbell_updates",
				     stat->pcie.sq_shadow_doorbell_updates);
}

static void
rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w,
			struct spdk_nvme_transport_poll_group_stat *stat)
{
	spdk_json_write_named_uint64(w, "polls", stat->tcp.polls);
	spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls);
	spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions);
	spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions);
	spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests);
	spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests);
}
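
/*
 * Per-channel step of bdev_nvme_get_transport_statistics: runs on each poll
 * group's thread via spdk_for_each_channel(), asks the NVMe driver for that
 * group's statistics, and emits one "thread" object whose "transports" array
 * is formatted by the transport-specific helpers above (RDMA, PCIe, TCP).
 */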
static void
rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx;
	struct spdk_io_channel *ch;
	struct nvme_poll_group *group;
	struct spdk_nvme_poll_group_stat *stat;
	struct spdk_nvme_transport_poll_group_stat *tr_stat;
	uint32_t j;
	int rc;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	rc = spdk_nvme_poll_group_get_stats(group->group, &stat);
	if (rc) {
		spdk_for_each_channel_continue(i, rc);
		return;
	}

	spdk_json_write_object_begin(ctx->w);
	spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread()));
	spdk_json_write_named_array_begin(ctx->w, "transports");

	for (j = 0; j < stat->num_transports; j++) {
		tr_stat = stat->transport_stat[j];
		spdk_json_write_object_begin(ctx->w);
		spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype));

		switch (stat->transport_stat[j]->trtype) {
		case SPDK_NVME_TRANSPORT_RDMA:
			rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat);
			break;
		case SPDK_NVME_TRANSPORT_PCIE:
			rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat);
			break;
		case SPDK_NVME_TRANSPORT_TCP:
			rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat);
			break;
		default:
			SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype,
				     spdk_nvme_transport_id_trtype_str(tr_stat->trtype));
		}
		spdk_json_write_object_end(ctx->w);
	}
	/* transports array */
	spdk_json_write_array_end(ctx->w);
	spdk_json_write_object_end(ctx->w);

	spdk_nvme_poll_group_free_stats(group->group, stat);
	spdk_for_each_channel_continue(i, 0);
}

static void
rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	spdk_json_write_array_end(ctx->w);
	spdk_json_write_object_end(ctx->w);
	spdk_jsonrpc_end_result(ctx->request, ctx->w);
	free(ctx);
}

static void
rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request,
				       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx;

	if (params) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "'bdev_nvme_get_transport_statistics' requires no arguments");
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error");
		return;
	}
	ctx->request = request;
	ctx->w = spdk_jsonrpc_begin_result(ctx->request);
	spdk_json_write_object_begin(ctx->w);
	spdk_json_write_named_array_begin(ctx->w, "poll_groups");

	spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
			      rpc_bdev_nvme_stats_per_channel,
			      ctx,
			      rpc_bdev_nvme_stats_done);
}
SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics,
		  SPDK_RPC_RUNTIME)
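
/*
 * bdev_nvme_reset_controller: bdev_nvme_reset_rpc() may invoke its completion
 * callback on a different thread, so the boolean outcome is bounced back to
 * the thread that received the RPC (ctx->orig_thread) before the JSON-RPC
 * response is sent.
 */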
struct rpc_bdev_nvme_reset_controller_req {
	char *name;
};

static void
free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string},
};

struct rpc_bdev_nvme_reset_controller_ctx {
	struct spdk_jsonrpc_request *request;
	bool success;
	struct spdk_thread *orig_thread;
};

static void
_rpc_bdev_nvme_reset_controller_cb(void *_ctx)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx;

	spdk_jsonrpc_send_bool_response(ctx->request, ctx->success);

	free(ctx);
}

static void
rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg;

	ctx->success = success;

	spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx);
}

static void
rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request,
			       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_reset_controller_req req = {NULL};
	struct rpc_bdev_nvme_reset_controller_ctx *ctx;
	struct nvme_ctrlr *nvme_ctrlr;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation failed");
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders),
				    &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL));
		goto err;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("Failed at device lookup\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto err;
	}

	ctx->request = request;
	ctx->orig_thread = spdk_get_thread();

	rc = bdev_nvme_reset_rpc(nvme_ctrlr, rpc_bdev_nvme_reset_controller_cb, ctx);
	if (rc != 0) {
		SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc));
		goto err;
	}

	free_rpc_bdev_nvme_reset_controller_req(&req);
	return;

err:
	free_rpc_bdev_nvme_reset_controller_req(&req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME)

struct rpc_get_controller_health_info {
	char *name;
};

struct spdk_nvme_health_info_context {
	struct spdk_jsonrpc_request *request;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_health_information_page health_page;
};

static void
free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = {
	{"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true},
};

static void
nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response)
{
	if (response == true) {
		spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Internal error.");
	}

	free(context);
}
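
/*
 * bdev_nvme_get_controller_health_info runs as a small admin-command chain:
 * first a GET FEATURES (temperature threshold) command, then on success a
 * GET LOG PAGE for the SMART / health information page; this completion
 * formats the identify strings and health counters into the JSON-RPC result.
 */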
static void
get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	int i;
	char buf[128];
	struct spdk_nvme_health_info_context *context = cb_arg;
	struct spdk_jsonrpc_request *request = context->request;
	struct spdk_json_write_ctx *w;
	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;
	const struct spdk_nvme_transport_id *trid = NULL;
	const struct spdk_nvme_ctrlr_data *cdata = NULL;
	struct spdk_nvme_health_information_page *health_page = NULL;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("get log page failed\n");
		return;
	}

	if (ctrlr == NULL) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("ctrlr is NULL\n");
		return;
	} else {
		trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);
		cdata = spdk_nvme_ctrlr_get_data(ctrlr);
		health_page = &(context->health_page);
	}

	w = spdk_jsonrpc_begin_result(request);

	spdk_json_write_object_begin(w);
	snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "model_number", buf);
	snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "serial_number", buf);
	snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "firmware_revision", buf);
	spdk_json_write_named_string(w, "traddr", trid->traddr);
	spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273);
	spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare);
	spdk_json_write_named_uint64(w, "available_spare_threshold_percentage",
				     health_page->available_spare_threshold);
	spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used);
	spdk_json_write_named_uint128(w, "data_units_read",
				      health_page->data_units_read[0], health_page->data_units_read[1]);
	spdk_json_write_named_uint128(w, "data_units_written",
				      health_page->data_units_written[0], health_page->data_units_written[1]);
	spdk_json_write_named_uint128(w, "host_read_commands",
				      health_page->host_read_commands[0], health_page->host_read_commands[1]);
	spdk_json_write_named_uint128(w, "host_write_commands",
				      health_page->host_write_commands[0], health_page->host_write_commands[1]);
	spdk_json_write_named_uint128(w, "controller_busy_time",
				      health_page->controller_busy_time[0], health_page->controller_busy_time[1]);
	spdk_json_write_named_uint128(w, "power_cycles",
				      health_page->power_cycles[0], health_page->power_cycles[1]);
	spdk_json_write_named_uint128(w, "power_on_hours",
				      health_page->power_on_hours[0], health_page->power_on_hours[1]);
	spdk_json_write_named_uint128(w, "unsafe_shutdowns",
				      health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]);
	spdk_json_write_named_uint128(w, "media_errors",
				      health_page->media_errors[0], health_page->media_errors[1]);
	spdk_json_write_named_uint128(w, "num_err_log_entries",
				      health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]);
	spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time);
	spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes",
				     health_page->critical_temp_time);
	for (i = 0; i < 8; i++) {
		if (health_page->temp_sensor[i] != 0) {
			spdk_json_write_named_uint64(w, "temperature_sensor_celsius",
						     health_page->temp_sensor[i] - 273);
		}
	}
	spdk_json_write_object_end(w);

	spdk_jsonrpc_end_result(request, w);
	nvme_health_info_cleanup(context, false);
}

static void
get_health_log_page(struct spdk_nvme_health_info_context *context)
{
	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;

	if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
					     SPDK_NVME_GLOBAL_NS_TAG,
					     &(context->health_page), sizeof(context->health_page), 0,
					     get_health_log_page_completion, context)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
	}
}

static void
get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_health_info_context *context = cb_arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n");
	} else {
		get_health_log_page(context);
	}
}

static int
get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context)
{
	struct spdk_nvme_cmd cmd = {};

	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0,
					     get_temperature_threshold_feature_completion, context);
}

static void
get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_health_info_context *context;

	context = calloc(1, sizeof(struct spdk_nvme_health_info_context));
	if (!context) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error.");
		return;
	}

	context->request = request;
	context->ctrlr = ctrlr;

	if (get_temperature_threshold_feature(context)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n");
	}

	return;
}

static void
rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request,
		const struct spdk_json_val *params)
{
	struct rpc_get_controller_health_info req = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;

	if (!params) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Missing device name");

		return;
	}
	if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders,
				    SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		free_rpc_get_controller_health_info(&req);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Invalid parameters");

		return;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);

	if (!nvme_ctrlr) {
		SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name);
		free_rpc_get_controller_health_info(&req);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Device not found");
		return;
	}

	get_controller_health_info(request, nvme_ctrlr->ctrlr);
	free_rpc_get_controller_health_info(&req);

	return;
}
SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info", 1561 rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME) 1562 1563 struct rpc_bdev_nvme_start_discovery { 1564 char *name; 1565 char *trtype; 1566 char *adrfam; 1567 char *traddr; 1568 char *trsvcid; 1569 char *hostnqn; 1570 bool wait_for_attach; 1571 uint64_t attach_timeout_ms; 1572 struct spdk_nvme_ctrlr_opts opts; 1573 struct nvme_ctrlr_opts bdev_opts; 1574 }; 1575 1576 static void 1577 free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req) 1578 { 1579 free(req->name); 1580 free(req->trtype); 1581 free(req->adrfam); 1582 free(req->traddr); 1583 free(req->trsvcid); 1584 free(req->hostnqn); 1585 } 1586 1587 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = { 1588 {"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string}, 1589 {"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string}, 1590 {"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string}, 1591 {"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true}, 1592 {"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true}, 1593 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true}, 1594 {"wait_for_attach", offsetof(struct rpc_bdev_nvme_start_discovery, wait_for_attach), spdk_json_decode_bool, true}, 1595 {"attach_timeout_ms", offsetof(struct rpc_bdev_nvme_start_discovery, attach_timeout_ms), spdk_json_decode_uint64, true}, 1596 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 1597 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 1598 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 1599 }; 1600 1601 struct rpc_bdev_nvme_start_discovery_ctx { 1602 struct rpc_bdev_nvme_start_discovery req; 1603 struct spdk_jsonrpc_request *request; 1604 }; 1605 1606 static void 1607 rpc_bdev_nvme_start_discovery_done(void *ctx, int status) 1608 { 1609 struct spdk_jsonrpc_request *request = ctx; 1610 1611 if (status != 0) { 1612 spdk_jsonrpc_send_error_response(request, status, spdk_strerror(-status)); 1613 } else { 1614 spdk_jsonrpc_send_bool_response(request, true); 1615 } 1616 } 1617 1618 static void 1619 rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request, 1620 const struct spdk_json_val *params) 1621 { 1622 struct rpc_bdev_nvme_start_discovery_ctx *ctx; 1623 struct spdk_nvme_transport_id trid = {}; 1624 size_t len, maxlen; 1625 int rc; 1626 spdk_bdev_nvme_start_discovery_fn cb_fn; 1627 void *cb_ctx; 1628 1629 ctx = calloc(1, sizeof(*ctx)); 1630 if (!ctx) { 1631 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1632 return; 1633 } 1634 1635 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 1636 1637 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders, 1638 SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders), 1639 &ctx->req)) { 1640 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1641 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1642 "spdk_json_decode_object failed"); 1643 goto cleanup; 1644 } 1645 1646 /* Parse 
static void
rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request,
			      const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_start_discovery_ctx *ctx;
	struct spdk_nvme_transport_id trid = {};
	size_t len, maxlen;
	int rc;
	spdk_bdev_nvme_start_discovery_fn cb_fn;
	void *cb_ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts));

	if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	/* Parse trstring */
	rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype);
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
						     ctx->req.trtype);
		goto cleanup;
	}

	/* Parse trtype */
	rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
	assert(rc == 0);

	/* Parse traddr */
	maxlen = sizeof(trid.traddr);
	len = strnlen(ctx->req.traddr, maxlen);
	if (len == maxlen) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
						     ctx->req.traddr);
		goto cleanup;
	}
	memcpy(trid.traddr, ctx->req.traddr, len + 1);

	/* Parse adrfam */
	if (ctx->req.adrfam) {
		rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     ctx->req.adrfam);
			goto cleanup;
		}
	}

	/* Parse trsvcid */
	if (ctx->req.trsvcid) {
		maxlen = sizeof(trid.trsvcid);
		len = strnlen(ctx->req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     ctx->req.trsvcid);
			goto cleanup;
		}
		memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1);
	}

	if (ctx->req.hostnqn) {
		snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s",
			 ctx->req.hostnqn);
	}

	if (ctx->req.attach_timeout_ms != 0) {
		ctx->req.wait_for_attach = true;
	}

	ctx->request = request;
	cb_fn = ctx->req.wait_for_attach ? rpc_bdev_nvme_start_discovery_done : NULL;
	cb_ctx = ctx->req.wait_for_attach ? request : NULL;
	rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts, &ctx->req.bdev_opts,
				       ctx->req.attach_timeout_ms, cb_fn, cb_ctx);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
	} else if (!ctx->req.wait_for_attach) {
		rpc_bdev_nvme_start_discovery_done(request, 0);
	}

cleanup:
	free_rpc_bdev_nvme_start_discovery(&ctx->req);
	free(ctx);
}
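/*
 * Illustrative request for the RPC registered below (a sketch only; the
 * transport address and service ID are placeholders for a real discovery
 * service). All keys follow the decoder table above; a nonzero
 * attach_timeout_ms implies wait_for_attach:
 *
 *   {"jsonrpc": "2.0", "id": 2, "method": "bdev_nvme_start_discovery",
 *    "params": {"name": "nvme_auto", "trtype": "tcp", "traddr": "10.0.0.1",
 *               "trsvcid": "8009", "wait_for_attach": true,
 *               "attach_timeout_ms": 3000}}
 */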
SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery,
		  SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_stop_discovery {
	char *name;
};

static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string},
};

struct rpc_bdev_nvme_stop_discovery_ctx {
	struct rpc_bdev_nvme_stop_discovery req;
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_stop_discovery_done(void *cb_ctx)
{
	struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx;

	spdk_jsonrpc_send_bool_response(ctx->request, true);
	free(ctx->req.name);
	free(ctx);
}

static void
rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request,
			     const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_stop_discovery_ctx *ctx;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	ctx->request = request;
	rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	return;

cleanup:
	free(ctx->req.name);
	free(ctx);
}
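/*
 * Illustrative request for the RPC registered below (a sketch only; "name"
 * must match the name previously passed to bdev_nvme_start_discovery):
 *
 *   {"jsonrpc": "2.0", "id": 3, "method": "bdev_nvme_stop_discovery",
 *    "params": {"name": "nvme_auto"}}
 */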
SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery,
		  SPDK_RPC_RUNTIME)

static void
rpc_bdev_nvme_get_discovery_info(struct spdk_jsonrpc_request *request,
				 const struct spdk_json_val *params)
{
	struct spdk_json_write_ctx *w;

	w = spdk_jsonrpc_begin_result(request);
	bdev_nvme_get_discovery_info(w);
	spdk_jsonrpc_end_result(request, w);
}
SPDK_RPC_REGISTER("bdev_nvme_get_discovery_info", rpc_bdev_nvme_get_discovery_info,
		  SPDK_RPC_RUNTIME)

enum error_injection_cmd_type {
	NVME_ADMIN_CMD = 1,
	NVME_IO_CMD,
};

struct rpc_add_error_injection {
	char *name;
	enum error_injection_cmd_type cmd_type;
	uint8_t opc;
	bool do_not_submit;
	uint64_t timeout_in_us;
	uint32_t err_count;
	uint8_t sct;
	uint8_t sc;
};

static void
free_rpc_add_error_injection(struct rpc_add_error_injection *req)
{
	free(req->name);
}

static int
rpc_error_injection_decode_cmd_type(const struct spdk_json_val *val, void *out)
{
	int *cmd_type = out;

	if (spdk_json_strequal(val, "admin")) {
		*cmd_type = NVME_ADMIN_CMD;
	} else if (spdk_json_strequal(val, "io")) {
		*cmd_type = NVME_IO_CMD;
	} else {
		SPDK_ERRLOG("Invalid parameter value: cmd_type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct spdk_json_object_decoder rpc_add_error_injection_decoders[] = {
	{ "name", offsetof(struct rpc_add_error_injection, name), spdk_json_decode_string },
	{ "cmd_type", offsetof(struct rpc_add_error_injection, cmd_type), rpc_error_injection_decode_cmd_type },
	{ "opc", offsetof(struct rpc_add_error_injection, opc), spdk_json_decode_uint8 },
	{ "do_not_submit", offsetof(struct rpc_add_error_injection, do_not_submit), spdk_json_decode_bool, true },
	{ "timeout_in_us", offsetof(struct rpc_add_error_injection, timeout_in_us), spdk_json_decode_uint64, true },
	{ "err_count", offsetof(struct rpc_add_error_injection, err_count), spdk_json_decode_uint32, true },
	{ "sct", offsetof(struct rpc_add_error_injection, sct), spdk_json_decode_uint8, true},
	{ "sc", offsetof(struct rpc_add_error_injection, sc), spdk_json_decode_uint8, true},
};

struct rpc_add_error_injection_ctx {
	struct spdk_jsonrpc_request *request;
	struct rpc_add_error_injection rpc;
};

static void
rpc_add_error_injection_done(struct spdk_io_channel_iter *i, int status)
{
	struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (status) {
		spdk_jsonrpc_send_error_response(ctx->request, status,
						 "Failed to add the error injection.");
	} else {
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	}

	free_rpc_add_error_injection(&ctx->rpc);
	free(ctx);
}

static void
rpc_add_error_injection_per_channel(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair;
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr;
	int rc = 0;

	if (qpair != NULL) {
		rc = spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc,
				ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count,
				ctx->rpc.sct, ctx->rpc.sc);
	}

	spdk_for_each_channel_continue(i, rc);
}

static void
rpc_bdev_nvme_add_error_injection(
	struct spdk_jsonrpc_request *request,
	const struct spdk_json_val *params)
{
	struct rpc_add_error_injection_ctx *ctx;
	struct nvme_ctrlr *nvme_ctrlr;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}
	ctx->rpc.err_count = 1;
	ctx->request = request;

	if (spdk_json_decode_object(params,
				    rpc_add_error_injection_decoders,
				    SPDK_COUNTOF(rpc_add_error_injection_decoders),
				    &ctx->rpc)) {
		spdk_jsonrpc_send_error_response(request, -EINVAL,
						 "Failed to parse the request");
		goto cleanup;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("No controller with specified name was found.\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}

	if (ctx->rpc.cmd_type == NVME_IO_CMD) {
		spdk_for_each_channel(nvme_ctrlr,
				      rpc_add_error_injection_per_channel,
				      ctx,
				      rpc_add_error_injection_done);

		return;
	} else {
		rc = spdk_nvme_qpair_add_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc,
				ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count,
				ctx->rpc.sct, ctx->rpc.sc);
		if (rc) {
			spdk_jsonrpc_send_error_response(request, -rc,
							 "Failed to add the error injection");
		} else {
			spdk_jsonrpc_send_bool_response(ctx->request, true);
		}
	}

cleanup:
	free_rpc_add_error_injection(&ctx->rpc);
	free(ctx);
}
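/*
 * Illustrative request for the RPC registered below (a sketch only; the
 * controller name is a placeholder). "opc", "sct" and "sc" are raw NVMe
 * values; this one fails up to five READ (opcode 0x02) I/O commands with a
 * generic-status "Invalid Field in Command" completion:
 *
 *   {"jsonrpc": "2.0", "id": 4, "method": "bdev_nvme_add_error_injection",
 *    "params": {"name": "Nvme0", "cmd_type": "io", "opc": 2,
 *               "err_count": 5, "sct": 0, "sc": 2}}
 */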
SPDK_RPC_REGISTER("bdev_nvme_add_error_injection", rpc_bdev_nvme_add_error_injection,
		  SPDK_RPC_RUNTIME)

struct rpc_remove_error_injection {
	char *name;
	enum error_injection_cmd_type cmd_type;
	uint8_t opc;
};

static void
free_rpc_remove_error_injection(struct rpc_remove_error_injection *req)
{
	free(req->name);
}

static const struct spdk_json_object_decoder rpc_remove_error_injection_decoders[] = {
	{ "name", offsetof(struct rpc_remove_error_injection, name), spdk_json_decode_string },
	{ "cmd_type", offsetof(struct rpc_remove_error_injection, cmd_type), rpc_error_injection_decode_cmd_type },
	{ "opc", offsetof(struct rpc_remove_error_injection, opc), spdk_json_decode_uint8 },
};

struct rpc_remove_error_injection_ctx {
	struct spdk_jsonrpc_request *request;
	struct rpc_remove_error_injection rpc;
};

static void
rpc_remove_error_injection_done(struct spdk_io_channel_iter *i, int status)
{
	struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (status) {
		spdk_jsonrpc_send_error_response(ctx->request, status,
						 "Failed to remove the error injection.");
	} else {
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	}

	free_rpc_remove_error_injection(&ctx->rpc);
	free(ctx);
}

static void
rpc_remove_error_injection_per_channel(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair;
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr;

	if (qpair != NULL) {
		spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc);
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
rpc_bdev_nvme_remove_error_injection(struct spdk_jsonrpc_request *request,
				     const struct spdk_json_val *params)
{
	struct rpc_remove_error_injection_ctx *ctx;
	struct nvme_ctrlr *nvme_ctrlr;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}
	ctx->request = request;

	if (spdk_json_decode_object(params,
				    rpc_remove_error_injection_decoders,
				    SPDK_COUNTOF(rpc_remove_error_injection_decoders),
				    &ctx->rpc)) {
		spdk_jsonrpc_send_error_response(request, -EINVAL,
						 "Failed to parse the request");
		goto cleanup;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("No controller with specified name was found.\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}

	if (ctx->rpc.cmd_type == NVME_IO_CMD) {
		spdk_for_each_channel(nvme_ctrlr,
				      rpc_remove_error_injection_per_channel,
				      ctx,
				      rpc_remove_error_injection_done);
		return;
	} else {
		spdk_nvme_qpair_remove_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc);
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	}

cleanup:
	free_rpc_remove_error_injection(&ctx->rpc);
	free(ctx);
}
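/*
 * Illustrative request for the RPC registered below (a sketch only; it undoes
 * the injection shown for bdev_nvme_add_error_injection):
 *
 *   {"jsonrpc": "2.0", "id": 5, "method": "bdev_nvme_remove_error_injection",
 *    "params": {"name": "Nvme0", "cmd_type": "io", "opc": 2}}
 */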
SPDK_RPC_REGISTER("bdev_nvme_remove_error_injection", rpc_bdev_nvme_remove_error_injection,
		  SPDK_RPC_RUNTIME)

struct rpc_get_io_paths {
	char *name;
};

static void
free_rpc_get_io_paths(struct rpc_get_io_paths *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_get_io_paths_decoders[] = {
	{"name", offsetof(struct rpc_get_io_paths, name), spdk_json_decode_string, true},
};

struct rpc_get_io_paths_ctx {
	struct rpc_get_io_paths req;
	struct spdk_jsonrpc_request *request;
	struct spdk_json_write_ctx *w;
};

static void
rpc_bdev_nvme_get_io_paths_done(struct spdk_io_channel_iter *i, int status)
{
	struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	spdk_json_write_array_end(ctx->w);

	spdk_json_write_object_end(ctx->w);

	spdk_jsonrpc_end_result(ctx->request, ctx->w);

	free_rpc_get_io_paths(&ctx->req);
	free(ctx);
}

static void
_rpc_bdev_nvme_get_io_paths(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_poll_group *group = spdk_io_channel_get_ctx(_ch);
	struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct nvme_qpair *qpair;
	struct nvme_io_path *io_path;
	struct nvme_bdev *nbdev;

	spdk_json_write_object_begin(ctx->w);

	spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread()));

	spdk_json_write_named_array_begin(ctx->w, "io_paths");

	TAILQ_FOREACH(qpair, &group->qpair_list, tailq) {
		TAILQ_FOREACH(io_path, &qpair->io_path_list, tailq) {
			nbdev = io_path->nvme_ns->bdev;

			if (ctx->req.name != NULL &&
			    strcmp(ctx->req.name, nbdev->disk.name) != 0) {
				continue;
			}

			nvme_io_path_info_json(ctx->w, io_path);
		}
	}

	spdk_json_write_array_end(ctx->w);

	spdk_json_write_object_end(ctx->w);

	spdk_for_each_channel_continue(i, 0);
}

static void
rpc_bdev_nvme_get_io_paths(struct spdk_jsonrpc_request *request,
			   const struct spdk_json_val *params)
{
	struct rpc_get_io_paths_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	if (params != NULL &&
	    spdk_json_decode_object(params, rpc_get_io_paths_decoders,
				    SPDK_COUNTOF(rpc_get_io_paths_decoders),
				    &ctx->req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "bdev_nvme_get_io_paths accepts only an optional name parameter");

		free_rpc_get_io_paths(&ctx->req);
		free(ctx);
		return;
	}

	ctx->request = request;
	ctx->w = spdk_jsonrpc_begin_result(request);

	spdk_json_write_object_begin(ctx->w);

	spdk_json_write_named_array_begin(ctx->w, "poll_groups");

	spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
			      _rpc_bdev_nvme_get_io_paths,
			      ctx,
			      rpc_bdev_nvme_get_io_paths_done);
}
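/*
 * Illustrative request for the RPC registered below (a sketch only; the bdev
 * name is a placeholder). The optional "name" key restricts the output to one
 * bdev; the result lists I/O paths per poll group, as built by
 * _rpc_bdev_nvme_get_io_paths() above:
 *
 *   {"jsonrpc": "2.0", "id": 6, "method": "bdev_nvme_get_io_paths",
 *    "params": {"name": "Nvme0n1"}}
 */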
SPDK_RPC_REGISTER("bdev_nvme_get_io_paths", rpc_bdev_nvme_get_io_paths, SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_set_preferred_path {
	char *name;
	uint16_t cntlid;
};

static void
free_rpc_bdev_nvme_set_preferred_path(struct rpc_bdev_nvme_set_preferred_path *req)
{
	free(req->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_set_preferred_path_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_set_preferred_path, name), spdk_json_decode_string},
	{"cntlid", offsetof(struct rpc_bdev_nvme_set_preferred_path, cntlid), spdk_json_decode_uint16},
};

struct rpc_bdev_nvme_set_preferred_path_ctx {
	struct rpc_bdev_nvme_set_preferred_path req;
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_set_preferred_path_done(void *cb_arg, int rc)
{
	struct rpc_bdev_nvme_set_preferred_path_ctx *ctx = cb_arg;

	if (rc == 0) {
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	} else {
		spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc));
	}

	free_rpc_bdev_nvme_set_preferred_path(&ctx->req);
	free(ctx);
}

static void
rpc_bdev_nvme_set_preferred_path(struct spdk_jsonrpc_request *request,
				 const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_set_preferred_path_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_set_preferred_path_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_set_preferred_path_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	ctx->request = request;

	bdev_nvme_set_preferred_path(ctx->req.name, ctx->req.cntlid,
				     rpc_bdev_nvme_set_preferred_path_done, ctx);
	return;

cleanup:
	free_rpc_bdev_nvme_set_preferred_path(&ctx->req);
	free(ctx);
}
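/*
 * Illustrative request for the RPC registered below (a sketch only; the bdev
 * name and controller ID are placeholders). "name" is the multipath bdev and
 * "cntlid" selects the controller whose I/O path should be preferred:
 *
 *   {"jsonrpc": "2.0", "id": 7, "method": "bdev_nvme_set_preferred_path",
 *    "params": {"name": "Nvme0n1", "cntlid": 1}}
 */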
SPDK_RPC_REGISTER("bdev_nvme_set_preferred_path", rpc_bdev_nvme_set_preferred_path,
		  SPDK_RPC_RUNTIME)

struct rpc_set_multipath_policy {
	char *name;
	enum bdev_nvme_multipath_policy policy;
};

static void
free_rpc_set_multipath_policy(struct rpc_set_multipath_policy *req)
{
	free(req->name);
}

static int
rpc_decode_mp_policy(const struct spdk_json_val *val, void *out)
{
	enum bdev_nvme_multipath_policy *policy = out;

	if (spdk_json_strequal(val, "active_passive") == true) {
		*policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	} else if (spdk_json_strequal(val, "active_active") == true) {
		*policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	} else {
		SPDK_NOTICELOG("Invalid parameter value: policy\n");
		return -EINVAL;
	}

	return 0;
}

static const struct spdk_json_object_decoder rpc_set_multipath_policy_decoders[] = {
	{"name", offsetof(struct rpc_set_multipath_policy, name), spdk_json_decode_string},
	{"policy", offsetof(struct rpc_set_multipath_policy, policy), rpc_decode_mp_policy},
};

struct rpc_set_multipath_policy_ctx {
	struct rpc_set_multipath_policy req;
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_set_multipath_policy_done(void *cb_arg, int rc)
{
	struct rpc_set_multipath_policy_ctx *ctx = cb_arg;

	if (rc == 0) {
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	} else {
		spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc));
	}

	free_rpc_set_multipath_policy(&ctx->req);
	free(ctx);
}

static void
rpc_bdev_nvme_set_multipath_policy(struct spdk_jsonrpc_request *request,
				   const struct spdk_json_val *params)
{
	struct rpc_set_multipath_policy_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	if (spdk_json_decode_object(params, rpc_set_multipath_policy_decoders,
				    SPDK_COUNTOF(rpc_set_multipath_policy_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	ctx->request = request;

	bdev_nvme_set_multipath_policy(ctx->req.name, ctx->req.policy,
				       rpc_bdev_nvme_set_multipath_policy_done, ctx);
	return;

cleanup:
	free_rpc_set_multipath_policy(&ctx->req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_set_multipath_policy", rpc_bdev_nvme_set_multipath_policy,
		  SPDK_RPC_RUNTIME)
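/*
 * Illustrative request for the RPC registered above (a sketch only; the bdev
 * name is a placeholder). The policy string must be one of the values
 * accepted by rpc_decode_mp_policy():
 *
 *   {"jsonrpc": "2.0", "id": 8, "method": "bdev_nvme_set_multipath_policy",
 *    "params": {"name": "Nvme0n1", "policy": "active_active"}}
 */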