1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved. 6 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/stdinc.h" 36 37 #include "bdev_nvme.h" 38 39 #include "spdk/config.h" 40 41 #include "spdk/string.h" 42 #include "spdk/rpc.h" 43 #include "spdk/util.h" 44 #include "spdk/env.h" 45 #include "spdk/nvme.h" 46 #include "spdk/nvme_spec.h" 47 48 #include "spdk/log.h" 49 #include "spdk/bdev_module.h" 50 51 struct open_descriptors { 52 void *desc; 53 struct spdk_bdev *bdev; 54 TAILQ_ENTRY(open_descriptors) tqlst; 55 struct spdk_thread *thread; 56 }; 57 typedef TAILQ_HEAD(, open_descriptors) open_descriptors_t; 58 59 static int 60 rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out) 61 { 62 enum spdk_bdev_timeout_action *action = out; 63 64 if (spdk_json_strequal(val, "none") == true) { 65 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE; 66 } else if (spdk_json_strequal(val, "abort") == true) { 67 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT; 68 } else if (spdk_json_strequal(val, "reset") == true) { 69 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET; 70 } else { 71 SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n"); 72 return -EINVAL; 73 } 74 75 return 0; 76 } 77 78 static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = { 79 {"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true}, 80 {"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true}, 81 {"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true}, 82 {"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true}, 83 {"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 84 {"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true}, 85 {"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true}, 86 {"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true}, 87 {"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true}, 88 {"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true}, 89 {"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true}, 90 {"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true}, 91 {"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true}, 92 {"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 93 {"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true}, 94 {"transport_ack_timeout", offsetof(struct spdk_bdev_nvme_opts, transport_ack_timeout), spdk_json_decode_uint8, true}, 95 {"ctrlr_loss_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 96 {"reconnect_delay_sec", offsetof(struct spdk_bdev_nvme_opts, reconnect_delay_sec), spdk_json_decode_uint32, true}, 97 {"fast_io_fail_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 98 {"disable_auto_failback", offsetof(struct spdk_bdev_nvme_opts, disable_auto_failback), spdk_json_decode_bool, true}, 99 }; 100 101 static void 102 rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request, 103 const struct spdk_json_val *params) 104 { 105 struct spdk_bdev_nvme_opts opts; 106 int rc; 107 108 bdev_nvme_get_opts(&opts); 109 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders, 110 SPDK_COUNTOF(rpc_bdev_nvme_options_decoders), 111 &opts)) { 112 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 113 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 114 "spdk_json_decode_object failed"); 115 return; 116 } 117 118 rc = bdev_nvme_set_opts(&opts); 119 if (rc) { 120 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 121 return; 122 } 123 124 spdk_jsonrpc_send_bool_response(request, true); 125 126 return; 127 } 128 SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options, 129 SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME) 130 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_options, set_bdev_nvme_options) 131 132 struct rpc_bdev_nvme_hotplug { 133 bool enabled; 134 uint64_t period_us; 135 }; 136 137 static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = { 138 {"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false}, 139 {"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true}, 140 }; 141 142 static void 143 rpc_bdev_nvme_set_hotplug_done(void *ctx) 144 { 145 struct spdk_jsonrpc_request *request = ctx; 146 147 spdk_jsonrpc_send_bool_response(request, true); 148 } 149 150 static void 151 rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request, 152 const struct spdk_json_val *params) 153 { 154 struct rpc_bdev_nvme_hotplug req = {false, 0}; 155 int rc; 156 157 if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders, 158 SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) { 159 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 160 rc = -EINVAL; 161 goto invalid; 162 } 163 164 rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done, 165 request); 166 if (rc) { 167 goto invalid; 168 } 169 170 return; 171 invalid: 172 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc)); 173 } 174 SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME) 175 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_hotplug, set_bdev_nvme_hotplug) 176 177 struct rpc_bdev_nvme_attach_controller { 178 char *name; 179 char *trtype; 180 char *adrfam; 181 char *traddr; 182 char *trsvcid; 183 char *priority; 184 char *subnqn; 185 char *hostnqn; 186 char *hostaddr; 187 char *hostsvcid; 188 char *multipath; 189 struct nvme_ctrlr_opts bdev_opts; 190 struct spdk_nvme_ctrlr_opts drv_opts; 191 }; 192 193 static void 194 free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req) 195 { 196 free(req->name); 197 free(req->trtype); 198 free(req->adrfam); 199 free(req->traddr); 200 free(req->trsvcid); 201 free(req->priority); 202 free(req->subnqn); 203 free(req->hostnqn); 204 free(req->hostaddr); 205 free(req->hostsvcid); 206 free(req->multipath); 207 } 208 209 static int 210 bdev_nvme_decode_reftag(const struct spdk_json_val *val, void *out) 211 { 212 uint32_t *flag = out; 213 bool reftag; 214 int rc; 215 216 rc = spdk_json_decode_bool(val, &reftag); 217 if (rc == 0 && reftag == true) { 218 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; 219 } 220 221 return rc; 222 } 223 224 static int 225 bdev_nvme_decode_guard(const struct spdk_json_val *val, void *out) 226 { 227 uint32_t *flag = out; 228 bool guard; 229 int rc; 230 231 rc = spdk_json_decode_bool(val, &guard); 232 if (rc == 0 && guard == true) { 233 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD; 234 } 235 236 return rc; 237 } 238 239 static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = { 240 {"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string}, 241 {"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string}, 242 {"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string}, 243 244 {"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true}, 245 {"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true}, 246 {"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true}, 247 {"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true}, 248 {"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true}, 249 {"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true}, 250 {"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true}, 251 252 {"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_reftag, true}, 253 {"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_guard, true}, 254 {"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.header_digest), spdk_json_decode_bool, true}, 255 {"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.data_digest), spdk_json_decode_bool, true}, 256 {"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true}, 257 {"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), spdk_json_decode_string, true}, 258 {"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.num_io_queues), spdk_json_decode_uint32, true}, 259 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 260 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 261 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 262 }; 263 264 #define NVME_MAX_BDEVS_PER_RPC 128 265 266 struct rpc_bdev_nvme_attach_controller_ctx { 267 struct rpc_bdev_nvme_attach_controller req; 268 uint32_t count; 269 size_t bdev_count; 270 const char *names[NVME_MAX_BDEVS_PER_RPC]; 271 struct spdk_jsonrpc_request *request; 272 }; 273 274 static void 275 rpc_bdev_nvme_attach_controller_examined(void *cb_ctx) 276 { 277 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 278 struct spdk_jsonrpc_request *request = ctx->request; 279 struct spdk_json_write_ctx *w; 280 size_t i; 281 282 w = spdk_jsonrpc_begin_result(request); 283 spdk_json_write_array_begin(w); 284 for (i = 0; i < ctx->bdev_count; i++) { 285 spdk_json_write_string(w, ctx->names[i]); 286 } 287 spdk_json_write_array_end(w); 288 spdk_jsonrpc_end_result(request, w); 289 290 free_rpc_bdev_nvme_attach_controller(&ctx->req); 291 free(ctx); 292 } 293 294 static void 295 rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc) 296 { 297 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 298 struct spdk_jsonrpc_request *request = ctx->request; 299 300 if (rc < 0) { 301 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); 302 free_rpc_bdev_nvme_attach_controller(&ctx->req); 303 free(ctx); 304 return; 305 } 306 307 ctx->bdev_count = bdev_count; 308 spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx); 309 } 310 311 static void 312 rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, 313 const struct spdk_json_val *params) 314 { 315 struct rpc_bdev_nvme_attach_controller_ctx *ctx; 316 struct spdk_nvme_transport_id trid = {}; 317 const struct spdk_nvme_ctrlr_opts *drv_opts; 318 const struct spdk_nvme_transport_id *ctrlr_trid; 319 struct nvme_ctrlr *ctrlr = NULL; 320 size_t len, maxlen; 321 bool multipath = false; 322 int rc; 323 324 ctx = calloc(1, sizeof(*ctx)); 325 if (!ctx) { 326 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 327 return; 328 } 329 330 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.drv_opts, sizeof(ctx->req.drv_opts)); 331 bdev_nvme_get_default_ctrlr_opts(&ctx->req.bdev_opts); 332 333 if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders, 334 SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders), 335 &ctx->req)) { 336 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 337 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 338 "spdk_json_decode_object failed"); 339 goto cleanup; 340 } 341 342 /* Parse trstring */ 343 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 344 if (rc < 0) { 345 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 346 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 347 ctx->req.trtype); 348 goto cleanup; 349 } 350 351 /* Parse trtype */ 352 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 353 assert(rc == 0); 354 355 /* Parse traddr */ 356 maxlen = sizeof(trid.traddr); 357 len = strnlen(ctx->req.traddr, maxlen); 358 if (len == maxlen) { 359 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 360 ctx->req.traddr); 361 goto cleanup; 362 } 363 memcpy(trid.traddr, ctx->req.traddr, len + 1); 364 365 /* Parse adrfam */ 366 if (ctx->req.adrfam) { 367 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 368 if (rc < 0) { 369 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 370 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 371 ctx->req.adrfam); 372 goto cleanup; 373 } 374 } 375 376 /* Parse trsvcid */ 377 if (ctx->req.trsvcid) { 378 maxlen = sizeof(trid.trsvcid); 379 len = strnlen(ctx->req.trsvcid, maxlen); 380 if (len == maxlen) { 381 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 382 ctx->req.trsvcid); 383 goto cleanup; 384 } 385 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 386 } 387 388 /* Parse priority for the NVMe-oF transport connection */ 389 if (ctx->req.priority) { 390 trid.priority = spdk_strtol(ctx->req.priority, 10); 391 } 392 393 /* Parse subnqn */ 394 if (ctx->req.subnqn) { 395 maxlen = sizeof(trid.subnqn); 396 len = strnlen(ctx->req.subnqn, maxlen); 397 if (len == maxlen) { 398 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 399 ctx->req.subnqn); 400 goto cleanup; 401 } 402 memcpy(trid.subnqn, ctx->req.subnqn, len + 1); 403 } 404 405 if (ctx->req.hostnqn) { 406 snprintf(ctx->req.drv_opts.hostnqn, sizeof(ctx->req.drv_opts.hostnqn), "%s", 407 ctx->req.hostnqn); 408 } 409 410 if (ctx->req.hostaddr) { 411 maxlen = sizeof(ctx->req.drv_opts.src_addr); 412 len = strnlen(ctx->req.hostaddr, maxlen); 413 if (len == maxlen) { 414 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 415 ctx->req.hostaddr); 416 goto cleanup; 417 } 418 snprintf(ctx->req.drv_opts.src_addr, maxlen, "%s", ctx->req.hostaddr); 419 } 420 421 if (ctx->req.hostsvcid) { 422 maxlen = sizeof(ctx->req.drv_opts.src_svcid); 423 len = strnlen(ctx->req.hostsvcid, maxlen); 424 if (len == maxlen) { 425 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 426 ctx->req.hostsvcid); 427 goto cleanup; 428 } 429 snprintf(ctx->req.drv_opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid); 430 } 431 432 ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); 433 434 if (ctrlr) { 435 if (ctx->req.multipath == NULL) { 436 /* For now, this means add a failover path. This maintains backward compatibility 437 * with past behavior. In the future, this behavior will change to "disable". */ 438 SPDK_ERRLOG("The multipath parameter was not specified to bdev_nvme_attach_controller but " 439 "it was used to add a failover path. This behavior will default to rejecting " 440 "the request in the future. Specify the 'multipath' parameter to control the behavior\n"); 441 ctx->req.multipath = strdup("failover"); 442 if (ctx->req.multipath == NULL) { 443 SPDK_ERRLOG("cannot allocate multipath failover string\n"); 444 goto cleanup; 445 } 446 } 447 448 /* This controller already exists. Check what the user wants to do. */ 449 if (strcasecmp(ctx->req.multipath, "disable") == 0) { 450 /* The user does not want to do any form of multipathing. */ 451 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 452 "A controller named %s already exists and multipath is disabled\n", 453 ctx->req.name); 454 goto cleanup; 455 456 } else if (strcasecmp(ctx->req.multipath, "failover") != 0 && 457 strcasecmp(ctx->req.multipath, "multipath") != 0) { 458 /* Invalid multipath option */ 459 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 460 "Invalid multipath parameter: %s\n", 461 ctx->req.multipath); 462 goto cleanup; 463 } 464 465 /* The user wants to add this as a failover path or add this to create multipath. */ 466 drv_opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr); 467 ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr); 468 469 if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 && 470 strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 && 471 strncmp(ctx->req.drv_opts.src_addr, drv_opts->src_addr, sizeof(drv_opts->src_addr)) == 0 && 472 strncmp(ctx->req.drv_opts.src_svcid, drv_opts->src_svcid, sizeof(drv_opts->src_svcid)) == 0) { 473 /* Exactly same network path can't be added a second time */ 474 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 475 "A controller named %s already exists with the specified network path\n", 476 ctx->req.name); 477 goto cleanup; 478 } 479 480 if (strncmp(trid.subnqn, 481 ctrlr_trid->subnqn, 482 SPDK_NVMF_NQN_MAX_LEN) != 0) { 483 /* Different SUBNQN is not allowed when specifying the same controller name. */ 484 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 485 "A controller named %s already exists, but uses a different subnqn (%s)\n", 486 ctx->req.name, ctrlr_trid->subnqn); 487 goto cleanup; 488 } 489 490 if (strncmp(ctx->req.drv_opts.hostnqn, drv_opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) { 491 /* Different HOSTNQN is not allowed when specifying the same controller name. */ 492 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 493 "A controller named %s already exists, but uses a different hostnqn (%s)\n", 494 ctx->req.name, drv_opts->hostnqn); 495 goto cleanup; 496 } 497 498 if (ctx->req.bdev_opts.prchk_flags) { 499 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 500 "A controller named %s already exists. To add a path, do not specify PI options.\n", 501 ctx->req.name); 502 goto cleanup; 503 } 504 505 ctx->req.bdev_opts.prchk_flags = ctrlr->opts.prchk_flags; 506 } 507 508 if (ctx->req.multipath != NULL && strcasecmp(ctx->req.multipath, "multipath") == 0) { 509 multipath = true; 510 } 511 512 if (ctx->req.drv_opts.num_io_queues == 0 || ctx->req.drv_opts.num_io_queues > UINT16_MAX + 1) { 513 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 514 "num_io_queues out of bounds, min: %u max: %u\n", 515 1, UINT16_MAX + 1); 516 goto cleanup; 517 } 518 519 ctx->request = request; 520 ctx->count = NVME_MAX_BDEVS_PER_RPC; 521 /* Should already be zero due to the calloc(), but set explicitly for clarity. */ 522 ctx->req.bdev_opts.from_discovery_service = false; 523 rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count, 524 rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.drv_opts, 525 &ctx->req.bdev_opts, multipath); 526 if (rc) { 527 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 528 goto cleanup; 529 } 530 531 return; 532 533 cleanup: 534 free_rpc_bdev_nvme_attach_controller(&ctx->req); 535 free(ctx); 536 } 537 SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller, 538 SPDK_RPC_RUNTIME) 539 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_bdev) 540 541 static void 542 rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx) 543 { 544 struct spdk_json_write_ctx *w = ctx; 545 struct nvme_ctrlr *nvme_ctrlr; 546 547 spdk_json_write_object_begin(w); 548 spdk_json_write_named_string(w, "name", nbdev_ctrlr->name); 549 550 spdk_json_write_named_array_begin(w, "ctrlrs"); 551 TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) { 552 nvme_ctrlr_info_json(w, nvme_ctrlr); 553 } 554 spdk_json_write_array_end(w); 555 spdk_json_write_object_end(w); 556 } 557 558 struct rpc_bdev_nvme_get_controllers { 559 char *name; 560 }; 561 562 static void 563 free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r) 564 { 565 free(r->name); 566 } 567 568 static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = { 569 {"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true}, 570 }; 571 572 static void 573 rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, 574 const struct spdk_json_val *params) 575 { 576 struct rpc_bdev_nvme_get_controllers req = {}; 577 struct spdk_json_write_ctx *w; 578 struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL; 579 580 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders, 581 SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders), 582 &req)) { 583 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 584 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 585 "spdk_json_decode_object failed"); 586 goto cleanup; 587 } 588 589 if (req.name) { 590 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 591 if (nbdev_ctrlr == NULL) { 592 SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name); 593 spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name); 594 goto cleanup; 595 } 596 } 597 598 w = spdk_jsonrpc_begin_result(request); 599 spdk_json_write_array_begin(w); 600 601 if (nbdev_ctrlr != NULL) { 602 rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w); 603 } else { 604 nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w); 605 } 606 607 spdk_json_write_array_end(w); 608 609 spdk_jsonrpc_end_result(request, w); 610 611 cleanup: 612 free_rpc_bdev_nvme_get_controllers(&req); 613 } 614 SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME) 615 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_get_controllers, get_nvme_controllers) 616 617 struct rpc_bdev_nvme_detach_controller { 618 char *name; 619 char *trtype; 620 char *adrfam; 621 char *traddr; 622 char *trsvcid; 623 char *subnqn; 624 char *hostaddr; 625 char *hostsvcid; 626 }; 627 628 static void 629 free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req) 630 { 631 free(req->name); 632 free(req->trtype); 633 free(req->adrfam); 634 free(req->traddr); 635 free(req->trsvcid); 636 free(req->subnqn); 637 free(req->hostaddr); 638 free(req->hostsvcid); 639 } 640 641 static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = { 642 {"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string}, 643 {"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true}, 644 {"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true}, 645 {"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true}, 646 {"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true}, 647 {"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true}, 648 {"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true}, 649 {"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true}, 650 }; 651 652 static void 653 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request, 654 const struct spdk_json_val *params) 655 { 656 struct rpc_bdev_nvme_detach_controller req = {NULL}; 657 struct nvme_path_id path = {}; 658 size_t len, maxlen; 659 int rc = 0; 660 661 if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders, 662 SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders), 663 &req)) { 664 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 665 "spdk_json_decode_object failed"); 666 goto cleanup; 667 } 668 669 if (req.trtype != NULL) { 670 rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype); 671 if (rc < 0) { 672 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 673 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 674 req.trtype); 675 goto cleanup; 676 } 677 678 rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype); 679 if (rc < 0) { 680 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 681 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 682 req.trtype); 683 goto cleanup; 684 } 685 } 686 687 if (req.traddr != NULL) { 688 maxlen = sizeof(path.trid.traddr); 689 len = strnlen(req.traddr, maxlen); 690 if (len == maxlen) { 691 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 692 req.traddr); 693 goto cleanup; 694 } 695 memcpy(path.trid.traddr, req.traddr, len + 1); 696 } 697 698 if (req.adrfam != NULL) { 699 rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam); 700 if (rc < 0) { 701 SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam); 702 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 703 req.adrfam); 704 goto cleanup; 705 } 706 } 707 708 if (req.trsvcid != NULL) { 709 maxlen = sizeof(path.trid.trsvcid); 710 len = strnlen(req.trsvcid, maxlen); 711 if (len == maxlen) { 712 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 713 req.trsvcid); 714 goto cleanup; 715 } 716 memcpy(path.trid.trsvcid, req.trsvcid, len + 1); 717 } 718 719 /* Parse subnqn */ 720 if (req.subnqn != NULL) { 721 maxlen = sizeof(path.trid.subnqn); 722 len = strnlen(req.subnqn, maxlen); 723 if (len == maxlen) { 724 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 725 req.subnqn); 726 goto cleanup; 727 } 728 memcpy(path.trid.subnqn, req.subnqn, len + 1); 729 } 730 731 if (req.hostaddr) { 732 maxlen = sizeof(path.hostid.hostaddr); 733 len = strnlen(req.hostaddr, maxlen); 734 if (len == maxlen) { 735 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 736 req.hostaddr); 737 goto cleanup; 738 } 739 snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr); 740 } 741 742 if (req.hostsvcid) { 743 maxlen = sizeof(path.hostid.hostsvcid); 744 len = strnlen(req.hostsvcid, maxlen); 745 if (len == maxlen) { 746 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 747 req.hostsvcid); 748 goto cleanup; 749 } 750 snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid); 751 } 752 753 rc = bdev_nvme_delete(req.name, &path); 754 755 if (rc != 0) { 756 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 757 goto cleanup; 758 } 759 760 spdk_jsonrpc_send_bool_response(request, true); 761 762 cleanup: 763 free_rpc_bdev_nvme_detach_controller(&req); 764 } 765 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller, 766 SPDK_RPC_RUNTIME) 767 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_detach_controller, delete_nvme_controller) 768 769 struct rpc_apply_firmware { 770 char *filename; 771 char *bdev_name; 772 }; 773 774 static void 775 free_rpc_apply_firmware(struct rpc_apply_firmware *req) 776 { 777 free(req->filename); 778 free(req->bdev_name); 779 } 780 781 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = { 782 {"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string}, 783 {"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string}, 784 }; 785 786 struct firmware_update_info { 787 void *fw_image; 788 void *p; 789 unsigned int size; 790 unsigned int size_remaining; 791 unsigned int offset; 792 unsigned int transfer; 793 794 void *desc; 795 struct spdk_io_channel *ch; 796 struct spdk_jsonrpc_request *request; 797 struct spdk_nvme_ctrlr *ctrlr; 798 open_descriptors_t desc_head; 799 struct rpc_apply_firmware *req; 800 }; 801 802 static void 803 _apply_firmware_cleanup(void *ctx) 804 { 805 struct spdk_bdev_desc *desc = ctx; 806 807 spdk_bdev_close(desc); 808 } 809 810 static void 811 apply_firmware_cleanup(void *cb_arg) 812 { 813 struct open_descriptors *opt, *tmp; 814 struct firmware_update_info *firm_ctx = cb_arg; 815 816 if (!firm_ctx) { 817 return; 818 } 819 820 if (firm_ctx->fw_image) { 821 spdk_free(firm_ctx->fw_image); 822 } 823 824 if (firm_ctx->req) { 825 free_rpc_apply_firmware(firm_ctx->req); 826 free(firm_ctx->req); 827 } 828 829 if (firm_ctx->ch) { 830 spdk_put_io_channel(firm_ctx->ch); 831 } 832 833 TAILQ_FOREACH_SAFE(opt, &firm_ctx->desc_head, tqlst, tmp) { 834 TAILQ_REMOVE(&firm_ctx->desc_head, opt, tqlst); 835 /* Close the underlying bdev on its same opened thread. */ 836 if (opt->thread && opt->thread != spdk_get_thread()) { 837 spdk_thread_send_msg(opt->thread, _apply_firmware_cleanup, opt->desc); 838 } else { 839 spdk_bdev_close(opt->desc); 840 } 841 free(opt); 842 } 843 free(firm_ctx); 844 } 845 846 static void 847 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 848 { 849 struct spdk_json_write_ctx *w; 850 struct firmware_update_info *firm_ctx = cb_arg; 851 852 spdk_bdev_free_io(bdev_io); 853 854 if (!success) { 855 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 856 "firmware commit failed."); 857 apply_firmware_cleanup(firm_ctx); 858 return; 859 } 860 861 if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) { 862 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 863 "Controller reset failed."); 864 apply_firmware_cleanup(firm_ctx); 865 return; 866 } 867 868 w = spdk_jsonrpc_begin_result(firm_ctx->request); 869 spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress."); 870 spdk_jsonrpc_end_result(firm_ctx->request, w); 871 apply_firmware_cleanup(firm_ctx); 872 } 873 874 static void 875 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 876 { 877 struct spdk_nvme_cmd cmd = {}; 878 struct spdk_nvme_fw_commit fw_commit; 879 int slot = 0; 880 int rc; 881 struct firmware_update_info *firm_ctx = cb_arg; 882 enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG; 883 884 spdk_bdev_free_io(bdev_io); 885 886 if (!success) { 887 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 888 "firmware download failed ."); 889 apply_firmware_cleanup(firm_ctx); 890 return; 891 } 892 893 firm_ctx->p += firm_ctx->transfer; 894 firm_ctx->offset += firm_ctx->transfer; 895 firm_ctx->size_remaining -= firm_ctx->transfer; 896 897 switch (firm_ctx->size_remaining) { 898 case 0: 899 /* firmware download completed. Commit firmware */ 900 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 901 fw_commit.fs = slot; 902 fw_commit.ca = commit_action; 903 904 cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT; 905 memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t)); 906 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0, 907 apply_firmware_complete_reset, firm_ctx); 908 if (rc) { 909 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 910 "firmware commit failed."); 911 apply_firmware_cleanup(firm_ctx); 912 return; 913 } 914 break; 915 default: 916 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 917 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 918 919 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 920 cmd.cdw11 = firm_ctx->offset >> 2; 921 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 922 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 923 if (rc) { 924 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 925 "firmware download failed."); 926 apply_firmware_cleanup(firm_ctx); 927 return; 928 } 929 break; 930 } 931 } 932 933 static void 934 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 935 { 936 } 937 938 static void 939 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request, 940 const struct spdk_json_val *params) 941 { 942 int rc; 943 int fd = -1; 944 struct stat fw_stat; 945 struct spdk_nvme_ctrlr *ctrlr; 946 char msg[1024]; 947 struct spdk_bdev *bdev; 948 struct spdk_bdev *bdev2; 949 struct open_descriptors *opt; 950 struct spdk_bdev_desc *desc; 951 struct spdk_nvme_cmd *cmd; 952 struct firmware_update_info *firm_ctx; 953 954 firm_ctx = calloc(1, sizeof(struct firmware_update_info)); 955 if (!firm_ctx) { 956 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 957 "Memory allocation error."); 958 return; 959 } 960 firm_ctx->fw_image = NULL; 961 TAILQ_INIT(&firm_ctx->desc_head); 962 firm_ctx->request = request; 963 964 firm_ctx->req = calloc(1, sizeof(struct rpc_apply_firmware)); 965 if (!firm_ctx->req) { 966 snprintf(msg, sizeof(msg), "Memory allocation error."); 967 goto err; 968 } 969 970 if (spdk_json_decode_object(params, rpc_apply_firmware_decoders, 971 SPDK_COUNTOF(rpc_apply_firmware_decoders), firm_ctx->req)) { 972 snprintf(msg, sizeof(msg), "spdk_json_decode_object failed."); 973 goto err; 974 } 975 976 if ((bdev = spdk_bdev_get_by_name(firm_ctx->req->bdev_name)) == NULL) { 977 snprintf(msg, sizeof(msg), "bdev %s were not found", firm_ctx->req->bdev_name); 978 goto err; 979 } 980 981 if ((ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) { 982 snprintf(msg, sizeof(msg), "Controller information for %s were not found.", 983 firm_ctx->req->bdev_name); 984 goto err; 985 } 986 firm_ctx->ctrlr = ctrlr; 987 988 for (bdev2 = spdk_bdev_first(); bdev2; bdev2 = spdk_bdev_next(bdev2)) { 989 990 if (bdev_nvme_get_ctrlr(bdev2) != ctrlr) { 991 continue; 992 } 993 994 if (!(opt = malloc(sizeof(struct open_descriptors)))) { 995 snprintf(msg, sizeof(msg), "Memory allocation error."); 996 goto err; 997 } 998 999 if (spdk_bdev_open_ext(spdk_bdev_get_name(bdev2), true, apply_firmware_open_cb, NULL, &desc) != 0) { 1000 snprintf(msg, sizeof(msg), "Device %s is in use.", firm_ctx->req->bdev_name); 1001 free(opt); 1002 goto err; 1003 } 1004 1005 /* Save the thread where the base device is opened */ 1006 opt->thread = spdk_get_thread(); 1007 1008 opt->desc = desc; 1009 opt->bdev = bdev; 1010 TAILQ_INSERT_TAIL(&firm_ctx->desc_head, opt, tqlst); 1011 } 1012 1013 /* 1014 * find a descriptor associated with our bdev 1015 */ 1016 firm_ctx->desc = NULL; 1017 TAILQ_FOREACH(opt, &firm_ctx->desc_head, tqlst) { 1018 if (opt->bdev == bdev) { 1019 firm_ctx->desc = opt->desc; 1020 break; 1021 } 1022 } 1023 1024 if (!firm_ctx->desc) { 1025 snprintf(msg, sizeof(msg), "No descriptor were found."); 1026 goto err; 1027 } 1028 1029 firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc); 1030 if (!firm_ctx->ch) { 1031 snprintf(msg, sizeof(msg), "No channels were found."); 1032 goto err; 1033 } 1034 1035 fd = open(firm_ctx->req->filename, O_RDONLY); 1036 if (fd < 0) { 1037 snprintf(msg, sizeof(msg), "open file failed."); 1038 goto err; 1039 } 1040 1041 rc = fstat(fd, &fw_stat); 1042 if (rc < 0) { 1043 close(fd); 1044 snprintf(msg, sizeof(msg), "fstat failed."); 1045 goto err; 1046 } 1047 1048 firm_ctx->size = fw_stat.st_size; 1049 if (fw_stat.st_size % 4) { 1050 close(fd); 1051 snprintf(msg, sizeof(msg), "Firmware image size is not multiple of 4."); 1052 goto err; 1053 } 1054 1055 firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL, 1056 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1057 if (!firm_ctx->fw_image) { 1058 close(fd); 1059 snprintf(msg, sizeof(msg), "Memory allocation error."); 1060 goto err; 1061 } 1062 firm_ctx->p = firm_ctx->fw_image; 1063 1064 if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) { 1065 close(fd); 1066 snprintf(msg, sizeof(msg), "Read firmware image failed!"); 1067 goto err; 1068 } 1069 close(fd); 1070 1071 firm_ctx->offset = 0; 1072 firm_ctx->size_remaining = firm_ctx->size; 1073 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 1074 1075 cmd = malloc(sizeof(struct spdk_nvme_cmd)); 1076 if (!cmd) { 1077 snprintf(msg, sizeof(msg), "Memory allocation error."); 1078 goto err; 1079 } 1080 memset(cmd, 0, sizeof(struct spdk_nvme_cmd)); 1081 cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 1082 1083 cmd->cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 1084 cmd->cdw11 = firm_ctx->offset >> 2; 1085 1086 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, cmd, firm_ctx->p, 1087 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 1088 if (rc == 0) { 1089 /* normal return here. */ 1090 return; 1091 } 1092 1093 free(cmd); 1094 snprintf(msg, sizeof(msg), "Read firmware image failed!"); 1095 err: 1096 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, msg); 1097 apply_firmware_cleanup(firm_ctx); 1098 } 1099 SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME) 1100 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_apply_firmware, apply_nvme_firmware) 1101 1102 struct rpc_bdev_nvme_transport_stat_ctx { 1103 struct spdk_jsonrpc_request *request; 1104 struct spdk_json_write_ctx *w; 1105 }; 1106 1107 static void 1108 rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w, 1109 struct spdk_nvme_transport_poll_group_stat *stat) 1110 { 1111 struct spdk_nvme_rdma_device_stat *device_stats; 1112 uint32_t i; 1113 1114 spdk_json_write_named_array_begin(w, "devices"); 1115 1116 for (i = 0; i < stat->rdma.num_devices; i++) { 1117 device_stats = &stat->rdma.device_stats[i]; 1118 spdk_json_write_object_begin(w); 1119 spdk_json_write_named_string(w, "dev_name", device_stats->name); 1120 spdk_json_write_named_uint64(w, "polls", device_stats->polls); 1121 spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls); 1122 spdk_json_write_named_uint64(w, "completions", device_stats->completions); 1123 spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests); 1124 spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs); 1125 spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates); 1126 spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs); 1127 spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates); 1128 spdk_json_write_object_end(w); 1129 } 1130 spdk_json_write_array_end(w); 1131 } 1132 1133 static void 1134 rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w, 1135 struct spdk_nvme_transport_poll_group_stat *stat) 1136 { 1137 spdk_json_write_named_uint64(w, "polls", stat->pcie.polls); 1138 spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls); 1139 spdk_json_write_named_uint64(w, "completions", stat->pcie.completions); 1140 spdk_json_write_named_uint64(w, "cq_mmio_doorbell_updates", stat->pcie.cq_mmio_doorbell_updates); 1141 spdk_json_write_named_uint64(w, "cq_shadow_doorbell_updates", 1142 stat->pcie.cq_shadow_doorbell_updates); 1143 spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests); 1144 spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests); 1145 spdk_json_write_named_uint64(w, "sq_mmio_doorbell_updates", stat->pcie.sq_mmio_doorbell_updates); 1146 spdk_json_write_named_uint64(w, "sq_shadow_doorbell_updates", 1147 stat->pcie.sq_shadow_doorbell_updates); 1148 } 1149 1150 static void 1151 rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w, 1152 struct spdk_nvme_transport_poll_group_stat *stat) 1153 { 1154 spdk_json_write_named_uint64(w, "polls", stat->tcp.polls); 1155 spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls); 1156 spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions); 1157 spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions); 1158 spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests); 1159 spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests); 1160 } 1161 1162 static void 1163 rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i) 1164 { 1165 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1166 struct spdk_io_channel *ch; 1167 struct nvme_poll_group *group; 1168 struct spdk_nvme_poll_group_stat *stat; 1169 struct spdk_nvme_transport_poll_group_stat *tr_stat; 1170 uint32_t j; 1171 int rc; 1172 1173 ctx = spdk_io_channel_iter_get_ctx(i); 1174 ch = spdk_io_channel_iter_get_channel(i); 1175 group = spdk_io_channel_get_ctx(ch); 1176 1177 rc = spdk_nvme_poll_group_get_stats(group->group, &stat); 1178 if (rc) { 1179 spdk_for_each_channel_continue(i, rc); 1180 return; 1181 } 1182 1183 spdk_json_write_object_begin(ctx->w); 1184 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 1185 spdk_json_write_named_array_begin(ctx->w, "transports"); 1186 1187 for (j = 0; j < stat->num_transports; j++) { 1188 tr_stat = stat->transport_stat[j]; 1189 spdk_json_write_object_begin(ctx->w); 1190 spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1191 1192 switch (stat->transport_stat[j]->trtype) { 1193 case SPDK_NVME_TRANSPORT_RDMA: 1194 rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat); 1195 break; 1196 case SPDK_NVME_TRANSPORT_PCIE: 1197 rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat); 1198 break; 1199 case SPDK_NVME_TRANSPORT_TCP: 1200 rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat); 1201 break; 1202 default: 1203 SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype, 1204 spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1205 } 1206 spdk_json_write_object_end(ctx->w); 1207 } 1208 /* transports array */ 1209 spdk_json_write_array_end(ctx->w); 1210 spdk_json_write_object_end(ctx->w); 1211 1212 spdk_nvme_poll_group_free_stats(group->group, stat); 1213 spdk_for_each_channel_continue(i, 0); 1214 } 1215 1216 static void 1217 rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status) 1218 { 1219 struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1220 1221 spdk_json_write_array_end(ctx->w); 1222 spdk_json_write_object_end(ctx->w); 1223 spdk_jsonrpc_end_result(ctx->request, ctx->w); 1224 free(ctx); 1225 } 1226 1227 static void 1228 rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request, 1229 const struct spdk_json_val *params) 1230 { 1231 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1232 1233 if (params) { 1234 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 1235 "'bdev_nvme_get_transport_statistics' requires no arguments"); 1236 return; 1237 } 1238 1239 ctx = calloc(1, sizeof(*ctx)); 1240 if (!ctx) { 1241 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1242 "Memory allocation error"); 1243 return; 1244 } 1245 ctx->request = request; 1246 ctx->w = spdk_jsonrpc_begin_result(ctx->request); 1247 spdk_json_write_object_begin(ctx->w); 1248 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 1249 1250 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 1251 rpc_bdev_nvme_stats_per_channel, 1252 ctx, 1253 rpc_bdev_nvme_stats_done); 1254 } 1255 SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics, 1256 SPDK_RPC_RUNTIME) 1257 1258 struct rpc_bdev_nvme_reset_controller_req { 1259 char *name; 1260 }; 1261 1262 static void 1263 free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r) 1264 { 1265 free(r->name); 1266 } 1267 1268 static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = { 1269 {"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string}, 1270 }; 1271 1272 struct rpc_bdev_nvme_reset_controller_ctx { 1273 struct spdk_jsonrpc_request *request; 1274 bool success; 1275 struct spdk_thread *orig_thread; 1276 }; 1277 1278 static void 1279 _rpc_bdev_nvme_reset_controller_cb(void *_ctx) 1280 { 1281 struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx; 1282 1283 spdk_jsonrpc_send_bool_response(ctx->request, ctx->success); 1284 1285 free(ctx); 1286 } 1287 1288 static void 1289 rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success) 1290 { 1291 struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg; 1292 1293 ctx->success = success; 1294 1295 spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx); 1296 } 1297 1298 static void 1299 rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request, 1300 const struct spdk_json_val *params) 1301 { 1302 struct rpc_bdev_nvme_reset_controller_req req = {NULL}; 1303 struct rpc_bdev_nvme_reset_controller_ctx *ctx; 1304 struct nvme_ctrlr *nvme_ctrlr; 1305 int rc; 1306 1307 ctx = calloc(1, sizeof(*ctx)); 1308 if (ctx == NULL) { 1309 SPDK_ERRLOG("Memory allocation failed\n"); 1310 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1311 "Memory allocation failed"); 1312 return; 1313 } 1314 1315 if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders, 1316 SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders), 1317 &req)) { 1318 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1319 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL)); 1320 goto err; 1321 } 1322 1323 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1324 if (nvme_ctrlr == NULL) { 1325 SPDK_ERRLOG("Failed at device lookup\n"); 1326 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1327 goto err; 1328 } 1329 1330 ctx->request = request; 1331 ctx->orig_thread = spdk_get_thread(); 1332 1333 rc = bdev_nvme_reset_rpc(nvme_ctrlr, rpc_bdev_nvme_reset_controller_cb, ctx); 1334 if (rc != 0) { 1335 SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n"); 1336 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc)); 1337 goto err; 1338 } 1339 1340 free_rpc_bdev_nvme_reset_controller_req(&req); 1341 return; 1342 1343 err: 1344 free_rpc_bdev_nvme_reset_controller_req(&req); 1345 free(ctx); 1346 } 1347 SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME) 1348 1349 struct rpc_get_controller_health_info { 1350 char *name; 1351 }; 1352 1353 struct spdk_nvme_health_info_context { 1354 struct spdk_jsonrpc_request *request; 1355 struct spdk_nvme_ctrlr *ctrlr; 1356 struct spdk_nvme_health_information_page health_page; 1357 }; 1358 1359 static void 1360 free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r) 1361 { 1362 free(r->name); 1363 } 1364 1365 static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = { 1366 {"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true}, 1367 }; 1368 1369 static void nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response) 1370 { 1371 if (response == true) { 1372 spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1373 "Internal error."); 1374 } 1375 1376 free(context); 1377 } 1378 1379 static void 1380 get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1381 { 1382 int i; 1383 char buf[128]; 1384 struct spdk_nvme_health_info_context *context = cb_arg; 1385 struct spdk_jsonrpc_request *request = context->request; 1386 struct spdk_json_write_ctx *w; 1387 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1388 const struct spdk_nvme_transport_id *trid = NULL; 1389 const struct spdk_nvme_ctrlr_data *cdata = NULL; 1390 struct spdk_nvme_health_information_page *health_page = NULL; 1391 1392 if (spdk_nvme_cpl_is_error(cpl)) { 1393 nvme_health_info_cleanup(context, true); 1394 SPDK_ERRLOG("get log page failed\n"); 1395 return; 1396 } 1397 1398 if (ctrlr == NULL) { 1399 nvme_health_info_cleanup(context, true); 1400 SPDK_ERRLOG("ctrlr is NULL\n"); 1401 return; 1402 } else { 1403 trid = spdk_nvme_ctrlr_get_transport_id(ctrlr); 1404 cdata = spdk_nvme_ctrlr_get_data(ctrlr); 1405 health_page = &(context->health_page); 1406 } 1407 1408 w = spdk_jsonrpc_begin_result(request); 1409 1410 spdk_json_write_object_begin(w); 1411 snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn); 1412 spdk_str_trim(buf); 1413 spdk_json_write_named_string(w, "model_number", buf); 1414 snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn); 1415 spdk_str_trim(buf); 1416 spdk_json_write_named_string(w, "serial_number", buf); 1417 snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr); 1418 spdk_str_trim(buf); 1419 spdk_json_write_named_string(w, "firmware_revision", buf); 1420 spdk_json_write_named_string(w, "traddr", trid->traddr); 1421 spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273); 1422 spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare); 1423 spdk_json_write_named_uint64(w, "available_spare_threshold_percentage", 1424 health_page->available_spare_threshold); 1425 spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used); 1426 spdk_json_write_named_uint128(w, "data_units_read", 1427 health_page->data_units_read[0], health_page->data_units_read[1]); 1428 spdk_json_write_named_uint128(w, "data_units_written", 1429 health_page->data_units_written[0], health_page->data_units_written[1]); 1430 spdk_json_write_named_uint128(w, "host_read_commands", 1431 health_page->host_read_commands[0], health_page->host_read_commands[1]); 1432 spdk_json_write_named_uint128(w, "host_write_commands", 1433 health_page->host_write_commands[0], health_page->host_write_commands[1]); 1434 spdk_json_write_named_uint128(w, "controller_busy_time", 1435 health_page->controller_busy_time[0], health_page->controller_busy_time[1]); 1436 spdk_json_write_named_uint128(w, "power_cycles", 1437 health_page->power_cycles[0], health_page->power_cycles[1]); 1438 spdk_json_write_named_uint128(w, "power_on_hours", 1439 health_page->power_on_hours[0], health_page->power_on_hours[1]); 1440 spdk_json_write_named_uint128(w, "unsafe_shutdowns", 1441 health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]); 1442 spdk_json_write_named_uint128(w, "media_errors", 1443 health_page->media_errors[0], health_page->media_errors[1]); 1444 spdk_json_write_named_uint128(w, "num_err_log_entries", 1445 health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]); 1446 spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time); 1447 spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes", 1448 health_page->critical_temp_time); 1449 for (i = 0; i < 8; i++) { 1450 if (health_page->temp_sensor[i] != 0) { 1451 spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273); 1452 } 1453 } 1454 spdk_json_write_object_end(w); 1455 1456 spdk_jsonrpc_end_result(request, w); 1457 nvme_health_info_cleanup(context, false); 1458 } 1459 1460 static void 1461 get_health_log_page(struct spdk_nvme_health_info_context *context) 1462 { 1463 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1464 1465 if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, 1466 SPDK_NVME_GLOBAL_NS_TAG, 1467 &(context->health_page), sizeof(context->health_page), 0, 1468 get_health_log_page_completion, context)) { 1469 nvme_health_info_cleanup(context, true); 1470 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n"); 1471 } 1472 } 1473 1474 static void 1475 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1476 { 1477 struct spdk_nvme_health_info_context *context = cb_arg; 1478 1479 if (spdk_nvme_cpl_is_error(cpl)) { 1480 nvme_health_info_cleanup(context, true); 1481 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n"); 1482 } else { 1483 get_health_log_page(context); 1484 } 1485 } 1486 1487 static int 1488 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context) 1489 { 1490 struct spdk_nvme_cmd cmd = {}; 1491 1492 cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1493 cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1494 1495 return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0, 1496 get_temperature_threshold_feature_completion, context); 1497 } 1498 1499 static void 1500 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr) 1501 { 1502 struct spdk_nvme_health_info_context *context; 1503 1504 context = calloc(1, sizeof(struct spdk_nvme_health_info_context)); 1505 if (!context) { 1506 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1507 "Memory allocation error."); 1508 return; 1509 } 1510 1511 context->request = request; 1512 context->ctrlr = ctrlr; 1513 1514 if (get_temperature_threshold_feature(context)) { 1515 nvme_health_info_cleanup(context, true); 1516 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n"); 1517 } 1518 1519 return; 1520 } 1521 1522 static void 1523 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request, 1524 const struct spdk_json_val *params) 1525 { 1526 struct rpc_get_controller_health_info req = {}; 1527 struct nvme_ctrlr *nvme_ctrlr = NULL; 1528 1529 if (!params) { 1530 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1531 "Missing device name"); 1532 1533 return; 1534 } 1535 if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders, 1536 SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) { 1537 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1538 free_rpc_get_controller_health_info(&req); 1539 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1540 "Invalid parameters"); 1541 1542 return; 1543 } 1544 1545 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1546 1547 if (!nvme_ctrlr) { 1548 SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name); 1549 free_rpc_get_controller_health_info(&req); 1550 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1551 "Device not found"); 1552 return; 1553 } 1554 1555 get_controller_health_info(request, nvme_ctrlr->ctrlr); 1556 free_rpc_get_controller_health_info(&req); 1557 1558 return; 1559 } 1560 SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info", 1561 rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME) 1562 1563 struct rpc_bdev_nvme_start_discovery { 1564 char *name; 1565 char *trtype; 1566 char *adrfam; 1567 char *traddr; 1568 char *trsvcid; 1569 char *hostnqn; 1570 bool wait_for_attach; 1571 struct spdk_nvme_ctrlr_opts opts; 1572 struct nvme_ctrlr_opts bdev_opts; 1573 }; 1574 1575 static void 1576 free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req) 1577 { 1578 free(req->name); 1579 free(req->trtype); 1580 free(req->adrfam); 1581 free(req->traddr); 1582 free(req->trsvcid); 1583 free(req->hostnqn); 1584 } 1585 1586 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = { 1587 {"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string}, 1588 {"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string}, 1589 {"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string}, 1590 {"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true}, 1591 {"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true}, 1592 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true}, 1593 {"wait_for_attach", offsetof(struct rpc_bdev_nvme_start_discovery, wait_for_attach), spdk_json_decode_bool, true}, 1594 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 1595 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 1596 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 1597 }; 1598 1599 struct rpc_bdev_nvme_start_discovery_ctx { 1600 struct rpc_bdev_nvme_start_discovery req; 1601 struct spdk_jsonrpc_request *request; 1602 }; 1603 1604 static void 1605 rpc_bdev_nvme_start_discovery_done(void *ctx) 1606 { 1607 struct spdk_jsonrpc_request *request = ctx; 1608 1609 spdk_jsonrpc_send_bool_response(request, true); 1610 } 1611 1612 static void 1613 rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request, 1614 const struct spdk_json_val *params) 1615 { 1616 struct rpc_bdev_nvme_start_discovery_ctx *ctx; 1617 struct spdk_nvme_transport_id trid = {}; 1618 size_t len, maxlen; 1619 int rc; 1620 spdk_bdev_nvme_start_discovery_fn cb_fn; 1621 void *cb_ctx; 1622 1623 ctx = calloc(1, sizeof(*ctx)); 1624 if (!ctx) { 1625 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1626 return; 1627 } 1628 1629 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 1630 1631 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders, 1632 SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders), 1633 &ctx->req)) { 1634 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1635 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1636 "spdk_json_decode_object failed"); 1637 goto cleanup; 1638 } 1639 1640 /* Parse trstring */ 1641 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 1642 if (rc < 0) { 1643 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 1644 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 1645 ctx->req.trtype); 1646 goto cleanup; 1647 } 1648 1649 /* Parse trtype */ 1650 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 1651 assert(rc == 0); 1652 1653 /* Parse traddr */ 1654 maxlen = sizeof(trid.traddr); 1655 len = strnlen(ctx->req.traddr, maxlen); 1656 if (len == maxlen) { 1657 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 1658 ctx->req.traddr); 1659 goto cleanup; 1660 } 1661 memcpy(trid.traddr, ctx->req.traddr, len + 1); 1662 1663 /* Parse adrfam */ 1664 if (ctx->req.adrfam) { 1665 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 1666 if (rc < 0) { 1667 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 1668 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 1669 ctx->req.adrfam); 1670 goto cleanup; 1671 } 1672 } 1673 1674 /* Parse trsvcid */ 1675 if (ctx->req.trsvcid) { 1676 maxlen = sizeof(trid.trsvcid); 1677 len = strnlen(ctx->req.trsvcid, maxlen); 1678 if (len == maxlen) { 1679 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 1680 ctx->req.trsvcid); 1681 goto cleanup; 1682 } 1683 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 1684 } 1685 1686 if (ctx->req.hostnqn) { 1687 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 1688 ctx->req.hostnqn); 1689 } 1690 1691 ctx->request = request; 1692 cb_fn = ctx->req.wait_for_attach ? rpc_bdev_nvme_start_discovery_done : NULL; 1693 cb_ctx = ctx->req.wait_for_attach ? request : NULL; 1694 rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts, &ctx->req.bdev_opts, 1695 cb_fn, cb_ctx); 1696 if (rc) { 1697 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1698 } else if (!ctx->req.wait_for_attach) { 1699 rpc_bdev_nvme_start_discovery_done(request); 1700 } 1701 1702 cleanup: 1703 free_rpc_bdev_nvme_start_discovery(&ctx->req); 1704 free(ctx); 1705 } 1706 SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery, 1707 SPDK_RPC_RUNTIME) 1708 1709 struct rpc_bdev_nvme_stop_discovery { 1710 char *name; 1711 }; 1712 1713 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = { 1714 {"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string}, 1715 }; 1716 1717 struct rpc_bdev_nvme_stop_discovery_ctx { 1718 struct rpc_bdev_nvme_stop_discovery req; 1719 struct spdk_jsonrpc_request *request; 1720 }; 1721 1722 static void 1723 rpc_bdev_nvme_stop_discovery_done(void *cb_ctx) 1724 { 1725 struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx; 1726 1727 spdk_jsonrpc_send_bool_response(ctx->request, true); 1728 free(ctx->req.name); 1729 free(ctx); 1730 } 1731 1732 static void 1733 rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request, 1734 const struct spdk_json_val *params) 1735 { 1736 struct rpc_bdev_nvme_stop_discovery_ctx *ctx; 1737 int rc; 1738 1739 ctx = calloc(1, sizeof(*ctx)); 1740 if (!ctx) { 1741 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1742 return; 1743 } 1744 1745 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders, 1746 SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders), 1747 &ctx->req)) { 1748 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1749 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1750 "spdk_json_decode_object failed"); 1751 goto cleanup; 1752 } 1753 1754 ctx->request = request; 1755 rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx); 1756 if (rc) { 1757 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1758 goto cleanup; 1759 } 1760 1761 return; 1762 1763 cleanup: 1764 free(ctx->req.name); 1765 free(ctx); 1766 } 1767 SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery, 1768 SPDK_RPC_RUNTIME) 1769 1770 static void 1771 rpc_bdev_nvme_get_discovery_info(struct spdk_jsonrpc_request *request, 1772 const struct spdk_json_val *params) 1773 { 1774 struct spdk_json_write_ctx *w; 1775 1776 w = spdk_jsonrpc_begin_result(request); 1777 bdev_nvme_get_discovery_info(w); 1778 spdk_jsonrpc_end_result(request, w); 1779 } 1780 SPDK_RPC_REGISTER("bdev_nvme_get_discovery_info", rpc_bdev_nvme_get_discovery_info, 1781 SPDK_RPC_RUNTIME) 1782 1783 enum error_injection_cmd_type { 1784 NVME_ADMIN_CMD = 1, 1785 NVME_IO_CMD, 1786 }; 1787 1788 struct rpc_add_error_injection { 1789 char *name; 1790 enum error_injection_cmd_type cmd_type; 1791 uint8_t opc; 1792 bool do_not_submit; 1793 uint64_t timeout_in_us; 1794 uint32_t err_count; 1795 uint8_t sct; 1796 uint8_t sc; 1797 }; 1798 1799 static void 1800 free_rpc_add_error_injection(struct rpc_add_error_injection *req) 1801 { 1802 free(req->name); 1803 } 1804 1805 static int 1806 rpc_error_injection_decode_cmd_type(const struct spdk_json_val *val, void *out) 1807 { 1808 int *cmd_type = out; 1809 1810 if (spdk_json_strequal(val, "admin")) { 1811 *cmd_type = NVME_ADMIN_CMD; 1812 } else if (spdk_json_strequal(val, "io")) { 1813 *cmd_type = NVME_IO_CMD; 1814 } else { 1815 SPDK_ERRLOG("Invalid parameter value: cmd_type\n"); 1816 return -EINVAL; 1817 } 1818 1819 return 0; 1820 } 1821 1822 static const struct spdk_json_object_decoder rpc_add_error_injection_decoders[] = { 1823 { "name", offsetof(struct rpc_add_error_injection, name), spdk_json_decode_string }, 1824 { "cmd_type", offsetof(struct rpc_add_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1825 { "opc", offsetof(struct rpc_add_error_injection, opc), spdk_json_decode_uint8 }, 1826 { "do_not_submit", offsetof(struct rpc_add_error_injection, do_not_submit), spdk_json_decode_bool, true }, 1827 { "timeout_in_us", offsetof(struct rpc_add_error_injection, timeout_in_us), spdk_json_decode_uint64, true }, 1828 { "err_count", offsetof(struct rpc_add_error_injection, err_count), spdk_json_decode_uint32, true }, 1829 { "sct", offsetof(struct rpc_add_error_injection, sct), spdk_json_decode_uint8, true}, 1830 { "sc", offsetof(struct rpc_add_error_injection, sc), spdk_json_decode_uint8, true}, 1831 }; 1832 1833 struct rpc_add_error_injection_ctx { 1834 struct spdk_jsonrpc_request *request; 1835 struct rpc_add_error_injection rpc; 1836 }; 1837 1838 static void 1839 rpc_add_error_injection_done(struct spdk_io_channel_iter *i, int status) 1840 { 1841 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1842 1843 if (status) { 1844 spdk_jsonrpc_send_error_response(ctx->request, status, 1845 "Failed to add the error injection."); 1846 } else { 1847 spdk_jsonrpc_send_bool_response(ctx->request, true); 1848 } 1849 1850 free_rpc_add_error_injection(&ctx->rpc); 1851 free(ctx); 1852 } 1853 1854 static void 1855 rpc_add_error_injection_per_channel(struct spdk_io_channel_iter *i) 1856 { 1857 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1858 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1859 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1860 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1861 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1862 int rc = 0; 1863 1864 if (qpair != NULL) { 1865 rc = spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc, 1866 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1867 ctx->rpc.sct, ctx->rpc.sc); 1868 } 1869 1870 spdk_for_each_channel_continue(i, rc); 1871 } 1872 1873 static void 1874 rpc_bdev_nvme_add_error_injection( 1875 struct spdk_jsonrpc_request *request, 1876 const struct spdk_json_val *params) 1877 { 1878 struct rpc_add_error_injection_ctx *ctx; 1879 struct nvme_ctrlr *nvme_ctrlr; 1880 int rc; 1881 1882 ctx = calloc(1, sizeof(*ctx)); 1883 if (!ctx) { 1884 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1885 return; 1886 } 1887 ctx->rpc.err_count = 1; 1888 ctx->request = request; 1889 1890 if (spdk_json_decode_object(params, 1891 rpc_add_error_injection_decoders, 1892 SPDK_COUNTOF(rpc_add_error_injection_decoders), 1893 &ctx->rpc)) { 1894 spdk_jsonrpc_send_error_response(request, -EINVAL, 1895 "Failed to parse the request"); 1896 goto cleanup; 1897 } 1898 1899 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 1900 if (nvme_ctrlr == NULL) { 1901 SPDK_ERRLOG("No controller with specified name was found.\n"); 1902 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1903 goto cleanup; 1904 } 1905 1906 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 1907 spdk_for_each_channel(nvme_ctrlr, 1908 rpc_add_error_injection_per_channel, 1909 ctx, 1910 rpc_add_error_injection_done); 1911 1912 return; 1913 } else { 1914 rc = spdk_nvme_qpair_add_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc, 1915 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1916 ctx->rpc.sct, ctx->rpc.sc); 1917 if (rc) { 1918 spdk_jsonrpc_send_error_response(request, -rc, 1919 "Failed to add the error injection"); 1920 } else { 1921 spdk_jsonrpc_send_bool_response(ctx->request, true); 1922 } 1923 } 1924 1925 cleanup: 1926 free_rpc_add_error_injection(&ctx->rpc); 1927 free(ctx); 1928 } 1929 SPDK_RPC_REGISTER("bdev_nvme_add_error_injection", rpc_bdev_nvme_add_error_injection, 1930 SPDK_RPC_RUNTIME) 1931 1932 struct rpc_remove_error_injection { 1933 char *name; 1934 enum error_injection_cmd_type cmd_type; 1935 uint8_t opc; 1936 }; 1937 1938 static void 1939 free_rpc_remove_error_injection(struct rpc_remove_error_injection *req) 1940 { 1941 free(req->name); 1942 } 1943 1944 static const struct spdk_json_object_decoder rpc_remove_error_injection_decoders[] = { 1945 { "name", offsetof(struct rpc_remove_error_injection, name), spdk_json_decode_string }, 1946 { "cmd_type", offsetof(struct rpc_remove_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1947 { "opc", offsetof(struct rpc_remove_error_injection, opc), spdk_json_decode_uint8 }, 1948 }; 1949 1950 struct rpc_remove_error_injection_ctx { 1951 struct spdk_jsonrpc_request *request; 1952 struct rpc_remove_error_injection rpc; 1953 }; 1954 1955 static void 1956 rpc_remove_error_injection_done(struct spdk_io_channel_iter *i, int status) 1957 { 1958 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1959 1960 if (status) { 1961 spdk_jsonrpc_send_error_response(ctx->request, status, 1962 "Failed to remove the error injection."); 1963 } else { 1964 spdk_jsonrpc_send_bool_response(ctx->request, true); 1965 } 1966 1967 free_rpc_remove_error_injection(&ctx->rpc); 1968 free(ctx); 1969 } 1970 1971 static void 1972 rpc_remove_error_injection_per_channel(struct spdk_io_channel_iter *i) 1973 { 1974 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1975 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1976 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1977 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1978 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1979 1980 if (qpair != NULL) { 1981 spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc); 1982 } 1983 1984 spdk_for_each_channel_continue(i, 0); 1985 } 1986 1987 static void 1988 rpc_bdev_nvme_remove_error_injection(struct spdk_jsonrpc_request *request, 1989 const struct spdk_json_val *params) 1990 { 1991 struct rpc_remove_error_injection_ctx *ctx; 1992 struct nvme_ctrlr *nvme_ctrlr; 1993 1994 ctx = calloc(1, sizeof(*ctx)); 1995 if (!ctx) { 1996 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1997 return; 1998 } 1999 ctx->request = request; 2000 2001 if (spdk_json_decode_object(params, 2002 rpc_remove_error_injection_decoders, 2003 SPDK_COUNTOF(rpc_remove_error_injection_decoders), 2004 &ctx->rpc)) { 2005 spdk_jsonrpc_send_error_response(request, -EINVAL, 2006 "Failed to parse the request"); 2007 goto cleanup; 2008 } 2009 2010 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 2011 if (nvme_ctrlr == NULL) { 2012 SPDK_ERRLOG("No controller with specified name was found.\n"); 2013 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 2014 goto cleanup; 2015 } 2016 2017 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 2018 spdk_for_each_channel(nvme_ctrlr, 2019 rpc_remove_error_injection_per_channel, 2020 ctx, 2021 rpc_remove_error_injection_done); 2022 return; 2023 } else { 2024 spdk_nvme_qpair_remove_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc); 2025 spdk_jsonrpc_send_bool_response(ctx->request, true); 2026 } 2027 2028 cleanup: 2029 free_rpc_remove_error_injection(&ctx->rpc); 2030 free(ctx); 2031 } 2032 SPDK_RPC_REGISTER("bdev_nvme_remove_error_injection", rpc_bdev_nvme_remove_error_injection, 2033 SPDK_RPC_RUNTIME) 2034 2035 struct rpc_get_io_paths { 2036 char *name; 2037 }; 2038 2039 static void 2040 free_rpc_get_io_paths(struct rpc_get_io_paths *r) 2041 { 2042 free(r->name); 2043 } 2044 2045 static const struct spdk_json_object_decoder rpc_get_io_paths_decoders[] = { 2046 {"name", offsetof(struct rpc_get_io_paths, name), spdk_json_decode_string, true}, 2047 }; 2048 2049 struct rpc_get_io_paths_ctx { 2050 struct rpc_get_io_paths req; 2051 struct spdk_jsonrpc_request *request; 2052 struct spdk_json_write_ctx *w; 2053 }; 2054 2055 static void 2056 rpc_bdev_nvme_get_io_paths_done(struct spdk_io_channel_iter *i, int status) 2057 { 2058 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2059 2060 spdk_json_write_array_end(ctx->w); 2061 2062 spdk_json_write_object_end(ctx->w); 2063 2064 spdk_jsonrpc_end_result(ctx->request, ctx->w); 2065 2066 free_rpc_get_io_paths(&ctx->req); 2067 free(ctx); 2068 } 2069 2070 static void 2071 _rpc_bdev_nvme_get_io_paths(struct spdk_io_channel_iter *i) 2072 { 2073 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 2074 struct nvme_poll_group *group = spdk_io_channel_get_ctx(_ch); 2075 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2076 struct nvme_qpair *qpair; 2077 struct nvme_io_path *io_path; 2078 struct nvme_bdev *nbdev; 2079 2080 spdk_json_write_object_begin(ctx->w); 2081 2082 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 2083 2084 spdk_json_write_named_array_begin(ctx->w, "io_paths"); 2085 2086 TAILQ_FOREACH(qpair, &group->qpair_list, tailq) { 2087 TAILQ_FOREACH(io_path, &qpair->io_path_list, tailq) { 2088 nbdev = io_path->nvme_ns->bdev; 2089 2090 if (ctx->req.name != NULL && 2091 strcmp(ctx->req.name, nbdev->disk.name) != 0) { 2092 continue; 2093 } 2094 2095 nvme_io_path_info_json(ctx->w, io_path); 2096 } 2097 } 2098 2099 spdk_json_write_array_end(ctx->w); 2100 2101 spdk_json_write_object_end(ctx->w); 2102 2103 spdk_for_each_channel_continue(i, 0); 2104 } 2105 2106 static void 2107 rpc_bdev_nvme_get_io_paths(struct spdk_jsonrpc_request *request, 2108 const struct spdk_json_val *params) 2109 { 2110 struct rpc_get_io_paths_ctx *ctx; 2111 2112 ctx = calloc(1, sizeof(*ctx)); 2113 if (ctx == NULL) { 2114 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2115 return; 2116 } 2117 2118 if (params != NULL && 2119 spdk_json_decode_object(params, rpc_get_io_paths_decoders, 2120 SPDK_COUNTOF(rpc_get_io_paths_decoders), 2121 &ctx->req)) { 2122 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 2123 "bdev_nvme_get_io_paths requires no parameters"); 2124 2125 free_rpc_get_io_paths(&ctx->req); 2126 free(ctx); 2127 return; 2128 } 2129 2130 ctx->request = request; 2131 ctx->w = spdk_jsonrpc_begin_result(request); 2132 2133 spdk_json_write_object_begin(ctx->w); 2134 2135 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 2136 2137 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 2138 _rpc_bdev_nvme_get_io_paths, 2139 ctx, 2140 rpc_bdev_nvme_get_io_paths_done); 2141 } 2142 SPDK_RPC_REGISTER("bdev_nvme_get_io_paths", rpc_bdev_nvme_get_io_paths, SPDK_RPC_RUNTIME) 2143 2144 struct rpc_bdev_nvme_set_preferred_path { 2145 char *name; 2146 uint16_t cntlid; 2147 }; 2148 2149 static void 2150 free_rpc_bdev_nvme_set_preferred_path(struct rpc_bdev_nvme_set_preferred_path *req) 2151 { 2152 free(req->name); 2153 } 2154 2155 static const struct spdk_json_object_decoder rpc_bdev_nvme_set_preferred_path_decoders[] = { 2156 {"name", offsetof(struct rpc_bdev_nvme_set_preferred_path, name), spdk_json_decode_string}, 2157 {"cntlid", offsetof(struct rpc_bdev_nvme_set_preferred_path, cntlid), spdk_json_decode_uint16}, 2158 }; 2159 2160 struct rpc_bdev_nvme_set_preferred_path_ctx { 2161 struct rpc_bdev_nvme_set_preferred_path req; 2162 struct spdk_jsonrpc_request *request; 2163 }; 2164 2165 static void 2166 rpc_bdev_nvme_set_preferred_path_done(void *cb_arg, int rc) 2167 { 2168 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx = cb_arg; 2169 2170 if (rc == 0) { 2171 spdk_jsonrpc_send_bool_response(ctx->request, true); 2172 } else { 2173 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2174 } 2175 2176 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2177 free(ctx); 2178 } 2179 2180 static void 2181 rpc_bdev_nvme_set_preferred_path(struct spdk_jsonrpc_request *request, 2182 const struct spdk_json_val *params) 2183 { 2184 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx; 2185 2186 ctx = calloc(1, sizeof(*ctx)); 2187 if (ctx == NULL) { 2188 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2189 return; 2190 } 2191 2192 if (spdk_json_decode_object(params, rpc_bdev_nvme_set_preferred_path_decoders, 2193 SPDK_COUNTOF(rpc_bdev_nvme_set_preferred_path_decoders), 2194 &ctx->req)) { 2195 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2196 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2197 "spdk_json_decode_object failed"); 2198 goto cleanup; 2199 } 2200 2201 ctx->request = request; 2202 2203 bdev_nvme_set_preferred_path(ctx->req.name, ctx->req.cntlid, 2204 rpc_bdev_nvme_set_preferred_path_done, ctx); 2205 return; 2206 2207 cleanup: 2208 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2209 free(ctx); 2210 } 2211 SPDK_RPC_REGISTER("bdev_nvme_set_preferred_path", rpc_bdev_nvme_set_preferred_path, 2212 SPDK_RPC_RUNTIME) 2213 2214 struct rpc_set_multipath_policy { 2215 char *name; 2216 enum bdev_nvme_multipath_policy policy; 2217 }; 2218 2219 static void 2220 free_rpc_set_multipath_policy(struct rpc_set_multipath_policy *req) 2221 { 2222 free(req->name); 2223 } 2224 2225 static int 2226 rpc_decode_mp_policy(const struct spdk_json_val *val, void *out) 2227 { 2228 enum bdev_nvme_multipath_policy *policy = out; 2229 2230 if (spdk_json_strequal(val, "active_passive") == true) { 2231 *policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 2232 } else if (spdk_json_strequal(val, "active_active") == true) { 2233 *policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 2234 } else { 2235 SPDK_NOTICELOG("Invalid parameter value: policy\n"); 2236 return -EINVAL; 2237 } 2238 2239 return 0; 2240 } 2241 2242 static const struct spdk_json_object_decoder rpc_set_multipath_policy_decoders[] = { 2243 {"name", offsetof(struct rpc_set_multipath_policy, name), spdk_json_decode_string}, 2244 {"policy", offsetof(struct rpc_set_multipath_policy, policy), rpc_decode_mp_policy}, 2245 }; 2246 2247 struct rpc_set_multipath_policy_ctx { 2248 struct rpc_set_multipath_policy req; 2249 struct spdk_jsonrpc_request *request; 2250 }; 2251 2252 static void 2253 rpc_bdev_nvme_set_multipath_policy_done(void *cb_arg, int rc) 2254 { 2255 struct rpc_set_multipath_policy_ctx *ctx = cb_arg; 2256 2257 if (rc == 0) { 2258 spdk_jsonrpc_send_bool_response(ctx->request, true); 2259 } else { 2260 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2261 } 2262 2263 free_rpc_set_multipath_policy(&ctx->req); 2264 free(ctx); 2265 } 2266 2267 static void 2268 rpc_bdev_nvme_set_multipath_policy(struct spdk_jsonrpc_request *request, 2269 const struct spdk_json_val *params) 2270 { 2271 struct rpc_set_multipath_policy_ctx *ctx; 2272 2273 ctx = calloc(1, sizeof(*ctx)); 2274 if (ctx == NULL) { 2275 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2276 return; 2277 } 2278 2279 if (spdk_json_decode_object(params, rpc_set_multipath_policy_decoders, 2280 SPDK_COUNTOF(rpc_set_multipath_policy_decoders), 2281 &ctx->req)) { 2282 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2283 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2284 "spdk_json_decode_object failed"); 2285 goto cleanup; 2286 } 2287 2288 ctx->request = request; 2289 2290 bdev_nvme_set_multipath_policy(ctx->req.name, ctx->req.policy, 2291 rpc_bdev_nvme_set_multipath_policy_done, ctx); 2292 return; 2293 2294 cleanup: 2295 free_rpc_set_multipath_policy(&ctx->req); 2296 free(ctx); 2297 } 2298 SPDK_RPC_REGISTER("bdev_nvme_set_multipath_policy", rpc_bdev_nvme_set_multipath_policy, 2299 SPDK_RPC_RUNTIME) 2300