1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 * Copyright (c) 2022 Dell Inc, or its subsidiaries. All rights reserved. 6 */ 7 8 #include "spdk/stdinc.h" 9 10 #include "bdev_nvme.h" 11 12 #include "spdk/config.h" 13 14 #include "spdk/string.h" 15 #include "spdk/rpc.h" 16 #include "spdk/util.h" 17 #include "spdk/env.h" 18 #include "spdk/nvme.h" 19 #include "spdk/nvme_spec.h" 20 21 #include "spdk/log.h" 22 #include "spdk/bdev_module.h" 23 24 #define TCP_PSK_INVALID_PERMISSIONS 0177 25 26 static bool g_tls_log = false; 27 28 static int 29 rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out) 30 { 31 enum spdk_bdev_timeout_action *action = out; 32 33 if (spdk_json_strequal(val, "none") == true) { 34 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE; 35 } else if (spdk_json_strequal(val, "abort") == true) { 36 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT; 37 } else if (spdk_json_strequal(val, "reset") == true) { 38 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET; 39 } else { 40 SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n"); 41 return -EINVAL; 42 } 43 44 return 0; 45 } 46 47 static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = { 48 {"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true}, 49 {"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true}, 50 {"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true}, 51 {"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true}, 52 {"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 53 {"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true}, 54 {"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true}, 55 {"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true}, 56 {"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true}, 57 {"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true}, 58 {"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true}, 59 {"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true}, 60 {"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true}, 61 {"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 62 {"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true}, 63 {"transport_ack_timeout", offsetof(struct spdk_bdev_nvme_opts, transport_ack_timeout), spdk_json_decode_uint8, true}, 64 {"ctrlr_loss_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 65 {"reconnect_delay_sec", offsetof(struct spdk_bdev_nvme_opts, reconnect_delay_sec), spdk_json_decode_uint32, true}, 66 {"fast_io_fail_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 67 {"disable_auto_failback", offsetof(struct spdk_bdev_nvme_opts, disable_auto_failback), spdk_json_decode_bool, true}, 68 {"generate_uuids", offsetof(struct spdk_bdev_nvme_opts, generate_uuids), spdk_json_decode_bool, true}, 69 {"transport_tos", offsetof(struct spdk_bdev_nvme_opts, transport_tos), spdk_json_decode_uint8, true}, 70 {"nvme_error_stat", offsetof(struct spdk_bdev_nvme_opts, nvme_error_stat), spdk_json_decode_bool, true}, 71 {"rdma_srq_size", offsetof(struct spdk_bdev_nvme_opts, rdma_srq_size), spdk_json_decode_uint32, true}, 72 {"io_path_stat", offsetof(struct spdk_bdev_nvme_opts, io_path_stat), spdk_json_decode_bool, true}, 73 {"allow_accel_sequence", offsetof(struct spdk_bdev_nvme_opts, allow_accel_sequence), spdk_json_decode_bool, true}, 74 }; 75 76 static void 77 rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request, 78 const struct spdk_json_val *params) 79 { 80 struct spdk_bdev_nvme_opts opts; 81 int rc; 82 83 bdev_nvme_get_opts(&opts); 84 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders, 85 SPDK_COUNTOF(rpc_bdev_nvme_options_decoders), 86 &opts)) { 87 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 88 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 89 "spdk_json_decode_object failed"); 90 return; 91 } 92 93 rc = bdev_nvme_set_opts(&opts); 94 if (rc == -EPERM) { 95 spdk_jsonrpc_send_error_response(request, -EPERM, 96 "RPC not permitted with nvme controllers already attached"); 97 } else if (rc) { 98 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 99 } else { 100 spdk_jsonrpc_send_bool_response(request, true); 101 } 102 103 return; 104 } 105 SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options, 106 SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME) 107 108 struct rpc_bdev_nvme_hotplug { 109 bool enabled; 110 uint64_t period_us; 111 }; 112 113 static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = { 114 {"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false}, 115 {"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true}, 116 }; 117 118 static void 119 rpc_bdev_nvme_set_hotplug_done(void *ctx) 120 { 121 struct spdk_jsonrpc_request *request = ctx; 122 123 spdk_jsonrpc_send_bool_response(request, true); 124 } 125 126 static void 127 rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request, 128 const struct spdk_json_val *params) 129 { 130 struct rpc_bdev_nvme_hotplug req = {false, 0}; 131 int rc; 132 133 if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders, 134 SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) { 135 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 136 rc = -EINVAL; 137 goto invalid; 138 } 139 140 rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done, 141 request); 142 if (rc) { 143 goto invalid; 144 } 145 146 return; 147 invalid: 148 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc)); 149 } 150 SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME) 151 152 enum bdev_nvme_multipath_mode { 153 BDEV_NVME_MP_MODE_FAILOVER, 154 BDEV_NVME_MP_MODE_MULTIPATH, 155 BDEV_NVME_MP_MODE_DISABLE, 156 }; 157 158 struct rpc_bdev_nvme_attach_controller { 159 char *name; 160 char *trtype; 161 char *adrfam; 162 char *traddr; 163 char *trsvcid; 164 char *priority; 165 char *subnqn; 166 char *hostnqn; 167 char *hostaddr; 168 char *hostsvcid; 169 char *psk; 170 enum bdev_nvme_multipath_mode multipath; 171 struct nvme_ctrlr_opts bdev_opts; 172 struct spdk_nvme_ctrlr_opts drv_opts; 173 uint32_t max_bdevs; 174 }; 175 176 static void 177 free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req) 178 { 179 free(req->name); 180 free(req->trtype); 181 free(req->adrfam); 182 free(req->traddr); 183 free(req->trsvcid); 184 free(req->priority); 185 free(req->subnqn); 186 free(req->hostnqn); 187 free(req->hostaddr); 188 free(req->hostsvcid); 189 free(req->psk); 190 } 191 192 static int 193 bdev_nvme_decode_reftag(const struct spdk_json_val *val, void *out) 194 { 195 uint32_t *flag = out; 196 bool reftag; 197 int rc; 198 199 rc = spdk_json_decode_bool(val, &reftag); 200 if (rc == 0 && reftag == true) { 201 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; 202 } 203 204 return rc; 205 } 206 207 static int 208 bdev_nvme_decode_guard(const struct spdk_json_val *val, void *out) 209 { 210 uint32_t *flag = out; 211 bool guard; 212 int rc; 213 214 rc = spdk_json_decode_bool(val, &guard); 215 if (rc == 0 && guard == true) { 216 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD; 217 } 218 219 return rc; 220 } 221 222 static int 223 bdev_nvme_decode_multipath(const struct spdk_json_val *val, void *out) 224 { 225 enum bdev_nvme_multipath_mode *multipath = out; 226 227 if (spdk_json_strequal(val, "failover") == true) { 228 *multipath = BDEV_NVME_MP_MODE_FAILOVER; 229 } else if (spdk_json_strequal(val, "multipath") == true) { 230 *multipath = BDEV_NVME_MP_MODE_MULTIPATH; 231 } else if (spdk_json_strequal(val, "disable") == true) { 232 *multipath = BDEV_NVME_MP_MODE_DISABLE; 233 } else { 234 SPDK_NOTICELOG("Invalid parameter value: multipath\n"); 235 return -EINVAL; 236 } 237 238 return 0; 239 } 240 241 242 static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = { 243 {"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string}, 244 {"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string}, 245 {"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string}, 246 247 {"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true}, 248 {"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true}, 249 {"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true}, 250 {"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true}, 251 {"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true}, 252 {"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true}, 253 {"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true}, 254 255 {"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_reftag, true}, 256 {"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_guard, true}, 257 {"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.header_digest), spdk_json_decode_bool, true}, 258 {"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.data_digest), spdk_json_decode_bool, true}, 259 {"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true}, 260 {"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), bdev_nvme_decode_multipath, true}, 261 {"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.num_io_queues), spdk_json_decode_uint32, true}, 262 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 263 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 264 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 265 {"psk", offsetof(struct rpc_bdev_nvme_attach_controller, psk), spdk_json_decode_string, true}, 266 {"max_bdevs", offsetof(struct rpc_bdev_nvme_attach_controller, max_bdevs), spdk_json_decode_uint32, true}, 267 }; 268 269 #define DEFAULT_MAX_BDEVS_PER_RPC 128 270 271 struct rpc_bdev_nvme_attach_controller_ctx { 272 struct rpc_bdev_nvme_attach_controller req; 273 size_t bdev_count; 274 const char **names; 275 struct spdk_jsonrpc_request *request; 276 }; 277 278 static void 279 free_rpc_bdev_nvme_attach_controller_ctx(struct rpc_bdev_nvme_attach_controller_ctx *ctx) 280 { 281 free_rpc_bdev_nvme_attach_controller(&ctx->req); 282 free(ctx->names); 283 free(ctx); 284 } 285 286 static void 287 rpc_bdev_nvme_attach_controller_examined(void *cb_ctx) 288 { 289 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 290 struct spdk_jsonrpc_request *request = ctx->request; 291 struct spdk_json_write_ctx *w; 292 size_t i; 293 294 w = spdk_jsonrpc_begin_result(request); 295 spdk_json_write_array_begin(w); 296 for (i = 0; i < ctx->bdev_count; i++) { 297 spdk_json_write_string(w, ctx->names[i]); 298 } 299 spdk_json_write_array_end(w); 300 spdk_jsonrpc_end_result(request, w); 301 302 free_rpc_bdev_nvme_attach_controller_ctx(ctx); 303 } 304 305 static void 306 rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc) 307 { 308 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 309 struct spdk_jsonrpc_request *request = ctx->request; 310 311 if (rc < 0) { 312 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); 313 free_rpc_bdev_nvme_attach_controller_ctx(ctx); 314 return; 315 } 316 317 ctx->bdev_count = bdev_count; 318 spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx); 319 } 320 321 static int 322 tcp_load_psk(const char *fname, char *buf, size_t bufsz) 323 { 324 FILE *psk_file; 325 struct stat statbuf; 326 int rc; 327 328 if (stat(fname, &statbuf) != 0) { 329 SPDK_ERRLOG("Could not read permissions for PSK file\n"); 330 return -EACCES; 331 } 332 333 if ((statbuf.st_mode & TCP_PSK_INVALID_PERMISSIONS) != 0) { 334 SPDK_ERRLOG("Incorrect permissions for PSK file\n"); 335 return -EPERM; 336 } 337 if ((size_t)statbuf.st_size >= bufsz) { 338 SPDK_ERRLOG("Invalid PSK: too long\n"); 339 return -EINVAL; 340 } 341 psk_file = fopen(fname, "r"); 342 if (psk_file == NULL) { 343 SPDK_ERRLOG("Could not open PSK file\n"); 344 return -EINVAL; 345 } 346 347 memset(buf, 0, bufsz); 348 rc = fread(buf, 1, statbuf.st_size, psk_file); 349 if (rc != statbuf.st_size) { 350 SPDK_ERRLOG("Failed to read PSK\n"); 351 fclose(psk_file); 352 return -EINVAL; 353 } 354 355 fclose(psk_file); 356 return 0; 357 } 358 359 static void 360 rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, 361 const struct spdk_json_val *params) 362 { 363 struct rpc_bdev_nvme_attach_controller_ctx *ctx; 364 struct spdk_nvme_transport_id trid = {}; 365 const struct spdk_nvme_ctrlr_opts *drv_opts; 366 const struct spdk_nvme_transport_id *ctrlr_trid; 367 struct nvme_ctrlr *ctrlr = NULL; 368 size_t len, maxlen; 369 bool multipath = false; 370 int rc; 371 372 ctx = calloc(1, sizeof(*ctx)); 373 if (!ctx) { 374 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 375 return; 376 } 377 378 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.drv_opts, sizeof(ctx->req.drv_opts)); 379 bdev_nvme_get_default_ctrlr_opts(&ctx->req.bdev_opts); 380 /* For now, initialize the multipath parameter to add a failover path. This maintains backward 381 * compatibility with past behavior. In the future, this behavior will change to "disable". */ 382 ctx->req.multipath = BDEV_NVME_MP_MODE_FAILOVER; 383 ctx->req.max_bdevs = DEFAULT_MAX_BDEVS_PER_RPC; 384 385 if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders, 386 SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders), 387 &ctx->req)) { 388 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 389 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 390 "spdk_json_decode_object failed"); 391 goto cleanup; 392 } 393 394 if (ctx->req.max_bdevs == 0) { 395 spdk_jsonrpc_send_error_response(request, -EINVAL, "max_bdevs cannot be zero"); 396 goto cleanup; 397 } 398 399 ctx->names = calloc(ctx->req.max_bdevs, sizeof(char *)); 400 if (ctx->names == NULL) { 401 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 402 goto cleanup; 403 } 404 405 /* Parse trstring */ 406 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 407 if (rc < 0) { 408 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 409 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 410 ctx->req.trtype); 411 goto cleanup; 412 } 413 414 /* Parse trtype */ 415 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 416 assert(rc == 0); 417 418 /* Parse traddr */ 419 maxlen = sizeof(trid.traddr); 420 len = strnlen(ctx->req.traddr, maxlen); 421 if (len == maxlen) { 422 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 423 ctx->req.traddr); 424 goto cleanup; 425 } 426 memcpy(trid.traddr, ctx->req.traddr, len + 1); 427 428 /* Parse adrfam */ 429 if (ctx->req.adrfam) { 430 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 431 if (rc < 0) { 432 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 433 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 434 ctx->req.adrfam); 435 goto cleanup; 436 } 437 } 438 439 /* Parse trsvcid */ 440 if (ctx->req.trsvcid) { 441 maxlen = sizeof(trid.trsvcid); 442 len = strnlen(ctx->req.trsvcid, maxlen); 443 if (len == maxlen) { 444 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 445 ctx->req.trsvcid); 446 goto cleanup; 447 } 448 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 449 } 450 451 /* Parse priority for the NVMe-oF transport connection */ 452 if (ctx->req.priority) { 453 trid.priority = spdk_strtol(ctx->req.priority, 10); 454 } 455 456 /* Parse subnqn */ 457 if (ctx->req.subnqn) { 458 maxlen = sizeof(trid.subnqn); 459 len = strnlen(ctx->req.subnqn, maxlen); 460 if (len == maxlen) { 461 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 462 ctx->req.subnqn); 463 goto cleanup; 464 } 465 memcpy(trid.subnqn, ctx->req.subnqn, len + 1); 466 } 467 468 if (ctx->req.hostnqn) { 469 snprintf(ctx->req.drv_opts.hostnqn, sizeof(ctx->req.drv_opts.hostnqn), "%s", 470 ctx->req.hostnqn); 471 } 472 473 if (ctx->req.psk) { 474 if (!g_tls_log) { 475 SPDK_NOTICELOG("TLS support is considered experimental\n"); 476 g_tls_log = true; 477 } 478 rc = tcp_load_psk(ctx->req.psk, ctx->req.drv_opts.psk, sizeof(ctx->req.drv_opts.psk)); 479 if (rc) { 480 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Could not retrieve PSK from file: %s", 481 ctx->req.psk); 482 goto cleanup; 483 } 484 rc = snprintf(ctx->req.bdev_opts.psk_path, sizeof(ctx->req.bdev_opts.psk_path), "%s", ctx->req.psk); 485 if (rc < 0 || (size_t)rc >= sizeof(ctx->req.bdev_opts.psk_path)) { 486 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Could not store PSK path: %s", 487 ctx->req.psk); 488 goto cleanup; 489 } 490 } 491 492 if (ctx->req.hostaddr) { 493 maxlen = sizeof(ctx->req.drv_opts.src_addr); 494 len = strnlen(ctx->req.hostaddr, maxlen); 495 if (len == maxlen) { 496 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 497 ctx->req.hostaddr); 498 goto cleanup; 499 } 500 snprintf(ctx->req.drv_opts.src_addr, maxlen, "%s", ctx->req.hostaddr); 501 } 502 503 if (ctx->req.hostsvcid) { 504 maxlen = sizeof(ctx->req.drv_opts.src_svcid); 505 len = strnlen(ctx->req.hostsvcid, maxlen); 506 if (len == maxlen) { 507 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 508 ctx->req.hostsvcid); 509 goto cleanup; 510 } 511 snprintf(ctx->req.drv_opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid); 512 } 513 514 ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); 515 516 if (ctrlr) { 517 /* This controller already exists. Check what the user wants to do. */ 518 if (ctx->req.multipath == BDEV_NVME_MP_MODE_DISABLE) { 519 /* The user does not want to do any form of multipathing. */ 520 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 521 "A controller named %s already exists and multipath is disabled\n", 522 ctx->req.name); 523 goto cleanup; 524 } 525 526 assert(ctx->req.multipath == BDEV_NVME_MP_MODE_FAILOVER || 527 ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH); 528 529 /* The user wants to add this as a failover path or add this to create multipath. */ 530 drv_opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr); 531 ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr); 532 533 if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 && 534 strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 && 535 strncmp(ctx->req.drv_opts.src_addr, drv_opts->src_addr, sizeof(drv_opts->src_addr)) == 0 && 536 strncmp(ctx->req.drv_opts.src_svcid, drv_opts->src_svcid, sizeof(drv_opts->src_svcid)) == 0) { 537 /* Exactly same network path can't be added a second time */ 538 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 539 "A controller named %s already exists with the specified network path\n", 540 ctx->req.name); 541 goto cleanup; 542 } 543 544 if (strncmp(trid.subnqn, 545 ctrlr_trid->subnqn, 546 SPDK_NVMF_NQN_MAX_LEN) != 0) { 547 /* Different SUBNQN is not allowed when specifying the same controller name. */ 548 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 549 "A controller named %s already exists, but uses a different subnqn (%s)\n", 550 ctx->req.name, ctrlr_trid->subnqn); 551 goto cleanup; 552 } 553 554 if (strncmp(ctx->req.drv_opts.hostnqn, drv_opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) { 555 /* Different HOSTNQN is not allowed when specifying the same controller name. */ 556 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 557 "A controller named %s already exists, but uses a different hostnqn (%s)\n", 558 ctx->req.name, drv_opts->hostnqn); 559 goto cleanup; 560 } 561 562 if (ctx->req.bdev_opts.prchk_flags) { 563 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 564 "A controller named %s already exists. To add a path, do not specify PI options.\n", 565 ctx->req.name); 566 goto cleanup; 567 } 568 569 ctx->req.bdev_opts.prchk_flags = ctrlr->opts.prchk_flags; 570 } 571 572 if (ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH) { 573 multipath = true; 574 } 575 576 if (ctx->req.drv_opts.num_io_queues == 0 || ctx->req.drv_opts.num_io_queues > UINT16_MAX + 1) { 577 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 578 "num_io_queues out of bounds, min: %u max: %u\n", 579 1, UINT16_MAX + 1); 580 goto cleanup; 581 } 582 583 ctx->request = request; 584 /* Should already be zero due to the calloc(), but set explicitly for clarity. */ 585 ctx->req.bdev_opts.from_discovery_service = false; 586 rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->req.max_bdevs, 587 rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.drv_opts, 588 &ctx->req.bdev_opts, multipath); 589 if (rc) { 590 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 591 goto cleanup; 592 } 593 594 return; 595 596 cleanup: 597 free_rpc_bdev_nvme_attach_controller_ctx(ctx); 598 } 599 SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller, 600 SPDK_RPC_RUNTIME) 601 602 static void 603 rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx) 604 { 605 struct spdk_json_write_ctx *w = ctx; 606 struct nvme_ctrlr *nvme_ctrlr; 607 608 spdk_json_write_object_begin(w); 609 spdk_json_write_named_string(w, "name", nbdev_ctrlr->name); 610 611 spdk_json_write_named_array_begin(w, "ctrlrs"); 612 TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) { 613 nvme_ctrlr_info_json(w, nvme_ctrlr); 614 } 615 spdk_json_write_array_end(w); 616 spdk_json_write_object_end(w); 617 } 618 619 struct rpc_bdev_nvme_get_controllers { 620 char *name; 621 }; 622 623 static void 624 free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r) 625 { 626 free(r->name); 627 } 628 629 static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = { 630 {"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true}, 631 }; 632 633 static void 634 rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, 635 const struct spdk_json_val *params) 636 { 637 struct rpc_bdev_nvme_get_controllers req = {}; 638 struct spdk_json_write_ctx *w; 639 struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL; 640 641 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders, 642 SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders), 643 &req)) { 644 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 645 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 646 "spdk_json_decode_object failed"); 647 goto cleanup; 648 } 649 650 if (req.name) { 651 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 652 if (nbdev_ctrlr == NULL) { 653 SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name); 654 spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name); 655 goto cleanup; 656 } 657 } 658 659 w = spdk_jsonrpc_begin_result(request); 660 spdk_json_write_array_begin(w); 661 662 if (nbdev_ctrlr != NULL) { 663 rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w); 664 } else { 665 nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w); 666 } 667 668 spdk_json_write_array_end(w); 669 670 spdk_jsonrpc_end_result(request, w); 671 672 cleanup: 673 free_rpc_bdev_nvme_get_controllers(&req); 674 } 675 SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME) 676 677 struct rpc_bdev_nvme_detach_controller { 678 char *name; 679 char *trtype; 680 char *adrfam; 681 char *traddr; 682 char *trsvcid; 683 char *subnqn; 684 char *hostaddr; 685 char *hostsvcid; 686 }; 687 688 static void 689 free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req) 690 { 691 free(req->name); 692 free(req->trtype); 693 free(req->adrfam); 694 free(req->traddr); 695 free(req->trsvcid); 696 free(req->subnqn); 697 free(req->hostaddr); 698 free(req->hostsvcid); 699 } 700 701 static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = { 702 {"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string}, 703 {"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true}, 704 {"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true}, 705 {"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true}, 706 {"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true}, 707 {"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true}, 708 {"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true}, 709 {"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true}, 710 }; 711 712 static void 713 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request, 714 const struct spdk_json_val *params) 715 { 716 struct rpc_bdev_nvme_detach_controller req = {NULL}; 717 struct nvme_path_id path = {}; 718 size_t len, maxlen; 719 int rc = 0; 720 721 if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders, 722 SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders), 723 &req)) { 724 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 725 "spdk_json_decode_object failed"); 726 goto cleanup; 727 } 728 729 if (req.trtype != NULL) { 730 rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype); 731 if (rc < 0) { 732 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 733 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 734 req.trtype); 735 goto cleanup; 736 } 737 738 rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype); 739 if (rc < 0) { 740 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 741 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 742 req.trtype); 743 goto cleanup; 744 } 745 } 746 747 if (req.traddr != NULL) { 748 maxlen = sizeof(path.trid.traddr); 749 len = strnlen(req.traddr, maxlen); 750 if (len == maxlen) { 751 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 752 req.traddr); 753 goto cleanup; 754 } 755 memcpy(path.trid.traddr, req.traddr, len + 1); 756 } 757 758 if (req.adrfam != NULL) { 759 rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam); 760 if (rc < 0) { 761 SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam); 762 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 763 req.adrfam); 764 goto cleanup; 765 } 766 } 767 768 if (req.trsvcid != NULL) { 769 maxlen = sizeof(path.trid.trsvcid); 770 len = strnlen(req.trsvcid, maxlen); 771 if (len == maxlen) { 772 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 773 req.trsvcid); 774 goto cleanup; 775 } 776 memcpy(path.trid.trsvcid, req.trsvcid, len + 1); 777 } 778 779 /* Parse subnqn */ 780 if (req.subnqn != NULL) { 781 maxlen = sizeof(path.trid.subnqn); 782 len = strnlen(req.subnqn, maxlen); 783 if (len == maxlen) { 784 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 785 req.subnqn); 786 goto cleanup; 787 } 788 memcpy(path.trid.subnqn, req.subnqn, len + 1); 789 } 790 791 if (req.hostaddr) { 792 maxlen = sizeof(path.hostid.hostaddr); 793 len = strnlen(req.hostaddr, maxlen); 794 if (len == maxlen) { 795 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 796 req.hostaddr); 797 goto cleanup; 798 } 799 snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr); 800 } 801 802 if (req.hostsvcid) { 803 maxlen = sizeof(path.hostid.hostsvcid); 804 len = strnlen(req.hostsvcid, maxlen); 805 if (len == maxlen) { 806 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 807 req.hostsvcid); 808 goto cleanup; 809 } 810 snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid); 811 } 812 813 rc = bdev_nvme_delete(req.name, &path); 814 815 if (rc != 0) { 816 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 817 goto cleanup; 818 } 819 820 spdk_jsonrpc_send_bool_response(request, true); 821 822 cleanup: 823 free_rpc_bdev_nvme_detach_controller(&req); 824 } 825 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller, 826 SPDK_RPC_RUNTIME) 827 828 struct rpc_apply_firmware { 829 char *filename; 830 char *bdev_name; 831 }; 832 833 static void 834 free_rpc_apply_firmware(struct rpc_apply_firmware *req) 835 { 836 free(req->filename); 837 free(req->bdev_name); 838 } 839 840 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = { 841 {"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string}, 842 {"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string}, 843 }; 844 845 struct firmware_update_info { 846 void *fw_image; 847 void *p; 848 unsigned int size; 849 unsigned int size_remaining; 850 unsigned int offset; 851 unsigned int transfer; 852 bool success; 853 854 struct spdk_bdev_desc *desc; 855 struct spdk_io_channel *ch; 856 struct spdk_thread *orig_thread; 857 struct spdk_jsonrpc_request *request; 858 struct spdk_nvme_ctrlr *ctrlr; 859 struct rpc_apply_firmware req; 860 }; 861 862 static void 863 apply_firmware_cleanup(struct firmware_update_info *firm_ctx) 864 { 865 assert(firm_ctx != NULL); 866 assert(firm_ctx->orig_thread == spdk_get_thread()); 867 868 if (firm_ctx->fw_image) { 869 spdk_free(firm_ctx->fw_image); 870 } 871 872 free_rpc_apply_firmware(&firm_ctx->req); 873 874 if (firm_ctx->ch) { 875 spdk_put_io_channel(firm_ctx->ch); 876 } 877 878 if (firm_ctx->desc) { 879 spdk_bdev_close(firm_ctx->desc); 880 } 881 882 free(firm_ctx); 883 } 884 885 static void 886 _apply_firmware_complete_reset(void *ctx) 887 { 888 struct spdk_json_write_ctx *w; 889 struct firmware_update_info *firm_ctx = ctx; 890 891 assert(firm_ctx->orig_thread == spdk_get_thread()); 892 893 if (!firm_ctx->success) { 894 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 895 "firmware commit failed."); 896 apply_firmware_cleanup(firm_ctx); 897 return; 898 } 899 900 if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) { 901 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 902 "Controller reset failed."); 903 apply_firmware_cleanup(firm_ctx); 904 return; 905 } 906 907 w = spdk_jsonrpc_begin_result(firm_ctx->request); 908 spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress."); 909 spdk_jsonrpc_end_result(firm_ctx->request, w); 910 apply_firmware_cleanup(firm_ctx); 911 } 912 913 static void 914 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 915 { 916 struct firmware_update_info *firm_ctx = cb_arg; 917 918 spdk_bdev_free_io(bdev_io); 919 920 firm_ctx->success = success; 921 922 spdk_thread_exec_msg(firm_ctx->orig_thread, _apply_firmware_complete_reset, firm_ctx); 923 } 924 925 static void apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); 926 927 static void 928 _apply_firmware_complete(void *ctx) 929 { 930 struct spdk_nvme_cmd cmd = {}; 931 struct spdk_nvme_fw_commit fw_commit; 932 int slot = 0; 933 int rc; 934 struct firmware_update_info *firm_ctx = ctx; 935 enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG; 936 937 assert(firm_ctx->orig_thread == spdk_get_thread()); 938 939 if (!firm_ctx->success) { 940 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 941 "firmware download failed ."); 942 apply_firmware_cleanup(firm_ctx); 943 return; 944 } 945 946 firm_ctx->p += firm_ctx->transfer; 947 firm_ctx->offset += firm_ctx->transfer; 948 firm_ctx->size_remaining -= firm_ctx->transfer; 949 950 switch (firm_ctx->size_remaining) { 951 case 0: 952 /* firmware download completed. Commit firmware */ 953 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 954 fw_commit.fs = slot; 955 fw_commit.ca = commit_action; 956 957 cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT; 958 memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t)); 959 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0, 960 apply_firmware_complete_reset, firm_ctx); 961 if (rc) { 962 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 963 "firmware commit failed."); 964 apply_firmware_cleanup(firm_ctx); 965 return; 966 } 967 break; 968 default: 969 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 970 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 971 972 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 973 cmd.cdw11 = firm_ctx->offset >> 2; 974 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 975 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 976 if (rc) { 977 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 978 "firmware download failed."); 979 apply_firmware_cleanup(firm_ctx); 980 return; 981 } 982 break; 983 } 984 } 985 986 static void 987 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 988 { 989 struct firmware_update_info *firm_ctx = cb_arg; 990 991 spdk_bdev_free_io(bdev_io); 992 993 firm_ctx->success = success; 994 995 spdk_thread_exec_msg(firm_ctx->orig_thread, _apply_firmware_complete, firm_ctx); 996 } 997 998 static void 999 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 1000 { 1001 } 1002 1003 static void 1004 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request, 1005 const struct spdk_json_val *params) 1006 { 1007 int rc; 1008 int fd = -1; 1009 struct stat fw_stat; 1010 struct spdk_bdev *bdev; 1011 struct spdk_nvme_cmd cmd = {}; 1012 struct firmware_update_info *firm_ctx; 1013 1014 firm_ctx = calloc(1, sizeof(struct firmware_update_info)); 1015 if (!firm_ctx) { 1016 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1017 "Memory allocation error."); 1018 return; 1019 } 1020 firm_ctx->fw_image = NULL; 1021 firm_ctx->request = request; 1022 firm_ctx->orig_thread = spdk_get_thread(); 1023 1024 if (spdk_json_decode_object(params, rpc_apply_firmware_decoders, 1025 SPDK_COUNTOF(rpc_apply_firmware_decoders), &firm_ctx->req)) { 1026 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1027 "spdk_json_decode_object failed."); 1028 goto err; 1029 } 1030 1031 if (spdk_bdev_open_ext(firm_ctx->req.bdev_name, true, apply_firmware_open_cb, NULL, 1032 &firm_ctx->desc) != 0) { 1033 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1034 "bdev %s could not be opened", 1035 firm_ctx->req.bdev_name); 1036 goto err; 1037 } 1038 bdev = spdk_bdev_desc_get_bdev(firm_ctx->desc); 1039 1040 if ((firm_ctx->ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) { 1041 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1042 "Controller information for %s were not found.", 1043 firm_ctx->req.bdev_name); 1044 goto err; 1045 } 1046 1047 firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc); 1048 if (!firm_ctx->ch) { 1049 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1050 "No channels were found."); 1051 goto err; 1052 } 1053 1054 fd = open(firm_ctx->req.filename, O_RDONLY); 1055 if (fd < 0) { 1056 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1057 "open file failed."); 1058 goto err; 1059 } 1060 1061 rc = fstat(fd, &fw_stat); 1062 if (rc < 0) { 1063 close(fd); 1064 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1065 "fstat failed."); 1066 goto err; 1067 } 1068 1069 firm_ctx->size = fw_stat.st_size; 1070 if (fw_stat.st_size % 4) { 1071 close(fd); 1072 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1073 "Firmware image size is not multiple of 4."); 1074 goto err; 1075 } 1076 1077 firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL, 1078 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1079 if (!firm_ctx->fw_image) { 1080 close(fd); 1081 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1082 "Memory allocation error."); 1083 goto err; 1084 } 1085 firm_ctx->p = firm_ctx->fw_image; 1086 1087 if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) { 1088 close(fd); 1089 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1090 "Read firmware image failed!"); 1091 goto err; 1092 } 1093 close(fd); 1094 1095 firm_ctx->offset = 0; 1096 firm_ctx->size_remaining = firm_ctx->size; 1097 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 1098 1099 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 1100 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 1101 cmd.cdw11 = firm_ctx->offset >> 2; 1102 1103 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 1104 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 1105 if (rc == 0) { 1106 /* normal return here. */ 1107 return; 1108 } 1109 1110 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1111 "Read firmware image failed!"); 1112 err: 1113 apply_firmware_cleanup(firm_ctx); 1114 } 1115 SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME) 1116 1117 struct rpc_bdev_nvme_transport_stat_ctx { 1118 struct spdk_jsonrpc_request *request; 1119 struct spdk_json_write_ctx *w; 1120 }; 1121 1122 static void 1123 rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w, 1124 struct spdk_nvme_transport_poll_group_stat *stat) 1125 { 1126 struct spdk_nvme_rdma_device_stat *device_stats; 1127 uint32_t i; 1128 1129 spdk_json_write_named_array_begin(w, "devices"); 1130 1131 for (i = 0; i < stat->rdma.num_devices; i++) { 1132 device_stats = &stat->rdma.device_stats[i]; 1133 spdk_json_write_object_begin(w); 1134 spdk_json_write_named_string(w, "dev_name", device_stats->name); 1135 spdk_json_write_named_uint64(w, "polls", device_stats->polls); 1136 spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls); 1137 spdk_json_write_named_uint64(w, "completions", device_stats->completions); 1138 spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests); 1139 spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs); 1140 spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates); 1141 spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs); 1142 spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates); 1143 spdk_json_write_object_end(w); 1144 } 1145 spdk_json_write_array_end(w); 1146 } 1147 1148 static void 1149 rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w, 1150 struct spdk_nvme_transport_poll_group_stat *stat) 1151 { 1152 spdk_json_write_named_uint64(w, "polls", stat->pcie.polls); 1153 spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls); 1154 spdk_json_write_named_uint64(w, "completions", stat->pcie.completions); 1155 spdk_json_write_named_uint64(w, "cq_mmio_doorbell_updates", stat->pcie.cq_mmio_doorbell_updates); 1156 spdk_json_write_named_uint64(w, "cq_shadow_doorbell_updates", 1157 stat->pcie.cq_shadow_doorbell_updates); 1158 spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests); 1159 spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests); 1160 spdk_json_write_named_uint64(w, "sq_mmio_doorbell_updates", stat->pcie.sq_mmio_doorbell_updates); 1161 spdk_json_write_named_uint64(w, "sq_shadow_doorbell_updates", 1162 stat->pcie.sq_shadow_doorbell_updates); 1163 } 1164 1165 static void 1166 rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w, 1167 struct spdk_nvme_transport_poll_group_stat *stat) 1168 { 1169 spdk_json_write_named_uint64(w, "polls", stat->tcp.polls); 1170 spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls); 1171 spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions); 1172 spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions); 1173 spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests); 1174 spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests); 1175 } 1176 1177 static void 1178 rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i) 1179 { 1180 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1181 struct spdk_io_channel *ch; 1182 struct nvme_poll_group *group; 1183 struct spdk_nvme_poll_group_stat *stat; 1184 struct spdk_nvme_transport_poll_group_stat *tr_stat; 1185 uint32_t j; 1186 int rc; 1187 1188 ctx = spdk_io_channel_iter_get_ctx(i); 1189 ch = spdk_io_channel_iter_get_channel(i); 1190 group = spdk_io_channel_get_ctx(ch); 1191 1192 rc = spdk_nvme_poll_group_get_stats(group->group, &stat); 1193 if (rc) { 1194 spdk_for_each_channel_continue(i, rc); 1195 return; 1196 } 1197 1198 spdk_json_write_object_begin(ctx->w); 1199 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 1200 spdk_json_write_named_array_begin(ctx->w, "transports"); 1201 1202 for (j = 0; j < stat->num_transports; j++) { 1203 tr_stat = stat->transport_stat[j]; 1204 spdk_json_write_object_begin(ctx->w); 1205 spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1206 1207 switch (stat->transport_stat[j]->trtype) { 1208 case SPDK_NVME_TRANSPORT_RDMA: 1209 rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat); 1210 break; 1211 case SPDK_NVME_TRANSPORT_PCIE: 1212 case SPDK_NVME_TRANSPORT_VFIOUSER: 1213 rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat); 1214 break; 1215 case SPDK_NVME_TRANSPORT_TCP: 1216 rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat); 1217 break; 1218 default: 1219 SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype, 1220 spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1221 } 1222 spdk_json_write_object_end(ctx->w); 1223 } 1224 /* transports array */ 1225 spdk_json_write_array_end(ctx->w); 1226 spdk_json_write_object_end(ctx->w); 1227 1228 spdk_nvme_poll_group_free_stats(group->group, stat); 1229 spdk_for_each_channel_continue(i, 0); 1230 } 1231 1232 static void 1233 rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status) 1234 { 1235 struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1236 1237 spdk_json_write_array_end(ctx->w); 1238 spdk_json_write_object_end(ctx->w); 1239 spdk_jsonrpc_end_result(ctx->request, ctx->w); 1240 free(ctx); 1241 } 1242 1243 static void 1244 rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request, 1245 const struct spdk_json_val *params) 1246 { 1247 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1248 1249 if (params) { 1250 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 1251 "'bdev_nvme_get_transport_statistics' requires no arguments"); 1252 return; 1253 } 1254 1255 ctx = calloc(1, sizeof(*ctx)); 1256 if (!ctx) { 1257 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1258 "Memory allocation error"); 1259 return; 1260 } 1261 ctx->request = request; 1262 ctx->w = spdk_jsonrpc_begin_result(ctx->request); 1263 spdk_json_write_object_begin(ctx->w); 1264 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 1265 1266 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 1267 rpc_bdev_nvme_stats_per_channel, 1268 ctx, 1269 rpc_bdev_nvme_stats_done); 1270 } 1271 SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics, 1272 SPDK_RPC_RUNTIME) 1273 1274 struct rpc_bdev_nvme_controller_op_req { 1275 char *name; 1276 uint16_t cntlid; 1277 }; 1278 1279 static void 1280 free_rpc_bdev_nvme_controller_op_req(struct rpc_bdev_nvme_controller_op_req *r) 1281 { 1282 free(r->name); 1283 } 1284 1285 static const struct spdk_json_object_decoder rpc_bdev_nvme_controller_op_req_decoders[] = { 1286 {"name", offsetof(struct rpc_bdev_nvme_controller_op_req, name), spdk_json_decode_string}, 1287 {"cntlid", offsetof(struct rpc_bdev_nvme_controller_op_req, cntlid), spdk_json_decode_uint16, true}, 1288 }; 1289 1290 static void 1291 rpc_bdev_nvme_controller_op_cb(void *cb_arg, int rc) 1292 { 1293 struct spdk_jsonrpc_request *request = cb_arg; 1294 1295 if (rc == 0) { 1296 spdk_jsonrpc_send_bool_response(request, true); 1297 } else { 1298 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1299 } 1300 } 1301 1302 static void 1303 rpc_bdev_nvme_controller_op(struct spdk_jsonrpc_request *request, 1304 const struct spdk_json_val *params, 1305 enum nvme_ctrlr_op op) 1306 { 1307 struct rpc_bdev_nvme_controller_op_req req = {NULL}; 1308 struct nvme_bdev_ctrlr *nbdev_ctrlr; 1309 struct nvme_ctrlr *nvme_ctrlr; 1310 1311 if (spdk_json_decode_object(params, rpc_bdev_nvme_controller_op_req_decoders, 1312 SPDK_COUNTOF(rpc_bdev_nvme_controller_op_req_decoders), 1313 &req)) { 1314 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1315 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL)); 1316 goto exit; 1317 } 1318 1319 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 1320 if (nbdev_ctrlr == NULL) { 1321 SPDK_ERRLOG("Failed at NVMe bdev controller lookup\n"); 1322 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1323 goto exit; 1324 } 1325 1326 if (req.cntlid == 0) { 1327 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, op, rpc_bdev_nvme_controller_op_cb, request); 1328 } else { 1329 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr_by_id(nbdev_ctrlr, req.cntlid); 1330 if (nvme_ctrlr == NULL) { 1331 SPDK_ERRLOG("Failed at NVMe controller lookup\n"); 1332 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1333 goto exit; 1334 } 1335 nvme_ctrlr_op_rpc(nvme_ctrlr, op, rpc_bdev_nvme_controller_op_cb, request); 1336 } 1337 1338 exit: 1339 free_rpc_bdev_nvme_controller_op_req(&req); 1340 } 1341 1342 static void 1343 rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request, 1344 const struct spdk_json_val *params) 1345 { 1346 rpc_bdev_nvme_controller_op(request, params, NVME_CTRLR_OP_RESET); 1347 } 1348 SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME) 1349 1350 static void 1351 rpc_bdev_nvme_enable_controller(struct spdk_jsonrpc_request *request, 1352 const struct spdk_json_val *params) 1353 { 1354 rpc_bdev_nvme_controller_op(request, params, NVME_CTRLR_OP_ENABLE); 1355 } 1356 SPDK_RPC_REGISTER("bdev_nvme_enable_controller", rpc_bdev_nvme_enable_controller, SPDK_RPC_RUNTIME) 1357 1358 static void 1359 rpc_bdev_nvme_disable_controller(struct spdk_jsonrpc_request *request, 1360 const struct spdk_json_val *params) 1361 { 1362 rpc_bdev_nvme_controller_op(request, params, NVME_CTRLR_OP_DISABLE); 1363 } 1364 SPDK_RPC_REGISTER("bdev_nvme_disable_controller", rpc_bdev_nvme_disable_controller, 1365 SPDK_RPC_RUNTIME) 1366 1367 struct rpc_get_controller_health_info { 1368 char *name; 1369 }; 1370 1371 struct spdk_nvme_health_info_context { 1372 struct spdk_jsonrpc_request *request; 1373 struct spdk_nvme_ctrlr *ctrlr; 1374 struct spdk_nvme_health_information_page health_page; 1375 }; 1376 1377 static void 1378 free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r) 1379 { 1380 free(r->name); 1381 } 1382 1383 static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = { 1384 {"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true}, 1385 }; 1386 1387 static void 1388 nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response) 1389 { 1390 if (response == true) { 1391 spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1392 "Internal error."); 1393 } 1394 1395 free(context); 1396 } 1397 1398 static void 1399 get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1400 { 1401 int i; 1402 char buf[128]; 1403 struct spdk_nvme_health_info_context *context = cb_arg; 1404 struct spdk_jsonrpc_request *request = context->request; 1405 struct spdk_json_write_ctx *w; 1406 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1407 const struct spdk_nvme_transport_id *trid = NULL; 1408 const struct spdk_nvme_ctrlr_data *cdata = NULL; 1409 struct spdk_nvme_health_information_page *health_page = NULL; 1410 1411 if (spdk_nvme_cpl_is_error(cpl)) { 1412 nvme_health_info_cleanup(context, true); 1413 SPDK_ERRLOG("get log page failed\n"); 1414 return; 1415 } 1416 1417 if (ctrlr == NULL) { 1418 nvme_health_info_cleanup(context, true); 1419 SPDK_ERRLOG("ctrlr is NULL\n"); 1420 return; 1421 } else { 1422 trid = spdk_nvme_ctrlr_get_transport_id(ctrlr); 1423 cdata = spdk_nvme_ctrlr_get_data(ctrlr); 1424 health_page = &(context->health_page); 1425 } 1426 1427 w = spdk_jsonrpc_begin_result(request); 1428 1429 spdk_json_write_object_begin(w); 1430 snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn); 1431 spdk_str_trim(buf); 1432 spdk_json_write_named_string(w, "model_number", buf); 1433 snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn); 1434 spdk_str_trim(buf); 1435 spdk_json_write_named_string(w, "serial_number", buf); 1436 snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr); 1437 spdk_str_trim(buf); 1438 spdk_json_write_named_string(w, "firmware_revision", buf); 1439 spdk_json_write_named_string(w, "traddr", trid->traddr); 1440 spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273); 1441 spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare); 1442 spdk_json_write_named_uint64(w, "available_spare_threshold_percentage", 1443 health_page->available_spare_threshold); 1444 spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used); 1445 spdk_json_write_named_uint128(w, "data_units_read", 1446 health_page->data_units_read[0], health_page->data_units_read[1]); 1447 spdk_json_write_named_uint128(w, "data_units_written", 1448 health_page->data_units_written[0], health_page->data_units_written[1]); 1449 spdk_json_write_named_uint128(w, "host_read_commands", 1450 health_page->host_read_commands[0], health_page->host_read_commands[1]); 1451 spdk_json_write_named_uint128(w, "host_write_commands", 1452 health_page->host_write_commands[0], health_page->host_write_commands[1]); 1453 spdk_json_write_named_uint128(w, "controller_busy_time", 1454 health_page->controller_busy_time[0], health_page->controller_busy_time[1]); 1455 spdk_json_write_named_uint128(w, "power_cycles", 1456 health_page->power_cycles[0], health_page->power_cycles[1]); 1457 spdk_json_write_named_uint128(w, "power_on_hours", 1458 health_page->power_on_hours[0], health_page->power_on_hours[1]); 1459 spdk_json_write_named_uint128(w, "unsafe_shutdowns", 1460 health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]); 1461 spdk_json_write_named_uint128(w, "media_errors", 1462 health_page->media_errors[0], health_page->media_errors[1]); 1463 spdk_json_write_named_uint128(w, "num_err_log_entries", 1464 health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]); 1465 spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time); 1466 spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes", 1467 health_page->critical_temp_time); 1468 for (i = 0; i < 8; i++) { 1469 if (health_page->temp_sensor[i] != 0) { 1470 spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273); 1471 } 1472 } 1473 spdk_json_write_object_end(w); 1474 1475 spdk_jsonrpc_end_result(request, w); 1476 nvme_health_info_cleanup(context, false); 1477 } 1478 1479 static void 1480 get_health_log_page(struct spdk_nvme_health_info_context *context) 1481 { 1482 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1483 1484 if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, 1485 SPDK_NVME_GLOBAL_NS_TAG, 1486 &(context->health_page), sizeof(context->health_page), 0, 1487 get_health_log_page_completion, context)) { 1488 nvme_health_info_cleanup(context, true); 1489 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n"); 1490 } 1491 } 1492 1493 static void 1494 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1495 { 1496 struct spdk_nvme_health_info_context *context = cb_arg; 1497 1498 if (spdk_nvme_cpl_is_error(cpl)) { 1499 nvme_health_info_cleanup(context, true); 1500 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n"); 1501 } else { 1502 get_health_log_page(context); 1503 } 1504 } 1505 1506 static int 1507 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context) 1508 { 1509 struct spdk_nvme_cmd cmd = {}; 1510 1511 cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1512 cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1513 1514 return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0, 1515 get_temperature_threshold_feature_completion, context); 1516 } 1517 1518 static void 1519 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr) 1520 { 1521 struct spdk_nvme_health_info_context *context; 1522 1523 context = calloc(1, sizeof(struct spdk_nvme_health_info_context)); 1524 if (!context) { 1525 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1526 "Memory allocation error."); 1527 return; 1528 } 1529 1530 context->request = request; 1531 context->ctrlr = ctrlr; 1532 1533 if (get_temperature_threshold_feature(context)) { 1534 nvme_health_info_cleanup(context, true); 1535 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n"); 1536 } 1537 1538 return; 1539 } 1540 1541 static void 1542 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request, 1543 const struct spdk_json_val *params) 1544 { 1545 struct rpc_get_controller_health_info req = {}; 1546 struct nvme_ctrlr *nvme_ctrlr = NULL; 1547 1548 if (!params) { 1549 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1550 "Missing device name"); 1551 1552 return; 1553 } 1554 if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders, 1555 SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) { 1556 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1557 free_rpc_get_controller_health_info(&req); 1558 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1559 "Invalid parameters"); 1560 1561 return; 1562 } 1563 1564 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1565 1566 if (!nvme_ctrlr) { 1567 SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name); 1568 free_rpc_get_controller_health_info(&req); 1569 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1570 "Device not found"); 1571 return; 1572 } 1573 1574 get_controller_health_info(request, nvme_ctrlr->ctrlr); 1575 free_rpc_get_controller_health_info(&req); 1576 1577 return; 1578 } 1579 SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info", 1580 rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME) 1581 1582 struct rpc_bdev_nvme_start_discovery { 1583 char *name; 1584 char *trtype; 1585 char *adrfam; 1586 char *traddr; 1587 char *trsvcid; 1588 char *hostnqn; 1589 bool wait_for_attach; 1590 uint64_t attach_timeout_ms; 1591 struct spdk_nvme_ctrlr_opts opts; 1592 struct nvme_ctrlr_opts bdev_opts; 1593 }; 1594 1595 static void 1596 free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req) 1597 { 1598 free(req->name); 1599 free(req->trtype); 1600 free(req->adrfam); 1601 free(req->traddr); 1602 free(req->trsvcid); 1603 free(req->hostnqn); 1604 } 1605 1606 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = { 1607 {"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string}, 1608 {"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string}, 1609 {"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string}, 1610 {"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true}, 1611 {"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true}, 1612 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true}, 1613 {"wait_for_attach", offsetof(struct rpc_bdev_nvme_start_discovery, wait_for_attach), spdk_json_decode_bool, true}, 1614 {"attach_timeout_ms", offsetof(struct rpc_bdev_nvme_start_discovery, attach_timeout_ms), spdk_json_decode_uint64, true}, 1615 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 1616 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 1617 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 1618 }; 1619 1620 struct rpc_bdev_nvme_start_discovery_ctx { 1621 struct rpc_bdev_nvme_start_discovery req; 1622 struct spdk_jsonrpc_request *request; 1623 }; 1624 1625 static void 1626 rpc_bdev_nvme_start_discovery_done(void *ctx, int status) 1627 { 1628 struct spdk_jsonrpc_request *request = ctx; 1629 1630 if (status != 0) { 1631 spdk_jsonrpc_send_error_response(request, status, spdk_strerror(-status)); 1632 } else { 1633 spdk_jsonrpc_send_bool_response(request, true); 1634 } 1635 } 1636 1637 static void 1638 rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request, 1639 const struct spdk_json_val *params) 1640 { 1641 struct rpc_bdev_nvme_start_discovery_ctx *ctx; 1642 struct spdk_nvme_transport_id trid = {}; 1643 size_t len, maxlen; 1644 int rc; 1645 spdk_bdev_nvme_start_discovery_fn cb_fn; 1646 void *cb_ctx; 1647 1648 ctx = calloc(1, sizeof(*ctx)); 1649 if (!ctx) { 1650 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1651 return; 1652 } 1653 1654 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 1655 1656 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders, 1657 SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders), 1658 &ctx->req)) { 1659 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1660 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1661 "spdk_json_decode_object failed"); 1662 goto cleanup; 1663 } 1664 1665 /* Parse trstring */ 1666 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 1667 if (rc < 0) { 1668 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 1669 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 1670 ctx->req.trtype); 1671 goto cleanup; 1672 } 1673 1674 /* Parse trtype */ 1675 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 1676 assert(rc == 0); 1677 1678 /* Parse traddr */ 1679 maxlen = sizeof(trid.traddr); 1680 len = strnlen(ctx->req.traddr, maxlen); 1681 if (len == maxlen) { 1682 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 1683 ctx->req.traddr); 1684 goto cleanup; 1685 } 1686 memcpy(trid.traddr, ctx->req.traddr, len + 1); 1687 1688 /* Parse adrfam */ 1689 if (ctx->req.adrfam) { 1690 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 1691 if (rc < 0) { 1692 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 1693 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 1694 ctx->req.adrfam); 1695 goto cleanup; 1696 } 1697 } 1698 1699 /* Parse trsvcid */ 1700 if (ctx->req.trsvcid) { 1701 maxlen = sizeof(trid.trsvcid); 1702 len = strnlen(ctx->req.trsvcid, maxlen); 1703 if (len == maxlen) { 1704 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 1705 ctx->req.trsvcid); 1706 goto cleanup; 1707 } 1708 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 1709 } 1710 1711 if (ctx->req.hostnqn) { 1712 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 1713 ctx->req.hostnqn); 1714 } 1715 1716 if (ctx->req.attach_timeout_ms != 0) { 1717 ctx->req.wait_for_attach = true; 1718 } 1719 1720 ctx->request = request; 1721 cb_fn = ctx->req.wait_for_attach ? rpc_bdev_nvme_start_discovery_done : NULL; 1722 cb_ctx = ctx->req.wait_for_attach ? request : NULL; 1723 rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts, &ctx->req.bdev_opts, 1724 ctx->req.attach_timeout_ms, false, cb_fn, cb_ctx); 1725 if (rc) { 1726 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1727 } else if (!ctx->req.wait_for_attach) { 1728 rpc_bdev_nvme_start_discovery_done(request, 0); 1729 } 1730 1731 cleanup: 1732 free_rpc_bdev_nvme_start_discovery(&ctx->req); 1733 free(ctx); 1734 } 1735 SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery, 1736 SPDK_RPC_RUNTIME) 1737 1738 struct rpc_bdev_nvme_stop_discovery { 1739 char *name; 1740 }; 1741 1742 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = { 1743 {"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string}, 1744 }; 1745 1746 struct rpc_bdev_nvme_stop_discovery_ctx { 1747 struct rpc_bdev_nvme_stop_discovery req; 1748 struct spdk_jsonrpc_request *request; 1749 }; 1750 1751 static void 1752 rpc_bdev_nvme_stop_discovery_done(void *cb_ctx) 1753 { 1754 struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx; 1755 1756 spdk_jsonrpc_send_bool_response(ctx->request, true); 1757 free(ctx->req.name); 1758 free(ctx); 1759 } 1760 1761 static void 1762 rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request, 1763 const struct spdk_json_val *params) 1764 { 1765 struct rpc_bdev_nvme_stop_discovery_ctx *ctx; 1766 int rc; 1767 1768 ctx = calloc(1, sizeof(*ctx)); 1769 if (!ctx) { 1770 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1771 return; 1772 } 1773 1774 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders, 1775 SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders), 1776 &ctx->req)) { 1777 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1778 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1779 "spdk_json_decode_object failed"); 1780 goto cleanup; 1781 } 1782 1783 ctx->request = request; 1784 rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx); 1785 if (rc) { 1786 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1787 goto cleanup; 1788 } 1789 1790 return; 1791 1792 cleanup: 1793 free(ctx->req.name); 1794 free(ctx); 1795 } 1796 SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery, 1797 SPDK_RPC_RUNTIME) 1798 1799 static void 1800 rpc_bdev_nvme_get_discovery_info(struct spdk_jsonrpc_request *request, 1801 const struct spdk_json_val *params) 1802 { 1803 struct spdk_json_write_ctx *w; 1804 1805 w = spdk_jsonrpc_begin_result(request); 1806 bdev_nvme_get_discovery_info(w); 1807 spdk_jsonrpc_end_result(request, w); 1808 } 1809 SPDK_RPC_REGISTER("bdev_nvme_get_discovery_info", rpc_bdev_nvme_get_discovery_info, 1810 SPDK_RPC_RUNTIME) 1811 1812 enum error_injection_cmd_type { 1813 NVME_ADMIN_CMD = 1, 1814 NVME_IO_CMD, 1815 }; 1816 1817 struct rpc_add_error_injection { 1818 char *name; 1819 enum error_injection_cmd_type cmd_type; 1820 uint8_t opc; 1821 bool do_not_submit; 1822 uint64_t timeout_in_us; 1823 uint32_t err_count; 1824 uint8_t sct; 1825 uint8_t sc; 1826 }; 1827 1828 static void 1829 free_rpc_add_error_injection(struct rpc_add_error_injection *req) 1830 { 1831 free(req->name); 1832 } 1833 1834 static int 1835 rpc_error_injection_decode_cmd_type(const struct spdk_json_val *val, void *out) 1836 { 1837 int *cmd_type = out; 1838 1839 if (spdk_json_strequal(val, "admin")) { 1840 *cmd_type = NVME_ADMIN_CMD; 1841 } else if (spdk_json_strequal(val, "io")) { 1842 *cmd_type = NVME_IO_CMD; 1843 } else { 1844 SPDK_ERRLOG("Invalid parameter value: cmd_type\n"); 1845 return -EINVAL; 1846 } 1847 1848 return 0; 1849 } 1850 1851 static const struct spdk_json_object_decoder rpc_add_error_injection_decoders[] = { 1852 { "name", offsetof(struct rpc_add_error_injection, name), spdk_json_decode_string }, 1853 { "cmd_type", offsetof(struct rpc_add_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1854 { "opc", offsetof(struct rpc_add_error_injection, opc), spdk_json_decode_uint8 }, 1855 { "do_not_submit", offsetof(struct rpc_add_error_injection, do_not_submit), spdk_json_decode_bool, true }, 1856 { "timeout_in_us", offsetof(struct rpc_add_error_injection, timeout_in_us), spdk_json_decode_uint64, true }, 1857 { "err_count", offsetof(struct rpc_add_error_injection, err_count), spdk_json_decode_uint32, true }, 1858 { "sct", offsetof(struct rpc_add_error_injection, sct), spdk_json_decode_uint8, true}, 1859 { "sc", offsetof(struct rpc_add_error_injection, sc), spdk_json_decode_uint8, true}, 1860 }; 1861 1862 struct rpc_add_error_injection_ctx { 1863 struct spdk_jsonrpc_request *request; 1864 struct rpc_add_error_injection rpc; 1865 }; 1866 1867 static void 1868 rpc_add_error_injection_done(struct spdk_io_channel_iter *i, int status) 1869 { 1870 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1871 1872 if (status) { 1873 spdk_jsonrpc_send_error_response(ctx->request, status, 1874 "Failed to add the error injection."); 1875 } else { 1876 spdk_jsonrpc_send_bool_response(ctx->request, true); 1877 } 1878 1879 free_rpc_add_error_injection(&ctx->rpc); 1880 free(ctx); 1881 } 1882 1883 static void 1884 rpc_add_error_injection_per_channel(struct spdk_io_channel_iter *i) 1885 { 1886 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1887 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1888 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1889 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1890 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1891 int rc = 0; 1892 1893 if (qpair != NULL) { 1894 rc = spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc, 1895 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1896 ctx->rpc.sct, ctx->rpc.sc); 1897 } 1898 1899 spdk_for_each_channel_continue(i, rc); 1900 } 1901 1902 static void 1903 rpc_bdev_nvme_add_error_injection( 1904 struct spdk_jsonrpc_request *request, 1905 const struct spdk_json_val *params) 1906 { 1907 struct rpc_add_error_injection_ctx *ctx; 1908 struct nvme_ctrlr *nvme_ctrlr; 1909 int rc; 1910 1911 ctx = calloc(1, sizeof(*ctx)); 1912 if (!ctx) { 1913 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1914 return; 1915 } 1916 ctx->rpc.err_count = 1; 1917 ctx->request = request; 1918 1919 if (spdk_json_decode_object(params, 1920 rpc_add_error_injection_decoders, 1921 SPDK_COUNTOF(rpc_add_error_injection_decoders), 1922 &ctx->rpc)) { 1923 spdk_jsonrpc_send_error_response(request, -EINVAL, 1924 "Failed to parse the request"); 1925 goto cleanup; 1926 } 1927 1928 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 1929 if (nvme_ctrlr == NULL) { 1930 SPDK_ERRLOG("No controller with specified name was found.\n"); 1931 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1932 goto cleanup; 1933 } 1934 1935 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 1936 spdk_for_each_channel(nvme_ctrlr, 1937 rpc_add_error_injection_per_channel, 1938 ctx, 1939 rpc_add_error_injection_done); 1940 1941 return; 1942 } else { 1943 rc = spdk_nvme_qpair_add_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc, 1944 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1945 ctx->rpc.sct, ctx->rpc.sc); 1946 if (rc) { 1947 spdk_jsonrpc_send_error_response(request, -rc, 1948 "Failed to add the error injection"); 1949 } else { 1950 spdk_jsonrpc_send_bool_response(ctx->request, true); 1951 } 1952 } 1953 1954 cleanup: 1955 free_rpc_add_error_injection(&ctx->rpc); 1956 free(ctx); 1957 } 1958 SPDK_RPC_REGISTER("bdev_nvme_add_error_injection", rpc_bdev_nvme_add_error_injection, 1959 SPDK_RPC_RUNTIME) 1960 1961 struct rpc_remove_error_injection { 1962 char *name; 1963 enum error_injection_cmd_type cmd_type; 1964 uint8_t opc; 1965 }; 1966 1967 static void 1968 free_rpc_remove_error_injection(struct rpc_remove_error_injection *req) 1969 { 1970 free(req->name); 1971 } 1972 1973 static const struct spdk_json_object_decoder rpc_remove_error_injection_decoders[] = { 1974 { "name", offsetof(struct rpc_remove_error_injection, name), spdk_json_decode_string }, 1975 { "cmd_type", offsetof(struct rpc_remove_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1976 { "opc", offsetof(struct rpc_remove_error_injection, opc), spdk_json_decode_uint8 }, 1977 }; 1978 1979 struct rpc_remove_error_injection_ctx { 1980 struct spdk_jsonrpc_request *request; 1981 struct rpc_remove_error_injection rpc; 1982 }; 1983 1984 static void 1985 rpc_remove_error_injection_done(struct spdk_io_channel_iter *i, int status) 1986 { 1987 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1988 1989 if (status) { 1990 spdk_jsonrpc_send_error_response(ctx->request, status, 1991 "Failed to remove the error injection."); 1992 } else { 1993 spdk_jsonrpc_send_bool_response(ctx->request, true); 1994 } 1995 1996 free_rpc_remove_error_injection(&ctx->rpc); 1997 free(ctx); 1998 } 1999 2000 static void 2001 rpc_remove_error_injection_per_channel(struct spdk_io_channel_iter *i) 2002 { 2003 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 2004 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2005 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 2006 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 2007 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 2008 2009 if (qpair != NULL) { 2010 spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc); 2011 } 2012 2013 spdk_for_each_channel_continue(i, 0); 2014 } 2015 2016 static void 2017 rpc_bdev_nvme_remove_error_injection(struct spdk_jsonrpc_request *request, 2018 const struct spdk_json_val *params) 2019 { 2020 struct rpc_remove_error_injection_ctx *ctx; 2021 struct nvme_ctrlr *nvme_ctrlr; 2022 2023 ctx = calloc(1, sizeof(*ctx)); 2024 if (!ctx) { 2025 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2026 return; 2027 } 2028 ctx->request = request; 2029 2030 if (spdk_json_decode_object(params, 2031 rpc_remove_error_injection_decoders, 2032 SPDK_COUNTOF(rpc_remove_error_injection_decoders), 2033 &ctx->rpc)) { 2034 spdk_jsonrpc_send_error_response(request, -EINVAL, 2035 "Failed to parse the request"); 2036 goto cleanup; 2037 } 2038 2039 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 2040 if (nvme_ctrlr == NULL) { 2041 SPDK_ERRLOG("No controller with specified name was found.\n"); 2042 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 2043 goto cleanup; 2044 } 2045 2046 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 2047 spdk_for_each_channel(nvme_ctrlr, 2048 rpc_remove_error_injection_per_channel, 2049 ctx, 2050 rpc_remove_error_injection_done); 2051 return; 2052 } else { 2053 spdk_nvme_qpair_remove_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc); 2054 spdk_jsonrpc_send_bool_response(ctx->request, true); 2055 } 2056 2057 cleanup: 2058 free_rpc_remove_error_injection(&ctx->rpc); 2059 free(ctx); 2060 } 2061 SPDK_RPC_REGISTER("bdev_nvme_remove_error_injection", rpc_bdev_nvme_remove_error_injection, 2062 SPDK_RPC_RUNTIME) 2063 2064 struct rpc_get_io_paths { 2065 char *name; 2066 }; 2067 2068 static void 2069 free_rpc_get_io_paths(struct rpc_get_io_paths *r) 2070 { 2071 free(r->name); 2072 } 2073 2074 static const struct spdk_json_object_decoder rpc_get_io_paths_decoders[] = { 2075 {"name", offsetof(struct rpc_get_io_paths, name), spdk_json_decode_string, true}, 2076 }; 2077 2078 struct rpc_get_io_paths_ctx { 2079 struct rpc_get_io_paths req; 2080 struct spdk_jsonrpc_request *request; 2081 struct spdk_json_write_ctx *w; 2082 }; 2083 2084 static void 2085 rpc_bdev_nvme_get_io_paths_done(struct spdk_io_channel_iter *i, int status) 2086 { 2087 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2088 2089 spdk_json_write_array_end(ctx->w); 2090 2091 spdk_json_write_object_end(ctx->w); 2092 2093 spdk_jsonrpc_end_result(ctx->request, ctx->w); 2094 2095 free_rpc_get_io_paths(&ctx->req); 2096 free(ctx); 2097 } 2098 2099 static void 2100 _rpc_bdev_nvme_get_io_paths(struct spdk_io_channel_iter *i) 2101 { 2102 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 2103 struct nvme_poll_group *group = spdk_io_channel_get_ctx(_ch); 2104 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2105 struct nvme_qpair *qpair; 2106 struct nvme_io_path *io_path; 2107 struct nvme_bdev *nbdev; 2108 2109 spdk_json_write_object_begin(ctx->w); 2110 2111 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 2112 2113 spdk_json_write_named_array_begin(ctx->w, "io_paths"); 2114 2115 TAILQ_FOREACH(qpair, &group->qpair_list, tailq) { 2116 TAILQ_FOREACH(io_path, &qpair->io_path_list, tailq) { 2117 nbdev = io_path->nvme_ns->bdev; 2118 2119 if (ctx->req.name != NULL && 2120 strcmp(ctx->req.name, nbdev->disk.name) != 0) { 2121 continue; 2122 } 2123 2124 nvme_io_path_info_json(ctx->w, io_path); 2125 } 2126 } 2127 2128 spdk_json_write_array_end(ctx->w); 2129 2130 spdk_json_write_object_end(ctx->w); 2131 2132 spdk_for_each_channel_continue(i, 0); 2133 } 2134 2135 static void 2136 rpc_bdev_nvme_get_io_paths(struct spdk_jsonrpc_request *request, 2137 const struct spdk_json_val *params) 2138 { 2139 struct rpc_get_io_paths_ctx *ctx; 2140 2141 ctx = calloc(1, sizeof(*ctx)); 2142 if (ctx == NULL) { 2143 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2144 return; 2145 } 2146 2147 if (params != NULL && 2148 spdk_json_decode_object(params, rpc_get_io_paths_decoders, 2149 SPDK_COUNTOF(rpc_get_io_paths_decoders), 2150 &ctx->req)) { 2151 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 2152 "bdev_nvme_get_io_paths requires no parameters"); 2153 2154 free_rpc_get_io_paths(&ctx->req); 2155 free(ctx); 2156 return; 2157 } 2158 2159 ctx->request = request; 2160 ctx->w = spdk_jsonrpc_begin_result(request); 2161 2162 spdk_json_write_object_begin(ctx->w); 2163 2164 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 2165 2166 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 2167 _rpc_bdev_nvme_get_io_paths, 2168 ctx, 2169 rpc_bdev_nvme_get_io_paths_done); 2170 } 2171 SPDK_RPC_REGISTER("bdev_nvme_get_io_paths", rpc_bdev_nvme_get_io_paths, SPDK_RPC_RUNTIME) 2172 2173 struct rpc_bdev_nvme_set_preferred_path { 2174 char *name; 2175 uint16_t cntlid; 2176 }; 2177 2178 static void 2179 free_rpc_bdev_nvme_set_preferred_path(struct rpc_bdev_nvme_set_preferred_path *req) 2180 { 2181 free(req->name); 2182 } 2183 2184 static const struct spdk_json_object_decoder rpc_bdev_nvme_set_preferred_path_decoders[] = { 2185 {"name", offsetof(struct rpc_bdev_nvme_set_preferred_path, name), spdk_json_decode_string}, 2186 {"cntlid", offsetof(struct rpc_bdev_nvme_set_preferred_path, cntlid), spdk_json_decode_uint16}, 2187 }; 2188 2189 struct rpc_bdev_nvme_set_preferred_path_ctx { 2190 struct rpc_bdev_nvme_set_preferred_path req; 2191 struct spdk_jsonrpc_request *request; 2192 }; 2193 2194 static void 2195 rpc_bdev_nvme_set_preferred_path_done(void *cb_arg, int rc) 2196 { 2197 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx = cb_arg; 2198 2199 if (rc == 0) { 2200 spdk_jsonrpc_send_bool_response(ctx->request, true); 2201 } else { 2202 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2203 } 2204 2205 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2206 free(ctx); 2207 } 2208 2209 static void 2210 rpc_bdev_nvme_set_preferred_path(struct spdk_jsonrpc_request *request, 2211 const struct spdk_json_val *params) 2212 { 2213 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx; 2214 2215 ctx = calloc(1, sizeof(*ctx)); 2216 if (ctx == NULL) { 2217 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2218 return; 2219 } 2220 2221 if (spdk_json_decode_object(params, rpc_bdev_nvme_set_preferred_path_decoders, 2222 SPDK_COUNTOF(rpc_bdev_nvme_set_preferred_path_decoders), 2223 &ctx->req)) { 2224 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2225 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2226 "spdk_json_decode_object failed"); 2227 goto cleanup; 2228 } 2229 2230 ctx->request = request; 2231 2232 bdev_nvme_set_preferred_path(ctx->req.name, ctx->req.cntlid, 2233 rpc_bdev_nvme_set_preferred_path_done, ctx); 2234 return; 2235 2236 cleanup: 2237 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2238 free(ctx); 2239 } 2240 SPDK_RPC_REGISTER("bdev_nvme_set_preferred_path", rpc_bdev_nvme_set_preferred_path, 2241 SPDK_RPC_RUNTIME) 2242 2243 struct rpc_set_multipath_policy { 2244 char *name; 2245 enum bdev_nvme_multipath_policy policy; 2246 enum bdev_nvme_multipath_selector selector; 2247 uint32_t rr_min_io; 2248 }; 2249 2250 static void 2251 free_rpc_set_multipath_policy(struct rpc_set_multipath_policy *req) 2252 { 2253 free(req->name); 2254 } 2255 2256 static int 2257 rpc_decode_mp_policy(const struct spdk_json_val *val, void *out) 2258 { 2259 enum bdev_nvme_multipath_policy *policy = out; 2260 2261 if (spdk_json_strequal(val, "active_passive") == true) { 2262 *policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 2263 } else if (spdk_json_strequal(val, "active_active") == true) { 2264 *policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 2265 } else { 2266 SPDK_NOTICELOG("Invalid parameter value: policy\n"); 2267 return -EINVAL; 2268 } 2269 2270 return 0; 2271 } 2272 2273 static int 2274 rpc_decode_mp_selector(const struct spdk_json_val *val, void *out) 2275 { 2276 enum bdev_nvme_multipath_selector *selector = out; 2277 2278 if (spdk_json_strequal(val, "round_robin") == true) { 2279 *selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN; 2280 } else if (spdk_json_strequal(val, "queue_depth") == true) { 2281 *selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH; 2282 } else { 2283 SPDK_NOTICELOG("Invalid parameter value: selector\n"); 2284 return -EINVAL; 2285 } 2286 2287 return 0; 2288 } 2289 2290 static const struct spdk_json_object_decoder rpc_set_multipath_policy_decoders[] = { 2291 {"name", offsetof(struct rpc_set_multipath_policy, name), spdk_json_decode_string}, 2292 {"policy", offsetof(struct rpc_set_multipath_policy, policy), rpc_decode_mp_policy}, 2293 {"selector", offsetof(struct rpc_set_multipath_policy, selector), rpc_decode_mp_selector, true}, 2294 {"rr_min_io", offsetof(struct rpc_set_multipath_policy, rr_min_io), spdk_json_decode_uint32, true}, 2295 }; 2296 2297 struct rpc_set_multipath_policy_ctx { 2298 struct rpc_set_multipath_policy req; 2299 struct spdk_jsonrpc_request *request; 2300 }; 2301 2302 static void 2303 rpc_bdev_nvme_set_multipath_policy_done(void *cb_arg, int rc) 2304 { 2305 struct rpc_set_multipath_policy_ctx *ctx = cb_arg; 2306 2307 if (rc == 0) { 2308 spdk_jsonrpc_send_bool_response(ctx->request, true); 2309 } else { 2310 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2311 } 2312 2313 free_rpc_set_multipath_policy(&ctx->req); 2314 free(ctx); 2315 } 2316 2317 static void 2318 rpc_bdev_nvme_set_multipath_policy(struct spdk_jsonrpc_request *request, 2319 const struct spdk_json_val *params) 2320 { 2321 struct rpc_set_multipath_policy_ctx *ctx; 2322 2323 ctx = calloc(1, sizeof(*ctx)); 2324 if (ctx == NULL) { 2325 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2326 return; 2327 } 2328 2329 ctx->req.rr_min_io = UINT32_MAX; 2330 2331 if (spdk_json_decode_object(params, rpc_set_multipath_policy_decoders, 2332 SPDK_COUNTOF(rpc_set_multipath_policy_decoders), 2333 &ctx->req)) { 2334 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2335 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2336 "spdk_json_decode_object failed"); 2337 goto cleanup; 2338 } 2339 2340 ctx->request = request; 2341 2342 if (ctx->req.policy != BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE && ctx->req.selector > 0) { 2343 SPDK_ERRLOG("selector only works in active_active mode\n"); 2344 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2345 "spdk_json_decode_object failed"); 2346 goto cleanup; 2347 } 2348 2349 bdev_nvme_set_multipath_policy(ctx->req.name, ctx->req.policy, ctx->req.selector, 2350 ctx->req.rr_min_io, 2351 rpc_bdev_nvme_set_multipath_policy_done, ctx); 2352 return; 2353 2354 cleanup: 2355 free_rpc_set_multipath_policy(&ctx->req); 2356 free(ctx); 2357 } 2358 SPDK_RPC_REGISTER("bdev_nvme_set_multipath_policy", rpc_bdev_nvme_set_multipath_policy, 2359 SPDK_RPC_RUNTIME) 2360 2361 struct rpc_bdev_nvme_start_mdns_discovery { 2362 char *name; 2363 char *svcname; 2364 char *hostnqn; 2365 struct spdk_nvme_ctrlr_opts opts; 2366 struct nvme_ctrlr_opts bdev_opts; 2367 }; 2368 2369 static void 2370 free_rpc_bdev_nvme_start_mdns_discovery(struct rpc_bdev_nvme_start_mdns_discovery *req) 2371 { 2372 free(req->name); 2373 free(req->svcname); 2374 free(req->hostnqn); 2375 } 2376 2377 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_mdns_discovery_decoders[] = { 2378 {"name", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, name), spdk_json_decode_string}, 2379 {"svcname", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, svcname), spdk_json_decode_string}, 2380 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, hostnqn), spdk_json_decode_string, true}, 2381 }; 2382 2383 struct rpc_bdev_nvme_start_mdns_discovery_ctx { 2384 struct rpc_bdev_nvme_start_mdns_discovery req; 2385 struct spdk_jsonrpc_request *request; 2386 }; 2387 2388 static void 2389 rpc_bdev_nvme_start_mdns_discovery(struct spdk_jsonrpc_request *request, 2390 const struct spdk_json_val *params) 2391 { 2392 struct rpc_bdev_nvme_start_mdns_discovery_ctx *ctx; 2393 int rc; 2394 2395 ctx = calloc(1, sizeof(*ctx)); 2396 if (!ctx) { 2397 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2398 return; 2399 } 2400 2401 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 2402 2403 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_mdns_discovery_decoders, 2404 SPDK_COUNTOF(rpc_bdev_nvme_start_mdns_discovery_decoders), 2405 &ctx->req)) { 2406 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2407 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2408 "spdk_json_decode_object failed"); 2409 goto cleanup; 2410 } 2411 2412 if (ctx->req.hostnqn) { 2413 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 2414 ctx->req.hostnqn); 2415 } 2416 ctx->request = request; 2417 rc = bdev_nvme_start_mdns_discovery(ctx->req.name, ctx->req.svcname, &ctx->req.opts, 2418 &ctx->req.bdev_opts); 2419 if (rc) { 2420 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2421 } else { 2422 spdk_jsonrpc_send_bool_response(request, true); 2423 } 2424 2425 cleanup: 2426 free_rpc_bdev_nvme_start_mdns_discovery(&ctx->req); 2427 free(ctx); 2428 } 2429 SPDK_RPC_REGISTER("bdev_nvme_start_mdns_discovery", rpc_bdev_nvme_start_mdns_discovery, 2430 SPDK_RPC_RUNTIME) 2431 2432 struct rpc_bdev_nvme_stop_mdns_discovery { 2433 char *name; 2434 }; 2435 2436 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_mdns_discovery_decoders[] = { 2437 {"name", offsetof(struct rpc_bdev_nvme_stop_mdns_discovery, name), spdk_json_decode_string}, 2438 }; 2439 2440 struct rpc_bdev_nvme_stop_mdns_discovery_ctx { 2441 struct rpc_bdev_nvme_stop_mdns_discovery req; 2442 struct spdk_jsonrpc_request *request; 2443 }; 2444 2445 static void 2446 rpc_bdev_nvme_stop_mdns_discovery(struct spdk_jsonrpc_request *request, 2447 const struct spdk_json_val *params) 2448 { 2449 struct rpc_bdev_nvme_stop_mdns_discovery_ctx *ctx; 2450 int rc; 2451 2452 ctx = calloc(1, sizeof(*ctx)); 2453 if (!ctx) { 2454 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2455 return; 2456 } 2457 2458 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_mdns_discovery_decoders, 2459 SPDK_COUNTOF(rpc_bdev_nvme_stop_mdns_discovery_decoders), 2460 &ctx->req)) { 2461 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2462 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2463 "spdk_json_decode_object failed"); 2464 goto cleanup; 2465 } 2466 2467 ctx->request = request; 2468 rc = bdev_nvme_stop_mdns_discovery(ctx->req.name); 2469 2470 if (rc) { 2471 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2472 goto cleanup; 2473 } 2474 spdk_jsonrpc_send_bool_response(ctx->request, true); 2475 2476 cleanup: 2477 free(ctx->req.name); 2478 free(ctx); 2479 } 2480 SPDK_RPC_REGISTER("bdev_nvme_stop_mdns_discovery", rpc_bdev_nvme_stop_mdns_discovery, 2481 SPDK_RPC_RUNTIME) 2482 2483 static void 2484 rpc_bdev_nvme_get_mdns_discovery_info(struct spdk_jsonrpc_request *request, 2485 const struct spdk_json_val *params) 2486 { 2487 bdev_nvme_get_mdns_discovery_info(request); 2488 } 2489 2490 SPDK_RPC_REGISTER("bdev_nvme_get_mdns_discovery_info", rpc_bdev_nvme_get_mdns_discovery_info, 2491 SPDK_RPC_RUNTIME) 2492 2493 struct rpc_get_path_stat { 2494 char *name; 2495 }; 2496 2497 struct path_stat { 2498 struct spdk_bdev_io_stat stat; 2499 struct spdk_nvme_transport_id trid; 2500 struct nvme_ns *ns; 2501 }; 2502 2503 struct rpc_bdev_nvme_path_stat_ctx { 2504 struct spdk_jsonrpc_request *request; 2505 struct path_stat *path_stat; 2506 uint32_t num_paths; 2507 struct spdk_bdev_desc *desc; 2508 }; 2509 2510 static void 2511 free_rpc_get_path_stat(struct rpc_get_path_stat *req) 2512 { 2513 free(req->name); 2514 } 2515 2516 static const struct spdk_json_object_decoder rpc_get_path_stat_decoders[] = { 2517 {"name", offsetof(struct rpc_get_path_stat, name), spdk_json_decode_string}, 2518 }; 2519 2520 static void 2521 dummy_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx) 2522 { 2523 } 2524 2525 static void 2526 rpc_bdev_nvme_path_stat_per_channel(struct spdk_io_channel_iter *i) 2527 { 2528 struct rpc_bdev_nvme_path_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2529 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 2530 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2531 struct nvme_io_path *io_path; 2532 struct path_stat *path_stat; 2533 uint32_t j; 2534 2535 assert(ctx->num_paths != 0); 2536 2537 for (j = 0; j < ctx->num_paths; j++) { 2538 path_stat = &ctx->path_stat[j]; 2539 2540 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 2541 if (path_stat->ns == io_path->nvme_ns) { 2542 assert(io_path->stat != NULL); 2543 spdk_bdev_add_io_stat(&path_stat->stat, io_path->stat); 2544 } 2545 } 2546 } 2547 2548 spdk_for_each_channel_continue(i, 0); 2549 } 2550 2551 static void 2552 rpc_bdev_nvme_path_stat_done(struct spdk_io_channel_iter *i, int status) 2553 { 2554 struct rpc_bdev_nvme_path_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2555 struct nvme_bdev *nbdev = spdk_io_channel_iter_get_io_device(i); 2556 struct spdk_json_write_ctx *w; 2557 struct path_stat *path_stat; 2558 uint32_t j; 2559 2560 assert(ctx->num_paths != 0); 2561 2562 w = spdk_jsonrpc_begin_result(ctx->request); 2563 spdk_json_write_object_begin(w); 2564 spdk_json_write_named_string(w, "name", nbdev->disk.name); 2565 spdk_json_write_named_array_begin(w, "stats"); 2566 2567 for (j = 0; j < ctx->num_paths; j++) { 2568 path_stat = &ctx->path_stat[j]; 2569 spdk_json_write_object_begin(w); 2570 2571 spdk_json_write_named_object_begin(w, "trid"); 2572 nvme_bdev_dump_trid_json(&path_stat->trid, w); 2573 spdk_json_write_object_end(w); 2574 2575 spdk_json_write_named_object_begin(w, "stat"); 2576 spdk_bdev_dump_io_stat_json(&path_stat->stat, w); 2577 spdk_json_write_object_end(w); 2578 2579 spdk_json_write_object_end(w); 2580 } 2581 2582 spdk_json_write_array_end(w); 2583 spdk_json_write_object_end(w); 2584 spdk_jsonrpc_end_result(ctx->request, w); 2585 2586 spdk_bdev_close(ctx->desc); 2587 free(ctx->path_stat); 2588 free(ctx); 2589 } 2590 2591 static void 2592 rpc_bdev_nvme_get_path_iostat(struct spdk_jsonrpc_request *request, 2593 const struct spdk_json_val *params) 2594 { 2595 struct rpc_get_path_stat req = {}; 2596 struct spdk_bdev_desc *desc = NULL; 2597 struct spdk_bdev *bdev; 2598 struct nvme_bdev *nbdev; 2599 struct nvme_ns *nvme_ns; 2600 struct path_stat *path_stat; 2601 struct rpc_bdev_nvme_path_stat_ctx *ctx; 2602 struct spdk_bdev_nvme_opts opts; 2603 uint32_t num_paths = 0, i = 0; 2604 int rc; 2605 2606 bdev_nvme_get_opts(&opts); 2607 if (!opts.io_path_stat) { 2608 SPDK_ERRLOG("RPC not enabled if enable_io_path_stat is false\n"); 2609 spdk_jsonrpc_send_error_response(request, -EPERM, 2610 "RPC not enabled if enable_io_path_stat is false"); 2611 return; 2612 } 2613 2614 if (spdk_json_decode_object(params, rpc_get_path_stat_decoders, 2615 SPDK_COUNTOF(rpc_get_path_stat_decoders), 2616 &req)) { 2617 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2618 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2619 "spdk_json_decode_object failed"); 2620 free_rpc_get_path_stat(&req); 2621 return; 2622 } 2623 2624 rc = spdk_bdev_open_ext(req.name, false, dummy_bdev_event_cb, NULL, &desc); 2625 if (rc != 0) { 2626 SPDK_ERRLOG("Failed to open bdev '%s': %d\n", req.name, rc); 2627 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2628 free_rpc_get_path_stat(&req); 2629 return; 2630 } 2631 2632 free_rpc_get_path_stat(&req); 2633 2634 ctx = calloc(1, sizeof(struct rpc_bdev_nvme_path_stat_ctx)); 2635 if (ctx == NULL) { 2636 spdk_bdev_close(desc); 2637 SPDK_ERRLOG("Failed to allocate rpc_bdev_nvme_path_stat_ctx struct\n"); 2638 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2639 return; 2640 } 2641 2642 bdev = spdk_bdev_desc_get_bdev(desc); 2643 nbdev = bdev->ctxt; 2644 2645 pthread_mutex_lock(&nbdev->mutex); 2646 if (nbdev->ref == 0) { 2647 rc = -ENOENT; 2648 goto err; 2649 } 2650 2651 num_paths = nbdev->ref; 2652 path_stat = calloc(num_paths, sizeof(struct path_stat)); 2653 if (path_stat == NULL) { 2654 rc = -ENOMEM; 2655 SPDK_ERRLOG("Failed to allocate memory for path_stat.\n"); 2656 goto err; 2657 } 2658 2659 /* store the history stat */ 2660 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) { 2661 assert(i < num_paths); 2662 path_stat[i].ns = nvme_ns; 2663 path_stat[i].trid = nvme_ns->ctrlr->active_path_id->trid; 2664 2665 assert(nvme_ns->stat != NULL); 2666 memcpy(&path_stat[i].stat, nvme_ns->stat, sizeof(struct spdk_bdev_io_stat)); 2667 i++; 2668 } 2669 pthread_mutex_unlock(&nbdev->mutex); 2670 2671 ctx->request = request; 2672 ctx->desc = desc; 2673 ctx->path_stat = path_stat; 2674 ctx->num_paths = num_paths; 2675 2676 spdk_for_each_channel(nbdev, 2677 rpc_bdev_nvme_path_stat_per_channel, 2678 ctx, 2679 rpc_bdev_nvme_path_stat_done); 2680 return; 2681 2682 err: 2683 pthread_mutex_unlock(&nbdev->mutex); 2684 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2685 spdk_bdev_close(desc); 2686 free(ctx); 2687 } 2688 SPDK_RPC_REGISTER("bdev_nvme_get_path_iostat", rpc_bdev_nvme_get_path_iostat, 2689 SPDK_RPC_RUNTIME) 2690