1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 * Copyright (c) 2022 Dell Inc, or its subsidiaries. All rights reserved. 6 */ 7 8 #include "spdk/stdinc.h" 9 10 #include "bdev_nvme.h" 11 12 #include "spdk/config.h" 13 14 #include "spdk/string.h" 15 #include "spdk/rpc.h" 16 #include "spdk/util.h" 17 #include "spdk/env.h" 18 #include "spdk/nvme.h" 19 #include "spdk/nvme_spec.h" 20 21 #include "spdk/log.h" 22 #include "spdk/bdev_module.h" 23 24 static bool g_tls_log = false; 25 26 static int 27 rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out) 28 { 29 enum spdk_bdev_timeout_action *action = out; 30 31 if (spdk_json_strequal(val, "none") == true) { 32 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE; 33 } else if (spdk_json_strequal(val, "abort") == true) { 34 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT; 35 } else if (spdk_json_strequal(val, "reset") == true) { 36 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET; 37 } else { 38 SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n"); 39 return -EINVAL; 40 } 41 42 return 0; 43 } 44 45 static int 46 rpc_decode_digest(const struct spdk_json_val *val, void *out) 47 { 48 uint32_t *flags = out; 49 char *digest = NULL; 50 int rc; 51 52 rc = spdk_json_decode_string(val, &digest); 53 if (rc != 0) { 54 return rc; 55 } 56 57 rc = spdk_nvme_dhchap_get_digest_id(digest); 58 if (rc >= 0) { 59 *flags |= SPDK_BIT(rc); 60 rc = 0; 61 } 62 free(digest); 63 64 return rc; 65 } 66 67 static int 68 rpc_decode_digest_array(const struct spdk_json_val *val, void *out) 69 { 70 uint32_t *flags = out; 71 size_t count; 72 73 *flags = 0; 74 75 return spdk_json_decode_array(val, rpc_decode_digest, out, 32, &count, 0); 76 } 77 78 static int 79 rpc_decode_dhgroup(const struct spdk_json_val *val, void *out) 80 { 81 uint32_t *flags = out; 82 char *dhgroup = NULL; 83 int rc; 84 85 rc = spdk_json_decode_string(val, &dhgroup); 86 if (rc != 0) { 87 return rc; 88 } 89 90 rc = spdk_nvme_dhchap_get_dhgroup_id(dhgroup); 91 if (rc >= 0) { 92 *flags |= SPDK_BIT(rc); 93 rc = 0; 94 } 95 free(dhgroup); 96 97 return rc; 98 } 99 100 static int 101 rpc_decode_dhgroup_array(const struct spdk_json_val *val, void *out) 102 { 103 uint32_t *flags = out; 104 size_t count; 105 106 *flags = 0; 107 108 return spdk_json_decode_array(val, rpc_decode_dhgroup, out, 32, &count, 0); 109 } 110 111 static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = { 112 {"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true}, 113 {"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true}, 114 {"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true}, 115 {"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true}, 116 {"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true}, 117 {"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true}, 118 {"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true}, 119 {"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), 
spdk_json_decode_uint32, true}, 120 {"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true}, 121 {"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true}, 122 {"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true}, 123 {"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true}, 124 {"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 125 {"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true}, 126 {"transport_ack_timeout", offsetof(struct spdk_bdev_nvme_opts, transport_ack_timeout), spdk_json_decode_uint8, true}, 127 {"ctrlr_loss_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 128 {"reconnect_delay_sec", offsetof(struct spdk_bdev_nvme_opts, reconnect_delay_sec), spdk_json_decode_uint32, true}, 129 {"fast_io_fail_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 130 {"disable_auto_failback", offsetof(struct spdk_bdev_nvme_opts, disable_auto_failback), spdk_json_decode_bool, true}, 131 {"generate_uuids", offsetof(struct spdk_bdev_nvme_opts, generate_uuids), spdk_json_decode_bool, true}, 132 {"transport_tos", offsetof(struct spdk_bdev_nvme_opts, transport_tos), spdk_json_decode_uint8, true}, 133 {"nvme_error_stat", offsetof(struct spdk_bdev_nvme_opts, nvme_error_stat), spdk_json_decode_bool, true}, 134 {"rdma_srq_size", offsetof(struct spdk_bdev_nvme_opts, rdma_srq_size), spdk_json_decode_uint32, true}, 135 {"io_path_stat", offsetof(struct spdk_bdev_nvme_opts, io_path_stat), spdk_json_decode_bool, true}, 136 {"allow_accel_sequence", offsetof(struct spdk_bdev_nvme_opts, allow_accel_sequence), spdk_json_decode_bool, true}, 137 {"rdma_max_cq_size", offsetof(struct spdk_bdev_nvme_opts, rdma_max_cq_size), spdk_json_decode_uint32, true}, 138 {"rdma_cm_event_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, rdma_cm_event_timeout_ms), spdk_json_decode_uint16, true}, 139 {"dhchap_digests", offsetof(struct spdk_bdev_nvme_opts, dhchap_digests), rpc_decode_digest_array, true}, 140 {"dhchap_dhgroups", offsetof(struct spdk_bdev_nvme_opts, dhchap_dhgroups), rpc_decode_dhgroup_array, true}, 141 }; 142 143 static void 144 rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request, 145 const struct spdk_json_val *params) 146 { 147 struct spdk_bdev_nvme_opts opts; 148 int rc; 149 150 bdev_nvme_get_opts(&opts); 151 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders, 152 SPDK_COUNTOF(rpc_bdev_nvme_options_decoders), 153 &opts)) { 154 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 155 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 156 "spdk_json_decode_object failed"); 157 return; 158 } 159 160 rc = bdev_nvme_set_opts(&opts); 161 if (rc == -EPERM) { 162 spdk_jsonrpc_send_error_response(request, -EPERM, 163 "RPC not permitted with nvme controllers already attached"); 164 } else if (rc) { 165 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 166 } else { 167 spdk_jsonrpc_send_bool_response(request, true); 168 } 169 170 return; 171 } 172 SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options, 173 SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME) 174 175 struct 
rpc_bdev_nvme_hotplug { 176 bool enabled; 177 uint64_t period_us; 178 }; 179 180 static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = { 181 {"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false}, 182 {"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true}, 183 }; 184 185 static void 186 rpc_bdev_nvme_set_hotplug_done(void *ctx) 187 { 188 struct spdk_jsonrpc_request *request = ctx; 189 190 spdk_jsonrpc_send_bool_response(request, true); 191 } 192 193 static void 194 rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request, 195 const struct spdk_json_val *params) 196 { 197 struct rpc_bdev_nvme_hotplug req = {false, 0}; 198 int rc; 199 200 if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders, 201 SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) { 202 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 203 rc = -EINVAL; 204 goto invalid; 205 } 206 207 rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done, 208 request); 209 if (rc) { 210 goto invalid; 211 } 212 213 return; 214 invalid: 215 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc)); 216 } 217 SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME) 218 219 enum bdev_nvme_multipath_mode { 220 BDEV_NVME_MP_MODE_FAILOVER, 221 BDEV_NVME_MP_MODE_MULTIPATH, 222 BDEV_NVME_MP_MODE_DISABLE, 223 }; 224 225 struct rpc_bdev_nvme_attach_controller { 226 char *name; 227 char *trtype; 228 char *adrfam; 229 char *traddr; 230 char *trsvcid; 231 char *priority; 232 char *subnqn; 233 char *hostnqn; 234 char *hostaddr; 235 char *hostsvcid; 236 char *psk; 237 char *dhchap_key; 238 char *dhchap_ctrlr_key; 239 enum bdev_nvme_multipath_mode multipath; 240 struct spdk_bdev_nvme_ctrlr_opts bdev_opts; 241 struct spdk_nvme_ctrlr_opts drv_opts; 242 uint32_t max_bdevs; 243 }; 244 245 static void 246 free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req) 247 { 248 free(req->name); 249 free(req->trtype); 250 free(req->adrfam); 251 free(req->traddr); 252 free(req->trsvcid); 253 free(req->priority); 254 free(req->subnqn); 255 free(req->hostnqn); 256 free(req->hostaddr); 257 free(req->hostsvcid); 258 free(req->psk); 259 free(req->dhchap_key); 260 free(req->dhchap_ctrlr_key); 261 spdk_memset_s(req->drv_opts.psk, sizeof(req->drv_opts.psk), 0, sizeof(req->drv_opts.psk)); 262 } 263 264 static int 265 bdev_nvme_decode_reftag(const struct spdk_json_val *val, void *out) 266 { 267 uint32_t *flag = out; 268 bool reftag; 269 int rc; 270 271 rc = spdk_json_decode_bool(val, &reftag); 272 if (rc == 0 && reftag == true) { 273 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; 274 } 275 276 return rc; 277 } 278 279 static int 280 bdev_nvme_decode_guard(const struct spdk_json_val *val, void *out) 281 { 282 uint32_t *flag = out; 283 bool guard; 284 int rc; 285 286 rc = spdk_json_decode_bool(val, &guard); 287 if (rc == 0 && guard == true) { 288 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD; 289 } 290 291 return rc; 292 } 293 294 static int 295 bdev_nvme_decode_multipath(const struct spdk_json_val *val, void *out) 296 { 297 enum bdev_nvme_multipath_mode *multipath = out; 298 299 if (spdk_json_strequal(val, "failover") == true) { 300 *multipath = BDEV_NVME_MP_MODE_FAILOVER; 301 } else if (spdk_json_strequal(val, "multipath") == true) { 302 *multipath = BDEV_NVME_MP_MODE_MULTIPATH; 303 } else if (spdk_json_strequal(val, "disable") == true) { 304 
*multipath = BDEV_NVME_MP_MODE_DISABLE; 305 } else { 306 SPDK_NOTICELOG("Invalid parameter value: multipath\n"); 307 return -EINVAL; 308 } 309 310 return 0; 311 } 312 313 314 static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = { 315 {"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string}, 316 {"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string}, 317 {"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string}, 318 319 {"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true}, 320 {"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true}, 321 {"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true}, 322 {"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true}, 323 {"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true}, 324 {"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true}, 325 {"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true}, 326 327 {"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_reftag, true}, 328 {"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_guard, true}, 329 {"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.header_digest), spdk_json_decode_bool, true}, 330 {"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.data_digest), spdk_json_decode_bool, true}, 331 {"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true}, 332 {"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), bdev_nvme_decode_multipath, true}, 333 {"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.num_io_queues), spdk_json_decode_uint32, true}, 334 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 335 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 336 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 337 {"psk", offsetof(struct rpc_bdev_nvme_attach_controller, psk), spdk_json_decode_string, true}, 338 {"max_bdevs", offsetof(struct rpc_bdev_nvme_attach_controller, max_bdevs), spdk_json_decode_uint32, true}, 339 {"dhchap_key", offsetof(struct rpc_bdev_nvme_attach_controller, dhchap_key), spdk_json_decode_string, true}, 340 {"dhchap_ctrlr_key", offsetof(struct rpc_bdev_nvme_attach_controller, dhchap_ctrlr_key), spdk_json_decode_string, true}, 341 {"allow_unrecognized_csi", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.allow_unrecognized_csi), spdk_json_decode_bool, true}, 342 }; 343 344 #define DEFAULT_MAX_BDEVS_PER_RPC 128 345 346 struct rpc_bdev_nvme_attach_controller_ctx { 347 struct rpc_bdev_nvme_attach_controller req; 348 size_t bdev_count; 349 const char **names; 350 struct spdk_jsonrpc_request *request; 351 }; 352 353 static void 354 free_rpc_bdev_nvme_attach_controller_ctx(struct 
rpc_bdev_nvme_attach_controller_ctx *ctx) 355 { 356 free_rpc_bdev_nvme_attach_controller(&ctx->req); 357 free(ctx->names); 358 free(ctx); 359 } 360 361 static void 362 rpc_bdev_nvme_attach_controller_examined(void *cb_ctx) 363 { 364 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 365 struct spdk_jsonrpc_request *request = ctx->request; 366 struct spdk_json_write_ctx *w; 367 size_t i; 368 369 w = spdk_jsonrpc_begin_result(request); 370 spdk_json_write_array_begin(w); 371 for (i = 0; i < ctx->bdev_count; i++) { 372 spdk_json_write_string(w, ctx->names[i]); 373 } 374 spdk_json_write_array_end(w); 375 spdk_jsonrpc_end_result(request, w); 376 377 free_rpc_bdev_nvme_attach_controller_ctx(ctx); 378 } 379 380 static void 381 rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc) 382 { 383 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 384 struct spdk_jsonrpc_request *request = ctx->request; 385 386 if (rc < 0) { 387 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 388 free_rpc_bdev_nvme_attach_controller_ctx(ctx); 389 return; 390 } 391 392 ctx->bdev_count = bdev_count; 393 spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx); 394 } 395 396 static void 397 rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, 398 const struct spdk_json_val *params) 399 { 400 struct rpc_bdev_nvme_attach_controller_ctx *ctx; 401 struct spdk_nvme_transport_id trid = {}; 402 const struct spdk_nvme_ctrlr_opts *drv_opts; 403 const struct spdk_nvme_transport_id *ctrlr_trid; 404 struct nvme_ctrlr *ctrlr = NULL; 405 size_t len, maxlen; 406 bool multipath = false; 407 int rc; 408 409 ctx = calloc(1, sizeof(*ctx)); 410 if (!ctx) { 411 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 412 return; 413 } 414 415 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.drv_opts, sizeof(ctx->req.drv_opts)); 416 spdk_bdev_nvme_get_default_ctrlr_opts(&ctx->req.bdev_opts); 417 /* For now, initialize the multipath parameter to add a failover path. This maintains backward 418 * compatibility with past behavior. In the future, this behavior will change to "disable". 
*/ 419 ctx->req.multipath = BDEV_NVME_MP_MODE_FAILOVER; 420 ctx->req.max_bdevs = DEFAULT_MAX_BDEVS_PER_RPC; 421 422 if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders, 423 SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders), 424 &ctx->req)) { 425 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 426 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 427 "spdk_json_decode_object failed"); 428 goto cleanup; 429 } 430 431 if (ctx->req.max_bdevs == 0) { 432 spdk_jsonrpc_send_error_response(request, -EINVAL, "max_bdevs cannot be zero"); 433 goto cleanup; 434 } 435 436 ctx->names = calloc(ctx->req.max_bdevs, sizeof(char *)); 437 if (ctx->names == NULL) { 438 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 439 goto cleanup; 440 } 441 442 /* Parse trstring */ 443 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 444 if (rc < 0) { 445 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 446 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 447 ctx->req.trtype); 448 goto cleanup; 449 } 450 451 /* Parse trtype */ 452 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 453 assert(rc == 0); 454 455 /* Parse traddr */ 456 maxlen = sizeof(trid.traddr); 457 len = strnlen(ctx->req.traddr, maxlen); 458 if (len == maxlen) { 459 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 460 ctx->req.traddr); 461 goto cleanup; 462 } 463 memcpy(trid.traddr, ctx->req.traddr, len + 1); 464 465 /* Parse adrfam */ 466 if (ctx->req.adrfam) { 467 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 468 if (rc < 0) { 469 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 470 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 471 ctx->req.adrfam); 472 goto cleanup; 473 } 474 } 475 476 /* Parse trsvcid */ 477 if (ctx->req.trsvcid) { 478 maxlen = sizeof(trid.trsvcid); 479 len = strnlen(ctx->req.trsvcid, maxlen); 480 if (len == maxlen) { 481 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 482 ctx->req.trsvcid); 483 goto cleanup; 484 } 485 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 486 } 487 488 /* Parse priority for the NVMe-oF transport connection */ 489 if (ctx->req.priority) { 490 trid.priority = spdk_strtol(ctx->req.priority, 10); 491 } 492 493 /* Parse subnqn */ 494 if (ctx->req.subnqn) { 495 maxlen = sizeof(trid.subnqn); 496 len = strnlen(ctx->req.subnqn, maxlen); 497 if (len == maxlen) { 498 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 499 ctx->req.subnqn); 500 goto cleanup; 501 } 502 memcpy(trid.subnqn, ctx->req.subnqn, len + 1); 503 } 504 505 if (ctx->req.hostnqn) { 506 maxlen = sizeof(ctx->req.drv_opts.hostnqn); 507 len = strnlen(ctx->req.hostnqn, maxlen); 508 if (len == maxlen) { 509 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostnqn too long: %s", 510 ctx->req.hostnqn); 511 goto cleanup; 512 } 513 memcpy(ctx->req.drv_opts.hostnqn, ctx->req.hostnqn, len + 1); 514 } 515 516 if (ctx->req.psk) { 517 if (!g_tls_log) { 518 SPDK_NOTICELOG("TLS support is considered experimental\n"); 519 g_tls_log = true; 520 } 521 522 rc = snprintf(ctx->req.bdev_opts.psk, sizeof(ctx->req.bdev_opts.psk), "%s", ctx->req.psk); 523 if (rc < 0 || (size_t)rc >= sizeof(ctx->req.bdev_opts.psk)) { 524 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Could not store PSK: %s", 525 ctx->req.psk); 526 goto 
cleanup; 527 } 528 } 529 530 if (ctx->req.hostaddr) { 531 maxlen = sizeof(ctx->req.drv_opts.src_addr); 532 len = strnlen(ctx->req.hostaddr, maxlen); 533 if (len == maxlen) { 534 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 535 ctx->req.hostaddr); 536 goto cleanup; 537 } 538 snprintf(ctx->req.drv_opts.src_addr, maxlen, "%s", ctx->req.hostaddr); 539 } 540 541 if (ctx->req.hostsvcid) { 542 maxlen = sizeof(ctx->req.drv_opts.src_svcid); 543 len = strnlen(ctx->req.hostsvcid, maxlen); 544 if (len == maxlen) { 545 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 546 ctx->req.hostsvcid); 547 goto cleanup; 548 } 549 snprintf(ctx->req.drv_opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid); 550 } 551 552 ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); 553 554 if (ctrlr) { 555 /* This controller already exists. Check what the user wants to do. */ 556 if (ctx->req.multipath == BDEV_NVME_MP_MODE_DISABLE) { 557 /* The user does not want to do any form of multipathing. */ 558 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 559 "A controller named %s already exists and multipath is disabled", 560 ctx->req.name); 561 goto cleanup; 562 } 563 564 assert(ctx->req.multipath == BDEV_NVME_MP_MODE_FAILOVER || 565 ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH); 566 567 /* The user wants to add this as a failover path or add this to create multipath. */ 568 drv_opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr); 569 ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr); 570 571 if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 && 572 strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 && 573 strncmp(ctx->req.drv_opts.src_addr, drv_opts->src_addr, sizeof(drv_opts->src_addr)) == 0 && 574 strncmp(ctx->req.drv_opts.src_svcid, drv_opts->src_svcid, sizeof(drv_opts->src_svcid)) == 0) { 575 /* Exactly same network path can't be added a second time */ 576 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 577 "A controller named %s already exists with the specified network path", 578 ctx->req.name); 579 goto cleanup; 580 } 581 582 if (strncmp(trid.subnqn, 583 ctrlr_trid->subnqn, 584 SPDK_NVMF_NQN_MAX_LEN) != 0) { 585 /* Different SUBNQN is not allowed when specifying the same controller name. */ 586 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 587 "A controller named %s already exists, but uses a different subnqn (%s)", 588 ctx->req.name, ctrlr_trid->subnqn); 589 goto cleanup; 590 } 591 592 if (strncmp(ctx->req.drv_opts.hostnqn, drv_opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) { 593 /* Different HOSTNQN is not allowed when specifying the same controller name. */ 594 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 595 "A controller named %s already exists, but uses a different hostnqn (%s)", 596 ctx->req.name, drv_opts->hostnqn); 597 goto cleanup; 598 } 599 600 if (ctx->req.bdev_opts.prchk_flags) { 601 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 602 "A controller named %s already exists. 
To add a path, do not specify PI options.", 603 ctx->req.name); 604 goto cleanup; 605 } 606 607 ctx->req.bdev_opts.prchk_flags = ctrlr->opts.prchk_flags; 608 } 609 610 if (ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH) { 611 multipath = true; 612 } 613 614 if (ctx->req.drv_opts.num_io_queues == 0 || ctx->req.drv_opts.num_io_queues > UINT16_MAX + 1) { 615 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 616 "num_io_queues out of bounds, min: %u max: %u", 617 1, UINT16_MAX + 1); 618 goto cleanup; 619 } 620 621 ctx->request = request; 622 /* Should already be zero due to the calloc(), but set explicitly for clarity. */ 623 ctx->req.bdev_opts.from_discovery_service = false; 624 ctx->req.bdev_opts.dhchap_key = ctx->req.dhchap_key; 625 ctx->req.bdev_opts.dhchap_ctrlr_key = ctx->req.dhchap_ctrlr_key; 626 ctx->req.bdev_opts.multipath = multipath; 627 rc = spdk_bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->req.max_bdevs, 628 rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.drv_opts, 629 &ctx->req.bdev_opts, multipath); 630 if (rc) { 631 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 632 goto cleanup; 633 } 634 635 return; 636 637 cleanup: 638 free_rpc_bdev_nvme_attach_controller_ctx(ctx); 639 } 640 SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller, 641 SPDK_RPC_RUNTIME) 642 643 static void 644 rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx) 645 { 646 struct spdk_json_write_ctx *w = ctx; 647 struct nvme_ctrlr *nvme_ctrlr; 648 649 spdk_json_write_object_begin(w); 650 spdk_json_write_named_string(w, "name", nbdev_ctrlr->name); 651 652 spdk_json_write_named_array_begin(w, "ctrlrs"); 653 TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) { 654 nvme_ctrlr_info_json(w, nvme_ctrlr); 655 } 656 spdk_json_write_array_end(w); 657 spdk_json_write_object_end(w); 658 } 659 660 struct rpc_bdev_nvme_get_controllers { 661 char *name; 662 }; 663 664 static void 665 free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r) 666 { 667 free(r->name); 668 } 669 670 static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = { 671 {"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true}, 672 }; 673 674 static void 675 rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, 676 const struct spdk_json_val *params) 677 { 678 struct rpc_bdev_nvme_get_controllers req = {}; 679 struct spdk_json_write_ctx *w; 680 struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL; 681 682 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders, 683 SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders), 684 &req)) { 685 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 686 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 687 "spdk_json_decode_object failed"); 688 goto cleanup; 689 } 690 691 if (req.name) { 692 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 693 if (nbdev_ctrlr == NULL) { 694 SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name); 695 spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name); 696 goto cleanup; 697 } 698 } 699 700 w = spdk_jsonrpc_begin_result(request); 701 spdk_json_write_array_begin(w); 702 703 if (nbdev_ctrlr != NULL) { 704 rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w); 705 } else { 706 nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w); 707 } 708 709 spdk_json_write_array_end(w); 710 711 
spdk_jsonrpc_end_result(request, w); 712 713 cleanup: 714 free_rpc_bdev_nvme_get_controllers(&req); 715 } 716 SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME) 717 718 struct rpc_bdev_nvme_detach_controller { 719 char *name; 720 char *trtype; 721 char *adrfam; 722 char *traddr; 723 char *trsvcid; 724 char *subnqn; 725 char *hostaddr; 726 char *hostsvcid; 727 }; 728 729 static void 730 free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req) 731 { 732 free(req->name); 733 free(req->trtype); 734 free(req->adrfam); 735 free(req->traddr); 736 free(req->trsvcid); 737 free(req->subnqn); 738 free(req->hostaddr); 739 free(req->hostsvcid); 740 } 741 742 static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = { 743 {"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string}, 744 {"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true}, 745 {"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true}, 746 {"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true}, 747 {"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true}, 748 {"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true}, 749 {"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true}, 750 {"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true}, 751 }; 752 753 static void 754 rpc_bdev_nvme_detach_controller_done(void *arg, int rc) 755 { 756 struct spdk_jsonrpc_request *request = arg; 757 758 if (rc == 0) { 759 spdk_jsonrpc_send_bool_response(request, true); 760 } else { 761 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 762 } 763 } 764 765 static void 766 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request, 767 const struct spdk_json_val *params) 768 { 769 struct rpc_bdev_nvme_detach_controller req = {NULL}; 770 struct nvme_path_id path = {}; 771 size_t len, maxlen; 772 int rc = 0; 773 774 if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders, 775 SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders), 776 &req)) { 777 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 778 "spdk_json_decode_object failed"); 779 goto cleanup; 780 } 781 782 if (req.trtype != NULL) { 783 rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype); 784 if (rc < 0) { 785 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 786 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 787 req.trtype); 788 goto cleanup; 789 } 790 791 rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype); 792 if (rc < 0) { 793 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 794 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 795 req.trtype); 796 goto cleanup; 797 } 798 } 799 800 if (req.traddr != NULL) { 801 maxlen = sizeof(path.trid.traddr); 802 len = strnlen(req.traddr, maxlen); 803 if (len == maxlen) { 804 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 805 req.traddr); 806 goto cleanup; 807 } 808 memcpy(path.trid.traddr, req.traddr, len + 1); 809 } 810 811 if (req.adrfam != NULL) { 812 rc = 
spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam); 813 if (rc < 0) { 814 SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam); 815 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 816 req.adrfam); 817 goto cleanup; 818 } 819 } 820 821 if (req.trsvcid != NULL) { 822 maxlen = sizeof(path.trid.trsvcid); 823 len = strnlen(req.trsvcid, maxlen); 824 if (len == maxlen) { 825 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 826 req.trsvcid); 827 goto cleanup; 828 } 829 memcpy(path.trid.trsvcid, req.trsvcid, len + 1); 830 } 831 832 /* Parse subnqn */ 833 if (req.subnqn != NULL) { 834 maxlen = sizeof(path.trid.subnqn); 835 len = strnlen(req.subnqn, maxlen); 836 if (len == maxlen) { 837 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 838 req.subnqn); 839 goto cleanup; 840 } 841 memcpy(path.trid.subnqn, req.subnqn, len + 1); 842 } 843 844 if (req.hostaddr) { 845 maxlen = sizeof(path.hostid.hostaddr); 846 len = strnlen(req.hostaddr, maxlen); 847 if (len == maxlen) { 848 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 849 req.hostaddr); 850 goto cleanup; 851 } 852 snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr); 853 } 854 855 if (req.hostsvcid) { 856 maxlen = sizeof(path.hostid.hostsvcid); 857 len = strnlen(req.hostsvcid, maxlen); 858 if (len == maxlen) { 859 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 860 req.hostsvcid); 861 goto cleanup; 862 } 863 snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid); 864 } 865 866 rc = bdev_nvme_delete(req.name, &path, rpc_bdev_nvme_detach_controller_done, request); 867 868 if (rc != 0) { 869 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 870 } 871 872 cleanup: 873 free_rpc_bdev_nvme_detach_controller(&req); 874 } 875 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller, 876 SPDK_RPC_RUNTIME) 877 878 struct rpc_apply_firmware { 879 char *filename; 880 char *bdev_name; 881 }; 882 883 static void 884 free_rpc_apply_firmware(struct rpc_apply_firmware *req) 885 { 886 free(req->filename); 887 free(req->bdev_name); 888 } 889 890 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = { 891 {"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string}, 892 {"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string}, 893 }; 894 895 struct firmware_update_info { 896 void *fw_image; 897 void *p; 898 unsigned int size; 899 unsigned int size_remaining; 900 unsigned int offset; 901 unsigned int transfer; 902 bool success; 903 904 struct spdk_bdev_desc *desc; 905 struct spdk_io_channel *ch; 906 struct spdk_thread *orig_thread; 907 struct spdk_jsonrpc_request *request; 908 struct spdk_nvme_ctrlr *ctrlr; 909 struct rpc_apply_firmware req; 910 }; 911 912 static void 913 apply_firmware_cleanup(struct firmware_update_info *firm_ctx) 914 { 915 assert(firm_ctx != NULL); 916 assert(firm_ctx->orig_thread == spdk_get_thread()); 917 918 if (firm_ctx->fw_image) { 919 spdk_free(firm_ctx->fw_image); 920 } 921 922 free_rpc_apply_firmware(&firm_ctx->req); 923 924 if (firm_ctx->ch) { 925 spdk_put_io_channel(firm_ctx->ch); 926 } 927 928 if (firm_ctx->desc) { 929 spdk_bdev_close(firm_ctx->desc); 930 } 931 932 free(firm_ctx); 933 } 934 935 static void 936 _apply_firmware_complete_reset(void *ctx) 937 { 938 struct spdk_json_write_ctx *w; 939 struct firmware_update_info *firm_ctx = ctx; 
940 941 assert(firm_ctx->orig_thread == spdk_get_thread()); 942 943 if (!firm_ctx->success) { 944 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 945 "firmware commit failed."); 946 apply_firmware_cleanup(firm_ctx); 947 return; 948 } 949 950 if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) { 951 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 952 "Controller reset failed."); 953 apply_firmware_cleanup(firm_ctx); 954 return; 955 } 956 957 w = spdk_jsonrpc_begin_result(firm_ctx->request); 958 spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress."); 959 spdk_jsonrpc_end_result(firm_ctx->request, w); 960 apply_firmware_cleanup(firm_ctx); 961 } 962 963 static void 964 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 965 { 966 struct firmware_update_info *firm_ctx = cb_arg; 967 968 spdk_bdev_free_io(bdev_io); 969 970 firm_ctx->success = success; 971 972 spdk_thread_exec_msg(firm_ctx->orig_thread, _apply_firmware_complete_reset, firm_ctx); 973 } 974 975 static void apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); 976 977 static void 978 _apply_firmware_complete(void *ctx) 979 { 980 struct spdk_nvme_cmd cmd = {}; 981 struct spdk_nvme_fw_commit fw_commit; 982 int slot = 0; 983 int rc; 984 struct firmware_update_info *firm_ctx = ctx; 985 enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG; 986 987 assert(firm_ctx->orig_thread == spdk_get_thread()); 988 989 if (!firm_ctx->success) { 990 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 991 "firmware download failed ."); 992 apply_firmware_cleanup(firm_ctx); 993 return; 994 } 995 996 firm_ctx->p += firm_ctx->transfer; 997 firm_ctx->offset += firm_ctx->transfer; 998 firm_ctx->size_remaining -= firm_ctx->transfer; 999 1000 switch (firm_ctx->size_remaining) { 1001 case 0: 1002 /* firmware download completed. 
Commit firmware */ 1003 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 1004 fw_commit.fs = slot; 1005 fw_commit.ca = commit_action; 1006 1007 cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT; 1008 memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t)); 1009 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0, 1010 apply_firmware_complete_reset, firm_ctx); 1011 if (rc) { 1012 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1013 "firmware commit failed."); 1014 apply_firmware_cleanup(firm_ctx); 1015 return; 1016 } 1017 break; 1018 default: 1019 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 1020 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 1021 1022 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 1023 cmd.cdw11 = firm_ctx->offset >> 2; 1024 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 1025 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 1026 if (rc) { 1027 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1028 "firmware download failed."); 1029 apply_firmware_cleanup(firm_ctx); 1030 return; 1031 } 1032 break; 1033 } 1034 } 1035 1036 static void 1037 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1038 { 1039 struct firmware_update_info *firm_ctx = cb_arg; 1040 1041 spdk_bdev_free_io(bdev_io); 1042 1043 firm_ctx->success = success; 1044 1045 spdk_thread_exec_msg(firm_ctx->orig_thread, _apply_firmware_complete, firm_ctx); 1046 } 1047 1048 static void 1049 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 1050 { 1051 } 1052 1053 static void 1054 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request, 1055 const struct spdk_json_val *params) 1056 { 1057 int rc; 1058 int fd = -1; 1059 struct stat fw_stat; 1060 struct spdk_bdev *bdev; 1061 struct spdk_nvme_cmd cmd = {}; 1062 struct firmware_update_info *firm_ctx; 1063 1064 firm_ctx = calloc(1, sizeof(struct firmware_update_info)); 1065 if (!firm_ctx) { 1066 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1067 "Memory allocation error."); 1068 return; 1069 } 1070 firm_ctx->fw_image = NULL; 1071 firm_ctx->request = request; 1072 firm_ctx->orig_thread = spdk_get_thread(); 1073 1074 if (spdk_json_decode_object(params, rpc_apply_firmware_decoders, 1075 SPDK_COUNTOF(rpc_apply_firmware_decoders), &firm_ctx->req)) { 1076 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1077 "spdk_json_decode_object failed."); 1078 goto err; 1079 } 1080 1081 if (spdk_bdev_open_ext(firm_ctx->req.bdev_name, true, apply_firmware_open_cb, NULL, 1082 &firm_ctx->desc) != 0) { 1083 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1084 "bdev %s could not be opened", 1085 firm_ctx->req.bdev_name); 1086 goto err; 1087 } 1088 bdev = spdk_bdev_desc_get_bdev(firm_ctx->desc); 1089 1090 if ((firm_ctx->ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) { 1091 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1092 "Controller information for %s were not found.", 1093 firm_ctx->req.bdev_name); 1094 goto err; 1095 } 1096 1097 firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc); 1098 if (!firm_ctx->ch) { 1099 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1100 "No channels were found."); 1101 goto err; 1102 } 1103 1104 fd = open(firm_ctx->req.filename, O_RDONLY); 1105 if (fd < 0) { 1106 
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1107 "open file failed."); 1108 goto err; 1109 } 1110 1111 rc = fstat(fd, &fw_stat); 1112 if (rc < 0) { 1113 close(fd); 1114 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1115 "fstat failed."); 1116 goto err; 1117 } 1118 1119 firm_ctx->size = fw_stat.st_size; 1120 if (fw_stat.st_size % 4) { 1121 close(fd); 1122 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1123 "Firmware image size is not multiple of 4."); 1124 goto err; 1125 } 1126 1127 firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL, 1128 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1129 if (!firm_ctx->fw_image) { 1130 close(fd); 1131 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1132 "Memory allocation error."); 1133 goto err; 1134 } 1135 firm_ctx->p = firm_ctx->fw_image; 1136 1137 if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) { 1138 close(fd); 1139 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1140 "Read firmware image failed!"); 1141 goto err; 1142 } 1143 close(fd); 1144 1145 firm_ctx->offset = 0; 1146 firm_ctx->size_remaining = firm_ctx->size; 1147 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 1148 1149 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 1150 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 1151 cmd.cdw11 = firm_ctx->offset >> 2; 1152 1153 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 1154 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 1155 if (rc == 0) { 1156 /* normal return here. */ 1157 return; 1158 } 1159 1160 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1161 "Read firmware image failed!"); 1162 err: 1163 apply_firmware_cleanup(firm_ctx); 1164 } 1165 SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME) 1166 1167 struct rpc_bdev_nvme_transport_stat_ctx { 1168 struct spdk_jsonrpc_request *request; 1169 struct spdk_json_write_ctx *w; 1170 }; 1171 1172 static void 1173 rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w, 1174 struct spdk_nvme_transport_poll_group_stat *stat) 1175 { 1176 struct spdk_nvme_rdma_device_stat *device_stats; 1177 uint32_t i; 1178 1179 spdk_json_write_named_array_begin(w, "devices"); 1180 1181 for (i = 0; i < stat->rdma.num_devices; i++) { 1182 device_stats = &stat->rdma.device_stats[i]; 1183 spdk_json_write_object_begin(w); 1184 spdk_json_write_named_string(w, "dev_name", device_stats->name); 1185 spdk_json_write_named_uint64(w, "polls", device_stats->polls); 1186 spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls); 1187 spdk_json_write_named_uint64(w, "completions", device_stats->completions); 1188 spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests); 1189 spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs); 1190 spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates); 1191 spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs); 1192 spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates); 1193 spdk_json_write_object_end(w); 1194 } 1195 spdk_json_write_array_end(w); 1196 } 1197 1198 static void 1199 rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w, 1200 struct spdk_nvme_transport_poll_group_stat *stat) 1201 { 1202 
spdk_json_write_named_uint64(w, "polls", stat->pcie.polls); 1203 spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls); 1204 spdk_json_write_named_uint64(w, "completions", stat->pcie.completions); 1205 spdk_json_write_named_uint64(w, "cq_mmio_doorbell_updates", stat->pcie.cq_mmio_doorbell_updates); 1206 spdk_json_write_named_uint64(w, "cq_shadow_doorbell_updates", 1207 stat->pcie.cq_shadow_doorbell_updates); 1208 spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests); 1209 spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests); 1210 spdk_json_write_named_uint64(w, "sq_mmio_doorbell_updates", stat->pcie.sq_mmio_doorbell_updates); 1211 spdk_json_write_named_uint64(w, "sq_shadow_doorbell_updates", 1212 stat->pcie.sq_shadow_doorbell_updates); 1213 } 1214 1215 static void 1216 rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w, 1217 struct spdk_nvme_transport_poll_group_stat *stat) 1218 { 1219 spdk_json_write_named_uint64(w, "polls", stat->tcp.polls); 1220 spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls); 1221 spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions); 1222 spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions); 1223 spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests); 1224 spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests); 1225 } 1226 1227 static void 1228 rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i) 1229 { 1230 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1231 struct spdk_io_channel *ch; 1232 struct nvme_poll_group *group; 1233 struct spdk_nvme_poll_group_stat *stat; 1234 struct spdk_nvme_transport_poll_group_stat *tr_stat; 1235 uint32_t j; 1236 int rc; 1237 1238 ctx = spdk_io_channel_iter_get_ctx(i); 1239 ch = spdk_io_channel_iter_get_channel(i); 1240 group = spdk_io_channel_get_ctx(ch); 1241 1242 rc = spdk_nvme_poll_group_get_stats(group->group, &stat); 1243 if (rc) { 1244 spdk_for_each_channel_continue(i, rc); 1245 return; 1246 } 1247 1248 spdk_json_write_object_begin(ctx->w); 1249 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 1250 spdk_json_write_named_array_begin(ctx->w, "transports"); 1251 1252 for (j = 0; j < stat->num_transports; j++) { 1253 tr_stat = stat->transport_stat[j]; 1254 spdk_json_write_object_begin(ctx->w); 1255 spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1256 1257 switch (stat->transport_stat[j]->trtype) { 1258 case SPDK_NVME_TRANSPORT_RDMA: 1259 rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat); 1260 break; 1261 case SPDK_NVME_TRANSPORT_PCIE: 1262 case SPDK_NVME_TRANSPORT_VFIOUSER: 1263 rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat); 1264 break; 1265 case SPDK_NVME_TRANSPORT_TCP: 1266 rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat); 1267 break; 1268 default: 1269 SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype, 1270 spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1271 } 1272 spdk_json_write_object_end(ctx->w); 1273 } 1274 /* transports array */ 1275 spdk_json_write_array_end(ctx->w); 1276 spdk_json_write_object_end(ctx->w); 1277 1278 spdk_nvme_poll_group_free_stats(group->group, stat); 1279 spdk_for_each_channel_continue(i, 0); 1280 } 1281 1282 static void 1283 rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status) 1284 { 1285 struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1286 1287 
spdk_json_write_array_end(ctx->w); 1288 spdk_json_write_object_end(ctx->w); 1289 spdk_jsonrpc_end_result(ctx->request, ctx->w); 1290 free(ctx); 1291 } 1292 1293 static void 1294 rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request, 1295 const struct spdk_json_val *params) 1296 { 1297 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1298 1299 if (params) { 1300 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 1301 "'bdev_nvme_get_transport_statistics' requires no arguments"); 1302 return; 1303 } 1304 1305 ctx = calloc(1, sizeof(*ctx)); 1306 if (!ctx) { 1307 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1308 "Memory allocation error"); 1309 return; 1310 } 1311 ctx->request = request; 1312 ctx->w = spdk_jsonrpc_begin_result(ctx->request); 1313 spdk_json_write_object_begin(ctx->w); 1314 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 1315 1316 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 1317 rpc_bdev_nvme_stats_per_channel, 1318 ctx, 1319 rpc_bdev_nvme_stats_done); 1320 } 1321 SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics, 1322 SPDK_RPC_RUNTIME) 1323 1324 struct rpc_bdev_nvme_controller_op_req { 1325 char *name; 1326 uint16_t cntlid; 1327 }; 1328 1329 static void 1330 free_rpc_bdev_nvme_controller_op_req(struct rpc_bdev_nvme_controller_op_req *r) 1331 { 1332 free(r->name); 1333 } 1334 1335 static const struct spdk_json_object_decoder rpc_bdev_nvme_controller_op_req_decoders[] = { 1336 {"name", offsetof(struct rpc_bdev_nvme_controller_op_req, name), spdk_json_decode_string}, 1337 {"cntlid", offsetof(struct rpc_bdev_nvme_controller_op_req, cntlid), spdk_json_decode_uint16, true}, 1338 }; 1339 1340 static void 1341 rpc_bdev_nvme_controller_op_cb(void *cb_arg, int rc) 1342 { 1343 struct spdk_jsonrpc_request *request = cb_arg; 1344 1345 if (rc == 0) { 1346 spdk_jsonrpc_send_bool_response(request, true); 1347 } else { 1348 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1349 } 1350 } 1351 1352 static void 1353 rpc_bdev_nvme_controller_op(struct spdk_jsonrpc_request *request, 1354 const struct spdk_json_val *params, 1355 enum nvme_ctrlr_op op) 1356 { 1357 struct rpc_bdev_nvme_controller_op_req req = {NULL}; 1358 struct nvme_bdev_ctrlr *nbdev_ctrlr; 1359 struct nvme_ctrlr *nvme_ctrlr; 1360 1361 if (spdk_json_decode_object(params, rpc_bdev_nvme_controller_op_req_decoders, 1362 SPDK_COUNTOF(rpc_bdev_nvme_controller_op_req_decoders), 1363 &req)) { 1364 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1365 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL)); 1366 goto exit; 1367 } 1368 1369 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 1370 if (nbdev_ctrlr == NULL) { 1371 SPDK_ERRLOG("Failed at NVMe bdev controller lookup\n"); 1372 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1373 goto exit; 1374 } 1375 1376 if (req.cntlid == 0) { 1377 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, op, rpc_bdev_nvme_controller_op_cb, request); 1378 } else { 1379 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr_by_id(nbdev_ctrlr, req.cntlid); 1380 if (nvme_ctrlr == NULL) { 1381 SPDK_ERRLOG("Failed at NVMe controller lookup\n"); 1382 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1383 goto exit; 1384 } 1385 nvme_ctrlr_op_rpc(nvme_ctrlr, op, rpc_bdev_nvme_controller_op_cb, request); 1386 } 1387 1388 exit: 1389 free_rpc_bdev_nvme_controller_op_req(&req); 1390 } 1391 1392 static void 
1393 rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request, 1394 const struct spdk_json_val *params) 1395 { 1396 rpc_bdev_nvme_controller_op(request, params, NVME_CTRLR_OP_RESET); 1397 } 1398 SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME) 1399 1400 static void 1401 rpc_bdev_nvme_enable_controller(struct spdk_jsonrpc_request *request, 1402 const struct spdk_json_val *params) 1403 { 1404 rpc_bdev_nvme_controller_op(request, params, NVME_CTRLR_OP_ENABLE); 1405 } 1406 SPDK_RPC_REGISTER("bdev_nvme_enable_controller", rpc_bdev_nvme_enable_controller, SPDK_RPC_RUNTIME) 1407 1408 static void 1409 rpc_bdev_nvme_disable_controller(struct spdk_jsonrpc_request *request, 1410 const struct spdk_json_val *params) 1411 { 1412 rpc_bdev_nvme_controller_op(request, params, NVME_CTRLR_OP_DISABLE); 1413 } 1414 SPDK_RPC_REGISTER("bdev_nvme_disable_controller", rpc_bdev_nvme_disable_controller, 1415 SPDK_RPC_RUNTIME) 1416 1417 struct rpc_get_controller_health_info { 1418 char *name; 1419 }; 1420 1421 struct spdk_nvme_health_info_context { 1422 struct spdk_jsonrpc_request *request; 1423 struct spdk_nvme_ctrlr *ctrlr; 1424 struct spdk_nvme_health_information_page health_page; 1425 }; 1426 1427 static void 1428 free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r) 1429 { 1430 free(r->name); 1431 } 1432 1433 static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = { 1434 {"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true}, 1435 }; 1436 1437 static void 1438 nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response) 1439 { 1440 if (response == true) { 1441 spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1442 "Internal error."); 1443 } 1444 1445 free(context); 1446 } 1447 1448 static void 1449 get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1450 { 1451 int i; 1452 char buf[128]; 1453 struct spdk_nvme_health_info_context *context = cb_arg; 1454 struct spdk_jsonrpc_request *request = context->request; 1455 struct spdk_json_write_ctx *w; 1456 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1457 const struct spdk_nvme_transport_id *trid = NULL; 1458 const struct spdk_nvme_ctrlr_data *cdata = NULL; 1459 struct spdk_nvme_health_information_page *health_page = NULL; 1460 1461 if (spdk_nvme_cpl_is_error(cpl)) { 1462 nvme_health_info_cleanup(context, true); 1463 SPDK_ERRLOG("get log page failed\n"); 1464 return; 1465 } 1466 1467 if (ctrlr == NULL) { 1468 nvme_health_info_cleanup(context, true); 1469 SPDK_ERRLOG("ctrlr is NULL\n"); 1470 return; 1471 } else { 1472 trid = spdk_nvme_ctrlr_get_transport_id(ctrlr); 1473 cdata = spdk_nvme_ctrlr_get_data(ctrlr); 1474 health_page = &(context->health_page); 1475 } 1476 1477 w = spdk_jsonrpc_begin_result(request); 1478 1479 spdk_json_write_object_begin(w); 1480 snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn); 1481 spdk_str_trim(buf); 1482 spdk_json_write_named_string(w, "model_number", buf); 1483 snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn); 1484 spdk_str_trim(buf); 1485 spdk_json_write_named_string(w, "serial_number", buf); 1486 snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr); 1487 spdk_str_trim(buf); 1488 spdk_json_write_named_string(w, "firmware_revision", buf); 1489 spdk_json_write_named_string(w, "traddr", trid->traddr); 1490 spdk_json_write_named_uint64(w, "critical_warning", 
health_page->critical_warning.raw); 1491 spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273); 1492 spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare); 1493 spdk_json_write_named_uint64(w, "available_spare_threshold_percentage", 1494 health_page->available_spare_threshold); 1495 spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used); 1496 spdk_json_write_named_uint128(w, "data_units_read", 1497 health_page->data_units_read[0], health_page->data_units_read[1]); 1498 spdk_json_write_named_uint128(w, "data_units_written", 1499 health_page->data_units_written[0], health_page->data_units_written[1]); 1500 spdk_json_write_named_uint128(w, "host_read_commands", 1501 health_page->host_read_commands[0], health_page->host_read_commands[1]); 1502 spdk_json_write_named_uint128(w, "host_write_commands", 1503 health_page->host_write_commands[0], health_page->host_write_commands[1]); 1504 spdk_json_write_named_uint128(w, "controller_busy_time", 1505 health_page->controller_busy_time[0], health_page->controller_busy_time[1]); 1506 spdk_json_write_named_uint128(w, "power_cycles", 1507 health_page->power_cycles[0], health_page->power_cycles[1]); 1508 spdk_json_write_named_uint128(w, "power_on_hours", 1509 health_page->power_on_hours[0], health_page->power_on_hours[1]); 1510 spdk_json_write_named_uint128(w, "unsafe_shutdowns", 1511 health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]); 1512 spdk_json_write_named_uint128(w, "media_errors", 1513 health_page->media_errors[0], health_page->media_errors[1]); 1514 spdk_json_write_named_uint128(w, "num_err_log_entries", 1515 health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]); 1516 spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time); 1517 spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes", 1518 health_page->critical_temp_time); 1519 for (i = 0; i < 8; i++) { 1520 if (health_page->temp_sensor[i] != 0) { 1521 spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273); 1522 } 1523 } 1524 spdk_json_write_object_end(w); 1525 1526 spdk_jsonrpc_end_result(request, w); 1527 nvme_health_info_cleanup(context, false); 1528 } 1529 1530 static void 1531 get_health_log_page(struct spdk_nvme_health_info_context *context) 1532 { 1533 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1534 1535 if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, 1536 SPDK_NVME_GLOBAL_NS_TAG, 1537 &(context->health_page), sizeof(context->health_page), 0, 1538 get_health_log_page_completion, context)) { 1539 nvme_health_info_cleanup(context, true); 1540 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n"); 1541 } 1542 } 1543 1544 static void 1545 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1546 { 1547 struct spdk_nvme_health_info_context *context = cb_arg; 1548 1549 if (spdk_nvme_cpl_is_error(cpl)) { 1550 nvme_health_info_cleanup(context, true); 1551 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n"); 1552 } else { 1553 get_health_log_page(context); 1554 } 1555 } 1556 1557 static int 1558 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context) 1559 { 1560 struct spdk_nvme_cmd cmd = {}; 1561 1562 cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1563 cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1564 1565 return 
spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0, 1566 get_temperature_threshold_feature_completion, context); 1567 } 1568 1569 static void 1570 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr) 1571 { 1572 struct spdk_nvme_health_info_context *context; 1573 1574 context = calloc(1, sizeof(struct spdk_nvme_health_info_context)); 1575 if (!context) { 1576 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1577 "Memory allocation error."); 1578 return; 1579 } 1580 1581 context->request = request; 1582 context->ctrlr = ctrlr; 1583 1584 if (get_temperature_threshold_feature(context)) { 1585 nvme_health_info_cleanup(context, true); 1586 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n"); 1587 } 1588 1589 return; 1590 } 1591 1592 static void 1593 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request, 1594 const struct spdk_json_val *params) 1595 { 1596 struct rpc_get_controller_health_info req = {}; 1597 struct nvme_ctrlr *nvme_ctrlr = NULL; 1598 1599 if (!params) { 1600 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1601 "Missing device name"); 1602 1603 return; 1604 } 1605 if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders, 1606 SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) { 1607 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1608 free_rpc_get_controller_health_info(&req); 1609 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1610 "Invalid parameters"); 1611 1612 return; 1613 } 1614 1615 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1616 1617 if (!nvme_ctrlr) { 1618 SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name); 1619 free_rpc_get_controller_health_info(&req); 1620 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1621 "Device not found"); 1622 return; 1623 } 1624 1625 get_controller_health_info(request, nvme_ctrlr->ctrlr); 1626 free_rpc_get_controller_health_info(&req); 1627 1628 return; 1629 } 1630 SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info", 1631 rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME) 1632 1633 struct rpc_bdev_nvme_start_discovery { 1634 char *name; 1635 char *trtype; 1636 char *adrfam; 1637 char *traddr; 1638 char *trsvcid; 1639 char *hostnqn; 1640 bool wait_for_attach; 1641 uint64_t attach_timeout_ms; 1642 struct spdk_nvme_ctrlr_opts opts; 1643 struct spdk_bdev_nvme_ctrlr_opts bdev_opts; 1644 }; 1645 1646 static void 1647 free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req) 1648 { 1649 free(req->name); 1650 free(req->trtype); 1651 free(req->adrfam); 1652 free(req->traddr); 1653 free(req->trsvcid); 1654 free(req->hostnqn); 1655 } 1656 1657 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = { 1658 {"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string}, 1659 {"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string}, 1660 {"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string}, 1661 {"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true}, 1662 {"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true}, 1663 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true}, 1664 {"wait_for_attach", 
offsetof(struct rpc_bdev_nvme_start_discovery, wait_for_attach), spdk_json_decode_bool, true}, 1665 {"attach_timeout_ms", offsetof(struct rpc_bdev_nvme_start_discovery, attach_timeout_ms), spdk_json_decode_uint64, true}, 1666 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 1667 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 1668 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 1669 }; 1670 1671 struct rpc_bdev_nvme_start_discovery_ctx { 1672 struct rpc_bdev_nvme_start_discovery req; 1673 struct spdk_jsonrpc_request *request; 1674 }; 1675 1676 static void 1677 rpc_bdev_nvme_start_discovery_done(void *ctx, int status) 1678 { 1679 struct spdk_jsonrpc_request *request = ctx; 1680 1681 if (status != 0) { 1682 spdk_jsonrpc_send_error_response(request, status, spdk_strerror(-status)); 1683 } else { 1684 spdk_jsonrpc_send_bool_response(request, true); 1685 } 1686 } 1687 1688 static void 1689 rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request, 1690 const struct spdk_json_val *params) 1691 { 1692 struct rpc_bdev_nvme_start_discovery_ctx *ctx; 1693 struct spdk_nvme_transport_id trid = {}; 1694 size_t len, maxlen; 1695 int rc; 1696 spdk_bdev_nvme_start_discovery_fn cb_fn; 1697 void *cb_ctx; 1698 1699 ctx = calloc(1, sizeof(*ctx)); 1700 if (!ctx) { 1701 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1702 return; 1703 } 1704 1705 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 1706 1707 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders, 1708 SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders), 1709 &ctx->req)) { 1710 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1711 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1712 "spdk_json_decode_object failed"); 1713 goto cleanup; 1714 } 1715 1716 /* Parse trstring */ 1717 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 1718 if (rc < 0) { 1719 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 1720 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 1721 ctx->req.trtype); 1722 goto cleanup; 1723 } 1724 1725 /* Parse trtype */ 1726 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 1727 assert(rc == 0); 1728 1729 /* Parse traddr */ 1730 maxlen = sizeof(trid.traddr); 1731 len = strnlen(ctx->req.traddr, maxlen); 1732 if (len == maxlen) { 1733 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 1734 ctx->req.traddr); 1735 goto cleanup; 1736 } 1737 memcpy(trid.traddr, ctx->req.traddr, len + 1); 1738 1739 /* Parse adrfam */ 1740 if (ctx->req.adrfam) { 1741 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 1742 if (rc < 0) { 1743 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 1744 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 1745 ctx->req.adrfam); 1746 goto cleanup; 1747 } 1748 } 1749 1750 /* Parse trsvcid */ 1751 if (ctx->req.trsvcid) { 1752 maxlen = sizeof(trid.trsvcid); 1753 len = strnlen(ctx->req.trsvcid, maxlen); 1754 if (len == maxlen) { 1755 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 1756 ctx->req.trsvcid); 1757 goto cleanup; 1758 } 
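/* The length was bounds-checked above, so copying len + 1 bytes also copies the terminating NUL. */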
1759 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 1760 } 1761 1762 if (ctx->req.hostnqn) { 1763 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 1764 ctx->req.hostnqn); 1765 } 1766 1767 if (ctx->req.attach_timeout_ms != 0) { 1768 ctx->req.wait_for_attach = true; 1769 } 1770 1771 ctx->request = request; 1772 cb_fn = ctx->req.wait_for_attach ? rpc_bdev_nvme_start_discovery_done : NULL; 1773 cb_ctx = ctx->req.wait_for_attach ? request : NULL; 1774 rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts, &ctx->req.bdev_opts, 1775 ctx->req.attach_timeout_ms, false, cb_fn, cb_ctx); 1776 if (rc) { 1777 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1778 } else if (!ctx->req.wait_for_attach) { 1779 rpc_bdev_nvme_start_discovery_done(request, 0); 1780 } 1781 1782 cleanup: 1783 free_rpc_bdev_nvme_start_discovery(&ctx->req); 1784 free(ctx); 1785 } 1786 SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery, 1787 SPDK_RPC_RUNTIME) 1788 1789 struct rpc_bdev_nvme_stop_discovery { 1790 char *name; 1791 }; 1792 1793 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = { 1794 {"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string}, 1795 }; 1796 1797 struct rpc_bdev_nvme_stop_discovery_ctx { 1798 struct rpc_bdev_nvme_stop_discovery req; 1799 struct spdk_jsonrpc_request *request; 1800 }; 1801 1802 static void 1803 rpc_bdev_nvme_stop_discovery_done(void *cb_ctx) 1804 { 1805 struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx; 1806 1807 spdk_jsonrpc_send_bool_response(ctx->request, true); 1808 free(ctx->req.name); 1809 free(ctx); 1810 } 1811 1812 static void 1813 rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request, 1814 const struct spdk_json_val *params) 1815 { 1816 struct rpc_bdev_nvme_stop_discovery_ctx *ctx; 1817 int rc; 1818 1819 ctx = calloc(1, sizeof(*ctx)); 1820 if (!ctx) { 1821 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1822 return; 1823 } 1824 1825 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders, 1826 SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders), 1827 &ctx->req)) { 1828 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1829 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1830 "spdk_json_decode_object failed"); 1831 goto cleanup; 1832 } 1833 1834 ctx->request = request; 1835 rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx); 1836 if (rc) { 1837 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1838 goto cleanup; 1839 } 1840 1841 return; 1842 1843 cleanup: 1844 free(ctx->req.name); 1845 free(ctx); 1846 } 1847 SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery, 1848 SPDK_RPC_RUNTIME) 1849 1850 static void 1851 rpc_bdev_nvme_get_discovery_info(struct spdk_jsonrpc_request *request, 1852 const struct spdk_json_val *params) 1853 { 1854 struct spdk_json_write_ctx *w; 1855 1856 w = spdk_jsonrpc_begin_result(request); 1857 bdev_nvme_get_discovery_info(w); 1858 spdk_jsonrpc_end_result(request, w); 1859 } 1860 SPDK_RPC_REGISTER("bdev_nvme_get_discovery_info", rpc_bdev_nvme_get_discovery_info, 1861 SPDK_RPC_RUNTIME) 1862 1863 enum error_injection_cmd_type { 1864 NVME_ADMIN_CMD = 1, 1865 NVME_IO_CMD, 1866 }; 1867 1868 struct rpc_add_error_injection { 1869 char *name; 1870 enum error_injection_cmd_type cmd_type; 1871 uint8_t opc; 1872 bool do_not_submit; 1873 uint64_t timeout_in_us; 1874 
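/* Number of commands the injected error is applied to; the RPC handler defaults this to 1 when the parameter is omitted. */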
uint32_t err_count; 1875 uint8_t sct; 1876 uint8_t sc; 1877 }; 1878 1879 static void 1880 free_rpc_add_error_injection(struct rpc_add_error_injection *req) 1881 { 1882 free(req->name); 1883 } 1884 1885 static int 1886 rpc_error_injection_decode_cmd_type(const struct spdk_json_val *val, void *out) 1887 { 1888 int *cmd_type = out; 1889 1890 if (spdk_json_strequal(val, "admin")) { 1891 *cmd_type = NVME_ADMIN_CMD; 1892 } else if (spdk_json_strequal(val, "io")) { 1893 *cmd_type = NVME_IO_CMD; 1894 } else { 1895 SPDK_ERRLOG("Invalid parameter value: cmd_type\n"); 1896 return -EINVAL; 1897 } 1898 1899 return 0; 1900 } 1901 1902 static const struct spdk_json_object_decoder rpc_add_error_injection_decoders[] = { 1903 { "name", offsetof(struct rpc_add_error_injection, name), spdk_json_decode_string }, 1904 { "cmd_type", offsetof(struct rpc_add_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1905 { "opc", offsetof(struct rpc_add_error_injection, opc), spdk_json_decode_uint8 }, 1906 { "do_not_submit", offsetof(struct rpc_add_error_injection, do_not_submit), spdk_json_decode_bool, true }, 1907 { "timeout_in_us", offsetof(struct rpc_add_error_injection, timeout_in_us), spdk_json_decode_uint64, true }, 1908 { "err_count", offsetof(struct rpc_add_error_injection, err_count), spdk_json_decode_uint32, true }, 1909 { "sct", offsetof(struct rpc_add_error_injection, sct), spdk_json_decode_uint8, true}, 1910 { "sc", offsetof(struct rpc_add_error_injection, sc), spdk_json_decode_uint8, true}, 1911 }; 1912 1913 struct rpc_add_error_injection_ctx { 1914 struct spdk_jsonrpc_request *request; 1915 struct rpc_add_error_injection rpc; 1916 }; 1917 1918 static void 1919 rpc_add_error_injection_done(struct nvme_ctrlr *nvme_ctrlr, void *_ctx, int status) 1920 { 1921 struct rpc_add_error_injection_ctx *ctx = _ctx; 1922 1923 if (status) { 1924 spdk_jsonrpc_send_error_response(ctx->request, status, 1925 "Failed to add the error injection."); 1926 } else { 1927 spdk_jsonrpc_send_bool_response(ctx->request, true); 1928 } 1929 1930 free_rpc_add_error_injection(&ctx->rpc); 1931 free(ctx); 1932 } 1933 1934 static void 1935 rpc_add_error_injection_per_channel(struct nvme_ctrlr_channel_iter *i, 1936 struct nvme_ctrlr *nvme_ctrlr, 1937 struct nvme_ctrlr_channel *ctrlr_ch, 1938 void *_ctx) 1939 { 1940 struct rpc_add_error_injection_ctx *ctx = _ctx; 1941 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1942 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1943 int rc = 0; 1944 1945 if (qpair != NULL) { 1946 rc = spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc, 1947 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1948 ctx->rpc.sct, ctx->rpc.sc); 1949 } 1950 1951 nvme_ctrlr_for_each_channel_continue(i, rc); 1952 } 1953 1954 static void 1955 rpc_bdev_nvme_add_error_injection( 1956 struct spdk_jsonrpc_request *request, 1957 const struct spdk_json_val *params) 1958 { 1959 struct rpc_add_error_injection_ctx *ctx; 1960 struct nvme_ctrlr *nvme_ctrlr; 1961 int rc; 1962 1963 ctx = calloc(1, sizeof(*ctx)); 1964 if (!ctx) { 1965 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1966 return; 1967 } 1968 ctx->rpc.err_count = 1; 1969 ctx->request = request; 1970 1971 if (spdk_json_decode_object(params, 1972 rpc_add_error_injection_decoders, 1973 SPDK_COUNTOF(rpc_add_error_injection_decoders), 1974 &ctx->rpc)) { 1975 spdk_jsonrpc_send_error_response(request, -EINVAL, 1976 "Failed to parse the request"); 1977 goto cleanup; 1978 } 1979 1980 
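/* Look up the target controller by name. I/O-command injections must be registered on every channel's I/O qpair, so they go through the per-channel iterator below; admin-command injections are registered once with a NULL qpair (the admin queue). */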
nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 1981 if (nvme_ctrlr == NULL) { 1982 SPDK_ERRLOG("No controller with specified name was found.\n"); 1983 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1984 goto cleanup; 1985 } 1986 1987 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 1988 nvme_ctrlr_for_each_channel(nvme_ctrlr, 1989 rpc_add_error_injection_per_channel, 1990 ctx, 1991 rpc_add_error_injection_done); 1992 1993 return; 1994 } else { 1995 rc = spdk_nvme_qpair_add_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc, 1996 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1997 ctx->rpc.sct, ctx->rpc.sc); 1998 if (rc) { 1999 spdk_jsonrpc_send_error_response(request, -rc, 2000 "Failed to add the error injection"); 2001 } else { 2002 spdk_jsonrpc_send_bool_response(ctx->request, true); 2003 } 2004 } 2005 2006 cleanup: 2007 free_rpc_add_error_injection(&ctx->rpc); 2008 free(ctx); 2009 } 2010 SPDK_RPC_REGISTER("bdev_nvme_add_error_injection", rpc_bdev_nvme_add_error_injection, 2011 SPDK_RPC_RUNTIME) 2012 2013 struct rpc_remove_error_injection { 2014 char *name; 2015 enum error_injection_cmd_type cmd_type; 2016 uint8_t opc; 2017 }; 2018 2019 static void 2020 free_rpc_remove_error_injection(struct rpc_remove_error_injection *req) 2021 { 2022 free(req->name); 2023 } 2024 2025 static const struct spdk_json_object_decoder rpc_remove_error_injection_decoders[] = { 2026 { "name", offsetof(struct rpc_remove_error_injection, name), spdk_json_decode_string }, 2027 { "cmd_type", offsetof(struct rpc_remove_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 2028 { "opc", offsetof(struct rpc_remove_error_injection, opc), spdk_json_decode_uint8 }, 2029 }; 2030 2031 struct rpc_remove_error_injection_ctx { 2032 struct spdk_jsonrpc_request *request; 2033 struct rpc_remove_error_injection rpc; 2034 }; 2035 2036 static void 2037 rpc_remove_error_injection_done(struct nvme_ctrlr *nvme_ctrlr, void *_ctx, int status) 2038 { 2039 struct rpc_remove_error_injection_ctx *ctx = _ctx; 2040 2041 if (status) { 2042 spdk_jsonrpc_send_error_response(ctx->request, status, 2043 "Failed to remove the error injection."); 2044 } else { 2045 spdk_jsonrpc_send_bool_response(ctx->request, true); 2046 } 2047 2048 free_rpc_remove_error_injection(&ctx->rpc); 2049 free(ctx); 2050 } 2051 2052 static void 2053 rpc_remove_error_injection_per_channel(struct nvme_ctrlr_channel_iter *i, 2054 struct nvme_ctrlr *nvme_ctrlr, 2055 struct nvme_ctrlr_channel *ctrlr_ch, 2056 void *_ctx) 2057 { 2058 struct rpc_remove_error_injection_ctx *ctx = _ctx; 2059 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 2060 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 2061 2062 if (qpair != NULL) { 2063 spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc); 2064 } 2065 2066 nvme_ctrlr_for_each_channel_continue(i, 0); 2067 } 2068 2069 static void 2070 rpc_bdev_nvme_remove_error_injection(struct spdk_jsonrpc_request *request, 2071 const struct spdk_json_val *params) 2072 { 2073 struct rpc_remove_error_injection_ctx *ctx; 2074 struct nvme_ctrlr *nvme_ctrlr; 2075 2076 ctx = calloc(1, sizeof(*ctx)); 2077 if (!ctx) { 2078 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2079 return; 2080 } 2081 ctx->request = request; 2082 2083 if (spdk_json_decode_object(params, 2084 rpc_remove_error_injection_decoders, 2085 SPDK_COUNTOF(rpc_remove_error_injection_decoders), 2086 &ctx->rpc)) { 2087 spdk_jsonrpc_send_error_response(request, -EINVAL, 2088 
"Failed to parse the request"); 2089 goto cleanup; 2090 } 2091 2092 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 2093 if (nvme_ctrlr == NULL) { 2094 SPDK_ERRLOG("No controller with specified name was found.\n"); 2095 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 2096 goto cleanup; 2097 } 2098 2099 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 2100 nvme_ctrlr_for_each_channel(nvme_ctrlr, 2101 rpc_remove_error_injection_per_channel, 2102 ctx, 2103 rpc_remove_error_injection_done); 2104 return; 2105 } else { 2106 spdk_nvme_qpair_remove_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc); 2107 spdk_jsonrpc_send_bool_response(ctx->request, true); 2108 } 2109 2110 cleanup: 2111 free_rpc_remove_error_injection(&ctx->rpc); 2112 free(ctx); 2113 } 2114 SPDK_RPC_REGISTER("bdev_nvme_remove_error_injection", rpc_bdev_nvme_remove_error_injection, 2115 SPDK_RPC_RUNTIME) 2116 2117 struct rpc_get_io_paths { 2118 char *name; 2119 }; 2120 2121 static void 2122 free_rpc_get_io_paths(struct rpc_get_io_paths *r) 2123 { 2124 free(r->name); 2125 } 2126 2127 static const struct spdk_json_object_decoder rpc_get_io_paths_decoders[] = { 2128 {"name", offsetof(struct rpc_get_io_paths, name), spdk_json_decode_string, true}, 2129 }; 2130 2131 struct rpc_get_io_paths_ctx { 2132 struct rpc_get_io_paths req; 2133 struct spdk_jsonrpc_request *request; 2134 struct spdk_json_write_ctx *w; 2135 }; 2136 2137 static void 2138 rpc_bdev_nvme_get_io_paths_done(struct spdk_io_channel_iter *i, int status) 2139 { 2140 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2141 2142 spdk_json_write_array_end(ctx->w); 2143 2144 spdk_json_write_object_end(ctx->w); 2145 2146 spdk_jsonrpc_end_result(ctx->request, ctx->w); 2147 2148 free_rpc_get_io_paths(&ctx->req); 2149 free(ctx); 2150 } 2151 2152 static void 2153 _rpc_bdev_nvme_get_io_paths(struct spdk_io_channel_iter *i) 2154 { 2155 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 2156 struct nvme_poll_group *group = spdk_io_channel_get_ctx(_ch); 2157 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2158 struct nvme_qpair *qpair; 2159 struct nvme_io_path *io_path; 2160 struct nvme_bdev *nbdev; 2161 2162 spdk_json_write_object_begin(ctx->w); 2163 2164 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 2165 2166 spdk_json_write_named_array_begin(ctx->w, "io_paths"); 2167 2168 TAILQ_FOREACH(qpair, &group->qpair_list, tailq) { 2169 TAILQ_FOREACH(io_path, &qpair->io_path_list, tailq) { 2170 nbdev = io_path->nvme_ns->bdev; 2171 2172 if (ctx->req.name != NULL && 2173 strcmp(ctx->req.name, nbdev->disk.name) != 0) { 2174 continue; 2175 } 2176 2177 nvme_io_path_info_json(ctx->w, io_path); 2178 } 2179 } 2180 2181 spdk_json_write_array_end(ctx->w); 2182 2183 spdk_json_write_object_end(ctx->w); 2184 2185 spdk_for_each_channel_continue(i, 0); 2186 } 2187 2188 static void 2189 rpc_bdev_nvme_get_io_paths(struct spdk_jsonrpc_request *request, 2190 const struct spdk_json_val *params) 2191 { 2192 struct rpc_get_io_paths_ctx *ctx; 2193 2194 ctx = calloc(1, sizeof(*ctx)); 2195 if (ctx == NULL) { 2196 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2197 return; 2198 } 2199 2200 if (params != NULL && 2201 spdk_json_decode_object(params, rpc_get_io_paths_decoders, 2202 SPDK_COUNTOF(rpc_get_io_paths_decoders), 2203 &ctx->req)) { 2204 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 2205 "bdev_nvme_get_io_paths requires no parameters"); 
2206 2207 free_rpc_get_io_paths(&ctx->req); 2208 free(ctx); 2209 return; 2210 } 2211 2212 ctx->request = request; 2213 ctx->w = spdk_jsonrpc_begin_result(request); 2214 2215 spdk_json_write_object_begin(ctx->w); 2216 2217 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 2218 2219 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 2220 _rpc_bdev_nvme_get_io_paths, 2221 ctx, 2222 rpc_bdev_nvme_get_io_paths_done); 2223 } 2224 SPDK_RPC_REGISTER("bdev_nvme_get_io_paths", rpc_bdev_nvme_get_io_paths, SPDK_RPC_RUNTIME) 2225 2226 struct rpc_bdev_nvme_set_preferred_path { 2227 char *name; 2228 uint16_t cntlid; 2229 }; 2230 2231 static void 2232 free_rpc_bdev_nvme_set_preferred_path(struct rpc_bdev_nvme_set_preferred_path *req) 2233 { 2234 free(req->name); 2235 } 2236 2237 static const struct spdk_json_object_decoder rpc_bdev_nvme_set_preferred_path_decoders[] = { 2238 {"name", offsetof(struct rpc_bdev_nvme_set_preferred_path, name), spdk_json_decode_string}, 2239 {"cntlid", offsetof(struct rpc_bdev_nvme_set_preferred_path, cntlid), spdk_json_decode_uint16}, 2240 }; 2241 2242 struct rpc_bdev_nvme_set_preferred_path_ctx { 2243 struct rpc_bdev_nvme_set_preferred_path req; 2244 struct spdk_jsonrpc_request *request; 2245 }; 2246 2247 static void 2248 rpc_bdev_nvme_set_preferred_path_done(void *cb_arg, int rc) 2249 { 2250 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx = cb_arg; 2251 2252 if (rc == 0) { 2253 spdk_jsonrpc_send_bool_response(ctx->request, true); 2254 } else { 2255 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2256 } 2257 2258 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2259 free(ctx); 2260 } 2261 2262 static void 2263 rpc_bdev_nvme_set_preferred_path(struct spdk_jsonrpc_request *request, 2264 const struct spdk_json_val *params) 2265 { 2266 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx; 2267 2268 ctx = calloc(1, sizeof(*ctx)); 2269 if (ctx == NULL) { 2270 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2271 return; 2272 } 2273 2274 if (spdk_json_decode_object(params, rpc_bdev_nvme_set_preferred_path_decoders, 2275 SPDK_COUNTOF(rpc_bdev_nvme_set_preferred_path_decoders), 2276 &ctx->req)) { 2277 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2278 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2279 "spdk_json_decode_object failed"); 2280 goto cleanup; 2281 } 2282 2283 ctx->request = request; 2284 2285 bdev_nvme_set_preferred_path(ctx->req.name, ctx->req.cntlid, 2286 rpc_bdev_nvme_set_preferred_path_done, ctx); 2287 return; 2288 2289 cleanup: 2290 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2291 free(ctx); 2292 } 2293 SPDK_RPC_REGISTER("bdev_nvme_set_preferred_path", rpc_bdev_nvme_set_preferred_path, 2294 SPDK_RPC_RUNTIME) 2295 2296 struct rpc_set_multipath_policy { 2297 char *name; 2298 enum spdk_bdev_nvme_multipath_policy policy; 2299 enum spdk_bdev_nvme_multipath_selector selector; 2300 uint32_t rr_min_io; 2301 }; 2302 2303 static void 2304 free_rpc_set_multipath_policy(struct rpc_set_multipath_policy *req) 2305 { 2306 free(req->name); 2307 } 2308 2309 static int 2310 rpc_decode_mp_policy(const struct spdk_json_val *val, void *out) 2311 { 2312 enum spdk_bdev_nvme_multipath_policy *policy = out; 2313 2314 if (spdk_json_strequal(val, "active_passive") == true) { 2315 *policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 2316 } else if (spdk_json_strequal(val, "active_active") == true) { 2317 *policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 2318 } else { 2319 SPDK_NOTICELOG("Invalid parameter value: 
policy\n");
2320 return -EINVAL;
2321 }
2322
2323 return 0;
2324 }
2325
2326 static int
2327 rpc_decode_mp_selector(const struct spdk_json_val *val, void *out)
2328 {
2329 enum spdk_bdev_nvme_multipath_selector *selector = out;
2330
2331 if (spdk_json_strequal(val, "round_robin") == true) {
2332 *selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN;
2333 } else if (spdk_json_strequal(val, "queue_depth") == true) {
2334 *selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH;
2335 } else {
2336 SPDK_NOTICELOG("Invalid parameter value: selector\n");
2337 return -EINVAL;
2338 }
2339
2340 return 0;
2341 }
2342
2343 static const struct spdk_json_object_decoder rpc_set_multipath_policy_decoders[] = {
2344 {"name", offsetof(struct rpc_set_multipath_policy, name), spdk_json_decode_string},
2345 {"policy", offsetof(struct rpc_set_multipath_policy, policy), rpc_decode_mp_policy},
2346 {"selector", offsetof(struct rpc_set_multipath_policy, selector), rpc_decode_mp_selector, true},
2347 {"rr_min_io", offsetof(struct rpc_set_multipath_policy, rr_min_io), spdk_json_decode_uint32, true},
2348 };
2349
2350 struct rpc_set_multipath_policy_ctx {
2351 struct rpc_set_multipath_policy req;
2352 struct spdk_jsonrpc_request *request;
2353 };
2354
2355 static void
2356 rpc_bdev_nvme_set_multipath_policy_done(void *cb_arg, int rc)
2357 {
2358 struct rpc_set_multipath_policy_ctx *ctx = cb_arg;
2359
2360 if (rc == 0) {
2361 spdk_jsonrpc_send_bool_response(ctx->request, true);
2362 } else {
2363 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc));
2364 }
2365
2366 free_rpc_set_multipath_policy(&ctx->req);
2367 free(ctx);
2368 }
2369
2370 static void
2371 rpc_bdev_nvme_set_multipath_policy(struct spdk_jsonrpc_request *request,
2372 const struct spdk_json_val *params)
2373 {
2374 struct rpc_set_multipath_policy_ctx *ctx;
2375
2376 ctx = calloc(1, sizeof(*ctx));
2377 if (ctx == NULL) {
2378 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
2379 return;
2380 }
2381
/* UINT32_MAX marks "not specified"; the real defaults are filled in after decoding. */
2382 ctx->req.rr_min_io = UINT32_MAX;
2383 ctx->req.selector = UINT32_MAX;
2384
2385 if (spdk_json_decode_object(params, rpc_set_multipath_policy_decoders,
2386 SPDK_COUNTOF(rpc_set_multipath_policy_decoders),
2387 &ctx->req)) {
2388 SPDK_ERRLOG("spdk_json_decode_object failed\n");
2389 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
2390 "spdk_json_decode_object failed");
2391 goto cleanup;
2392 }
2393
2394 ctx->request = request;
2395 if (ctx->req.selector == UINT32_MAX) {
2396 if (ctx->req.policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE) {
2397 ctx->req.selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN;
2398 } else {
2399 ctx->req.selector = 0;
2400 }
2401 }
2402
2403 if (ctx->req.policy != BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE && ctx->req.selector > 0) {
2404 SPDK_ERRLOG("selector only works in active_active mode\n");
2405 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
2406 "selector only works in active_active mode");
2407 goto cleanup;
2408 }
2409
2410 spdk_bdev_nvme_set_multipath_policy(ctx->req.name, ctx->req.policy, ctx->req.selector,
2411 ctx->req.rr_min_io,
2412 rpc_bdev_nvme_set_multipath_policy_done, ctx);
2413 return;
2414
2415 cleanup:
2416 free_rpc_set_multipath_policy(&ctx->req);
2417 free(ctx);
2418 }
2419 SPDK_RPC_REGISTER("bdev_nvme_set_multipath_policy", rpc_bdev_nvme_set_multipath_policy,
2420 SPDK_RPC_RUNTIME)
2421
2422 struct rpc_bdev_nvme_start_mdns_discovery {
2423 char *name;
2424 char *svcname;
2425 char *hostnqn;
2426 struct spdk_nvme_ctrlr_opts opts;
2427 struct
spdk_bdev_nvme_ctrlr_opts bdev_opts; 2428 }; 2429 2430 static void 2431 free_rpc_bdev_nvme_start_mdns_discovery(struct rpc_bdev_nvme_start_mdns_discovery *req) 2432 { 2433 free(req->name); 2434 free(req->svcname); 2435 free(req->hostnqn); 2436 } 2437 2438 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_mdns_discovery_decoders[] = { 2439 {"name", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, name), spdk_json_decode_string}, 2440 {"svcname", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, svcname), spdk_json_decode_string}, 2441 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, hostnqn), spdk_json_decode_string, true}, 2442 }; 2443 2444 struct rpc_bdev_nvme_start_mdns_discovery_ctx { 2445 struct rpc_bdev_nvme_start_mdns_discovery req; 2446 struct spdk_jsonrpc_request *request; 2447 }; 2448 2449 static void 2450 rpc_bdev_nvme_start_mdns_discovery(struct spdk_jsonrpc_request *request, 2451 const struct spdk_json_val *params) 2452 { 2453 struct rpc_bdev_nvme_start_mdns_discovery_ctx *ctx; 2454 int rc; 2455 2456 ctx = calloc(1, sizeof(*ctx)); 2457 if (!ctx) { 2458 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2459 return; 2460 } 2461 2462 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 2463 2464 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_mdns_discovery_decoders, 2465 SPDK_COUNTOF(rpc_bdev_nvme_start_mdns_discovery_decoders), 2466 &ctx->req)) { 2467 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2468 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2469 "spdk_json_decode_object failed"); 2470 goto cleanup; 2471 } 2472 2473 if (ctx->req.hostnqn) { 2474 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 2475 ctx->req.hostnqn); 2476 } 2477 ctx->request = request; 2478 rc = bdev_nvme_start_mdns_discovery(ctx->req.name, ctx->req.svcname, &ctx->req.opts, 2479 &ctx->req.bdev_opts); 2480 if (rc) { 2481 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2482 } else { 2483 spdk_jsonrpc_send_bool_response(request, true); 2484 } 2485 2486 cleanup: 2487 free_rpc_bdev_nvme_start_mdns_discovery(&ctx->req); 2488 free(ctx); 2489 } 2490 SPDK_RPC_REGISTER("bdev_nvme_start_mdns_discovery", rpc_bdev_nvme_start_mdns_discovery, 2491 SPDK_RPC_RUNTIME) 2492 2493 struct rpc_bdev_nvme_stop_mdns_discovery { 2494 char *name; 2495 }; 2496 2497 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_mdns_discovery_decoders[] = { 2498 {"name", offsetof(struct rpc_bdev_nvme_stop_mdns_discovery, name), spdk_json_decode_string}, 2499 }; 2500 2501 struct rpc_bdev_nvme_stop_mdns_discovery_ctx { 2502 struct rpc_bdev_nvme_stop_mdns_discovery req; 2503 struct spdk_jsonrpc_request *request; 2504 }; 2505 2506 static void 2507 rpc_bdev_nvme_stop_mdns_discovery(struct spdk_jsonrpc_request *request, 2508 const struct spdk_json_val *params) 2509 { 2510 struct rpc_bdev_nvme_stop_mdns_discovery_ctx *ctx; 2511 int rc; 2512 2513 ctx = calloc(1, sizeof(*ctx)); 2514 if (!ctx) { 2515 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2516 return; 2517 } 2518 2519 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_mdns_discovery_decoders, 2520 SPDK_COUNTOF(rpc_bdev_nvme_stop_mdns_discovery_decoders), 2521 &ctx->req)) { 2522 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2523 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2524 "spdk_json_decode_object failed"); 2525 goto cleanup; 2526 } 2527 2528 ctx->request 
= request; 2529 rc = bdev_nvme_stop_mdns_discovery(ctx->req.name); 2530 2531 if (rc) { 2532 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2533 goto cleanup; 2534 } 2535 spdk_jsonrpc_send_bool_response(ctx->request, true); 2536 2537 cleanup: 2538 free(ctx->req.name); 2539 free(ctx); 2540 } 2541 SPDK_RPC_REGISTER("bdev_nvme_stop_mdns_discovery", rpc_bdev_nvme_stop_mdns_discovery, 2542 SPDK_RPC_RUNTIME) 2543 2544 static void 2545 rpc_bdev_nvme_get_mdns_discovery_info(struct spdk_jsonrpc_request *request, 2546 const struct spdk_json_val *params) 2547 { 2548 bdev_nvme_get_mdns_discovery_info(request); 2549 } 2550 2551 SPDK_RPC_REGISTER("bdev_nvme_get_mdns_discovery_info", rpc_bdev_nvme_get_mdns_discovery_info, 2552 SPDK_RPC_RUNTIME) 2553 2554 struct rpc_get_path_stat { 2555 char *name; 2556 }; 2557 2558 struct path_stat { 2559 struct spdk_bdev_io_stat stat; 2560 struct spdk_nvme_transport_id trid; 2561 struct nvme_ns *ns; 2562 }; 2563 2564 struct rpc_bdev_nvme_path_stat_ctx { 2565 struct spdk_jsonrpc_request *request; 2566 struct path_stat *path_stat; 2567 uint32_t num_paths; 2568 struct spdk_bdev_desc *desc; 2569 }; 2570 2571 static void 2572 free_rpc_get_path_stat(struct rpc_get_path_stat *req) 2573 { 2574 free(req->name); 2575 } 2576 2577 static const struct spdk_json_object_decoder rpc_get_path_stat_decoders[] = { 2578 {"name", offsetof(struct rpc_get_path_stat, name), spdk_json_decode_string}, 2579 }; 2580 2581 static void 2582 dummy_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx) 2583 { 2584 } 2585 2586 static void 2587 rpc_bdev_nvme_path_stat_per_channel(struct nvme_bdev_channel_iter *i, 2588 struct nvme_bdev *nbdev, 2589 struct nvme_bdev_channel *nbdev_ch, 2590 void *_ctx) 2591 { 2592 struct rpc_bdev_nvme_path_stat_ctx *ctx = _ctx; 2593 struct nvme_io_path *io_path; 2594 struct path_stat *path_stat; 2595 uint32_t j; 2596 2597 assert(ctx->num_paths != 0); 2598 2599 for (j = 0; j < ctx->num_paths; j++) { 2600 path_stat = &ctx->path_stat[j]; 2601 2602 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 2603 if (path_stat->ns == io_path->nvme_ns) { 2604 assert(io_path->stat != NULL); 2605 spdk_bdev_add_io_stat(&path_stat->stat, io_path->stat); 2606 } 2607 } 2608 } 2609 2610 nvme_bdev_for_each_channel_continue(i, 0); 2611 } 2612 2613 static void 2614 rpc_bdev_nvme_path_stat_done(struct nvme_bdev *nbdev, void *_ctx, int status) 2615 { 2616 struct rpc_bdev_nvme_path_stat_ctx *ctx = _ctx; 2617 struct spdk_json_write_ctx *w; 2618 struct path_stat *path_stat; 2619 uint32_t j; 2620 2621 assert(ctx->num_paths != 0); 2622 2623 w = spdk_jsonrpc_begin_result(ctx->request); 2624 spdk_json_write_object_begin(w); 2625 spdk_json_write_named_string(w, "name", nbdev->disk.name); 2626 spdk_json_write_named_array_begin(w, "stats"); 2627 2628 for (j = 0; j < ctx->num_paths; j++) { 2629 path_stat = &ctx->path_stat[j]; 2630 spdk_json_write_object_begin(w); 2631 2632 spdk_json_write_named_object_begin(w, "trid"); 2633 nvme_bdev_dump_trid_json(&path_stat->trid, w); 2634 spdk_json_write_object_end(w); 2635 2636 spdk_json_write_named_object_begin(w, "stat"); 2637 spdk_bdev_dump_io_stat_json(&path_stat->stat, w); 2638 spdk_json_write_object_end(w); 2639 2640 spdk_json_write_object_end(w); 2641 } 2642 2643 spdk_json_write_array_end(w); 2644 spdk_json_write_object_end(w); 2645 spdk_jsonrpc_end_result(ctx->request, w); 2646 2647 spdk_bdev_close(ctx->desc); 2648 free(ctx->path_stat); 2649 free(ctx); 2650 } 2651 2652 static void 2653 
rpc_bdev_nvme_get_path_iostat(struct spdk_jsonrpc_request *request,
2654 const struct spdk_json_val *params)
2655 {
2656 struct rpc_get_path_stat req = {};
2657 struct spdk_bdev_desc *desc = NULL;
2658 struct spdk_bdev *bdev;
2659 struct nvme_bdev *nbdev;
2660 struct nvme_ns *nvme_ns;
2661 struct path_stat *path_stat;
2662 struct rpc_bdev_nvme_path_stat_ctx *ctx;
2663 struct spdk_bdev_nvme_opts opts;
2664 uint32_t num_paths = 0, i = 0;
2665 int rc;
2666
2667 bdev_nvme_get_opts(&opts);
2668 if (!opts.io_path_stat) {
2669 SPDK_ERRLOG("RPC not enabled if io_path_stat is false\n");
2670 spdk_jsonrpc_send_error_response(request, -EPERM,
2671 "RPC not enabled if io_path_stat is false");
2672 return;
2673 }
2674
2675 if (spdk_json_decode_object(params, rpc_get_path_stat_decoders,
2676 SPDK_COUNTOF(rpc_get_path_stat_decoders),
2677 &req)) {
2678 SPDK_ERRLOG("spdk_json_decode_object failed\n");
2679 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
2680 "spdk_json_decode_object failed");
2681 free_rpc_get_path_stat(&req);
2682 return;
2683 }
2684
2685 rc = spdk_bdev_open_ext(req.name, false, dummy_bdev_event_cb, NULL, &desc);
2686 if (rc != 0) {
2687 SPDK_ERRLOG("Failed to open bdev '%s': %d\n", req.name, rc);
2688 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
2689 free_rpc_get_path_stat(&req);
2690 return;
2691 }
2692
2693 free_rpc_get_path_stat(&req);
2694
2695 ctx = calloc(1, sizeof(struct rpc_bdev_nvme_path_stat_ctx));
2696 if (ctx == NULL) {
2697 spdk_bdev_close(desc);
2698 SPDK_ERRLOG("Failed to allocate rpc_bdev_nvme_path_stat_ctx struct\n");
2699 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
2700 return;
2701 }
2702
2703 bdev = spdk_bdev_desc_get_bdev(desc);
2704 nbdev = bdev->ctxt;
2705
2706 pthread_mutex_lock(&nbdev->mutex);
2707 if (nbdev->ref == 0) {
2708 rc = -ENOENT;
2709 goto err;
2710 }
2711
2712 num_paths = nbdev->ref;
2713 path_stat = calloc(num_paths, sizeof(struct path_stat));
2714 if (path_stat == NULL) {
2715 rc = -ENOMEM;
2716 SPDK_ERRLOG("Failed to allocate memory for path_stat.\n");
2717 goto err;
2718 }
2719
2720 /* Snapshot the accumulated per-namespace (per-path) history stats under the bdev mutex. */
2721 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
2722 assert(i < num_paths);
2723 path_stat[i].ns = nvme_ns;
2724 path_stat[i].trid = nvme_ns->ctrlr->active_path_id->trid;
2725
2726 assert(nvme_ns->stat != NULL);
2727 memcpy(&path_stat[i].stat, nvme_ns->stat, sizeof(struct spdk_bdev_io_stat));
2728 i++;
2729 }
2730 pthread_mutex_unlock(&nbdev->mutex);
2731
2732 ctx->request = request;
2733 ctx->desc = desc;
2734 ctx->path_stat = path_stat;
2735 ctx->num_paths = num_paths;
2736
/* Per-channel stats are added on top of the snapshot in rpc_bdev_nvme_path_stat_per_channel,
 * and the aggregated result is dumped as JSON in rpc_bdev_nvme_path_stat_done. */
2737 nvme_bdev_for_each_channel(nbdev,
2738 rpc_bdev_nvme_path_stat_per_channel,
2739 ctx,
2740 rpc_bdev_nvme_path_stat_done);
2741 return;
2742
2743 err:
2744 pthread_mutex_unlock(&nbdev->mutex);
2745 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
2746 spdk_bdev_close(desc);
2747 free(ctx);
2748 }
2749 SPDK_RPC_REGISTER("bdev_nvme_get_path_iostat", rpc_bdev_nvme_get_path_iostat,
2750 SPDK_RPC_RUNTIME)
2751
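/*
 * Illustrative JSON-RPC invocations for a few of the RPCs registered in this file
 * (a sketch only: parameter keys follow the decoder tables above, but the controller
 * name "Nvme0" and bdev name "Nvme0n1" are placeholders, not names created here):
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_get_controller_health_info",
 *    "params": {"name": "Nvme0"}}
 *
 *   {"jsonrpc": "2.0", "id": 2, "method": "bdev_nvme_set_multipath_policy",
 *    "params": {"name": "Nvme0n1", "policy": "active_active", "selector": "round_robin"}}
 *
 *   {"jsonrpc": "2.0", "id": 3, "method": "bdev_nvme_get_path_iostat",
 *    "params": {"name": "Nvme0n1"}}
 */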