/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * Copyright (c) 2022 Dell Inc, or its subsidiaries. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "bdev_nvme.h"

#include "spdk/config.h"

#include "spdk/string.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/env.h"
#include "spdk/nvme.h"
#include "spdk/nvme_spec.h"

#include "spdk/log.h"
#include "spdk/bdev_module.h"

static int
rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out)
{
	enum spdk_bdev_timeout_action *action = out;

	if (spdk_json_strequal(val, "none") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE;
	} else if (spdk_json_strequal(val, "abort") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT;
	} else if (spdk_json_strequal(val, "reset") == true) {
		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET;
	} else {
		SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n");
		return -EINVAL;
	}

	return 0;
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = {
	{"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true},
	{"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true},
	{"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true},
	{"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true},
	{"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true},
	{"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true},
	{"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true},
	{"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true},
	{"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true},
	{"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true},
	{"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true},
	{"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true},
	{"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true},
	{"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true},
	{"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true},
	{"transport_ack_timeout", offsetof(struct spdk_bdev_nvme_opts, transport_ack_timeout), spdk_json_decode_uint8, true},
	{"ctrlr_loss_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true},
	{"reconnect_delay_sec", offsetof(struct spdk_bdev_nvme_opts, reconnect_delay_sec), spdk_json_decode_uint32, true},
	{"fast_io_fail_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true},
	{"disable_auto_failback", offsetof(struct spdk_bdev_nvme_opts, disable_auto_failback), spdk_json_decode_bool, true},
	{"generate_uuids", offsetof(struct spdk_bdev_nvme_opts, generate_uuids), spdk_json_decode_bool, true},
	{"transport_tos", offsetof(struct spdk_bdev_nvme_opts, transport_tos), spdk_json_decode_uint8, true},
	{"nvme_error_stat", offsetof(struct spdk_bdev_nvme_opts, nvme_error_stat), spdk_json_decode_bool, true},
	{"rdma_srq_size", offsetof(struct spdk_bdev_nvme_opts, rdma_srq_size), spdk_json_decode_uint32, true},
	{"io_path_stat", offsetof(struct spdk_bdev_nvme_opts, io_path_stat), spdk_json_decode_bool, true},
};

static void
rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request,
			  const struct spdk_json_val *params)
{
	struct spdk_bdev_nvme_opts opts;
	int rc;

	bdev_nvme_get_opts(&opts);
	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders,
					      SPDK_COUNTOF(rpc_bdev_nvme_options_decoders),
					      &opts)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		return;
	}

	rc = bdev_nvme_set_opts(&opts);
	if (rc == -EPERM) {
		spdk_jsonrpc_send_error_response(request, -EPERM,
						 "RPC not permitted with nvme controllers already attached");
	} else if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
	} else {
		spdk_jsonrpc_send_bool_response(request, true);
	}

	return;
}
SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options,
		  SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_hotplug {
	bool enabled;
	uint64_t period_us;
};

static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = {
	{"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false},
	{"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true},
};

static void
rpc_bdev_nvme_set_hotplug_done(void *ctx)
{
	struct spdk_jsonrpc_request *request = ctx;

	spdk_jsonrpc_send_bool_response(request, true);
}

static void
rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request,
			  const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_hotplug req = {false, 0};
	int rc;

	if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		rc = -EINVAL;
		goto invalid;
	}

	rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done,
				   request);
	if (rc) {
		goto invalid;
	}

	return;
invalid:
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc));
}
SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME)
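
/*
 * Illustrative only (not part of the original source): the decoder tables above
 * define the accepted parameters for these two RPCs, so minimal JSON-RPC
 * requests could look like the following. The parameter values are example
 * assumptions, not defaults.
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_set_options",
 *    "params": {"action_on_timeout": "reset", "timeout_us": 30000000}}
 *
 *   {"jsonrpc": "2.0", "id": 2, "method": "bdev_nvme_set_hotplug",
 *    "params": {"enable": true, "period_us": 100000}}
 */
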
enum bdev_nvme_multipath_mode {
	BDEV_NVME_MP_MODE_FAILOVER,
	BDEV_NVME_MP_MODE_MULTIPATH,
	BDEV_NVME_MP_MODE_DISABLE,
};

struct rpc_bdev_nvme_attach_controller {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *priority;
	char *subnqn;
	char *hostnqn;
	char *hostaddr;
	char *hostsvcid;
	char *psk;
	enum bdev_nvme_multipath_mode multipath;
	struct nvme_ctrlr_opts bdev_opts;
	struct spdk_nvme_ctrlr_opts drv_opts;
	uint32_t max_bdevs;
};

static void
free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->priority);
	free(req->subnqn);
	free(req->hostnqn);
	free(req->hostaddr);
	free(req->hostsvcid);
	free(req->psk);
}

static int
bdev_nvme_decode_reftag(const struct spdk_json_val *val, void *out)
{
	uint32_t *flag = out;
	bool reftag;
	int rc;

	rc = spdk_json_decode_bool(val, &reftag);
	if (rc == 0 && reftag == true) {
		*flag |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
	}

	return rc;
}

static int
bdev_nvme_decode_guard(const struct spdk_json_val *val, void *out)
{
	uint32_t *flag = out;
	bool guard;
	int rc;

	rc = spdk_json_decode_bool(val, &guard);
	if (rc == 0 && guard == true) {
		*flag |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
	}

	return rc;
}

static int
bdev_nvme_decode_multipath(const struct spdk_json_val *val, void *out)
{
	enum bdev_nvme_multipath_mode *multipath = out;

	if (spdk_json_strequal(val, "failover") == true) {
		*multipath = BDEV_NVME_MP_MODE_FAILOVER;
	} else if (spdk_json_strequal(val, "multipath") == true) {
		*multipath = BDEV_NVME_MP_MODE_MULTIPATH;
	} else if (spdk_json_strequal(val, "disable") == true) {
		*multipath = BDEV_NVME_MP_MODE_DISABLE;
	} else {
		SPDK_NOTICELOG("Invalid parameter value: multipath\n");
		return -EINVAL;
	}

	return 0;
}


static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string},
	{"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string},

	{"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true},
	{"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true},
	{"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true},
	{"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true},
	{"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true},
	{"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true},

	{"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_reftag, true},
	{"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_guard, true},
	{"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.header_digest), spdk_json_decode_bool, true},
	{"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.data_digest), spdk_json_decode_bool, true},
	{"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true},
	{"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), bdev_nvme_decode_multipath, true},
	{"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.num_io_queues), spdk_json_decode_uint32, true},
	{"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true},
	{"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true},
	{"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true},
	{"psk", offsetof(struct rpc_bdev_nvme_attach_controller, psk), spdk_json_decode_string, true},
	{"max_bdevs", offsetof(struct rpc_bdev_nvme_attach_controller, max_bdevs), spdk_json_decode_uint32, true},
};

#define DEFAULT_MAX_BDEVS_PER_RPC 128

struct rpc_bdev_nvme_attach_controller_ctx {
	struct rpc_bdev_nvme_attach_controller req;
	size_t bdev_count;
	const char **names;
	struct spdk_jsonrpc_request *request;
};

static void
free_rpc_bdev_nvme_attach_controller_ctx(struct rpc_bdev_nvme_attach_controller_ctx *ctx)
{
	free_rpc_bdev_nvme_attach_controller(&ctx->req);
	free(ctx->names);
	free(ctx);
}

static void
rpc_bdev_nvme_attach_controller_examined(void *cb_ctx)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;
	struct spdk_json_write_ctx *w;
	size_t i;

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_array_begin(w);
	for (i = 0; i < ctx->bdev_count; i++) {
		spdk_json_write_string(w, ctx->names[i]);
	}
	spdk_json_write_array_end(w);
	spdk_jsonrpc_end_result(request, w);

	free_rpc_bdev_nvme_attach_controller_ctx(ctx);
}

static void
rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
	struct spdk_jsonrpc_request *request = ctx->request;

	if (rc < 0) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		free_rpc_bdev_nvme_attach_controller_ctx(ctx);
		return;
	}

	ctx->bdev_count = bdev_count;
	spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx);
}

static void
rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_attach_controller_ctx *ctx;
	struct spdk_nvme_transport_id trid = {};
	const struct spdk_nvme_ctrlr_opts *drv_opts;
	const struct spdk_nvme_transport_id *ctrlr_trid;
	struct nvme_ctrlr *ctrlr = NULL;
	size_t len, maxlen;
	bool multipath = false;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.drv_opts, sizeof(ctx->req.drv_opts));
	bdev_nvme_get_default_ctrlr_opts(&ctx->req.bdev_opts);
	/* For now, initialize the multipath parameter to add a failover path. This maintains backward
	 * compatibility with past behavior. In the future, this behavior will change to "disable". */
	ctx->req.multipath = BDEV_NVME_MP_MODE_FAILOVER;
	ctx->req.max_bdevs = DEFAULT_MAX_BDEVS_PER_RPC;

	if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	if (ctx->req.max_bdevs == 0) {
		spdk_jsonrpc_send_error_response(request, -EINVAL, "max_bdevs cannot be zero");
		goto cleanup;
	}

	ctx->names = calloc(ctx->req.max_bdevs, sizeof(char *));
	if (ctx->names == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		goto cleanup;
	}

	/* Parse trstring */
	rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype);
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
						     ctx->req.trtype);
		goto cleanup;
	}

	/* Parse trtype */
	rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
	assert(rc == 0);

	/* Parse traddr */
	maxlen = sizeof(trid.traddr);
	len = strnlen(ctx->req.traddr, maxlen);
	if (len == maxlen) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
						     ctx->req.traddr);
		goto cleanup;
	}
	memcpy(trid.traddr, ctx->req.traddr, len + 1);

	/* Parse adrfam */
	if (ctx->req.adrfam) {
		rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam);
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
							     ctx->req.adrfam);
			goto cleanup;
		}
	}

	/* Parse trsvcid */
	if (ctx->req.trsvcid) {
		maxlen = sizeof(trid.trsvcid);
		len = strnlen(ctx->req.trsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
							     ctx->req.trsvcid);
			goto cleanup;
		}
		memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1);
	}

	/* Parse priority for the NVMe-oF transport connection */
	if (ctx->req.priority) {
		trid.priority = spdk_strtol(ctx->req.priority, 10);
	}

	/* Parse subnqn */
	if (ctx->req.subnqn) {
		maxlen = sizeof(trid.subnqn);
		len = strnlen(ctx->req.subnqn, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
							     ctx->req.subnqn);
			goto cleanup;
		}
		memcpy(trid.subnqn, ctx->req.subnqn, len + 1);
	}

	if (ctx->req.hostnqn) {
		snprintf(ctx->req.drv_opts.hostnqn, sizeof(ctx->req.drv_opts.hostnqn), "%s",
			 ctx->req.hostnqn);
	}

	if (ctx->req.psk) {
		snprintf(ctx->req.drv_opts.psk, sizeof(ctx->req.drv_opts.psk), "%s",
			 ctx->req.psk);
	}

	if (ctx->req.hostaddr) {
		maxlen = sizeof(ctx->req.drv_opts.src_addr);
		len = strnlen(ctx->req.hostaddr, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s",
							     ctx->req.hostaddr);
			goto cleanup;
		}
		snprintf(ctx->req.drv_opts.src_addr, maxlen, "%s", ctx->req.hostaddr);
	}

	if (ctx->req.hostsvcid) {
		maxlen = sizeof(ctx->req.drv_opts.src_svcid);
		len = strnlen(ctx->req.hostsvcid, maxlen);
		if (len == maxlen) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s",
							     ctx->req.hostsvcid);
			goto cleanup;
		}
		snprintf(ctx->req.drv_opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid);
	}

	ctrlr = nvme_ctrlr_get_by_name(ctx->req.name);

	if (ctrlr) {
		/* This controller already exists. Check what the user wants to do. */
		if (ctx->req.multipath == BDEV_NVME_MP_MODE_DISABLE) {
			/* The user does not want to do any form of multipathing. */
			spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
							     "A controller named %s already exists and multipath is disabled\n",
							     ctx->req.name);
			goto cleanup;
		}

		assert(ctx->req.multipath == BDEV_NVME_MP_MODE_FAILOVER ||
		       ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH);

		/* The user wants to add this as a failover path or add this to create multipath. */
		drv_opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr);
		ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr);

		if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 &&
		    strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 &&
		    strncmp(ctx->req.drv_opts.src_addr, drv_opts->src_addr, sizeof(drv_opts->src_addr)) == 0 &&
		    strncmp(ctx->req.drv_opts.src_svcid, drv_opts->src_svcid, sizeof(drv_opts->src_svcid)) == 0) {
			/* Exactly same network path can't be added a second time */
			spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
							     "A controller named %s already exists with the specified network path\n",
							     ctx->req.name);
			goto cleanup;
		}

		if (strncmp(trid.subnqn,
			    ctrlr_trid->subnqn,
			    SPDK_NVMF_NQN_MAX_LEN) != 0) {
			/* Different SUBNQN is not allowed when specifying the same controller name. */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists, but uses a different subnqn (%s)\n",
							     ctx->req.name, ctrlr_trid->subnqn);
			goto cleanup;
		}

		if (strncmp(ctx->req.drv_opts.hostnqn, drv_opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) {
			/* Different HOSTNQN is not allowed when specifying the same controller name. */
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists, but uses a different hostnqn (%s)\n",
							     ctx->req.name, drv_opts->hostnqn);
			goto cleanup;
		}

		if (ctx->req.bdev_opts.prchk_flags) {
			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
							     "A controller named %s already exists. To add a path, do not specify PI options.\n",
							     ctx->req.name);
			goto cleanup;
		}

		ctx->req.bdev_opts.prchk_flags = ctrlr->opts.prchk_flags;
	}

	if (ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH) {
		multipath = true;
	}

	if (ctx->req.drv_opts.num_io_queues == 0 || ctx->req.drv_opts.num_io_queues > UINT16_MAX + 1) {
		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
						     "num_io_queues out of bounds, min: %u max: %u\n",
						     1, UINT16_MAX + 1);
		goto cleanup;
	}

	ctx->request = request;
	/* Should already be zero due to the calloc(), but set explicitly for clarity. */
	ctx->req.bdev_opts.from_discovery_service = false;
	rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->req.max_bdevs,
			      rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.drv_opts,
			      &ctx->req.bdev_opts, multipath);
	if (rc) {
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
		goto cleanup;
	}

	return;

cleanup:
	free_rpc_bdev_nvme_attach_controller_ctx(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller,
		  SPDK_RPC_RUNTIME)

static void
rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx)
{
	struct spdk_json_write_ctx *w = ctx;
	struct nvme_ctrlr *nvme_ctrlr;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", nbdev_ctrlr->name);

	spdk_json_write_named_array_begin(w, "ctrlrs");
	TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
		nvme_ctrlr_info_json(w, nvme_ctrlr);
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);
}

struct rpc_bdev_nvme_get_controllers {
	char *name;
};

static void
free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true},
};

static void
rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request,
			      const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_get_controllers req = {};
	struct spdk_json_write_ctx *w;
	struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL;

	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders,
					      SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}

	if (req.name) {
		nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name);
		if (nbdev_ctrlr == NULL) {
			SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name);
			goto cleanup;
		}
	}

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_array_begin(w);

	if (nbdev_ctrlr != NULL) {
		rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w);
	} else {
		nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w);
	}

	spdk_json_write_array_end(w);

	spdk_jsonrpc_end_result(request, w);

cleanup:
	free_rpc_bdev_nvme_get_controllers(&req);
}
SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_detach_controller {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *subnqn;
	char *hostaddr;
	char *hostsvcid;
};

static void
free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->subnqn);
	free(req->hostaddr);
	free(req->hostsvcid);
}
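
/*
 * Illustrative only (not from the original source): per the decoder table below,
 * "name" is the only required parameter for bdev_nvme_detach_controller; the
 * optional transport fields narrow the removal to a single path of that
 * controller. Example request (values are assumptions):
 *
 *   {"jsonrpc": "2.0", "id": 3, "method": "bdev_nvme_detach_controller",
 *    "params": {"name": "Nvme0", "trtype": "tcp", "traddr": "192.168.0.10",
 *               "trsvcid": "4420", "subnqn": "nqn.2016-06.io.spdk:cnode1"}}
 */
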
{"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string}, 646 {"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true}, 647 {"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true}, 648 {"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true}, 649 {"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true}, 650 {"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true}, 651 {"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true}, 652 {"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true}, 653 }; 654 655 static void 656 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request, 657 const struct spdk_json_val *params) 658 { 659 struct rpc_bdev_nvme_detach_controller req = {NULL}; 660 struct nvme_path_id path = {}; 661 size_t len, maxlen; 662 int rc = 0; 663 664 if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders, 665 SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders), 666 &req)) { 667 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 668 "spdk_json_decode_object failed"); 669 goto cleanup; 670 } 671 672 if (req.trtype != NULL) { 673 rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype); 674 if (rc < 0) { 675 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 676 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 677 req.trtype); 678 goto cleanup; 679 } 680 681 rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype); 682 if (rc < 0) { 683 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 684 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 685 req.trtype); 686 goto cleanup; 687 } 688 } 689 690 if (req.traddr != NULL) { 691 maxlen = sizeof(path.trid.traddr); 692 len = strnlen(req.traddr, maxlen); 693 if (len == maxlen) { 694 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 695 req.traddr); 696 goto cleanup; 697 } 698 memcpy(path.trid.traddr, req.traddr, len + 1); 699 } 700 701 if (req.adrfam != NULL) { 702 rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam); 703 if (rc < 0) { 704 SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam); 705 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 706 req.adrfam); 707 goto cleanup; 708 } 709 } 710 711 if (req.trsvcid != NULL) { 712 maxlen = sizeof(path.trid.trsvcid); 713 len = strnlen(req.trsvcid, maxlen); 714 if (len == maxlen) { 715 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 716 req.trsvcid); 717 goto cleanup; 718 } 719 memcpy(path.trid.trsvcid, req.trsvcid, len + 1); 720 } 721 722 /* Parse subnqn */ 723 if (req.subnqn != NULL) { 724 maxlen = sizeof(path.trid.subnqn); 725 len = strnlen(req.subnqn, maxlen); 726 if (len == maxlen) { 727 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 728 req.subnqn); 729 goto cleanup; 730 } 731 memcpy(path.trid.subnqn, req.subnqn, len + 1); 732 } 733 734 if (req.hostaddr) { 735 maxlen = sizeof(path.hostid.hostaddr); 736 len = strnlen(req.hostaddr, maxlen); 737 if (len == maxlen) { 738 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too 
long: %s", 739 req.hostaddr); 740 goto cleanup; 741 } 742 snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr); 743 } 744 745 if (req.hostsvcid) { 746 maxlen = sizeof(path.hostid.hostsvcid); 747 len = strnlen(req.hostsvcid, maxlen); 748 if (len == maxlen) { 749 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 750 req.hostsvcid); 751 goto cleanup; 752 } 753 snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid); 754 } 755 756 rc = bdev_nvme_delete(req.name, &path); 757 758 if (rc != 0) { 759 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 760 goto cleanup; 761 } 762 763 spdk_jsonrpc_send_bool_response(request, true); 764 765 cleanup: 766 free_rpc_bdev_nvme_detach_controller(&req); 767 } 768 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller, 769 SPDK_RPC_RUNTIME) 770 771 struct rpc_apply_firmware { 772 char *filename; 773 char *bdev_name; 774 }; 775 776 static void 777 free_rpc_apply_firmware(struct rpc_apply_firmware *req) 778 { 779 free(req->filename); 780 free(req->bdev_name); 781 } 782 783 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = { 784 {"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string}, 785 {"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string}, 786 }; 787 788 struct firmware_update_info { 789 void *fw_image; 790 void *p; 791 unsigned int size; 792 unsigned int size_remaining; 793 unsigned int offset; 794 unsigned int transfer; 795 bool success; 796 797 struct spdk_bdev_desc *desc; 798 struct spdk_io_channel *ch; 799 struct spdk_thread *orig_thread; 800 struct spdk_jsonrpc_request *request; 801 struct spdk_nvme_ctrlr *ctrlr; 802 struct rpc_apply_firmware req; 803 }; 804 805 static void 806 apply_firmware_cleanup(struct firmware_update_info *firm_ctx) 807 { 808 assert(firm_ctx != NULL); 809 assert(firm_ctx->orig_thread == spdk_get_thread()); 810 811 if (firm_ctx->fw_image) { 812 spdk_free(firm_ctx->fw_image); 813 } 814 815 free_rpc_apply_firmware(&firm_ctx->req); 816 817 if (firm_ctx->ch) { 818 spdk_put_io_channel(firm_ctx->ch); 819 } 820 821 if (firm_ctx->desc) { 822 spdk_bdev_close(firm_ctx->desc); 823 } 824 825 free(firm_ctx); 826 } 827 828 static void 829 _apply_firmware_complete_reset(void *ctx) 830 { 831 struct spdk_json_write_ctx *w; 832 struct firmware_update_info *firm_ctx = ctx; 833 834 assert(firm_ctx->orig_thread == spdk_get_thread()); 835 836 if (!firm_ctx->success) { 837 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 838 "firmware commit failed."); 839 apply_firmware_cleanup(firm_ctx); 840 return; 841 } 842 843 if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) { 844 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 845 "Controller reset failed."); 846 apply_firmware_cleanup(firm_ctx); 847 return; 848 } 849 850 w = spdk_jsonrpc_begin_result(firm_ctx->request); 851 spdk_json_write_string(w, "firmware commit succeeded. 
Controller reset in progress."); 852 spdk_jsonrpc_end_result(firm_ctx->request, w); 853 apply_firmware_cleanup(firm_ctx); 854 } 855 856 static void 857 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 858 { 859 struct firmware_update_info *firm_ctx = cb_arg; 860 861 spdk_bdev_free_io(bdev_io); 862 863 firm_ctx->success = success; 864 865 spdk_thread_exec_msg(firm_ctx->orig_thread, _apply_firmware_complete_reset, firm_ctx); 866 } 867 868 static void apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); 869 870 static void 871 _apply_firmware_complete(void *ctx) 872 { 873 struct spdk_nvme_cmd cmd = {}; 874 struct spdk_nvme_fw_commit fw_commit; 875 int slot = 0; 876 int rc; 877 struct firmware_update_info *firm_ctx = ctx; 878 enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG; 879 880 assert(firm_ctx->orig_thread == spdk_get_thread()); 881 882 if (!firm_ctx->success) { 883 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 884 "firmware download failed ."); 885 apply_firmware_cleanup(firm_ctx); 886 return; 887 } 888 889 firm_ctx->p += firm_ctx->transfer; 890 firm_ctx->offset += firm_ctx->transfer; 891 firm_ctx->size_remaining -= firm_ctx->transfer; 892 893 switch (firm_ctx->size_remaining) { 894 case 0: 895 /* firmware download completed. Commit firmware */ 896 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 897 fw_commit.fs = slot; 898 fw_commit.ca = commit_action; 899 900 cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT; 901 memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t)); 902 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0, 903 apply_firmware_complete_reset, firm_ctx); 904 if (rc) { 905 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 906 "firmware commit failed."); 907 apply_firmware_cleanup(firm_ctx); 908 return; 909 } 910 break; 911 default: 912 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 913 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 914 915 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 916 cmd.cdw11 = firm_ctx->offset >> 2; 917 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 918 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 919 if (rc) { 920 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 921 "firmware download failed."); 922 apply_firmware_cleanup(firm_ctx); 923 return; 924 } 925 break; 926 } 927 } 928 929 static void 930 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 931 { 932 struct firmware_update_info *firm_ctx = cb_arg; 933 934 spdk_bdev_free_io(bdev_io); 935 936 firm_ctx->success = success; 937 938 spdk_thread_exec_msg(firm_ctx->orig_thread, _apply_firmware_complete, firm_ctx); 939 } 940 941 static void 942 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 943 { 944 } 945 946 static void 947 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request, 948 const struct spdk_json_val *params) 949 { 950 int rc; 951 int fd = -1; 952 struct stat fw_stat; 953 struct spdk_bdev *bdev; 954 struct spdk_nvme_cmd cmd = {}; 955 struct firmware_update_info *firm_ctx; 956 957 firm_ctx = calloc(1, sizeof(struct firmware_update_info)); 958 if (!firm_ctx) { 959 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 960 "Memory allocation error."); 961 
static void
rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request,
			     const struct spdk_json_val *params)
{
	int rc;
	int fd = -1;
	struct stat fw_stat;
	struct spdk_bdev *bdev;
	struct spdk_nvme_cmd cmd = {};
	struct firmware_update_info *firm_ctx;

	firm_ctx = calloc(1, sizeof(struct firmware_update_info));
	if (!firm_ctx) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error.");
		return;
	}
	firm_ctx->fw_image = NULL;
	firm_ctx->request = request;
	firm_ctx->orig_thread = spdk_get_thread();

	if (spdk_json_decode_object(params, rpc_apply_firmware_decoders,
				    SPDK_COUNTOF(rpc_apply_firmware_decoders), &firm_ctx->req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed.");
		goto err;
	}

	if (spdk_bdev_open_ext(firm_ctx->req.bdev_name, true, apply_firmware_open_cb, NULL,
			       &firm_ctx->desc) != 0) {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "bdev %s could not be opened",
						     firm_ctx->req.bdev_name);
		goto err;
	}
	bdev = spdk_bdev_desc_get_bdev(firm_ctx->desc);

	if ((firm_ctx->ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "Controller information for %s was not found.",
						     firm_ctx->req.bdev_name);
		goto err;
	}

	firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc);
	if (!firm_ctx->ch) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "No channels were found.");
		goto err;
	}

	fd = open(firm_ctx->req.filename, O_RDONLY);
	if (fd < 0) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "open file failed.");
		goto err;
	}

	rc = fstat(fd, &fw_stat);
	if (rc < 0) {
		close(fd);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "fstat failed.");
		goto err;
	}

	firm_ctx->size = fw_stat.st_size;
	if (fw_stat.st_size % 4) {
		close(fd);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Firmware image size is not multiple of 4.");
		goto err;
	}

	firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL,
					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!firm_ctx->fw_image) {
		close(fd);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error.");
		goto err;
	}
	firm_ctx->p = firm_ctx->fw_image;

	if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) {
		close(fd);
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Read firmware image failed!");
		goto err;
	}
	close(fd);

	firm_ctx->offset = 0;
	firm_ctx->size_remaining = firm_ctx->size;
	firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);

	cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
	cmd.cdw11 = firm_ctx->offset >> 2;

	rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p,
					   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
	if (rc == 0) {
		/* normal return here. */
		return;
	}

	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
					 "firmware download failed.");
err:
	apply_firmware_cleanup(firm_ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_transport_stat_ctx {
	struct spdk_jsonrpc_request *request;
	struct spdk_json_write_ctx *w;
};

static void
rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w,
			 struct spdk_nvme_transport_poll_group_stat *stat)
{
	struct spdk_nvme_rdma_device_stat *device_stats;
	uint32_t i;

	spdk_json_write_named_array_begin(w, "devices");

	for (i = 0; i < stat->rdma.num_devices; i++) {
		device_stats = &stat->rdma.device_stats[i];
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "dev_name", device_stats->name);
		spdk_json_write_named_uint64(w, "polls", device_stats->polls);
		spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls);
		spdk_json_write_named_uint64(w, "completions", device_stats->completions);
		spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests);
		spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs);
		spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates);
		spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs);
		spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
}

static void
rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w,
			 struct spdk_nvme_transport_poll_group_stat *stat)
{
	spdk_json_write_named_uint64(w, "polls", stat->pcie.polls);
	spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls);
	spdk_json_write_named_uint64(w, "completions", stat->pcie.completions);
	spdk_json_write_named_uint64(w, "cq_mmio_doorbell_updates", stat->pcie.cq_mmio_doorbell_updates);
	spdk_json_write_named_uint64(w, "cq_shadow_doorbell_updates",
				     stat->pcie.cq_shadow_doorbell_updates);
	spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests);
	spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests);
	spdk_json_write_named_uint64(w, "sq_mmio_doorbell_updates", stat->pcie.sq_mmio_doorbell_updates);
	spdk_json_write_named_uint64(w, "sq_shadow_doorbell_updates",
				     stat->pcie.sq_shadow_doorbell_updates);
}

static void
rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w,
			struct spdk_nvme_transport_poll_group_stat *stat)
{
	spdk_json_write_named_uint64(w, "polls", stat->tcp.polls);
	spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls);
	spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions);
	spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions);
	spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests);
	spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests);
}

static void
rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx;
	struct spdk_io_channel *ch;
	struct nvme_poll_group *group;
	struct spdk_nvme_poll_group_stat *stat;
	struct spdk_nvme_transport_poll_group_stat *tr_stat;
	uint32_t j;
	int rc;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	rc = spdk_nvme_poll_group_get_stats(group->group, &stat);
	if (rc) {
		spdk_for_each_channel_continue(i, rc);
		return;
	}

	spdk_json_write_object_begin(ctx->w);
	spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread()));
	spdk_json_write_named_array_begin(ctx->w, "transports");

	for (j = 0; j < stat->num_transports; j++) {
		tr_stat = stat->transport_stat[j];
		spdk_json_write_object_begin(ctx->w);
		spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype));

		switch (stat->transport_stat[j]->trtype) {
		case SPDK_NVME_TRANSPORT_RDMA:
			rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat);
			break;
		case SPDK_NVME_TRANSPORT_PCIE:
		case SPDK_NVME_TRANSPORT_VFIOUSER:
			rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat);
			break;
		case SPDK_NVME_TRANSPORT_TCP:
			rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat);
			break;
		default:
			SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype,
				     spdk_nvme_transport_id_trtype_str(tr_stat->trtype));
		}
		spdk_json_write_object_end(ctx->w);
	}
	/* transports array */
	spdk_json_write_array_end(ctx->w);
	spdk_json_write_object_end(ctx->w);

	spdk_nvme_poll_group_free_stats(group->group, stat);
	spdk_for_each_channel_continue(i, 0);
}

static void
rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	spdk_json_write_array_end(ctx->w);
	spdk_json_write_object_end(ctx->w);
	spdk_jsonrpc_end_result(ctx->request, ctx->w);
	free(ctx);
}

static void
rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request,
				       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_transport_stat_ctx *ctx;

	if (params) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "'bdev_nvme_get_transport_statistics' requires no arguments");
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation error");
		return;
	}
	ctx->request = request;
	ctx->w = spdk_jsonrpc_begin_result(ctx->request);
	spdk_json_write_object_begin(ctx->w);
	spdk_json_write_named_array_begin(ctx->w, "poll_groups");

	spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
			      rpc_bdev_nvme_stats_per_channel,
			      ctx,
			      rpc_bdev_nvme_stats_done);
}
SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics,
		  SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_reset_controller_req {
	char *name;
};

static void
free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string},
};
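
/*
 * Illustrative only (not from the original source): bdev_nvme_reset_controller
 * takes just the controller name and completes asynchronously; the result is
 * reported back on the original thread via spdk_thread_send_msg() in the
 * callbacks below. Example request (values are assumptions):
 *
 *   {"jsonrpc": "2.0", "id": 4, "method": "bdev_nvme_reset_controller",
 *    "params": {"name": "Nvme0"}}
 */
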
struct rpc_bdev_nvme_reset_controller_ctx {
	struct spdk_jsonrpc_request *request;
	bool success;
	struct spdk_thread *orig_thread;
};

static void
_rpc_bdev_nvme_reset_controller_cb(void *_ctx)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx;

	if (ctx->success) {
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	} else {
		spdk_jsonrpc_send_error_response(ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Controller reset failed");
	}

	free(ctx);
}

static void
rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg;

	ctx->success = success;

	spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx);
}

static void
rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request,
			       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_reset_controller_req req = {NULL};
	struct rpc_bdev_nvme_reset_controller_ctx *ctx;
	struct nvme_ctrlr *nvme_ctrlr;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation failed");
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders),
				    &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL));
		goto err;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("Failed at device lookup\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto err;
	}

	ctx->request = request;
	ctx->orig_thread = spdk_get_thread();

	rc = bdev_nvme_reset_rpc(nvme_ctrlr, rpc_bdev_nvme_reset_controller_cb, ctx);
	if (rc != 0) {
		SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc));
		goto err;
	}

	free_rpc_bdev_nvme_reset_controller_req(&req);
	return;

err:
	free_rpc_bdev_nvme_reset_controller_req(&req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME)

struct rpc_get_controller_health_info {
	char *name;
};

struct spdk_nvme_health_info_context {
	struct spdk_jsonrpc_request *request;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_health_information_page health_page;
};

static void
free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = {
	{"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true},
};

static void
nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response)
{
	if (response == true) {
		spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Internal error.");
	}

	free(context);
}

static void
get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	int i;
	char buf[128];
	struct spdk_nvme_health_info_context *context = cb_arg;
	struct spdk_jsonrpc_request *request = context->request;
	struct spdk_json_write_ctx *w;
	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;
	const struct spdk_nvme_transport_id *trid = NULL;
	const struct spdk_nvme_ctrlr_data *cdata = NULL;
	struct spdk_nvme_health_information_page *health_page = NULL;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("get log page failed\n");
		return;
	}

	if (ctrlr == NULL) {
		nvme_health_info_cleanup(context, true);
		SPDK_ERRLOG("ctrlr is NULL\n");
		return;
	} else {
		trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);
		cdata = spdk_nvme_ctrlr_get_data(ctrlr);
		health_page = &(context->health_page);
	}

	w = spdk_jsonrpc_begin_result(request);

	spdk_json_write_object_begin(w);
	snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "model_number", buf);
	snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "serial_number", buf);
	snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "firmware_revision", buf);
	spdk_json_write_named_string(w, "traddr", trid->traddr);
	spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273);
	spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare);
	spdk_json_write_named_uint64(w, "available_spare_threshold_percentage",
				     health_page->available_spare_threshold);
	spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used);
	spdk_json_write_named_uint128(w, "data_units_read",
				      health_page->data_units_read[0], health_page->data_units_read[1]);
	spdk_json_write_named_uint128(w, "data_units_written",
				      health_page->data_units_written[0], health_page->data_units_written[1]);
	spdk_json_write_named_uint128(w, "host_read_commands",
				      health_page->host_read_commands[0], health_page->host_read_commands[1]);
	spdk_json_write_named_uint128(w, "host_write_commands",
				      health_page->host_write_commands[0], health_page->host_write_commands[1]);
	spdk_json_write_named_uint128(w, "controller_busy_time",
				      health_page->controller_busy_time[0], health_page->controller_busy_time[1]);
	spdk_json_write_named_uint128(w, "power_cycles",
				      health_page->power_cycles[0], health_page->power_cycles[1]);
	spdk_json_write_named_uint128(w, "power_on_hours",
				      health_page->power_on_hours[0], health_page->power_on_hours[1]);
	spdk_json_write_named_uint128(w, "unsafe_shutdowns",
				      health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]);
	spdk_json_write_named_uint128(w, "media_errors",
				      health_page->media_errors[0], health_page->media_errors[1]);
	spdk_json_write_named_uint128(w, "num_err_log_entries",
				      health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]);
	spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time);
	spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes",
				     health_page->critical_temp_time);
	for (i = 0; i < 8; i++) {
		if (health_page->temp_sensor[i] != 0) {
spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273); 1417 } 1418 } 1419 spdk_json_write_object_end(w); 1420 1421 spdk_jsonrpc_end_result(request, w); 1422 nvme_health_info_cleanup(context, false); 1423 } 1424 1425 static void 1426 get_health_log_page(struct spdk_nvme_health_info_context *context) 1427 { 1428 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1429 1430 if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, 1431 SPDK_NVME_GLOBAL_NS_TAG, 1432 &(context->health_page), sizeof(context->health_page), 0, 1433 get_health_log_page_completion, context)) { 1434 nvme_health_info_cleanup(context, true); 1435 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n"); 1436 } 1437 } 1438 1439 static void 1440 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1441 { 1442 struct spdk_nvme_health_info_context *context = cb_arg; 1443 1444 if (spdk_nvme_cpl_is_error(cpl)) { 1445 nvme_health_info_cleanup(context, true); 1446 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n"); 1447 } else { 1448 get_health_log_page(context); 1449 } 1450 } 1451 1452 static int 1453 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context) 1454 { 1455 struct spdk_nvme_cmd cmd = {}; 1456 1457 cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1458 cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1459 1460 return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0, 1461 get_temperature_threshold_feature_completion, context); 1462 } 1463 1464 static void 1465 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr) 1466 { 1467 struct spdk_nvme_health_info_context *context; 1468 1469 context = calloc(1, sizeof(struct spdk_nvme_health_info_context)); 1470 if (!context) { 1471 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1472 "Memory allocation error."); 1473 return; 1474 } 1475 1476 context->request = request; 1477 context->ctrlr = ctrlr; 1478 1479 if (get_temperature_threshold_feature(context)) { 1480 nvme_health_info_cleanup(context, true); 1481 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n"); 1482 } 1483 1484 return; 1485 } 1486 1487 static void 1488 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request, 1489 const struct spdk_json_val *params) 1490 { 1491 struct rpc_get_controller_health_info req = {}; 1492 struct nvme_ctrlr *nvme_ctrlr = NULL; 1493 1494 if (!params) { 1495 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1496 "Missing device name"); 1497 1498 return; 1499 } 1500 if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders, 1501 SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) { 1502 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1503 free_rpc_get_controller_health_info(&req); 1504 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1505 "Invalid parameters"); 1506 1507 return; 1508 } 1509 1510 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1511 1512 if (!nvme_ctrlr) { 1513 SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name); 1514 free_rpc_get_controller_health_info(&req); 1515 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1516 "Device not found"); 1517 return; 1518 } 1519 1520 get_controller_health_info(request, nvme_ctrlr->ctrlr); 1521 free_rpc_get_controller_health_info(&req); 1522 1523 return; 
}
SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info",
		  rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME)

struct rpc_bdev_nvme_start_discovery {
	char *name;
	char *trtype;
	char *adrfam;
	char *traddr;
	char *trsvcid;
	char *hostnqn;
	bool wait_for_attach;
	uint64_t attach_timeout_ms;
	struct spdk_nvme_ctrlr_opts opts;
	struct nvme_ctrlr_opts bdev_opts;
};

static void
free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req)
{
	free(req->name);
	free(req->trtype);
	free(req->adrfam);
	free(req->traddr);
	free(req->trsvcid);
	free(req->hostnqn);
}

static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = {
	{"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string},
	{"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string},
	{"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string},
	{"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true},
	{"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true},
	{"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true},
	{"wait_for_attach", offsetof(struct rpc_bdev_nvme_start_discovery, wait_for_attach), spdk_json_decode_bool, true},
	{"attach_timeout_ms", offsetof(struct rpc_bdev_nvme_start_discovery, attach_timeout_ms), spdk_json_decode_uint64, true},
	{"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true},
	{"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true},
	{"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true},
};

struct rpc_bdev_nvme_start_discovery_ctx {
	struct rpc_bdev_nvme_start_discovery req;
	struct spdk_jsonrpc_request *request;
};

static void
rpc_bdev_nvme_start_discovery_done(void *ctx, int status)
{
	struct spdk_jsonrpc_request *request = ctx;

	if (status != 0) {
		spdk_jsonrpc_send_error_response(request, status, spdk_strerror(-status));
	} else {
		spdk_jsonrpc_send_bool_response(request, true);
	}
}

static void
rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request,
			      const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_start_discovery_ctx *ctx;
	struct spdk_nvme_transport_id trid = {};
	size_t len, maxlen;
	int rc;
	spdk_bdev_nvme_start_discovery_fn cb_fn;
	void *cb_ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts));

	if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders),
				    &ctx->req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}
Parse trstring */ 1612 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 1613 if (rc < 0) { 1614 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 1615 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 1616 ctx->req.trtype); 1617 goto cleanup; 1618 } 1619 1620 /* Parse trtype */ 1621 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 1622 assert(rc == 0); 1623 1624 /* Parse traddr */ 1625 maxlen = sizeof(trid.traddr); 1626 len = strnlen(ctx->req.traddr, maxlen); 1627 if (len == maxlen) { 1628 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 1629 ctx->req.traddr); 1630 goto cleanup; 1631 } 1632 memcpy(trid.traddr, ctx->req.traddr, len + 1); 1633 1634 /* Parse adrfam */ 1635 if (ctx->req.adrfam) { 1636 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 1637 if (rc < 0) { 1638 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 1639 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 1640 ctx->req.adrfam); 1641 goto cleanup; 1642 } 1643 } 1644 1645 /* Parse trsvcid */ 1646 if (ctx->req.trsvcid) { 1647 maxlen = sizeof(trid.trsvcid); 1648 len = strnlen(ctx->req.trsvcid, maxlen); 1649 if (len == maxlen) { 1650 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 1651 ctx->req.trsvcid); 1652 goto cleanup; 1653 } 1654 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 1655 } 1656 1657 if (ctx->req.hostnqn) { 1658 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 1659 ctx->req.hostnqn); 1660 } 1661 1662 if (ctx->req.attach_timeout_ms != 0) { 1663 ctx->req.wait_for_attach = true; 1664 } 1665 1666 ctx->request = request; 1667 cb_fn = ctx->req.wait_for_attach ? rpc_bdev_nvme_start_discovery_done : NULL; 1668 cb_ctx = ctx->req.wait_for_attach ? 
request : NULL; 1669 rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts, &ctx->req.bdev_opts, 1670 ctx->req.attach_timeout_ms, false, cb_fn, cb_ctx); 1671 if (rc) { 1672 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1673 } else if (!ctx->req.wait_for_attach) { 1674 rpc_bdev_nvme_start_discovery_done(request, 0); 1675 } 1676 1677 cleanup: 1678 free_rpc_bdev_nvme_start_discovery(&ctx->req); 1679 free(ctx); 1680 } 1681 SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery, 1682 SPDK_RPC_RUNTIME) 1683 1684 struct rpc_bdev_nvme_stop_discovery { 1685 char *name; 1686 }; 1687 1688 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = { 1689 {"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string}, 1690 }; 1691 1692 struct rpc_bdev_nvme_stop_discovery_ctx { 1693 struct rpc_bdev_nvme_stop_discovery req; 1694 struct spdk_jsonrpc_request *request; 1695 }; 1696 1697 static void 1698 rpc_bdev_nvme_stop_discovery_done(void *cb_ctx) 1699 { 1700 struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx; 1701 1702 spdk_jsonrpc_send_bool_response(ctx->request, true); 1703 free(ctx->req.name); 1704 free(ctx); 1705 } 1706 1707 static void 1708 rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request, 1709 const struct spdk_json_val *params) 1710 { 1711 struct rpc_bdev_nvme_stop_discovery_ctx *ctx; 1712 int rc; 1713 1714 ctx = calloc(1, sizeof(*ctx)); 1715 if (!ctx) { 1716 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1717 return; 1718 } 1719 1720 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders, 1721 SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders), 1722 &ctx->req)) { 1723 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1724 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1725 "spdk_json_decode_object failed"); 1726 goto cleanup; 1727 } 1728 1729 ctx->request = request; 1730 rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx); 1731 if (rc) { 1732 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1733 goto cleanup; 1734 } 1735 1736 return; 1737 1738 cleanup: 1739 free(ctx->req.name); 1740 free(ctx); 1741 } 1742 SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery, 1743 SPDK_RPC_RUNTIME) 1744 1745 static void 1746 rpc_bdev_nvme_get_discovery_info(struct spdk_jsonrpc_request *request, 1747 const struct spdk_json_val *params) 1748 { 1749 struct spdk_json_write_ctx *w; 1750 1751 w = spdk_jsonrpc_begin_result(request); 1752 bdev_nvme_get_discovery_info(w); 1753 spdk_jsonrpc_end_result(request, w); 1754 } 1755 SPDK_RPC_REGISTER("bdev_nvme_get_discovery_info", rpc_bdev_nvme_get_discovery_info, 1756 SPDK_RPC_RUNTIME) 1757 1758 enum error_injection_cmd_type { 1759 NVME_ADMIN_CMD = 1, 1760 NVME_IO_CMD, 1761 }; 1762 1763 struct rpc_add_error_injection { 1764 char *name; 1765 enum error_injection_cmd_type cmd_type; 1766 uint8_t opc; 1767 bool do_not_submit; 1768 uint64_t timeout_in_us; 1769 uint32_t err_count; 1770 uint8_t sct; 1771 uint8_t sc; 1772 }; 1773 1774 static void 1775 free_rpc_add_error_injection(struct rpc_add_error_injection *req) 1776 { 1777 free(req->name); 1778 } 1779 1780 static int 1781 rpc_error_injection_decode_cmd_type(const struct spdk_json_val *val, void *out) 1782 { 1783 int *cmd_type = out; 1784 1785 if (spdk_json_strequal(val, "admin")) { 1786 *cmd_type = NVME_ADMIN_CMD; 1787 } else if (spdk_json_strequal(val, 
"io")) { 1788 *cmd_type = NVME_IO_CMD; 1789 } else { 1790 SPDK_ERRLOG("Invalid parameter value: cmd_type\n"); 1791 return -EINVAL; 1792 } 1793 1794 return 0; 1795 } 1796 1797 static const struct spdk_json_object_decoder rpc_add_error_injection_decoders[] = { 1798 { "name", offsetof(struct rpc_add_error_injection, name), spdk_json_decode_string }, 1799 { "cmd_type", offsetof(struct rpc_add_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1800 { "opc", offsetof(struct rpc_add_error_injection, opc), spdk_json_decode_uint8 }, 1801 { "do_not_submit", offsetof(struct rpc_add_error_injection, do_not_submit), spdk_json_decode_bool, true }, 1802 { "timeout_in_us", offsetof(struct rpc_add_error_injection, timeout_in_us), spdk_json_decode_uint64, true }, 1803 { "err_count", offsetof(struct rpc_add_error_injection, err_count), spdk_json_decode_uint32, true }, 1804 { "sct", offsetof(struct rpc_add_error_injection, sct), spdk_json_decode_uint8, true}, 1805 { "sc", offsetof(struct rpc_add_error_injection, sc), spdk_json_decode_uint8, true}, 1806 }; 1807 1808 struct rpc_add_error_injection_ctx { 1809 struct spdk_jsonrpc_request *request; 1810 struct rpc_add_error_injection rpc; 1811 }; 1812 1813 static void 1814 rpc_add_error_injection_done(struct spdk_io_channel_iter *i, int status) 1815 { 1816 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1817 1818 if (status) { 1819 spdk_jsonrpc_send_error_response(ctx->request, status, 1820 "Failed to add the error injection."); 1821 } else { 1822 spdk_jsonrpc_send_bool_response(ctx->request, true); 1823 } 1824 1825 free_rpc_add_error_injection(&ctx->rpc); 1826 free(ctx); 1827 } 1828 1829 static void 1830 rpc_add_error_injection_per_channel(struct spdk_io_channel_iter *i) 1831 { 1832 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1833 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1834 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1835 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1836 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1837 int rc = 0; 1838 1839 if (qpair != NULL) { 1840 rc = spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc, 1841 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1842 ctx->rpc.sct, ctx->rpc.sc); 1843 } 1844 1845 spdk_for_each_channel_continue(i, rc); 1846 } 1847 1848 static void 1849 rpc_bdev_nvme_add_error_injection( 1850 struct spdk_jsonrpc_request *request, 1851 const struct spdk_json_val *params) 1852 { 1853 struct rpc_add_error_injection_ctx *ctx; 1854 struct nvme_ctrlr *nvme_ctrlr; 1855 int rc; 1856 1857 ctx = calloc(1, sizeof(*ctx)); 1858 if (!ctx) { 1859 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1860 return; 1861 } 1862 ctx->rpc.err_count = 1; 1863 ctx->request = request; 1864 1865 if (spdk_json_decode_object(params, 1866 rpc_add_error_injection_decoders, 1867 SPDK_COUNTOF(rpc_add_error_injection_decoders), 1868 &ctx->rpc)) { 1869 spdk_jsonrpc_send_error_response(request, -EINVAL, 1870 "Failed to parse the request"); 1871 goto cleanup; 1872 } 1873 1874 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 1875 if (nvme_ctrlr == NULL) { 1876 SPDK_ERRLOG("No controller with specified name was found.\n"); 1877 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1878 goto cleanup; 1879 } 1880 1881 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 1882 spdk_for_each_channel(nvme_ctrlr, 1883 
rpc_add_error_injection_per_channel, 1884 ctx, 1885 rpc_add_error_injection_done); 1886 1887 return; 1888 } else { 1889 rc = spdk_nvme_qpair_add_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc, 1890 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1891 ctx->rpc.sct, ctx->rpc.sc); 1892 if (rc) { 1893 spdk_jsonrpc_send_error_response(request, -rc, 1894 "Failed to add the error injection"); 1895 } else { 1896 spdk_jsonrpc_send_bool_response(ctx->request, true); 1897 } 1898 } 1899 1900 cleanup: 1901 free_rpc_add_error_injection(&ctx->rpc); 1902 free(ctx); 1903 } 1904 SPDK_RPC_REGISTER("bdev_nvme_add_error_injection", rpc_bdev_nvme_add_error_injection, 1905 SPDK_RPC_RUNTIME) 1906 1907 struct rpc_remove_error_injection { 1908 char *name; 1909 enum error_injection_cmd_type cmd_type; 1910 uint8_t opc; 1911 }; 1912 1913 static void 1914 free_rpc_remove_error_injection(struct rpc_remove_error_injection *req) 1915 { 1916 free(req->name); 1917 } 1918 1919 static const struct spdk_json_object_decoder rpc_remove_error_injection_decoders[] = { 1920 { "name", offsetof(struct rpc_remove_error_injection, name), spdk_json_decode_string }, 1921 { "cmd_type", offsetof(struct rpc_remove_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1922 { "opc", offsetof(struct rpc_remove_error_injection, opc), spdk_json_decode_uint8 }, 1923 }; 1924 1925 struct rpc_remove_error_injection_ctx { 1926 struct spdk_jsonrpc_request *request; 1927 struct rpc_remove_error_injection rpc; 1928 }; 1929 1930 static void 1931 rpc_remove_error_injection_done(struct spdk_io_channel_iter *i, int status) 1932 { 1933 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1934 1935 if (status) { 1936 spdk_jsonrpc_send_error_response(ctx->request, status, 1937 "Failed to remove the error injection."); 1938 } else { 1939 spdk_jsonrpc_send_bool_response(ctx->request, true); 1940 } 1941 1942 free_rpc_remove_error_injection(&ctx->rpc); 1943 free(ctx); 1944 } 1945 1946 static void 1947 rpc_remove_error_injection_per_channel(struct spdk_io_channel_iter *i) 1948 { 1949 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1950 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1951 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1952 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1953 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1954 1955 if (qpair != NULL) { 1956 spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc); 1957 } 1958 1959 spdk_for_each_channel_continue(i, 0); 1960 } 1961 1962 static void 1963 rpc_bdev_nvme_remove_error_injection(struct spdk_jsonrpc_request *request, 1964 const struct spdk_json_val *params) 1965 { 1966 struct rpc_remove_error_injection_ctx *ctx; 1967 struct nvme_ctrlr *nvme_ctrlr; 1968 1969 ctx = calloc(1, sizeof(*ctx)); 1970 if (!ctx) { 1971 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1972 return; 1973 } 1974 ctx->request = request; 1975 1976 if (spdk_json_decode_object(params, 1977 rpc_remove_error_injection_decoders, 1978 SPDK_COUNTOF(rpc_remove_error_injection_decoders), 1979 &ctx->rpc)) { 1980 spdk_jsonrpc_send_error_response(request, -EINVAL, 1981 "Failed to parse the request"); 1982 goto cleanup; 1983 } 1984 1985 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 1986 if (nvme_ctrlr == NULL) { 1987 SPDK_ERRLOG("No controller with specified name was found.\n"); 1988 spdk_jsonrpc_send_error_response(request, -ENODEV, 
spdk_strerror(ENODEV)); 1989 goto cleanup; 1990 } 1991 1992 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 1993 spdk_for_each_channel(nvme_ctrlr, 1994 rpc_remove_error_injection_per_channel, 1995 ctx, 1996 rpc_remove_error_injection_done); 1997 return; 1998 } else { 1999 spdk_nvme_qpair_remove_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc); 2000 spdk_jsonrpc_send_bool_response(ctx->request, true); 2001 } 2002 2003 cleanup: 2004 free_rpc_remove_error_injection(&ctx->rpc); 2005 free(ctx); 2006 } 2007 SPDK_RPC_REGISTER("bdev_nvme_remove_error_injection", rpc_bdev_nvme_remove_error_injection, 2008 SPDK_RPC_RUNTIME) 2009 2010 struct rpc_get_io_paths { 2011 char *name; 2012 }; 2013 2014 static void 2015 free_rpc_get_io_paths(struct rpc_get_io_paths *r) 2016 { 2017 free(r->name); 2018 } 2019 2020 static const struct spdk_json_object_decoder rpc_get_io_paths_decoders[] = { 2021 {"name", offsetof(struct rpc_get_io_paths, name), spdk_json_decode_string, true}, 2022 }; 2023 2024 struct rpc_get_io_paths_ctx { 2025 struct rpc_get_io_paths req; 2026 struct spdk_jsonrpc_request *request; 2027 struct spdk_json_write_ctx *w; 2028 }; 2029 2030 static void 2031 rpc_bdev_nvme_get_io_paths_done(struct spdk_io_channel_iter *i, int status) 2032 { 2033 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2034 2035 spdk_json_write_array_end(ctx->w); 2036 2037 spdk_json_write_object_end(ctx->w); 2038 2039 spdk_jsonrpc_end_result(ctx->request, ctx->w); 2040 2041 free_rpc_get_io_paths(&ctx->req); 2042 free(ctx); 2043 } 2044 2045 static void 2046 _rpc_bdev_nvme_get_io_paths(struct spdk_io_channel_iter *i) 2047 { 2048 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 2049 struct nvme_poll_group *group = spdk_io_channel_get_ctx(_ch); 2050 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2051 struct nvme_qpair *qpair; 2052 struct nvme_io_path *io_path; 2053 struct nvme_bdev *nbdev; 2054 2055 spdk_json_write_object_begin(ctx->w); 2056 2057 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 2058 2059 spdk_json_write_named_array_begin(ctx->w, "io_paths"); 2060 2061 TAILQ_FOREACH(qpair, &group->qpair_list, tailq) { 2062 TAILQ_FOREACH(io_path, &qpair->io_path_list, tailq) { 2063 nbdev = io_path->nvme_ns->bdev; 2064 2065 if (ctx->req.name != NULL && 2066 strcmp(ctx->req.name, nbdev->disk.name) != 0) { 2067 continue; 2068 } 2069 2070 nvme_io_path_info_json(ctx->w, io_path); 2071 } 2072 } 2073 2074 spdk_json_write_array_end(ctx->w); 2075 2076 spdk_json_write_object_end(ctx->w); 2077 2078 spdk_for_each_channel_continue(i, 0); 2079 } 2080 2081 static void 2082 rpc_bdev_nvme_get_io_paths(struct spdk_jsonrpc_request *request, 2083 const struct spdk_json_val *params) 2084 { 2085 struct rpc_get_io_paths_ctx *ctx; 2086 2087 ctx = calloc(1, sizeof(*ctx)); 2088 if (ctx == NULL) { 2089 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2090 return; 2091 } 2092 2093 if (params != NULL && 2094 spdk_json_decode_object(params, rpc_get_io_paths_decoders, 2095 SPDK_COUNTOF(rpc_get_io_paths_decoders), 2096 &ctx->req)) { 2097 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 2098 "bdev_nvme_get_io_paths accepts only an optional name parameter"); 2099 2100 free_rpc_get_io_paths(&ctx->req); 2101 free(ctx); 2102 return; 2103 } 2104 2105 ctx->request = request; 2106 ctx->w = spdk_jsonrpc_begin_result(request); 2107 2108 spdk_json_write_object_begin(ctx->w); 2109 2110 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 
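/* Iterate over every nvme poll group channel: each channel appends one object (its thread name and io_paths) to the "poll_groups" array, and rpc_bdev_nvme_get_io_paths_done() closes the array and completes the request. */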
2111 2112 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 2113 _rpc_bdev_nvme_get_io_paths, 2114 ctx, 2115 rpc_bdev_nvme_get_io_paths_done); 2116 } 2117 SPDK_RPC_REGISTER("bdev_nvme_get_io_paths", rpc_bdev_nvme_get_io_paths, SPDK_RPC_RUNTIME) 2118 2119 struct rpc_bdev_nvme_set_preferred_path { 2120 char *name; 2121 uint16_t cntlid; 2122 }; 2123 2124 static void 2125 free_rpc_bdev_nvme_set_preferred_path(struct rpc_bdev_nvme_set_preferred_path *req) 2126 { 2127 free(req->name); 2128 } 2129 2130 static const struct spdk_json_object_decoder rpc_bdev_nvme_set_preferred_path_decoders[] = { 2131 {"name", offsetof(struct rpc_bdev_nvme_set_preferred_path, name), spdk_json_decode_string}, 2132 {"cntlid", offsetof(struct rpc_bdev_nvme_set_preferred_path, cntlid), spdk_json_decode_uint16}, 2133 }; 2134 2135 struct rpc_bdev_nvme_set_preferred_path_ctx { 2136 struct rpc_bdev_nvme_set_preferred_path req; 2137 struct spdk_jsonrpc_request *request; 2138 }; 2139 2140 static void 2141 rpc_bdev_nvme_set_preferred_path_done(void *cb_arg, int rc) 2142 { 2143 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx = cb_arg; 2144 2145 if (rc == 0) { 2146 spdk_jsonrpc_send_bool_response(ctx->request, true); 2147 } else { 2148 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2149 } 2150 2151 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2152 free(ctx); 2153 } 2154 2155 static void 2156 rpc_bdev_nvme_set_preferred_path(struct spdk_jsonrpc_request *request, 2157 const struct spdk_json_val *params) 2158 { 2159 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx; 2160 2161 ctx = calloc(1, sizeof(*ctx)); 2162 if (ctx == NULL) { 2163 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2164 return; 2165 } 2166 2167 if (spdk_json_decode_object(params, rpc_bdev_nvme_set_preferred_path_decoders, 2168 SPDK_COUNTOF(rpc_bdev_nvme_set_preferred_path_decoders), 2169 &ctx->req)) { 2170 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2171 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2172 "spdk_json_decode_object failed"); 2173 goto cleanup; 2174 } 2175 2176 ctx->request = request; 2177 2178 bdev_nvme_set_preferred_path(ctx->req.name, ctx->req.cntlid, 2179 rpc_bdev_nvme_set_preferred_path_done, ctx); 2180 return; 2181 2182 cleanup: 2183 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2184 free(ctx); 2185 } 2186 SPDK_RPC_REGISTER("bdev_nvme_set_preferred_path", rpc_bdev_nvme_set_preferred_path, 2187 SPDK_RPC_RUNTIME) 2188 2189 struct rpc_set_multipath_policy { 2190 char *name; 2191 enum bdev_nvme_multipath_policy policy; 2192 enum bdev_nvme_multipath_selector selector; 2193 uint32_t rr_min_io; 2194 }; 2195 2196 static void 2197 free_rpc_set_multipath_policy(struct rpc_set_multipath_policy *req) 2198 { 2199 free(req->name); 2200 } 2201 2202 static int 2203 rpc_decode_mp_policy(const struct spdk_json_val *val, void *out) 2204 { 2205 enum bdev_nvme_multipath_policy *policy = out; 2206 2207 if (spdk_json_strequal(val, "active_passive") == true) { 2208 *policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 2209 } else if (spdk_json_strequal(val, "active_active") == true) { 2210 *policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 2211 } else { 2212 SPDK_NOTICELOG("Invalid parameter value: policy\n"); 2213 return -EINVAL; 2214 } 2215 2216 return 0; 2217 } 2218 2219 static int 2220 rpc_decode_mp_selector(const struct spdk_json_val *val, void *out) 2221 { 2222 enum bdev_nvme_multipath_selector *selector = out; 2223 2224 if (spdk_json_strequal(val, "round_robin") == true) { 2225 *selector 
= BDEV_NVME_MP_SELECTOR_ROUND_ROBIN; 2226 } else if (spdk_json_strequal(val, "queue_depth") == true) { 2227 *selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH; 2228 } else { 2229 SPDK_NOTICELOG("Invalid parameter value: selector\n"); 2230 return -EINVAL; 2231 } 2232 2233 return 0; 2234 } 2235 2236 static const struct spdk_json_object_decoder rpc_set_multipath_policy_decoders[] = { 2237 {"name", offsetof(struct rpc_set_multipath_policy, name), spdk_json_decode_string}, 2238 {"policy", offsetof(struct rpc_set_multipath_policy, policy), rpc_decode_mp_policy}, 2239 {"selector", offsetof(struct rpc_set_multipath_policy, selector), rpc_decode_mp_selector, true}, 2240 {"rr_min_io", offsetof(struct rpc_set_multipath_policy, rr_min_io), spdk_json_decode_uint32, true}, 2241 }; 2242 2243 struct rpc_set_multipath_policy_ctx { 2244 struct rpc_set_multipath_policy req; 2245 struct spdk_jsonrpc_request *request; 2246 }; 2247 2248 static void 2249 rpc_bdev_nvme_set_multipath_policy_done(void *cb_arg, int rc) 2250 { 2251 struct rpc_set_multipath_policy_ctx *ctx = cb_arg; 2252 2253 if (rc == 0) { 2254 spdk_jsonrpc_send_bool_response(ctx->request, true); 2255 } else { 2256 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2257 } 2258 2259 free_rpc_set_multipath_policy(&ctx->req); 2260 free(ctx); 2261 } 2262 2263 static void 2264 rpc_bdev_nvme_set_multipath_policy(struct spdk_jsonrpc_request *request, 2265 const struct spdk_json_val *params) 2266 { 2267 struct rpc_set_multipath_policy_ctx *ctx; 2268 2269 ctx = calloc(1, sizeof(*ctx)); 2270 if (ctx == NULL) { 2271 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2272 return; 2273 } 2274 2275 ctx->req.rr_min_io = UINT32_MAX; 2276 2277 if (spdk_json_decode_object(params, rpc_set_multipath_policy_decoders, 2278 SPDK_COUNTOF(rpc_set_multipath_policy_decoders), 2279 &ctx->req)) { 2280 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2281 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2282 "spdk_json_decode_object failed"); 2283 goto cleanup; 2284 } 2285 2286 ctx->request = request; 2287 2288 if (ctx->req.policy != BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE && ctx->req.selector > 0) { 2289 SPDK_ERRLOG("selector only works in active_active mode\n"); 2290 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 2291 "selector only works in active_active mode"); 2292 goto cleanup; 2293 } 2294 2295 bdev_nvme_set_multipath_policy(ctx->req.name, ctx->req.policy, ctx->req.selector, 2296 ctx->req.rr_min_io, 2297 rpc_bdev_nvme_set_multipath_policy_done, ctx); 2298 return; 2299 2300 cleanup: 2301 free_rpc_set_multipath_policy(&ctx->req); 2302 free(ctx); 2303 } 2304 SPDK_RPC_REGISTER("bdev_nvme_set_multipath_policy", rpc_bdev_nvme_set_multipath_policy, 2305 SPDK_RPC_RUNTIME) 2306 2307 struct rpc_bdev_nvme_start_mdns_discovery { 2308 char *name; 2309 char *svcname; 2310 char *hostnqn; 2311 struct spdk_nvme_ctrlr_opts opts; 2312 struct nvme_ctrlr_opts bdev_opts; 2313 }; 2314 2315 static void 2316 free_rpc_bdev_nvme_start_mdns_discovery(struct rpc_bdev_nvme_start_mdns_discovery *req) 2317 { 2318 free(req->name); 2319 free(req->svcname); 2320 free(req->hostnqn); 2321 } 2322 2323 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_mdns_discovery_decoders[] = { 2324 {"name", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, name), spdk_json_decode_string}, 2325 {"svcname", offsetof(struct rpc_bdev_nvme_start_mdns_discovery, svcname), spdk_json_decode_string}, 2326 {"hostnqn", offsetof(struct 
rpc_bdev_nvme_start_mdns_discovery, hostnqn), spdk_json_decode_string, true}, 2327 }; 2328 2329 struct rpc_bdev_nvme_start_mdns_discovery_ctx { 2330 struct rpc_bdev_nvme_start_mdns_discovery req; 2331 struct spdk_jsonrpc_request *request; 2332 }; 2333 2334 static void 2335 rpc_bdev_nvme_start_mdns_discovery(struct spdk_jsonrpc_request *request, 2336 const struct spdk_json_val *params) 2337 { 2338 struct rpc_bdev_nvme_start_mdns_discovery_ctx *ctx; 2339 int rc; 2340 2341 ctx = calloc(1, sizeof(*ctx)); 2342 if (!ctx) { 2343 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2344 return; 2345 } 2346 2347 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 2348 2349 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_mdns_discovery_decoders, 2350 SPDK_COUNTOF(rpc_bdev_nvme_start_mdns_discovery_decoders), 2351 &ctx->req)) { 2352 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2353 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2354 "spdk_json_decode_object failed"); 2355 goto cleanup; 2356 } 2357 2358 if (ctx->req.hostnqn) { 2359 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 2360 ctx->req.hostnqn); 2361 } 2362 ctx->request = request; 2363 rc = bdev_nvme_start_mdns_discovery(ctx->req.name, ctx->req.svcname, &ctx->req.opts, 2364 &ctx->req.bdev_opts); 2365 if (rc) { 2366 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2367 } else { 2368 spdk_jsonrpc_send_bool_response(request, true); 2369 } 2370 2371 cleanup: 2372 free_rpc_bdev_nvme_start_mdns_discovery(&ctx->req); 2373 free(ctx); 2374 } 2375 SPDK_RPC_REGISTER("bdev_nvme_start_mdns_discovery", rpc_bdev_nvme_start_mdns_discovery, 2376 SPDK_RPC_RUNTIME) 2377 2378 struct rpc_bdev_nvme_stop_mdns_discovery { 2379 char *name; 2380 }; 2381 2382 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_mdns_discovery_decoders[] = { 2383 {"name", offsetof(struct rpc_bdev_nvme_stop_mdns_discovery, name), spdk_json_decode_string}, 2384 }; 2385 2386 struct rpc_bdev_nvme_stop_mdns_discovery_ctx { 2387 struct rpc_bdev_nvme_stop_mdns_discovery req; 2388 struct spdk_jsonrpc_request *request; 2389 }; 2390 2391 static void 2392 rpc_bdev_nvme_stop_mdns_discovery(struct spdk_jsonrpc_request *request, 2393 const struct spdk_json_val *params) 2394 { 2395 struct rpc_bdev_nvme_stop_mdns_discovery_ctx *ctx; 2396 int rc; 2397 2398 ctx = calloc(1, sizeof(*ctx)); 2399 if (!ctx) { 2400 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2401 return; 2402 } 2403 2404 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_mdns_discovery_decoders, 2405 SPDK_COUNTOF(rpc_bdev_nvme_stop_mdns_discovery_decoders), 2406 &ctx->req)) { 2407 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2408 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2409 "spdk_json_decode_object failed"); 2410 goto cleanup; 2411 } 2412 2413 ctx->request = request; 2414 rc = bdev_nvme_stop_mdns_discovery(ctx->req.name); 2415 2416 if (rc) { 2417 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2418 goto cleanup; 2419 } 2420 spdk_jsonrpc_send_bool_response(ctx->request, true); 2421 2422 cleanup: 2423 free(ctx->req.name); 2424 free(ctx); 2425 } 2426 SPDK_RPC_REGISTER("bdev_nvme_stop_mdns_discovery", rpc_bdev_nvme_stop_mdns_discovery, 2427 SPDK_RPC_RUNTIME) 2428 2429 static void 2430 rpc_bdev_nvme_get_mdns_discovery_info(struct spdk_jsonrpc_request *request, 2431 const struct spdk_json_val *params) 2432 { 2433 
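/* Writing the mDNS discovery info JSON and completing the request are delegated to the bdev_nvme module. */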
bdev_nvme_get_mdns_discovery_info(request); 2434 } 2435 2436 SPDK_RPC_REGISTER("bdev_nvme_get_mdns_discovery_info", rpc_bdev_nvme_get_mdns_discovery_info, 2437 SPDK_RPC_RUNTIME) 2438 2439 struct rpc_get_path_stat { 2440 char *name; 2441 }; 2442 2443 struct path_stat { 2444 struct spdk_bdev_io_stat stat; 2445 struct spdk_nvme_transport_id trid; 2446 struct nvme_ns *ns; 2447 }; 2448 2449 struct rpc_bdev_nvme_path_stat_ctx { 2450 struct spdk_jsonrpc_request *request; 2451 struct path_stat *path_stat; 2452 uint32_t num_paths; 2453 struct spdk_bdev_desc *desc; 2454 }; 2455 2456 static void 2457 free_rpc_get_path_stat(struct rpc_get_path_stat *req) 2458 { 2459 free(req->name); 2460 } 2461 2462 static const struct spdk_json_object_decoder rpc_get_path_stat_decoders[] = { 2463 {"name", offsetof(struct rpc_get_path_stat, name), spdk_json_decode_string}, 2464 }; 2465 2466 static void 2467 dummy_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx) 2468 { 2469 } 2470 2471 static void 2472 rpc_bdev_nvme_path_stat_per_channel(struct spdk_io_channel_iter *i) 2473 { 2474 struct rpc_bdev_nvme_path_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2475 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 2476 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2477 struct nvme_io_path *io_path; 2478 struct path_stat *path_stat; 2479 uint32_t j; 2480 2481 assert(ctx->num_paths != 0); 2482 2483 for (j = 0; j < ctx->num_paths; j++) { 2484 path_stat = &ctx->path_stat[j]; 2485 2486 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 2487 if (path_stat->ns == io_path->nvme_ns) { 2488 assert(io_path->stat != NULL); 2489 spdk_bdev_add_io_stat(&path_stat->stat, io_path->stat); 2490 } 2491 } 2492 } 2493 2494 spdk_for_each_channel_continue(i, 0); 2495 } 2496 2497 static void 2498 rpc_bdev_nvme_path_stat_done(struct spdk_io_channel_iter *i, int status) 2499 { 2500 struct rpc_bdev_nvme_path_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2501 struct nvme_bdev *nbdev = spdk_io_channel_iter_get_io_device(i); 2502 struct spdk_json_write_ctx *w; 2503 struct path_stat *path_stat; 2504 uint32_t j; 2505 2506 assert(ctx->num_paths != 0); 2507 2508 w = spdk_jsonrpc_begin_result(ctx->request); 2509 spdk_json_write_object_begin(w); 2510 spdk_json_write_named_string(w, "name", nbdev->disk.name); 2511 spdk_json_write_named_array_begin(w, "stats"); 2512 2513 for (j = 0; j < ctx->num_paths; j++) { 2514 path_stat = &ctx->path_stat[j]; 2515 spdk_json_write_object_begin(w); 2516 2517 spdk_json_write_named_object_begin(w, "trid"); 2518 nvme_bdev_dump_trid_json(&path_stat->trid, w); 2519 spdk_json_write_object_end(w); 2520 2521 spdk_json_write_named_object_begin(w, "stat"); 2522 spdk_bdev_dump_io_stat_json(&path_stat->stat, w); 2523 spdk_json_write_object_end(w); 2524 2525 spdk_json_write_object_end(w); 2526 } 2527 2528 spdk_json_write_array_end(w); 2529 spdk_json_write_object_end(w); 2530 spdk_jsonrpc_end_result(ctx->request, w); 2531 2532 spdk_bdev_close(ctx->desc); 2533 free(ctx->path_stat); 2534 free(ctx); 2535 } 2536 2537 static void 2538 rpc_bdev_nvme_get_path_iostat(struct spdk_jsonrpc_request *request, 2539 const struct spdk_json_val *params) 2540 { 2541 struct rpc_get_path_stat req = {}; 2542 struct spdk_bdev_desc *desc = NULL; 2543 struct spdk_bdev *bdev; 2544 struct nvme_bdev *nbdev; 2545 struct nvme_ns *nvme_ns; 2546 struct path_stat *path_stat; 2547 struct rpc_bdev_nvme_path_stat_ctx *ctx; 2548 struct spdk_bdev_nvme_opts opts; 2549 uint32_t num_paths = 0, i = 0; 
2550 int rc; 2551 2552 bdev_nvme_get_opts(&opts); 2553 if (!opts.io_path_stat) { 2554 SPDK_ERRLOG("bdev_nvme_get_path_iostat requires the io_path_stat option to be enabled\n"); 2555 spdk_jsonrpc_send_error_response(request, -EPERM, 2556 "bdev_nvme_get_path_iostat requires the io_path_stat option to be enabled"); 2557 return; 2558 } 2559 2560 if (spdk_json_decode_object(params, rpc_get_path_stat_decoders, 2561 SPDK_COUNTOF(rpc_get_path_stat_decoders), 2562 &req)) { 2563 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2564 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2565 "spdk_json_decode_object failed"); 2566 free_rpc_get_path_stat(&req); 2567 return; 2568 } 2569 2570 rc = spdk_bdev_open_ext(req.name, false, dummy_bdev_event_cb, NULL, &desc); 2571 if (rc != 0) { 2572 SPDK_ERRLOG("Failed to open bdev '%s': %d\n", req.name, rc); 2573 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2574 free_rpc_get_path_stat(&req); 2575 return; 2576 } 2577 2578 free_rpc_get_path_stat(&req); 2579 2580 ctx = calloc(1, sizeof(struct rpc_bdev_nvme_path_stat_ctx)); 2581 if (ctx == NULL) { 2582 spdk_bdev_close(desc); 2583 SPDK_ERRLOG("Failed to allocate rpc_bdev_nvme_path_stat_ctx struct\n"); 2584 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2585 return; 2586 } 2587 2588 bdev = spdk_bdev_desc_get_bdev(desc); 2589 nbdev = bdev->ctxt; 2590 2591 pthread_mutex_lock(&nbdev->mutex); 2592 if (nbdev->ref == 0) { 2593 rc = -ENOENT; 2594 goto err; 2595 } 2596 2597 num_paths = nbdev->ref; 2598 path_stat = calloc(num_paths, sizeof(struct path_stat)); 2599 if (path_stat == NULL) { 2600 rc = -ENOMEM; 2601 SPDK_ERRLOG("Failed to allocate memory for path_stat.\n"); 2602 goto err; 2603 } 2604 2605 /* store the accumulated historical stat of each path */ 2606 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) { 2607 assert(i < num_paths); 2608 path_stat[i].ns = nvme_ns; 2609 path_stat[i].trid = nvme_ns->ctrlr->active_path_id->trid; 2610 2611 assert(nvme_ns->stat != NULL); 2612 memcpy(&path_stat[i].stat, nvme_ns->stat, sizeof(struct spdk_bdev_io_stat)); 2613 i++; 2614 } 2615 pthread_mutex_unlock(&nbdev->mutex); 2616 2617 ctx->request = request; 2618 ctx->desc = desc; 2619 ctx->path_stat = path_stat; 2620 ctx->num_paths = num_paths; 2621 2622 spdk_for_each_channel(nbdev, 2623 rpc_bdev_nvme_path_stat_per_channel, 2624 ctx, 2625 rpc_bdev_nvme_path_stat_done); 2626 return; 2627 2628 err: 2629 pthread_mutex_unlock(&nbdev->mutex); 2630 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 2631 spdk_bdev_close(desc); 2632 free(ctx); 2633 } 2634 SPDK_RPC_REGISTER("bdev_nvme_get_path_iostat", rpc_bdev_nvme_get_path_iostat, 2635 SPDK_RPC_RUNTIME) 2636
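/*
 * Illustrative JSON-RPC requests for two of the RPCs registered in this file. The method
 * and parameter names come from the decoders above; the bdev name and numeric values are
 * placeholders chosen for the example, not defaults.
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_set_multipath_policy",
 *    "params": {"name": "Nvme0n1", "policy": "active_active", "selector": "round_robin", "rr_min_io": 8}}
 *
 *   {"jsonrpc": "2.0", "id": 2, "method": "bdev_nvme_get_path_iostat",
 *    "params": {"name": "Nvme0n1"}}
 */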