1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) Intel Corporation. All rights reserved. 3 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "bdev_nvme.h" 10 11 #include "spdk/config.h" 12 13 #include "spdk/string.h" 14 #include "spdk/rpc.h" 15 #include "spdk/util.h" 16 #include "spdk/env.h" 17 #include "spdk/nvme.h" 18 #include "spdk/nvme_spec.h" 19 20 #include "spdk/log.h" 21 #include "spdk/bdev_module.h" 22 23 struct open_descriptors { 24 void *desc; 25 struct spdk_bdev *bdev; 26 TAILQ_ENTRY(open_descriptors) tqlst; 27 struct spdk_thread *thread; 28 }; 29 typedef TAILQ_HEAD(, open_descriptors) open_descriptors_t; 30 31 static int 32 rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out) 33 { 34 enum spdk_bdev_timeout_action *action = out; 35 36 if (spdk_json_strequal(val, "none") == true) { 37 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE; 38 } else if (spdk_json_strequal(val, "abort") == true) { 39 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT; 40 } else if (spdk_json_strequal(val, "reset") == true) { 41 *action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET; 42 } else { 43 SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n"); 44 return -EINVAL; 45 } 46 47 return 0; 48 } 49 50 static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = { 51 {"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true}, 52 {"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true}, 53 {"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true}, 54 {"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true}, 55 {"retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 56 {"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true}, 57 {"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true}, 58 {"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true}, 59 {"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true}, 60 {"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true}, 61 {"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true}, 62 {"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true}, 63 {"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true}, 64 {"transport_retry_count", offsetof(struct spdk_bdev_nvme_opts, transport_retry_count), spdk_json_decode_uint32, true}, 65 {"bdev_retry_count", offsetof(struct spdk_bdev_nvme_opts, bdev_retry_count), spdk_json_decode_int32, true}, 66 {"transport_ack_timeout", offsetof(struct spdk_bdev_nvme_opts, transport_ack_timeout), spdk_json_decode_uint8, true}, 67 {"ctrlr_loss_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 68 {"reconnect_delay_sec", offsetof(struct spdk_bdev_nvme_opts, reconnect_delay_sec), 
spdk_json_decode_uint32, true}, 69 {"fast_io_fail_timeout_sec", offsetof(struct spdk_bdev_nvme_opts, fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 70 {"disable_auto_failback", offsetof(struct spdk_bdev_nvme_opts, disable_auto_failback), spdk_json_decode_bool, true}, 71 }; 72 73 static void 74 rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request, 75 const struct spdk_json_val *params) 76 { 77 struct spdk_bdev_nvme_opts opts; 78 int rc; 79 80 bdev_nvme_get_opts(&opts); 81 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders, 82 SPDK_COUNTOF(rpc_bdev_nvme_options_decoders), 83 &opts)) { 84 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 85 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 86 "spdk_json_decode_object failed"); 87 return; 88 } 89 90 rc = bdev_nvme_set_opts(&opts); 91 if (rc == -EPERM) { 92 spdk_jsonrpc_send_error_response(request, -EPERM, 93 "RPC not permitted with nvme controllers already attached"); 94 } else if (rc) { 95 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 96 } else { 97 spdk_jsonrpc_send_bool_response(request, true); 98 } 99 100 return; 101 } 102 SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options, 103 SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME) 104 105 struct rpc_bdev_nvme_hotplug { 106 bool enabled; 107 uint64_t period_us; 108 }; 109 110 static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = { 111 {"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false}, 112 {"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true}, 113 }; 114 115 static void 116 rpc_bdev_nvme_set_hotplug_done(void *ctx) 117 { 118 struct spdk_jsonrpc_request *request = ctx; 119 120 spdk_jsonrpc_send_bool_response(request, true); 121 } 122 123 static void 124 rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request, 125 const struct spdk_json_val *params) 126 { 127 struct rpc_bdev_nvme_hotplug req = {false, 0}; 128 int rc; 129 130 if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders, 131 SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) { 132 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 133 rc = -EINVAL; 134 goto invalid; 135 } 136 137 rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done, 138 request); 139 if (rc) { 140 goto invalid; 141 } 142 143 return; 144 invalid: 145 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc)); 146 } 147 SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME) 148 149 enum bdev_nvme_multipath_mode { 150 BDEV_NVME_MP_MODE_FAILOVER, 151 BDEV_NVME_MP_MODE_MULTIPATH, 152 BDEV_NVME_MP_MODE_DISABLE, 153 }; 154 155 struct rpc_bdev_nvme_attach_controller { 156 char *name; 157 char *trtype; 158 char *adrfam; 159 char *traddr; 160 char *trsvcid; 161 char *priority; 162 char *subnqn; 163 char *hostnqn; 164 char *hostaddr; 165 char *hostsvcid; 166 enum bdev_nvme_multipath_mode multipath; 167 struct nvme_ctrlr_opts bdev_opts; 168 struct spdk_nvme_ctrlr_opts drv_opts; 169 }; 170 171 static void 172 free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req) 173 { 174 free(req->name); 175 free(req->trtype); 176 free(req->adrfam); 177 free(req->traddr); 178 free(req->trsvcid); 179 free(req->priority); 180 free(req->subnqn); 181 free(req->hostnqn); 182 free(req->hostaddr); 183 free(req->hostsvcid); 184 } 185 186 static int 187 
bdev_nvme_decode_reftag(const struct spdk_json_val *val, void *out) 188 { 189 uint32_t *flag = out; 190 bool reftag; 191 int rc; 192 193 rc = spdk_json_decode_bool(val, &reftag); 194 if (rc == 0 && reftag == true) { 195 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; 196 } 197 198 return rc; 199 } 200 201 static int 202 bdev_nvme_decode_guard(const struct spdk_json_val *val, void *out) 203 { 204 uint32_t *flag = out; 205 bool guard; 206 int rc; 207 208 rc = spdk_json_decode_bool(val, &guard); 209 if (rc == 0 && guard == true) { 210 *flag |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD; 211 } 212 213 return rc; 214 } 215 216 static int 217 bdev_nvme_decode_multipath(const struct spdk_json_val *val, void *out) 218 { 219 enum bdev_nvme_multipath_mode *multipath = out; 220 221 if (spdk_json_strequal(val, "failover") == true) { 222 *multipath = BDEV_NVME_MP_MODE_FAILOVER; 223 } else if (spdk_json_strequal(val, "multipath") == true) { 224 *multipath = BDEV_NVME_MP_MODE_MULTIPATH; 225 } else if (spdk_json_strequal(val, "disable") == true) { 226 *multipath = BDEV_NVME_MP_MODE_DISABLE; 227 } else { 228 SPDK_NOTICELOG("Invalid parameter value: multipath\n"); 229 return -EINVAL; 230 } 231 232 return 0; 233 } 234 235 236 static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = { 237 {"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string}, 238 {"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string}, 239 {"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string}, 240 241 {"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true}, 242 {"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true}, 243 {"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true}, 244 {"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true}, 245 {"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true}, 246 {"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true}, 247 {"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true}, 248 249 {"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_reftag, true}, 250 {"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.prchk_flags), bdev_nvme_decode_guard, true}, 251 {"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.header_digest), spdk_json_decode_bool, true}, 252 {"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.data_digest), spdk_json_decode_bool, true}, 253 {"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true}, 254 {"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), bdev_nvme_decode_multipath, true}, 255 {"num_io_queues", offsetof(struct rpc_bdev_nvme_attach_controller, drv_opts.num_io_queues), spdk_json_decode_uint32, true}, 256 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 257 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 258 {"fast_io_fail_timeout_sec", 
offsetof(struct rpc_bdev_nvme_attach_controller, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 259 }; 260 261 #define NVME_MAX_BDEVS_PER_RPC 128 262 263 struct rpc_bdev_nvme_attach_controller_ctx { 264 struct rpc_bdev_nvme_attach_controller req; 265 uint32_t count; 266 size_t bdev_count; 267 const char *names[NVME_MAX_BDEVS_PER_RPC]; 268 struct spdk_jsonrpc_request *request; 269 }; 270 271 static void 272 rpc_bdev_nvme_attach_controller_examined(void *cb_ctx) 273 { 274 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 275 struct spdk_jsonrpc_request *request = ctx->request; 276 struct spdk_json_write_ctx *w; 277 size_t i; 278 279 w = spdk_jsonrpc_begin_result(request); 280 spdk_json_write_array_begin(w); 281 for (i = 0; i < ctx->bdev_count; i++) { 282 spdk_json_write_string(w, ctx->names[i]); 283 } 284 spdk_json_write_array_end(w); 285 spdk_jsonrpc_end_result(request, w); 286 287 free_rpc_bdev_nvme_attach_controller(&ctx->req); 288 free(ctx); 289 } 290 291 static void 292 rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc) 293 { 294 struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx; 295 struct spdk_jsonrpc_request *request = ctx->request; 296 297 if (rc < 0) { 298 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); 299 free_rpc_bdev_nvme_attach_controller(&ctx->req); 300 free(ctx); 301 return; 302 } 303 304 ctx->bdev_count = bdev_count; 305 spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx); 306 } 307 308 static void 309 rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, 310 const struct spdk_json_val *params) 311 { 312 struct rpc_bdev_nvme_attach_controller_ctx *ctx; 313 struct spdk_nvme_transport_id trid = {}; 314 const struct spdk_nvme_ctrlr_opts *drv_opts; 315 const struct spdk_nvme_transport_id *ctrlr_trid; 316 struct nvme_ctrlr *ctrlr = NULL; 317 size_t len, maxlen; 318 bool multipath = false; 319 int rc; 320 321 ctx = calloc(1, sizeof(*ctx)); 322 if (!ctx) { 323 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 324 return; 325 } 326 327 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.drv_opts, sizeof(ctx->req.drv_opts)); 328 bdev_nvme_get_default_ctrlr_opts(&ctx->req.bdev_opts); 329 /* For now, initialize the multipath parameter to add a failover path. This maintains backward 330 * compatibility with past behavior. In the future, this behavior will change to "disable". 
*/ 331 ctx->req.multipath = BDEV_NVME_MP_MODE_FAILOVER; 332 333 if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders, 334 SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders), 335 &ctx->req)) { 336 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 337 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 338 "spdk_json_decode_object failed"); 339 goto cleanup; 340 } 341 342 /* Parse trstring */ 343 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 344 if (rc < 0) { 345 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 346 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 347 ctx->req.trtype); 348 goto cleanup; 349 } 350 351 /* Parse trtype */ 352 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 353 assert(rc == 0); 354 355 /* Parse traddr */ 356 maxlen = sizeof(trid.traddr); 357 len = strnlen(ctx->req.traddr, maxlen); 358 if (len == maxlen) { 359 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 360 ctx->req.traddr); 361 goto cleanup; 362 } 363 memcpy(trid.traddr, ctx->req.traddr, len + 1); 364 365 /* Parse adrfam */ 366 if (ctx->req.adrfam) { 367 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 368 if (rc < 0) { 369 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 370 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 371 ctx->req.adrfam); 372 goto cleanup; 373 } 374 } 375 376 /* Parse trsvcid */ 377 if (ctx->req.trsvcid) { 378 maxlen = sizeof(trid.trsvcid); 379 len = strnlen(ctx->req.trsvcid, maxlen); 380 if (len == maxlen) { 381 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 382 ctx->req.trsvcid); 383 goto cleanup; 384 } 385 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 386 } 387 388 /* Parse priority for the NVMe-oF transport connection */ 389 if (ctx->req.priority) { 390 trid.priority = spdk_strtol(ctx->req.priority, 10); 391 } 392 393 /* Parse subnqn */ 394 if (ctx->req.subnqn) { 395 maxlen = sizeof(trid.subnqn); 396 len = strnlen(ctx->req.subnqn, maxlen); 397 if (len == maxlen) { 398 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 399 ctx->req.subnqn); 400 goto cleanup; 401 } 402 memcpy(trid.subnqn, ctx->req.subnqn, len + 1); 403 } 404 405 if (ctx->req.hostnqn) { 406 snprintf(ctx->req.drv_opts.hostnqn, sizeof(ctx->req.drv_opts.hostnqn), "%s", 407 ctx->req.hostnqn); 408 } 409 410 if (ctx->req.hostaddr) { 411 maxlen = sizeof(ctx->req.drv_opts.src_addr); 412 len = strnlen(ctx->req.hostaddr, maxlen); 413 if (len == maxlen) { 414 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s", 415 ctx->req.hostaddr); 416 goto cleanup; 417 } 418 snprintf(ctx->req.drv_opts.src_addr, maxlen, "%s", ctx->req.hostaddr); 419 } 420 421 if (ctx->req.hostsvcid) { 422 maxlen = sizeof(ctx->req.drv_opts.src_svcid); 423 len = strnlen(ctx->req.hostsvcid, maxlen); 424 if (len == maxlen) { 425 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 426 ctx->req.hostsvcid); 427 goto cleanup; 428 } 429 snprintf(ctx->req.drv_opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid); 430 } 431 432 ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); 433 434 if (ctrlr) { 435 /* This controller already exists. Check what the user wants to do. */ 436 if (ctx->req.multipath == BDEV_NVME_MP_MODE_DISABLE) { 437 /* The user does not want to do any form of multipathing. 
*/ 438 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 439 "A controller named %s already exists and multipath is disabled\n", 440 ctx->req.name); 441 goto cleanup; 442 } 443 444 assert(ctx->req.multipath == BDEV_NVME_MP_MODE_FAILOVER || 445 ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH); 446 447 /* The user wants to add this as a failover path or add this to create multipath. */ 448 drv_opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr); 449 ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr); 450 451 if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 && 452 strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 && 453 strncmp(ctx->req.drv_opts.src_addr, drv_opts->src_addr, sizeof(drv_opts->src_addr)) == 0 && 454 strncmp(ctx->req.drv_opts.src_svcid, drv_opts->src_svcid, sizeof(drv_opts->src_svcid)) == 0) { 455 /* Exactly same network path can't be added a second time */ 456 spdk_jsonrpc_send_error_response_fmt(request, -EALREADY, 457 "A controller named %s already exists with the specified network path\n", 458 ctx->req.name); 459 goto cleanup; 460 } 461 462 if (strncmp(trid.subnqn, 463 ctrlr_trid->subnqn, 464 SPDK_NVMF_NQN_MAX_LEN) != 0) { 465 /* Different SUBNQN is not allowed when specifying the same controller name. */ 466 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 467 "A controller named %s already exists, but uses a different subnqn (%s)\n", 468 ctx->req.name, ctrlr_trid->subnqn); 469 goto cleanup; 470 } 471 472 if (strncmp(ctx->req.drv_opts.hostnqn, drv_opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) { 473 /* Different HOSTNQN is not allowed when specifying the same controller name. */ 474 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 475 "A controller named %s already exists, but uses a different hostnqn (%s)\n", 476 ctx->req.name, drv_opts->hostnqn); 477 goto cleanup; 478 } 479 480 if (ctx->req.bdev_opts.prchk_flags) { 481 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 482 "A controller named %s already exists. To add a path, do not specify PI options.\n", 483 ctx->req.name); 484 goto cleanup; 485 } 486 487 ctx->req.bdev_opts.prchk_flags = ctrlr->opts.prchk_flags; 488 } 489 490 if (ctx->req.multipath == BDEV_NVME_MP_MODE_MULTIPATH) { 491 multipath = true; 492 } 493 494 if (ctx->req.drv_opts.num_io_queues == 0 || ctx->req.drv_opts.num_io_queues > UINT16_MAX + 1) { 495 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, 496 "num_io_queues out of bounds, min: %u max: %u\n", 497 1, UINT16_MAX + 1); 498 goto cleanup; 499 } 500 501 ctx->request = request; 502 ctx->count = NVME_MAX_BDEVS_PER_RPC; 503 /* Should already be zero due to the calloc(), but set explicitly for clarity. 
*/ 504 ctx->req.bdev_opts.from_discovery_service = false; 505 rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count, 506 rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.drv_opts, 507 &ctx->req.bdev_opts, multipath); 508 if (rc) { 509 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 510 goto cleanup; 511 } 512 513 return; 514 515 cleanup: 516 free_rpc_bdev_nvme_attach_controller(&ctx->req); 517 free(ctx); 518 } 519 SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller, 520 SPDK_RPC_RUNTIME) 521 522 static void 523 rpc_dump_nvme_bdev_controller_info(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx) 524 { 525 struct spdk_json_write_ctx *w = ctx; 526 struct nvme_ctrlr *nvme_ctrlr; 527 528 spdk_json_write_object_begin(w); 529 spdk_json_write_named_string(w, "name", nbdev_ctrlr->name); 530 531 spdk_json_write_named_array_begin(w, "ctrlrs"); 532 TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) { 533 nvme_ctrlr_info_json(w, nvme_ctrlr); 534 } 535 spdk_json_write_array_end(w); 536 spdk_json_write_object_end(w); 537 } 538 539 struct rpc_bdev_nvme_get_controllers { 540 char *name; 541 }; 542 543 static void 544 free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r) 545 { 546 free(r->name); 547 } 548 549 static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = { 550 {"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true}, 551 }; 552 553 static void 554 rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, 555 const struct spdk_json_val *params) 556 { 557 struct rpc_bdev_nvme_get_controllers req = {}; 558 struct spdk_json_write_ctx *w; 559 struct nvme_bdev_ctrlr *nbdev_ctrlr = NULL; 560 561 if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders, 562 SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders), 563 &req)) { 564 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 565 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 566 "spdk_json_decode_object failed"); 567 goto cleanup; 568 } 569 570 if (req.name) { 571 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); 572 if (nbdev_ctrlr == NULL) { 573 SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name); 574 spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name); 575 goto cleanup; 576 } 577 } 578 579 w = spdk_jsonrpc_begin_result(request); 580 spdk_json_write_array_begin(w); 581 582 if (nbdev_ctrlr != NULL) { 583 rpc_dump_nvme_bdev_controller_info(nbdev_ctrlr, w); 584 } else { 585 nvme_bdev_ctrlr_for_each(rpc_dump_nvme_bdev_controller_info, w); 586 } 587 588 spdk_json_write_array_end(w); 589 590 spdk_jsonrpc_end_result(request, w); 591 592 cleanup: 593 free_rpc_bdev_nvme_get_controllers(&req); 594 } 595 SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME) 596 597 struct rpc_bdev_nvme_detach_controller { 598 char *name; 599 char *trtype; 600 char *adrfam; 601 char *traddr; 602 char *trsvcid; 603 char *subnqn; 604 char *hostaddr; 605 char *hostsvcid; 606 }; 607 608 static void 609 free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req) 610 { 611 free(req->name); 612 free(req->trtype); 613 free(req->adrfam); 614 free(req->traddr); 615 free(req->trsvcid); 616 free(req->subnqn); 617 free(req->hostaddr); 618 free(req->hostsvcid); 619 } 620 621 static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = 
{ 622 {"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string}, 623 {"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true}, 624 {"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true}, 625 {"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true}, 626 {"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true}, 627 {"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true}, 628 {"hostaddr", offsetof(struct rpc_bdev_nvme_detach_controller, hostaddr), spdk_json_decode_string, true}, 629 {"hostsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, hostsvcid), spdk_json_decode_string, true}, 630 }; 631 632 static void 633 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request, 634 const struct spdk_json_val *params) 635 { 636 struct rpc_bdev_nvme_detach_controller req = {NULL}; 637 struct nvme_path_id path = {}; 638 size_t len, maxlen; 639 int rc = 0; 640 641 if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders, 642 SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders), 643 &req)) { 644 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 645 "spdk_json_decode_object failed"); 646 goto cleanup; 647 } 648 649 if (req.trtype != NULL) { 650 rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype); 651 if (rc < 0) { 652 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 653 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 654 req.trtype); 655 goto cleanup; 656 } 657 658 rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype); 659 if (rc < 0) { 660 SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype); 661 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 662 req.trtype); 663 goto cleanup; 664 } 665 } 666 667 if (req.traddr != NULL) { 668 maxlen = sizeof(path.trid.traddr); 669 len = strnlen(req.traddr, maxlen); 670 if (len == maxlen) { 671 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 672 req.traddr); 673 goto cleanup; 674 } 675 memcpy(path.trid.traddr, req.traddr, len + 1); 676 } 677 678 if (req.adrfam != NULL) { 679 rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam); 680 if (rc < 0) { 681 SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam); 682 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 683 req.adrfam); 684 goto cleanup; 685 } 686 } 687 688 if (req.trsvcid != NULL) { 689 maxlen = sizeof(path.trid.trsvcid); 690 len = strnlen(req.trsvcid, maxlen); 691 if (len == maxlen) { 692 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 693 req.trsvcid); 694 goto cleanup; 695 } 696 memcpy(path.trid.trsvcid, req.trsvcid, len + 1); 697 } 698 699 /* Parse subnqn */ 700 if (req.subnqn != NULL) { 701 maxlen = sizeof(path.trid.subnqn); 702 len = strnlen(req.subnqn, maxlen); 703 if (len == maxlen) { 704 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s", 705 req.subnqn); 706 goto cleanup; 707 } 708 memcpy(path.trid.subnqn, req.subnqn, len + 1); 709 } 710 711 if (req.hostaddr) { 712 maxlen = sizeof(path.hostid.hostaddr); 713 len = strnlen(req.hostaddr, maxlen); 714 if (len == maxlen) { 715 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr 
too long: %s", 716 req.hostaddr); 717 goto cleanup; 718 } 719 snprintf(path.hostid.hostaddr, maxlen, "%s", req.hostaddr); 720 } 721 722 if (req.hostsvcid) { 723 maxlen = sizeof(path.hostid.hostsvcid); 724 len = strnlen(req.hostsvcid, maxlen); 725 if (len == maxlen) { 726 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s", 727 req.hostsvcid); 728 goto cleanup; 729 } 730 snprintf(path.hostid.hostsvcid, maxlen, "%s", req.hostsvcid); 731 } 732 733 rc = bdev_nvme_delete(req.name, &path); 734 735 if (rc != 0) { 736 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 737 goto cleanup; 738 } 739 740 spdk_jsonrpc_send_bool_response(request, true); 741 742 cleanup: 743 free_rpc_bdev_nvme_detach_controller(&req); 744 } 745 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller, 746 SPDK_RPC_RUNTIME) 747 748 struct rpc_apply_firmware { 749 char *filename; 750 char *bdev_name; 751 }; 752 753 static void 754 free_rpc_apply_firmware(struct rpc_apply_firmware *req) 755 { 756 free(req->filename); 757 free(req->bdev_name); 758 } 759 760 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = { 761 {"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string}, 762 {"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string}, 763 }; 764 765 struct firmware_update_info { 766 void *fw_image; 767 void *p; 768 unsigned int size; 769 unsigned int size_remaining; 770 unsigned int offset; 771 unsigned int transfer; 772 773 void *desc; 774 struct spdk_io_channel *ch; 775 struct spdk_jsonrpc_request *request; 776 struct spdk_nvme_ctrlr *ctrlr; 777 open_descriptors_t desc_head; 778 struct rpc_apply_firmware *req; 779 }; 780 781 static void 782 _apply_firmware_cleanup(void *ctx) 783 { 784 struct spdk_bdev_desc *desc = ctx; 785 786 spdk_bdev_close(desc); 787 } 788 789 static void 790 apply_firmware_cleanup(void *cb_arg) 791 { 792 struct open_descriptors *opt, *tmp; 793 struct firmware_update_info *firm_ctx = cb_arg; 794 795 if (!firm_ctx) { 796 return; 797 } 798 799 if (firm_ctx->fw_image) { 800 spdk_free(firm_ctx->fw_image); 801 } 802 803 if (firm_ctx->req) { 804 free_rpc_apply_firmware(firm_ctx->req); 805 free(firm_ctx->req); 806 } 807 808 if (firm_ctx->ch) { 809 spdk_put_io_channel(firm_ctx->ch); 810 } 811 812 TAILQ_FOREACH_SAFE(opt, &firm_ctx->desc_head, tqlst, tmp) { 813 TAILQ_REMOVE(&firm_ctx->desc_head, opt, tqlst); 814 /* Close the underlying bdev on its same opened thread. 
*/ 815 if (opt->thread && opt->thread != spdk_get_thread()) { 816 spdk_thread_send_msg(opt->thread, _apply_firmware_cleanup, opt->desc); 817 } else { 818 spdk_bdev_close(opt->desc); 819 } 820 free(opt); 821 } 822 free(firm_ctx); 823 } 824 825 static void 826 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 827 { 828 struct spdk_json_write_ctx *w; 829 struct firmware_update_info *firm_ctx = cb_arg; 830 831 spdk_bdev_free_io(bdev_io); 832 833 if (!success) { 834 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 835 "firmware commit failed."); 836 apply_firmware_cleanup(firm_ctx); 837 return; 838 } 839 840 if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) { 841 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 842 "Controller reset failed."); 843 apply_firmware_cleanup(firm_ctx); 844 return; 845 } 846 847 w = spdk_jsonrpc_begin_result(firm_ctx->request); 848 spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress."); 849 spdk_jsonrpc_end_result(firm_ctx->request, w); 850 apply_firmware_cleanup(firm_ctx); 851 } 852 853 static void 854 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 855 { 856 struct spdk_nvme_cmd cmd = {}; 857 struct spdk_nvme_fw_commit fw_commit; 858 int slot = 0; 859 int rc; 860 struct firmware_update_info *firm_ctx = cb_arg; 861 enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG; 862 863 spdk_bdev_free_io(bdev_io); 864 865 if (!success) { 866 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 867 "firmware download failed ."); 868 apply_firmware_cleanup(firm_ctx); 869 return; 870 } 871 872 firm_ctx->p += firm_ctx->transfer; 873 firm_ctx->offset += firm_ctx->transfer; 874 firm_ctx->size_remaining -= firm_ctx->transfer; 875 876 switch (firm_ctx->size_remaining) { 877 case 0: 878 /* firmware download completed. 
Commit firmware */ 879 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit)); 880 fw_commit.fs = slot; 881 fw_commit.ca = commit_action; 882 883 cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT; 884 memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t)); 885 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0, 886 apply_firmware_complete_reset, firm_ctx); 887 if (rc) { 888 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 889 "firmware commit failed."); 890 apply_firmware_cleanup(firm_ctx); 891 return; 892 } 893 break; 894 default: 895 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 896 cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 897 898 cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 899 cmd.cdw11 = firm_ctx->offset >> 2; 900 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p, 901 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 902 if (rc) { 903 spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 904 "firmware download failed."); 905 apply_firmware_cleanup(firm_ctx); 906 return; 907 } 908 break; 909 } 910 } 911 912 static void 913 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 914 { 915 } 916 917 static void 918 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request, 919 const struct spdk_json_val *params) 920 { 921 int rc; 922 int fd = -1; 923 struct stat fw_stat; 924 struct spdk_nvme_ctrlr *ctrlr; 925 char msg[1024]; 926 struct spdk_bdev *bdev; 927 struct spdk_bdev *bdev2; 928 struct open_descriptors *opt; 929 struct spdk_bdev_desc *desc; 930 struct spdk_nvme_cmd *cmd; 931 struct firmware_update_info *firm_ctx; 932 933 firm_ctx = calloc(1, sizeof(struct firmware_update_info)); 934 if (!firm_ctx) { 935 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 936 "Memory allocation error."); 937 return; 938 } 939 firm_ctx->fw_image = NULL; 940 TAILQ_INIT(&firm_ctx->desc_head); 941 firm_ctx->request = request; 942 943 firm_ctx->req = calloc(1, sizeof(struct rpc_apply_firmware)); 944 if (!firm_ctx->req) { 945 snprintf(msg, sizeof(msg), "Memory allocation error."); 946 goto err; 947 } 948 949 if (spdk_json_decode_object(params, rpc_apply_firmware_decoders, 950 SPDK_COUNTOF(rpc_apply_firmware_decoders), firm_ctx->req)) { 951 snprintf(msg, sizeof(msg), "spdk_json_decode_object failed."); 952 goto err; 953 } 954 955 if ((bdev = spdk_bdev_get_by_name(firm_ctx->req->bdev_name)) == NULL) { 956 snprintf(msg, sizeof(msg), "bdev %s were not found", firm_ctx->req->bdev_name); 957 goto err; 958 } 959 960 if ((ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) { 961 snprintf(msg, sizeof(msg), "Controller information for %s were not found.", 962 firm_ctx->req->bdev_name); 963 goto err; 964 } 965 firm_ctx->ctrlr = ctrlr; 966 967 for (bdev2 = spdk_bdev_first(); bdev2; bdev2 = spdk_bdev_next(bdev2)) { 968 969 if (bdev_nvme_get_ctrlr(bdev2) != ctrlr) { 970 continue; 971 } 972 973 if (!(opt = malloc(sizeof(struct open_descriptors)))) { 974 snprintf(msg, sizeof(msg), "Memory allocation error."); 975 goto err; 976 } 977 978 if (spdk_bdev_open_ext(spdk_bdev_get_name(bdev2), true, apply_firmware_open_cb, NULL, &desc) != 0) { 979 snprintf(msg, sizeof(msg), "Device %s is in use.", firm_ctx->req->bdev_name); 980 free(opt); 981 goto err; 982 } 983 984 /* Save the thread where the base device is opened */ 985 opt->thread = spdk_get_thread(); 986 987 opt->desc = desc; 988 opt->bdev = bdev; 989 
TAILQ_INSERT_TAIL(&firm_ctx->desc_head, opt, tqlst); 990 } 991 992 /* 993 * find a descriptor associated with our bdev 994 */ 995 firm_ctx->desc = NULL; 996 TAILQ_FOREACH(opt, &firm_ctx->desc_head, tqlst) { 997 if (opt->bdev == bdev) { 998 firm_ctx->desc = opt->desc; 999 break; 1000 } 1001 } 1002 1003 if (!firm_ctx->desc) { 1004 snprintf(msg, sizeof(msg), "No descriptor were found."); 1005 goto err; 1006 } 1007 1008 firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc); 1009 if (!firm_ctx->ch) { 1010 snprintf(msg, sizeof(msg), "No channels were found."); 1011 goto err; 1012 } 1013 1014 fd = open(firm_ctx->req->filename, O_RDONLY); 1015 if (fd < 0) { 1016 snprintf(msg, sizeof(msg), "open file failed."); 1017 goto err; 1018 } 1019 1020 rc = fstat(fd, &fw_stat); 1021 if (rc < 0) { 1022 close(fd); 1023 snprintf(msg, sizeof(msg), "fstat failed."); 1024 goto err; 1025 } 1026 1027 firm_ctx->size = fw_stat.st_size; 1028 if (fw_stat.st_size % 4) { 1029 close(fd); 1030 snprintf(msg, sizeof(msg), "Firmware image size is not multiple of 4."); 1031 goto err; 1032 } 1033 1034 firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL, 1035 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1036 if (!firm_ctx->fw_image) { 1037 close(fd); 1038 snprintf(msg, sizeof(msg), "Memory allocation error."); 1039 goto err; 1040 } 1041 firm_ctx->p = firm_ctx->fw_image; 1042 1043 if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) { 1044 close(fd); 1045 snprintf(msg, sizeof(msg), "Read firmware image failed!"); 1046 goto err; 1047 } 1048 close(fd); 1049 1050 firm_ctx->offset = 0; 1051 firm_ctx->size_remaining = firm_ctx->size; 1052 firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096); 1053 1054 cmd = malloc(sizeof(struct spdk_nvme_cmd)); 1055 if (!cmd) { 1056 snprintf(msg, sizeof(msg), "Memory allocation error."); 1057 goto err; 1058 } 1059 memset(cmd, 0, sizeof(struct spdk_nvme_cmd)); 1060 cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD; 1061 1062 cmd->cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer); 1063 cmd->cdw11 = firm_ctx->offset >> 2; 1064 1065 rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, cmd, firm_ctx->p, 1066 firm_ctx->transfer, apply_firmware_complete, firm_ctx); 1067 if (rc == 0) { 1068 /* normal return here. 
*/ 1069 return; 1070 } 1071 1072 free(cmd); 1073 snprintf(msg, sizeof(msg), "Read firmware image failed!"); 1074 err: 1075 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, msg); 1076 apply_firmware_cleanup(firm_ctx); 1077 } 1078 SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME) 1079 1080 struct rpc_bdev_nvme_transport_stat_ctx { 1081 struct spdk_jsonrpc_request *request; 1082 struct spdk_json_write_ctx *w; 1083 }; 1084 1085 static void 1086 rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w, 1087 struct spdk_nvme_transport_poll_group_stat *stat) 1088 { 1089 struct spdk_nvme_rdma_device_stat *device_stats; 1090 uint32_t i; 1091 1092 spdk_json_write_named_array_begin(w, "devices"); 1093 1094 for (i = 0; i < stat->rdma.num_devices; i++) { 1095 device_stats = &stat->rdma.device_stats[i]; 1096 spdk_json_write_object_begin(w); 1097 spdk_json_write_named_string(w, "dev_name", device_stats->name); 1098 spdk_json_write_named_uint64(w, "polls", device_stats->polls); 1099 spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls); 1100 spdk_json_write_named_uint64(w, "completions", device_stats->completions); 1101 spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests); 1102 spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs); 1103 spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates); 1104 spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs); 1105 spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates); 1106 spdk_json_write_object_end(w); 1107 } 1108 spdk_json_write_array_end(w); 1109 } 1110 1111 static void 1112 rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w, 1113 struct spdk_nvme_transport_poll_group_stat *stat) 1114 { 1115 spdk_json_write_named_uint64(w, "polls", stat->pcie.polls); 1116 spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls); 1117 spdk_json_write_named_uint64(w, "completions", stat->pcie.completions); 1118 spdk_json_write_named_uint64(w, "cq_mmio_doorbell_updates", stat->pcie.cq_mmio_doorbell_updates); 1119 spdk_json_write_named_uint64(w, "cq_shadow_doorbell_updates", 1120 stat->pcie.cq_shadow_doorbell_updates); 1121 spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests); 1122 spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests); 1123 spdk_json_write_named_uint64(w, "sq_mmio_doorbell_updates", stat->pcie.sq_mmio_doorbell_updates); 1124 spdk_json_write_named_uint64(w, "sq_shadow_doorbell_updates", 1125 stat->pcie.sq_shadow_doorbell_updates); 1126 } 1127 1128 static void 1129 rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w, 1130 struct spdk_nvme_transport_poll_group_stat *stat) 1131 { 1132 spdk_json_write_named_uint64(w, "polls", stat->tcp.polls); 1133 spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls); 1134 spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions); 1135 spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions); 1136 spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests); 1137 spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests); 1138 } 1139 1140 static void 1141 rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i) 1142 { 1143 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1144 struct spdk_io_channel *ch; 1145 
struct nvme_poll_group *group; 1146 struct spdk_nvme_poll_group_stat *stat; 1147 struct spdk_nvme_transport_poll_group_stat *tr_stat; 1148 uint32_t j; 1149 int rc; 1150 1151 ctx = spdk_io_channel_iter_get_ctx(i); 1152 ch = spdk_io_channel_iter_get_channel(i); 1153 group = spdk_io_channel_get_ctx(ch); 1154 1155 rc = spdk_nvme_poll_group_get_stats(group->group, &stat); 1156 if (rc) { 1157 spdk_for_each_channel_continue(i, rc); 1158 return; 1159 } 1160 1161 spdk_json_write_object_begin(ctx->w); 1162 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 1163 spdk_json_write_named_array_begin(ctx->w, "transports"); 1164 1165 for (j = 0; j < stat->num_transports; j++) { 1166 tr_stat = stat->transport_stat[j]; 1167 spdk_json_write_object_begin(ctx->w); 1168 spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1169 1170 switch (stat->transport_stat[j]->trtype) { 1171 case SPDK_NVME_TRANSPORT_RDMA: 1172 rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat); 1173 break; 1174 case SPDK_NVME_TRANSPORT_PCIE: 1175 case SPDK_NVME_TRANSPORT_VFIOUSER: 1176 rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat); 1177 break; 1178 case SPDK_NVME_TRANSPORT_TCP: 1179 rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat); 1180 break; 1181 default: 1182 SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype, 1183 spdk_nvme_transport_id_trtype_str(tr_stat->trtype)); 1184 } 1185 spdk_json_write_object_end(ctx->w); 1186 } 1187 /* transports array */ 1188 spdk_json_write_array_end(ctx->w); 1189 spdk_json_write_object_end(ctx->w); 1190 1191 spdk_nvme_poll_group_free_stats(group->group, stat); 1192 spdk_for_each_channel_continue(i, 0); 1193 } 1194 1195 static void 1196 rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status) 1197 { 1198 struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1199 1200 spdk_json_write_array_end(ctx->w); 1201 spdk_json_write_object_end(ctx->w); 1202 spdk_jsonrpc_end_result(ctx->request, ctx->w); 1203 free(ctx); 1204 } 1205 1206 static void 1207 rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request, 1208 const struct spdk_json_val *params) 1209 { 1210 struct rpc_bdev_nvme_transport_stat_ctx *ctx; 1211 1212 if (params) { 1213 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 1214 "'bdev_nvme_get_transport_statistics' requires no arguments"); 1215 return; 1216 } 1217 1218 ctx = calloc(1, sizeof(*ctx)); 1219 if (!ctx) { 1220 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1221 "Memory allocation error"); 1222 return; 1223 } 1224 ctx->request = request; 1225 ctx->w = spdk_jsonrpc_begin_result(ctx->request); 1226 spdk_json_write_object_begin(ctx->w); 1227 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 1228 1229 spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 1230 rpc_bdev_nvme_stats_per_channel, 1231 ctx, 1232 rpc_bdev_nvme_stats_done); 1233 } 1234 SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics, 1235 SPDK_RPC_RUNTIME) 1236 1237 struct rpc_bdev_nvme_reset_controller_req { 1238 char *name; 1239 }; 1240 1241 static void 1242 free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r) 1243 { 1244 free(r->name); 1245 } 1246 1247 static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = { 1248 {"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string}, 1249 }; 1250 1251 struct 
rpc_bdev_nvme_reset_controller_ctx { 1252 struct spdk_jsonrpc_request *request; 1253 bool success; 1254 struct spdk_thread *orig_thread; 1255 }; 1256 1257 static void 1258 _rpc_bdev_nvme_reset_controller_cb(void *_ctx) 1259 { 1260 struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx; 1261 1262 spdk_jsonrpc_send_bool_response(ctx->request, ctx->success); 1263 1264 free(ctx); 1265 } 1266 1267 static void 1268 rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success) 1269 { 1270 struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg; 1271 1272 ctx->success = success; 1273 1274 spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx); 1275 } 1276 1277 static void 1278 rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request, 1279 const struct spdk_json_val *params) 1280 { 1281 struct rpc_bdev_nvme_reset_controller_req req = {NULL}; 1282 struct rpc_bdev_nvme_reset_controller_ctx *ctx; 1283 struct nvme_ctrlr *nvme_ctrlr; 1284 int rc; 1285 1286 ctx = calloc(1, sizeof(*ctx)); 1287 if (ctx == NULL) { 1288 SPDK_ERRLOG("Memory allocation failed\n"); 1289 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1290 "Memory allocation failed"); 1291 return; 1292 } 1293 1294 if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders, 1295 SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders), 1296 &req)) { 1297 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1298 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL)); 1299 goto err; 1300 } 1301 1302 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1303 if (nvme_ctrlr == NULL) { 1304 SPDK_ERRLOG("Failed at device lookup\n"); 1305 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1306 goto err; 1307 } 1308 1309 ctx->request = request; 1310 ctx->orig_thread = spdk_get_thread(); 1311 1312 rc = bdev_nvme_reset_rpc(nvme_ctrlr, rpc_bdev_nvme_reset_controller_cb, ctx); 1313 if (rc != 0) { 1314 SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n"); 1315 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc)); 1316 goto err; 1317 } 1318 1319 free_rpc_bdev_nvme_reset_controller_req(&req); 1320 return; 1321 1322 err: 1323 free_rpc_bdev_nvme_reset_controller_req(&req); 1324 free(ctx); 1325 } 1326 SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME) 1327 1328 struct rpc_get_controller_health_info { 1329 char *name; 1330 }; 1331 1332 struct spdk_nvme_health_info_context { 1333 struct spdk_jsonrpc_request *request; 1334 struct spdk_nvme_ctrlr *ctrlr; 1335 struct spdk_nvme_health_information_page health_page; 1336 }; 1337 1338 static void 1339 free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r) 1340 { 1341 free(r->name); 1342 } 1343 1344 static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = { 1345 {"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true}, 1346 }; 1347 1348 static void 1349 nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response) 1350 { 1351 if (response == true) { 1352 spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1353 "Internal error."); 1354 } 1355 1356 free(context); 1357 } 1358 1359 static void 1360 get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1361 { 1362 int i; 1363 char buf[128]; 1364 struct 
spdk_nvme_health_info_context *context = cb_arg; 1365 struct spdk_jsonrpc_request *request = context->request; 1366 struct spdk_json_write_ctx *w; 1367 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1368 const struct spdk_nvme_transport_id *trid = NULL; 1369 const struct spdk_nvme_ctrlr_data *cdata = NULL; 1370 struct spdk_nvme_health_information_page *health_page = NULL; 1371 1372 if (spdk_nvme_cpl_is_error(cpl)) { 1373 nvme_health_info_cleanup(context, true); 1374 SPDK_ERRLOG("get log page failed\n"); 1375 return; 1376 } 1377 1378 if (ctrlr == NULL) { 1379 nvme_health_info_cleanup(context, true); 1380 SPDK_ERRLOG("ctrlr is NULL\n"); 1381 return; 1382 } else { 1383 trid = spdk_nvme_ctrlr_get_transport_id(ctrlr); 1384 cdata = spdk_nvme_ctrlr_get_data(ctrlr); 1385 health_page = &(context->health_page); 1386 } 1387 1388 w = spdk_jsonrpc_begin_result(request); 1389 1390 spdk_json_write_object_begin(w); 1391 snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn); 1392 spdk_str_trim(buf); 1393 spdk_json_write_named_string(w, "model_number", buf); 1394 snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn); 1395 spdk_str_trim(buf); 1396 spdk_json_write_named_string(w, "serial_number", buf); 1397 snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr); 1398 spdk_str_trim(buf); 1399 spdk_json_write_named_string(w, "firmware_revision", buf); 1400 spdk_json_write_named_string(w, "traddr", trid->traddr); 1401 spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273); 1402 spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare); 1403 spdk_json_write_named_uint64(w, "available_spare_threshold_percentage", 1404 health_page->available_spare_threshold); 1405 spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used); 1406 spdk_json_write_named_uint128(w, "data_units_read", 1407 health_page->data_units_read[0], health_page->data_units_read[1]); 1408 spdk_json_write_named_uint128(w, "data_units_written", 1409 health_page->data_units_written[0], health_page->data_units_written[1]); 1410 spdk_json_write_named_uint128(w, "host_read_commands", 1411 health_page->host_read_commands[0], health_page->host_read_commands[1]); 1412 spdk_json_write_named_uint128(w, "host_write_commands", 1413 health_page->host_write_commands[0], health_page->host_write_commands[1]); 1414 spdk_json_write_named_uint128(w, "controller_busy_time", 1415 health_page->controller_busy_time[0], health_page->controller_busy_time[1]); 1416 spdk_json_write_named_uint128(w, "power_cycles", 1417 health_page->power_cycles[0], health_page->power_cycles[1]); 1418 spdk_json_write_named_uint128(w, "power_on_hours", 1419 health_page->power_on_hours[0], health_page->power_on_hours[1]); 1420 spdk_json_write_named_uint128(w, "unsafe_shutdowns", 1421 health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]); 1422 spdk_json_write_named_uint128(w, "media_errors", 1423 health_page->media_errors[0], health_page->media_errors[1]); 1424 spdk_json_write_named_uint128(w, "num_err_log_entries", 1425 health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]); 1426 spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time); 1427 spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes", 1428 health_page->critical_temp_time); 1429 for (i = 0; i < 8; i++) { 1430 if (health_page->temp_sensor[i] != 0) { 1431 spdk_json_write_named_uint64(w, "temperature_sensor_celsius", 
health_page->temp_sensor[i] - 273); 1432 } 1433 } 1434 spdk_json_write_object_end(w); 1435 1436 spdk_jsonrpc_end_result(request, w); 1437 nvme_health_info_cleanup(context, false); 1438 } 1439 1440 static void 1441 get_health_log_page(struct spdk_nvme_health_info_context *context) 1442 { 1443 struct spdk_nvme_ctrlr *ctrlr = context->ctrlr; 1444 1445 if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, 1446 SPDK_NVME_GLOBAL_NS_TAG, 1447 &(context->health_page), sizeof(context->health_page), 0, 1448 get_health_log_page_completion, context)) { 1449 nvme_health_info_cleanup(context, true); 1450 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n"); 1451 } 1452 } 1453 1454 static void 1455 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) 1456 { 1457 struct spdk_nvme_health_info_context *context = cb_arg; 1458 1459 if (spdk_nvme_cpl_is_error(cpl)) { 1460 nvme_health_info_cleanup(context, true); 1461 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n"); 1462 } else { 1463 get_health_log_page(context); 1464 } 1465 } 1466 1467 static int 1468 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context) 1469 { 1470 struct spdk_nvme_cmd cmd = {}; 1471 1472 cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1473 cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1474 1475 return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0, 1476 get_temperature_threshold_feature_completion, context); 1477 } 1478 1479 static void 1480 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr) 1481 { 1482 struct spdk_nvme_health_info_context *context; 1483 1484 context = calloc(1, sizeof(struct spdk_nvme_health_info_context)); 1485 if (!context) { 1486 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1487 "Memory allocation error."); 1488 return; 1489 } 1490 1491 context->request = request; 1492 context->ctrlr = ctrlr; 1493 1494 if (get_temperature_threshold_feature(context)) { 1495 nvme_health_info_cleanup(context, true); 1496 SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n"); 1497 } 1498 1499 return; 1500 } 1501 1502 static void 1503 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request, 1504 const struct spdk_json_val *params) 1505 { 1506 struct rpc_get_controller_health_info req = {}; 1507 struct nvme_ctrlr *nvme_ctrlr = NULL; 1508 1509 if (!params) { 1510 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1511 "Missing device name"); 1512 1513 return; 1514 } 1515 if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders, 1516 SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) { 1517 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1518 free_rpc_get_controller_health_info(&req); 1519 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1520 "Invalid parameters"); 1521 1522 return; 1523 } 1524 1525 nvme_ctrlr = nvme_ctrlr_get_by_name(req.name); 1526 1527 if (!nvme_ctrlr) { 1528 SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name); 1529 free_rpc_get_controller_health_info(&req); 1530 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1531 "Device not found"); 1532 return; 1533 } 1534 1535 get_controller_health_info(request, nvme_ctrlr->ctrlr); 1536 free_rpc_get_controller_health_info(&req); 1537 1538 return; 1539 } 1540 
SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info", 1541 rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME) 1542 1543 struct rpc_bdev_nvme_start_discovery { 1544 char *name; 1545 char *trtype; 1546 char *adrfam; 1547 char *traddr; 1548 char *trsvcid; 1549 char *hostnqn; 1550 bool wait_for_attach; 1551 uint64_t attach_timeout_ms; 1552 struct spdk_nvme_ctrlr_opts opts; 1553 struct nvme_ctrlr_opts bdev_opts; 1554 }; 1555 1556 static void 1557 free_rpc_bdev_nvme_start_discovery(struct rpc_bdev_nvme_start_discovery *req) 1558 { 1559 free(req->name); 1560 free(req->trtype); 1561 free(req->adrfam); 1562 free(req->traddr); 1563 free(req->trsvcid); 1564 free(req->hostnqn); 1565 } 1566 1567 static const struct spdk_json_object_decoder rpc_bdev_nvme_start_discovery_decoders[] = { 1568 {"name", offsetof(struct rpc_bdev_nvme_start_discovery, name), spdk_json_decode_string}, 1569 {"trtype", offsetof(struct rpc_bdev_nvme_start_discovery, trtype), spdk_json_decode_string}, 1570 {"traddr", offsetof(struct rpc_bdev_nvme_start_discovery, traddr), spdk_json_decode_string}, 1571 {"adrfam", offsetof(struct rpc_bdev_nvme_start_discovery, adrfam), spdk_json_decode_string, true}, 1572 {"trsvcid", offsetof(struct rpc_bdev_nvme_start_discovery, trsvcid), spdk_json_decode_string, true}, 1573 {"hostnqn", offsetof(struct rpc_bdev_nvme_start_discovery, hostnqn), spdk_json_decode_string, true}, 1574 {"wait_for_attach", offsetof(struct rpc_bdev_nvme_start_discovery, wait_for_attach), spdk_json_decode_bool, true}, 1575 {"attach_timeout_ms", offsetof(struct rpc_bdev_nvme_start_discovery, attach_timeout_ms), spdk_json_decode_uint64, true}, 1576 {"ctrlr_loss_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.ctrlr_loss_timeout_sec), spdk_json_decode_int32, true}, 1577 {"reconnect_delay_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.reconnect_delay_sec), spdk_json_decode_uint32, true}, 1578 {"fast_io_fail_timeout_sec", offsetof(struct rpc_bdev_nvme_start_discovery, bdev_opts.fast_io_fail_timeout_sec), spdk_json_decode_uint32, true}, 1579 }; 1580 1581 struct rpc_bdev_nvme_start_discovery_ctx { 1582 struct rpc_bdev_nvme_start_discovery req; 1583 struct spdk_jsonrpc_request *request; 1584 }; 1585 1586 static void 1587 rpc_bdev_nvme_start_discovery_done(void *ctx, int status) 1588 { 1589 struct spdk_jsonrpc_request *request = ctx; 1590 1591 if (status != 0) { 1592 spdk_jsonrpc_send_error_response(request, status, spdk_strerror(-status)); 1593 } else { 1594 spdk_jsonrpc_send_bool_response(request, true); 1595 } 1596 } 1597 1598 static void 1599 rpc_bdev_nvme_start_discovery(struct spdk_jsonrpc_request *request, 1600 const struct spdk_json_val *params) 1601 { 1602 struct rpc_bdev_nvme_start_discovery_ctx *ctx; 1603 struct spdk_nvme_transport_id trid = {}; 1604 size_t len, maxlen; 1605 int rc; 1606 spdk_bdev_nvme_start_discovery_fn cb_fn; 1607 void *cb_ctx; 1608 1609 ctx = calloc(1, sizeof(*ctx)); 1610 if (!ctx) { 1611 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1612 return; 1613 } 1614 1615 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts)); 1616 1617 if (spdk_json_decode_object(params, rpc_bdev_nvme_start_discovery_decoders, 1618 SPDK_COUNTOF(rpc_bdev_nvme_start_discovery_decoders), 1619 &ctx->req)) { 1620 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1621 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1622 "spdk_json_decode_object failed"); 1623 goto cleanup; 1624 } 1625 1626 /* Parse 
trstring */ 1627 rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype); 1628 if (rc < 0) { 1629 SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype); 1630 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s", 1631 ctx->req.trtype); 1632 goto cleanup; 1633 } 1634 1635 /* Parse trtype */ 1636 rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); 1637 assert(rc == 0); 1638 1639 /* Parse traddr */ 1640 maxlen = sizeof(trid.traddr); 1641 len = strnlen(ctx->req.traddr, maxlen); 1642 if (len == maxlen) { 1643 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s", 1644 ctx->req.traddr); 1645 goto cleanup; 1646 } 1647 memcpy(trid.traddr, ctx->req.traddr, len + 1); 1648 1649 /* Parse adrfam */ 1650 if (ctx->req.adrfam) { 1651 rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam); 1652 if (rc < 0) { 1653 SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam); 1654 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s", 1655 ctx->req.adrfam); 1656 goto cleanup; 1657 } 1658 } 1659 1660 /* Parse trsvcid */ 1661 if (ctx->req.trsvcid) { 1662 maxlen = sizeof(trid.trsvcid); 1663 len = strnlen(ctx->req.trsvcid, maxlen); 1664 if (len == maxlen) { 1665 spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s", 1666 ctx->req.trsvcid); 1667 goto cleanup; 1668 } 1669 memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1); 1670 } 1671 1672 if (ctx->req.hostnqn) { 1673 snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s", 1674 ctx->req.hostnqn); 1675 } 1676 1677 if (ctx->req.attach_timeout_ms != 0) { 1678 ctx->req.wait_for_attach = true; 1679 } 1680 1681 ctx->request = request; 1682 cb_fn = ctx->req.wait_for_attach ? rpc_bdev_nvme_start_discovery_done : NULL; 1683 cb_ctx = ctx->req.wait_for_attach ? 
request : NULL; 1684 rc = bdev_nvme_start_discovery(&trid, ctx->req.name, &ctx->req.opts, &ctx->req.bdev_opts, 1685 ctx->req.attach_timeout_ms, cb_fn, cb_ctx); 1686 if (rc) { 1687 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1688 } else if (!ctx->req.wait_for_attach) { 1689 rpc_bdev_nvme_start_discovery_done(request, 0); 1690 } 1691 1692 cleanup: 1693 free_rpc_bdev_nvme_start_discovery(&ctx->req); 1694 free(ctx); 1695 } 1696 SPDK_RPC_REGISTER("bdev_nvme_start_discovery", rpc_bdev_nvme_start_discovery, 1697 SPDK_RPC_RUNTIME) 1698 1699 struct rpc_bdev_nvme_stop_discovery { 1700 char *name; 1701 }; 1702 1703 static const struct spdk_json_object_decoder rpc_bdev_nvme_stop_discovery_decoders[] = { 1704 {"name", offsetof(struct rpc_bdev_nvme_stop_discovery, name), spdk_json_decode_string}, 1705 }; 1706 1707 struct rpc_bdev_nvme_stop_discovery_ctx { 1708 struct rpc_bdev_nvme_stop_discovery req; 1709 struct spdk_jsonrpc_request *request; 1710 }; 1711 1712 static void 1713 rpc_bdev_nvme_stop_discovery_done(void *cb_ctx) 1714 { 1715 struct rpc_bdev_nvme_stop_discovery_ctx *ctx = cb_ctx; 1716 1717 spdk_jsonrpc_send_bool_response(ctx->request, true); 1718 free(ctx->req.name); 1719 free(ctx); 1720 } 1721 1722 static void 1723 rpc_bdev_nvme_stop_discovery(struct spdk_jsonrpc_request *request, 1724 const struct spdk_json_val *params) 1725 { 1726 struct rpc_bdev_nvme_stop_discovery_ctx *ctx; 1727 int rc; 1728 1729 ctx = calloc(1, sizeof(*ctx)); 1730 if (!ctx) { 1731 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1732 return; 1733 } 1734 1735 if (spdk_json_decode_object(params, rpc_bdev_nvme_stop_discovery_decoders, 1736 SPDK_COUNTOF(rpc_bdev_nvme_stop_discovery_decoders), 1737 &ctx->req)) { 1738 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1739 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1740 "spdk_json_decode_object failed"); 1741 goto cleanup; 1742 } 1743 1744 ctx->request = request; 1745 rc = bdev_nvme_stop_discovery(ctx->req.name, rpc_bdev_nvme_stop_discovery_done, ctx); 1746 if (rc) { 1747 spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); 1748 goto cleanup; 1749 } 1750 1751 return; 1752 1753 cleanup: 1754 free(ctx->req.name); 1755 free(ctx); 1756 } 1757 SPDK_RPC_REGISTER("bdev_nvme_stop_discovery", rpc_bdev_nvme_stop_discovery, 1758 SPDK_RPC_RUNTIME) 1759 1760 static void 1761 rpc_bdev_nvme_get_discovery_info(struct spdk_jsonrpc_request *request, 1762 const struct spdk_json_val *params) 1763 { 1764 struct spdk_json_write_ctx *w; 1765 1766 w = spdk_jsonrpc_begin_result(request); 1767 bdev_nvme_get_discovery_info(w); 1768 spdk_jsonrpc_end_result(request, w); 1769 } 1770 SPDK_RPC_REGISTER("bdev_nvme_get_discovery_info", rpc_bdev_nvme_get_discovery_info, 1771 SPDK_RPC_RUNTIME) 1772 1773 enum error_injection_cmd_type { 1774 NVME_ADMIN_CMD = 1, 1775 NVME_IO_CMD, 1776 }; 1777 1778 struct rpc_add_error_injection { 1779 char *name; 1780 enum error_injection_cmd_type cmd_type; 1781 uint8_t opc; 1782 bool do_not_submit; 1783 uint64_t timeout_in_us; 1784 uint32_t err_count; 1785 uint8_t sct; 1786 uint8_t sc; 1787 }; 1788 1789 static void 1790 free_rpc_add_error_injection(struct rpc_add_error_injection *req) 1791 { 1792 free(req->name); 1793 } 1794 1795 static int 1796 rpc_error_injection_decode_cmd_type(const struct spdk_json_val *val, void *out) 1797 { 1798 int *cmd_type = out; 1799 1800 if (spdk_json_strequal(val, "admin")) { 1801 *cmd_type = NVME_ADMIN_CMD; 1802 } else if (spdk_json_strequal(val, "io")) { 
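	/*
	 * "io" targets I/O queue pairs: the add/remove handlers below walk every
	 * channel with spdk_for_each_channel() and apply the injection per qpair,
	 * while "admin" passes a NULL qpair so the injection is armed on the
	 * controller's admin queue instead.
	 */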
1803 *cmd_type = NVME_IO_CMD; 1804 } else { 1805 SPDK_ERRLOG("Invalid parameter value: cmd_type\n"); 1806 return -EINVAL; 1807 } 1808 1809 return 0; 1810 } 1811 1812 static const struct spdk_json_object_decoder rpc_add_error_injection_decoders[] = { 1813 { "name", offsetof(struct rpc_add_error_injection, name), spdk_json_decode_string }, 1814 { "cmd_type", offsetof(struct rpc_add_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1815 { "opc", offsetof(struct rpc_add_error_injection, opc), spdk_json_decode_uint8 }, 1816 { "do_not_submit", offsetof(struct rpc_add_error_injection, do_not_submit), spdk_json_decode_bool, true }, 1817 { "timeout_in_us", offsetof(struct rpc_add_error_injection, timeout_in_us), spdk_json_decode_uint64, true }, 1818 { "err_count", offsetof(struct rpc_add_error_injection, err_count), spdk_json_decode_uint32, true }, 1819 { "sct", offsetof(struct rpc_add_error_injection, sct), spdk_json_decode_uint8, true}, 1820 { "sc", offsetof(struct rpc_add_error_injection, sc), spdk_json_decode_uint8, true}, 1821 }; 1822 1823 struct rpc_add_error_injection_ctx { 1824 struct spdk_jsonrpc_request *request; 1825 struct rpc_add_error_injection rpc; 1826 }; 1827 1828 static void 1829 rpc_add_error_injection_done(struct spdk_io_channel_iter *i, int status) 1830 { 1831 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1832 1833 if (status) { 1834 spdk_jsonrpc_send_error_response(ctx->request, status, 1835 "Failed to add the error injection."); 1836 } else { 1837 spdk_jsonrpc_send_bool_response(ctx->request, true); 1838 } 1839 1840 free_rpc_add_error_injection(&ctx->rpc); 1841 free(ctx); 1842 } 1843 1844 static void 1845 rpc_add_error_injection_per_channel(struct spdk_io_channel_iter *i) 1846 { 1847 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1848 struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1849 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1850 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1851 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1852 int rc = 0; 1853 1854 if (qpair != NULL) { 1855 rc = spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc, 1856 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1857 ctx->rpc.sct, ctx->rpc.sc); 1858 } 1859 1860 spdk_for_each_channel_continue(i, rc); 1861 } 1862 1863 static void 1864 rpc_bdev_nvme_add_error_injection( 1865 struct spdk_jsonrpc_request *request, 1866 const struct spdk_json_val *params) 1867 { 1868 struct rpc_add_error_injection_ctx *ctx; 1869 struct nvme_ctrlr *nvme_ctrlr; 1870 int rc; 1871 1872 ctx = calloc(1, sizeof(*ctx)); 1873 if (!ctx) { 1874 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1875 return; 1876 } 1877 ctx->rpc.err_count = 1; 1878 ctx->request = request; 1879 1880 if (spdk_json_decode_object(params, 1881 rpc_add_error_injection_decoders, 1882 SPDK_COUNTOF(rpc_add_error_injection_decoders), 1883 &ctx->rpc)) { 1884 spdk_jsonrpc_send_error_response(request, -EINVAL, 1885 "Failed to parse the request"); 1886 goto cleanup; 1887 } 1888 1889 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 1890 if (nvme_ctrlr == NULL) { 1891 SPDK_ERRLOG("No controller with specified name was found.\n"); 1892 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 1893 goto cleanup; 1894 } 1895 1896 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 1897 spdk_for_each_channel(nvme_ctrlr, 1898 rpc_add_error_injection_per_channel, 
1899 ctx, 1900 rpc_add_error_injection_done); 1901 1902 return; 1903 } else { 1904 rc = spdk_nvme_qpair_add_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc, 1905 ctx->rpc.do_not_submit, ctx->rpc.timeout_in_us, ctx->rpc.err_count, 1906 ctx->rpc.sct, ctx->rpc.sc); 1907 if (rc) { 1908 spdk_jsonrpc_send_error_response(request, -rc, 1909 "Failed to add the error injection"); 1910 } else { 1911 spdk_jsonrpc_send_bool_response(ctx->request, true); 1912 } 1913 } 1914 1915 cleanup: 1916 free_rpc_add_error_injection(&ctx->rpc); 1917 free(ctx); 1918 } 1919 SPDK_RPC_REGISTER("bdev_nvme_add_error_injection", rpc_bdev_nvme_add_error_injection, 1920 SPDK_RPC_RUNTIME) 1921 1922 struct rpc_remove_error_injection { 1923 char *name; 1924 enum error_injection_cmd_type cmd_type; 1925 uint8_t opc; 1926 }; 1927 1928 static void 1929 free_rpc_remove_error_injection(struct rpc_remove_error_injection *req) 1930 { 1931 free(req->name); 1932 } 1933 1934 static const struct spdk_json_object_decoder rpc_remove_error_injection_decoders[] = { 1935 { "name", offsetof(struct rpc_remove_error_injection, name), spdk_json_decode_string }, 1936 { "cmd_type", offsetof(struct rpc_remove_error_injection, cmd_type), rpc_error_injection_decode_cmd_type }, 1937 { "opc", offsetof(struct rpc_remove_error_injection, opc), spdk_json_decode_uint8 }, 1938 }; 1939 1940 struct rpc_remove_error_injection_ctx { 1941 struct spdk_jsonrpc_request *request; 1942 struct rpc_remove_error_injection rpc; 1943 }; 1944 1945 static void 1946 rpc_remove_error_injection_done(struct spdk_io_channel_iter *i, int status) 1947 { 1948 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1949 1950 if (status) { 1951 spdk_jsonrpc_send_error_response(ctx->request, status, 1952 "Failed to remove the error injection."); 1953 } else { 1954 spdk_jsonrpc_send_bool_response(ctx->request, true); 1955 } 1956 1957 free_rpc_remove_error_injection(&ctx->rpc); 1958 free(ctx); 1959 } 1960 1961 static void 1962 rpc_remove_error_injection_per_channel(struct spdk_io_channel_iter *i) 1963 { 1964 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 1965 struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1966 struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch); 1967 struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair; 1968 struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr; 1969 1970 if (qpair != NULL) { 1971 spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc); 1972 } 1973 1974 spdk_for_each_channel_continue(i, 0); 1975 } 1976 1977 static void 1978 rpc_bdev_nvme_remove_error_injection(struct spdk_jsonrpc_request *request, 1979 const struct spdk_json_val *params) 1980 { 1981 struct rpc_remove_error_injection_ctx *ctx; 1982 struct nvme_ctrlr *nvme_ctrlr; 1983 1984 ctx = calloc(1, sizeof(*ctx)); 1985 if (!ctx) { 1986 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 1987 return; 1988 } 1989 ctx->request = request; 1990 1991 if (spdk_json_decode_object(params, 1992 rpc_remove_error_injection_decoders, 1993 SPDK_COUNTOF(rpc_remove_error_injection_decoders), 1994 &ctx->rpc)) { 1995 spdk_jsonrpc_send_error_response(request, -EINVAL, 1996 "Failed to parse the request"); 1997 goto cleanup; 1998 } 1999 2000 nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->rpc.name); 2001 if (nvme_ctrlr == NULL) { 2002 SPDK_ERRLOG("No controller with specified name was found.\n"); 2003 spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); 2004 goto 
cleanup; 2005 } 2006 2007 if (ctx->rpc.cmd_type == NVME_IO_CMD) { 2008 spdk_for_each_channel(nvme_ctrlr, 2009 rpc_remove_error_injection_per_channel, 2010 ctx, 2011 rpc_remove_error_injection_done); 2012 return; 2013 } else { 2014 spdk_nvme_qpair_remove_cmd_error_injection(nvme_ctrlr->ctrlr, NULL, ctx->rpc.opc); 2015 spdk_jsonrpc_send_bool_response(ctx->request, true); 2016 } 2017 2018 cleanup: 2019 free_rpc_remove_error_injection(&ctx->rpc); 2020 free(ctx); 2021 } 2022 SPDK_RPC_REGISTER("bdev_nvme_remove_error_injection", rpc_bdev_nvme_remove_error_injection, 2023 SPDK_RPC_RUNTIME) 2024 2025 struct rpc_get_io_paths { 2026 char *name; 2027 }; 2028 2029 static void 2030 free_rpc_get_io_paths(struct rpc_get_io_paths *r) 2031 { 2032 free(r->name); 2033 } 2034 2035 static const struct spdk_json_object_decoder rpc_get_io_paths_decoders[] = { 2036 {"name", offsetof(struct rpc_get_io_paths, name), spdk_json_decode_string, true}, 2037 }; 2038 2039 struct rpc_get_io_paths_ctx { 2040 struct rpc_get_io_paths req; 2041 struct spdk_jsonrpc_request *request; 2042 struct spdk_json_write_ctx *w; 2043 }; 2044 2045 static void 2046 rpc_bdev_nvme_get_io_paths_done(struct spdk_io_channel_iter *i, int status) 2047 { 2048 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2049 2050 spdk_json_write_array_end(ctx->w); 2051 2052 spdk_json_write_object_end(ctx->w); 2053 2054 spdk_jsonrpc_end_result(ctx->request, ctx->w); 2055 2056 free_rpc_get_io_paths(&ctx->req); 2057 free(ctx); 2058 } 2059 2060 static void 2061 _rpc_bdev_nvme_get_io_paths(struct spdk_io_channel_iter *i) 2062 { 2063 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 2064 struct nvme_poll_group *group = spdk_io_channel_get_ctx(_ch); 2065 struct rpc_get_io_paths_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2066 struct nvme_qpair *qpair; 2067 struct nvme_io_path *io_path; 2068 struct nvme_bdev *nbdev; 2069 2070 spdk_json_write_object_begin(ctx->w); 2071 2072 spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread())); 2073 2074 spdk_json_write_named_array_begin(ctx->w, "io_paths"); 2075 2076 TAILQ_FOREACH(qpair, &group->qpair_list, tailq) { 2077 TAILQ_FOREACH(io_path, &qpair->io_path_list, tailq) { 2078 nbdev = io_path->nvme_ns->bdev; 2079 2080 if (ctx->req.name != NULL && 2081 strcmp(ctx->req.name, nbdev->disk.name) != 0) { 2082 continue; 2083 } 2084 2085 nvme_io_path_info_json(ctx->w, io_path); 2086 } 2087 } 2088 2089 spdk_json_write_array_end(ctx->w); 2090 2091 spdk_json_write_object_end(ctx->w); 2092 2093 spdk_for_each_channel_continue(i, 0); 2094 } 2095 2096 static void 2097 rpc_bdev_nvme_get_io_paths(struct spdk_jsonrpc_request *request, 2098 const struct spdk_json_val *params) 2099 { 2100 struct rpc_get_io_paths_ctx *ctx; 2101 2102 ctx = calloc(1, sizeof(*ctx)); 2103 if (ctx == NULL) { 2104 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2105 return; 2106 } 2107 2108 if (params != NULL && 2109 spdk_json_decode_object(params, rpc_get_io_paths_decoders, 2110 SPDK_COUNTOF(rpc_get_io_paths_decoders), 2111 &ctx->req)) { 2112 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, 2113 "bdev_nvme_get_io_paths requires no parameters"); 2114 2115 free_rpc_get_io_paths(&ctx->req); 2116 free(ctx); 2117 return; 2118 } 2119 2120 ctx->request = request; 2121 ctx->w = spdk_jsonrpc_begin_result(request); 2122 2123 spdk_json_write_object_begin(ctx->w); 2124 2125 spdk_json_write_named_array_begin(ctx->w, "poll_groups"); 2126 2127 
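	/*
	 * Each poll-group channel appends one object to "poll_groups" in
	 * _rpc_bdev_nvme_get_io_paths() above; the array, the enclosing object and
	 * the JSON-RPC result are finalized in rpc_bdev_nvme_get_io_paths_done().
	 */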
spdk_for_each_channel(&g_nvme_bdev_ctrlrs, 2128 _rpc_bdev_nvme_get_io_paths, 2129 ctx, 2130 rpc_bdev_nvme_get_io_paths_done); 2131 } 2132 SPDK_RPC_REGISTER("bdev_nvme_get_io_paths", rpc_bdev_nvme_get_io_paths, SPDK_RPC_RUNTIME) 2133 2134 struct rpc_bdev_nvme_set_preferred_path { 2135 char *name; 2136 uint16_t cntlid; 2137 }; 2138 2139 static void 2140 free_rpc_bdev_nvme_set_preferred_path(struct rpc_bdev_nvme_set_preferred_path *req) 2141 { 2142 free(req->name); 2143 } 2144 2145 static const struct spdk_json_object_decoder rpc_bdev_nvme_set_preferred_path_decoders[] = { 2146 {"name", offsetof(struct rpc_bdev_nvme_set_preferred_path, name), spdk_json_decode_string}, 2147 {"cntlid", offsetof(struct rpc_bdev_nvme_set_preferred_path, cntlid), spdk_json_decode_uint16}, 2148 }; 2149 2150 struct rpc_bdev_nvme_set_preferred_path_ctx { 2151 struct rpc_bdev_nvme_set_preferred_path req; 2152 struct spdk_jsonrpc_request *request; 2153 }; 2154 2155 static void 2156 rpc_bdev_nvme_set_preferred_path_done(void *cb_arg, int rc) 2157 { 2158 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx = cb_arg; 2159 2160 if (rc == 0) { 2161 spdk_jsonrpc_send_bool_response(ctx->request, true); 2162 } else { 2163 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2164 } 2165 2166 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2167 free(ctx); 2168 } 2169 2170 static void 2171 rpc_bdev_nvme_set_preferred_path(struct spdk_jsonrpc_request *request, 2172 const struct spdk_json_val *params) 2173 { 2174 struct rpc_bdev_nvme_set_preferred_path_ctx *ctx; 2175 2176 ctx = calloc(1, sizeof(*ctx)); 2177 if (ctx == NULL) { 2178 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2179 return; 2180 } 2181 2182 if (spdk_json_decode_object(params, rpc_bdev_nvme_set_preferred_path_decoders, 2183 SPDK_COUNTOF(rpc_bdev_nvme_set_preferred_path_decoders), 2184 &ctx->req)) { 2185 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2186 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2187 "spdk_json_decode_object failed"); 2188 goto cleanup; 2189 } 2190 2191 ctx->request = request; 2192 2193 bdev_nvme_set_preferred_path(ctx->req.name, ctx->req.cntlid, 2194 rpc_bdev_nvme_set_preferred_path_done, ctx); 2195 return; 2196 2197 cleanup: 2198 free_rpc_bdev_nvme_set_preferred_path(&ctx->req); 2199 free(ctx); 2200 } 2201 SPDK_RPC_REGISTER("bdev_nvme_set_preferred_path", rpc_bdev_nvme_set_preferred_path, 2202 SPDK_RPC_RUNTIME) 2203 2204 struct rpc_set_multipath_policy { 2205 char *name; 2206 enum bdev_nvme_multipath_policy policy; 2207 }; 2208 2209 static void 2210 free_rpc_set_multipath_policy(struct rpc_set_multipath_policy *req) 2211 { 2212 free(req->name); 2213 } 2214 2215 static int 2216 rpc_decode_mp_policy(const struct spdk_json_val *val, void *out) 2217 { 2218 enum bdev_nvme_multipath_policy *policy = out; 2219 2220 if (spdk_json_strequal(val, "active_passive") == true) { 2221 *policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 2222 } else if (spdk_json_strequal(val, "active_active") == true) { 2223 *policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 2224 } else { 2225 SPDK_NOTICELOG("Invalid parameter value: policy\n"); 2226 return -EINVAL; 2227 } 2228 2229 return 0; 2230 } 2231 2232 static const struct spdk_json_object_decoder rpc_set_multipath_policy_decoders[] = { 2233 {"name", offsetof(struct rpc_set_multipath_policy, name), spdk_json_decode_string}, 2234 {"policy", offsetof(struct rpc_set_multipath_policy, policy), rpc_decode_mp_policy}, 2235 }; 2236 2237 struct 
rpc_set_multipath_policy_ctx { 2238 struct rpc_set_multipath_policy req; 2239 struct spdk_jsonrpc_request *request; 2240 }; 2241 2242 static void 2243 rpc_bdev_nvme_set_multipath_policy_done(void *cb_arg, int rc) 2244 { 2245 struct rpc_set_multipath_policy_ctx *ctx = cb_arg; 2246 2247 if (rc == 0) { 2248 spdk_jsonrpc_send_bool_response(ctx->request, true); 2249 } else { 2250 spdk_jsonrpc_send_error_response(ctx->request, rc, spdk_strerror(-rc)); 2251 } 2252 2253 free_rpc_set_multipath_policy(&ctx->req); 2254 free(ctx); 2255 } 2256 2257 static void 2258 rpc_bdev_nvme_set_multipath_policy(struct spdk_jsonrpc_request *request, 2259 const struct spdk_json_val *params) 2260 { 2261 struct rpc_set_multipath_policy_ctx *ctx; 2262 2263 ctx = calloc(1, sizeof(*ctx)); 2264 if (ctx == NULL) { 2265 spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); 2266 return; 2267 } 2268 2269 if (spdk_json_decode_object(params, rpc_set_multipath_policy_decoders, 2270 SPDK_COUNTOF(rpc_set_multipath_policy_decoders), 2271 &ctx->req)) { 2272 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 2273 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 2274 "spdk_json_decode_object failed"); 2275 goto cleanup; 2276 } 2277 2278 ctx->request = request; 2279 2280 bdev_nvme_set_multipath_policy(ctx->req.name, ctx->req.policy, 2281 rpc_bdev_nvme_set_multipath_policy_done, ctx); 2282 return; 2283 2284 cleanup: 2285 free_rpc_set_multipath_policy(&ctx->req); 2286 free(ctx); 2287 } 2288 SPDK_RPC_REGISTER("bdev_nvme_set_multipath_policy", rpc_bdev_nvme_set_multipath_policy, 2289 SPDK_RPC_RUNTIME) 2290
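/*
 * Example invocations of the multipath RPCs registered above; the bdev name
 * "Nvme0n1" and controller ID 1 are illustrative only:
 *
 *   {
 *     "jsonrpc": "2.0",
 *     "id": 1,
 *     "method": "bdev_nvme_set_preferred_path",
 *     "params": { "name": "Nvme0n1", "cntlid": 1 }
 *   }
 *
 *   {
 *     "jsonrpc": "2.0",
 *     "id": 2,
 *     "method": "bdev_nvme_set_multipath_policy",
 *     "params": { "name": "Nvme0n1", "policy": "active_active" }
 *   }
 *
 * Both RPCs reply asynchronously from their *_done() callbacks: a boolean true
 * on success, or a JSON-RPC error built from the negative errno on failure.
 */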