xref: /spdk/module/bdev/nvme/bdev_nvme_rpc.c (revision 9544fe07aad355262fcaa65dc27f9965a8ea4617)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "bdev_nvme.h"
37 
38 #include "spdk/config.h"
39 
40 #include "spdk/string.h"
41 #include "spdk/rpc.h"
42 #include "spdk/util.h"
43 #include "spdk/env.h"
44 #include "spdk/nvme.h"
45 #include "spdk/nvme_spec.h"
46 
47 #include "spdk/log.h"
48 #include "spdk/bdev_module.h"
49 
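/*
 * Tracks one bdev opened during a firmware update: the descriptor, the bdev
 * itself, and the thread on which it was opened so it can be closed on that
 * same thread later (see apply_firmware_cleanup()).
 */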
50 struct open_descriptors {
51 	void *desc;
52 	struct spdk_bdev *bdev;
53 	TAILQ_ENTRY(open_descriptors) tqlst;
54 	struct spdk_thread *thread;
55 };
56 typedef TAILQ_HEAD(, open_descriptors) open_descriptors_t;
57 
58 static int
59 rpc_decode_action_on_timeout(const struct spdk_json_val *val, void *out)
60 {
61 	enum spdk_bdev_timeout_action *action = out;
62 
63 	if (spdk_json_strequal(val, "none") == true) {
64 		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE;
65 	} else if (spdk_json_strequal(val, "abort") == true) {
66 		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT;
67 	} else if (spdk_json_strequal(val, "reset") == true) {
68 		*action = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET;
69 	} else {
70 		SPDK_NOTICELOG("Invalid parameter value: action_on_timeout\n");
71 		return -EINVAL;
72 	}
73 
74 	return 0;
75 }
76 
77 static const struct spdk_json_object_decoder rpc_bdev_nvme_options_decoders[] = {
78 	{"action_on_timeout", offsetof(struct spdk_bdev_nvme_opts, action_on_timeout), rpc_decode_action_on_timeout, true},
79 	{"timeout_us", offsetof(struct spdk_bdev_nvme_opts, timeout_us), spdk_json_decode_uint64, true},
80 	{"timeout_admin_us", offsetof(struct spdk_bdev_nvme_opts, timeout_admin_us), spdk_json_decode_uint64, true},
81 	{"keep_alive_timeout_ms", offsetof(struct spdk_bdev_nvme_opts, keep_alive_timeout_ms), spdk_json_decode_uint32, true},
82 	{"retry_count", offsetof(struct spdk_bdev_nvme_opts, retry_count), spdk_json_decode_uint32, true},
83 	{"arbitration_burst", offsetof(struct spdk_bdev_nvme_opts, arbitration_burst), spdk_json_decode_uint32, true},
84 	{"low_priority_weight", offsetof(struct spdk_bdev_nvme_opts, low_priority_weight), spdk_json_decode_uint32, true},
85 	{"medium_priority_weight", offsetof(struct spdk_bdev_nvme_opts, medium_priority_weight), spdk_json_decode_uint32, true},
86 	{"high_priority_weight", offsetof(struct spdk_bdev_nvme_opts, high_priority_weight), spdk_json_decode_uint32, true},
87 	{"nvme_adminq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_adminq_poll_period_us), spdk_json_decode_uint64, true},
88 	{"nvme_ioq_poll_period_us", offsetof(struct spdk_bdev_nvme_opts, nvme_ioq_poll_period_us), spdk_json_decode_uint64, true},
89 	{"io_queue_requests", offsetof(struct spdk_bdev_nvme_opts, io_queue_requests), spdk_json_decode_uint32, true},
90 	{"delay_cmd_submit", offsetof(struct spdk_bdev_nvme_opts, delay_cmd_submit), spdk_json_decode_bool, true},
91 };
92 
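/*
 * Illustrative JSON-RPC request for this handler (example values only; all
 * parameters are optional and default to the current bdev_nvme options):
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_set_options",
 *  "params": {"action_on_timeout": "reset", "timeout_us": 10000000, "retry_count": 4}}
 */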
93 static void
94 rpc_bdev_nvme_set_options(struct spdk_jsonrpc_request *request,
95 			  const struct spdk_json_val *params)
96 {
97 	struct spdk_bdev_nvme_opts opts;
98 	int rc;
99 
100 	bdev_nvme_get_opts(&opts);
101 	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_options_decoders,
102 					      SPDK_COUNTOF(rpc_bdev_nvme_options_decoders),
103 					      &opts)) {
104 		SPDK_ERRLOG("spdk_json_decode_object failed\n");
105 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
106 						 "spdk_json_decode_object failed");
107 		return;
108 	}
109 
110 	rc = bdev_nvme_set_opts(&opts);
111 	if (rc) {
112 		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
113 		return;
114 	}
115 
116 	spdk_jsonrpc_send_bool_response(request, true);
117 
118 	return;
119 }
120 SPDK_RPC_REGISTER("bdev_nvme_set_options", rpc_bdev_nvme_set_options,
121 		  SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME)
122 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_options, set_bdev_nvme_options)
123 
124 struct rpc_bdev_nvme_hotplug {
125 	bool enabled;
126 	uint64_t period_us;
127 };
128 
129 static const struct spdk_json_object_decoder rpc_bdev_nvme_hotplug_decoders[] = {
130 	{"enable", offsetof(struct rpc_bdev_nvme_hotplug, enabled), spdk_json_decode_bool, false},
131 	{"period_us", offsetof(struct rpc_bdev_nvme_hotplug, period_us), spdk_json_decode_uint64, true},
132 };
133 
134 static void
135 rpc_bdev_nvme_set_hotplug_done(void *ctx)
136 {
137 	struct spdk_jsonrpc_request *request = ctx;
138 
139 	spdk_jsonrpc_send_bool_response(request, true);
140 }
141 
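/*
 * Illustrative JSON-RPC request (example values only); "enable" is required,
 * "period_us" is optional:
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_set_hotplug",
 *  "params": {"enable": true, "period_us": 100000}}
 */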
142 static void
143 rpc_bdev_nvme_set_hotplug(struct spdk_jsonrpc_request *request,
144 			  const struct spdk_json_val *params)
145 {
146 	struct rpc_bdev_nvme_hotplug req = {false, 0};
147 	int rc;
148 
149 	if (spdk_json_decode_object(params, rpc_bdev_nvme_hotplug_decoders,
150 				    SPDK_COUNTOF(rpc_bdev_nvme_hotplug_decoders), &req)) {
151 		SPDK_ERRLOG("spdk_json_decode_object failed\n");
152 		rc = -EINVAL;
153 		goto invalid;
154 	}
155 
156 	rc = bdev_nvme_set_hotplug(req.enabled, req.period_us, rpc_bdev_nvme_set_hotplug_done,
157 				   request);
158 	if (rc) {
159 		goto invalid;
160 	}
161 
162 	return;
163 invalid:
164 	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(-rc));
165 }
166 SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME)
167 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_hotplug, set_bdev_nvme_hotplug)
168 
169 struct rpc_bdev_nvme_attach_controller {
170 	char *name;
171 	char *trtype;
172 	char *adrfam;
173 	char *traddr;
174 	char *trsvcid;
175 	char *priority;
176 	char *subnqn;
177 	char *hostnqn;
178 	char *hostaddr;
179 	char *hostsvcid;
180 	bool prchk_reftag;
181 	bool prchk_guard;
182 	uint64_t fabrics_connect_timeout_us;
183 	char *multipath;
184 	struct spdk_nvme_ctrlr_opts opts;
185 };
186 
187 static void
188 free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req)
189 {
190 	free(req->name);
191 	free(req->trtype);
192 	free(req->adrfam);
193 	free(req->traddr);
194 	free(req->trsvcid);
195 	free(req->priority);
196 	free(req->subnqn);
197 	free(req->hostnqn);
198 	free(req->hostaddr);
199 	free(req->hostsvcid);
200 	free(req->multipath);
201 }
202 
203 static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = {
204 	{"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string},
205 	{"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string},
206 	{"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string},
207 
208 	{"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true},
209 	{"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true},
210 	{"priority", offsetof(struct rpc_bdev_nvme_attach_controller, priority), spdk_json_decode_string, true},
211 	{"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true},
212 	{"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true},
213 	{"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true},
214 	{"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true},
215 
216 	{"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_reftag), spdk_json_decode_bool, true},
217 	{"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_guard), spdk_json_decode_bool, true},
218 	{"hdgst", offsetof(struct rpc_bdev_nvme_attach_controller, opts.header_digest), spdk_json_decode_bool, true},
219 	{"ddgst", offsetof(struct rpc_bdev_nvme_attach_controller, opts.data_digest), spdk_json_decode_bool, true},
220 	{"fabrics_connect_timeout_us", offsetof(struct rpc_bdev_nvme_attach_controller, opts.fabrics_connect_timeout_us), spdk_json_decode_uint64, true},
221 	{"multipath", offsetof(struct rpc_bdev_nvme_attach_controller, multipath), spdk_json_decode_string, true},
222 };
223 
224 #define NVME_MAX_BDEVS_PER_RPC 128
225 
226 struct rpc_bdev_nvme_attach_controller_ctx {
227 	struct rpc_bdev_nvme_attach_controller req;
228 	uint32_t count;
229 	size_t bdev_count;
230 	const char *names[NVME_MAX_BDEVS_PER_RPC];
231 	struct spdk_jsonrpc_request *request;
232 };
233 
234 static void
235 rpc_bdev_nvme_attach_controller_examined(void *cb_ctx)
236 {
237 	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
238 	struct spdk_jsonrpc_request *request = ctx->request;
239 	struct spdk_json_write_ctx *w;
240 	size_t i;
241 
242 	w = spdk_jsonrpc_begin_result(request);
243 	spdk_json_write_array_begin(w);
244 	for (i = 0; i < ctx->bdev_count; i++) {
245 		spdk_json_write_string(w, ctx->names[i]);
246 	}
247 	spdk_json_write_array_end(w);
248 	spdk_jsonrpc_end_result(request, w);
249 
250 	free_rpc_bdev_nvme_attach_controller(&ctx->req);
251 	free(ctx);
252 }
253 
254 static void
255 rpc_bdev_nvme_attach_controller_done(void *cb_ctx, size_t bdev_count, int rc)
256 {
257 	struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
258 	struct spdk_jsonrpc_request *request = ctx->request;
259 
260 	if (rc < 0) {
261 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
262 		free_rpc_bdev_nvme_attach_controller(&ctx->req);
263 		free(ctx);
264 		return;
265 	}
266 
267 	ctx->bdev_count = bdev_count;
268 	spdk_bdev_wait_for_examine(rpc_bdev_nvme_attach_controller_examined, ctx);
269 }
270 
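/*
 * Illustrative JSON-RPC requests (example names and addresses only).
 * "name", "trtype" and "traddr" are required; the remaining parameters are
 * optional. The response is an array of the bdev names that were created.
 *
 * PCIe:
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_attach_controller",
 *  "params": {"name": "Nvme0", "trtype": "PCIe", "traddr": "0000:00:04.0"}}
 *
 * NVMe-oF TCP:
 * {"jsonrpc": "2.0", "id": 2, "method": "bdev_nvme_attach_controller",
 *  "params": {"name": "Nvme1", "trtype": "TCP", "adrfam": "IPv4",
 *             "traddr": "192.168.0.10", "trsvcid": "4420",
 *             "subnqn": "nqn.2016-06.io.spdk:cnode1"}}
 */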
271 static void
272 rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
273 				const struct spdk_json_val *params)
274 {
275 	struct rpc_bdev_nvme_attach_controller_ctx *ctx;
276 	struct spdk_nvme_transport_id trid = {};
277 	const struct spdk_nvme_ctrlr_opts *opts;
278 	const struct spdk_nvme_transport_id *ctrlr_trid;
279 	uint32_t prchk_flags = 0;
280 	struct nvme_ctrlr *ctrlr = NULL;
281 	size_t len, maxlen;
282 	int rc;
283 
284 	ctx = calloc(1, sizeof(*ctx));
285 	if (!ctx) {
286 		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
287 		return;
288 	}
289 
290 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->req.opts, sizeof(ctx->req.opts));
291 
292 	if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders,
293 				    SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders),
294 				    &ctx->req)) {
295 		SPDK_ERRLOG("spdk_json_decode_object failed\n");
296 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
297 						 "spdk_json_decode_object failed");
298 		goto cleanup;
299 	}
300 
301 	/* Parse trstring */
302 	rc = spdk_nvme_transport_id_populate_trstring(&trid, ctx->req.trtype);
303 	if (rc < 0) {
304 		SPDK_ERRLOG("Failed to parse trtype: %s\n", ctx->req.trtype);
305 		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
306 						     ctx->req.trtype);
307 		goto cleanup;
308 	}
309 
310 	/* Parse trtype */
311 	rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
312 	assert(rc == 0);
313 
314 	/* Parse traddr */
315 	maxlen = sizeof(trid.traddr);
316 	len = strnlen(ctx->req.traddr, maxlen);
317 	if (len == maxlen) {
318 		spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
319 						     ctx->req.traddr);
320 		goto cleanup;
321 	}
322 	memcpy(trid.traddr, ctx->req.traddr, len + 1);
323 
324 	/* Parse adrfam */
325 	if (ctx->req.adrfam) {
326 		rc = spdk_nvme_transport_id_parse_adrfam(&trid.adrfam, ctx->req.adrfam);
327 		if (rc < 0) {
328 			SPDK_ERRLOG("Failed to parse adrfam: %s\n", ctx->req.adrfam);
329 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
330 							     ctx->req.adrfam);
331 			goto cleanup;
332 		}
333 	}
334 
335 	/* Parse trsvcid */
336 	if (ctx->req.trsvcid) {
337 		maxlen = sizeof(trid.trsvcid);
338 		len = strnlen(ctx->req.trsvcid, maxlen);
339 		if (len == maxlen) {
340 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
341 							     ctx->req.trsvcid);
342 			goto cleanup;
343 		}
344 		memcpy(trid.trsvcid, ctx->req.trsvcid, len + 1);
345 	}
346 
347 	/* Parse priority for the NVMe-oF transport connection */
348 	if (ctx->req.priority) {
349 		trid.priority = spdk_strtol(ctx->req.priority, 10);
350 	}
351 
352 	/* Parse subnqn */
353 	if (ctx->req.subnqn) {
354 		maxlen = sizeof(trid.subnqn);
355 		len = strnlen(ctx->req.subnqn, maxlen);
356 		if (len == maxlen) {
357 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
358 							     ctx->req.subnqn);
359 			goto cleanup;
360 		}
361 		memcpy(trid.subnqn, ctx->req.subnqn, len + 1);
362 	}
363 
364 	if (ctx->req.hostnqn) {
365 		snprintf(ctx->req.opts.hostnqn, sizeof(ctx->req.opts.hostnqn), "%s",
366 			 ctx->req.hostnqn);
367 	}
368 
369 	if (ctx->req.hostaddr) {
370 		maxlen = sizeof(ctx->req.opts.src_addr);
371 		len = strnlen(ctx->req.hostaddr, maxlen);
372 		if (len == maxlen) {
373 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostaddr too long: %s",
374 							     ctx->req.hostaddr);
375 			goto cleanup;
376 		}
377 		snprintf(ctx->req.opts.src_addr, maxlen, "%s", ctx->req.hostaddr);
378 	}
379 
380 	if (ctx->req.hostsvcid) {
381 		maxlen = sizeof(ctx->req.opts.src_svcid);
382 		len = strnlen(ctx->req.hostsvcid, maxlen);
383 		if (len == maxlen) {
384 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "hostsvcid too long: %s",
385 							     ctx->req.hostsvcid);
386 			goto cleanup;
387 		}
388 		snprintf(ctx->req.opts.src_svcid, maxlen, "%s", ctx->req.hostsvcid);
389 	}
390 
391 	ctrlr = nvme_ctrlr_get_by_name(ctx->req.name);
392 
393 	if (ctrlr) {
394 		if (ctx->req.multipath == NULL) {
395 			/* For now, this means adding a failover path. This maintains backward compatibility
396 			 * with past behavior. In the future, this behavior will change to "disable". */
397 			SPDK_ERRLOG("The multipath parameter was not specified to bdev_nvme_attach_controller but "
398 				    "it was used to add a failover path. This behavior will default to rejecting "
399 				    "the request in the future. Specify the 'multipath' parameter to control the behavior.\n");
400 			ctx->req.multipath = strdup("failover");
401 			if (ctx->req.multipath == NULL) {
402 				SPDK_ERRLOG("cannot allocate multipath failover string\n");
				spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
								 "Internal error");
403 				goto cleanup;
404 			}
405 		}
406 
407 		opts = spdk_nvme_ctrlr_get_opts(ctrlr->ctrlr);
408 		ctrlr_trid = spdk_nvme_ctrlr_get_transport_id(ctrlr->ctrlr);
409 
410 		/* This controller already exists. Check what the user wants to do. */
411 		if (strcasecmp(ctx->req.multipath, "disable") == 0) {
412 			/* The user does not want to do any form of multipathing. */
413 			spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
414 							     "A controller named %s already exists and multipath is disabled\n",
415 							     ctx->req.name);
416 			goto cleanup;
417 		} else if (strcasecmp(ctx->req.multipath, "failover") == 0) {
418 			/* The user wants to add this as a failover path. */
419 
420 			if (strncmp(trid.traddr, ctrlr_trid->traddr, sizeof(trid.traddr)) == 0 &&
421 			    strncmp(trid.trsvcid, ctrlr_trid->trsvcid, sizeof(trid.trsvcid)) == 0 &&
422 			    strncmp(ctx->req.opts.src_addr, opts->src_addr, sizeof(opts->src_addr)) == 0 &&
423 			    strncmp(ctx->req.opts.src_svcid, opts->src_svcid, sizeof(opts->src_svcid)) == 0) {
424 				/* The exact same network path can't be added a second time. */
425 				spdk_jsonrpc_send_error_response_fmt(request, -EALREADY,
426 								     "A controller named %s already exists with the specified network path\n",
427 								     ctx->req.name);
428 				goto cleanup;
429 			}
430 		} else {
431 			/* Invalid multipath option */
432 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
433 							     "Invalid multipath parameter: %s\n",
434 							     ctx->req.multipath);
435 			goto cleanup;
436 		}
437 
438 		if (strncmp(trid.subnqn,
439 			    ctrlr_trid->subnqn,
440 			    SPDK_NVMF_NQN_MAX_LEN) != 0) {
441 			/* Different SUBNQN is not allowed when specifying the same controller name. */
442 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
443 							     "A controller named %s already exists, but uses a different subnqn (%s)\n",
444 							     ctx->req.name, ctrlr_trid->subnqn);
445 			goto cleanup;
446 		}
447 
448 
449 
450 		if (strncmp(ctx->req.opts.hostnqn, opts->hostnqn, SPDK_NVMF_NQN_MAX_LEN) != 0) {
451 			/* Different HOSTNQN is not allowed when specifying the same controller name. */
452 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
453 							     "A controller named %s already exists, but uses a different hostnqn (%s)\n",
454 							     ctx->req.name, opts->hostnqn);
455 			goto cleanup;
456 		}
457 
458 		if (ctx->req.prchk_guard || ctx->req.prchk_reftag) {
459 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL,
460 							     "A controller named %s already exists. To add a path, do not specify PI options.\n",
461 							     ctx->req.name);
462 			goto cleanup;
463 		}
464 	}
465 
466 	if (ctx->req.prchk_reftag) {
467 		prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
468 	}
469 
470 	if (ctx->req.prchk_guard) {
471 		prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
472 	}
473 
474 	ctx->request = request;
475 	ctx->count = NVME_MAX_BDEVS_PER_RPC;
476 	rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count, prchk_flags,
477 			      rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.opts,
478 			      false);
479 	if (rc) {
480 		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
481 		goto cleanup;
482 	}
483 
484 	return;
485 
486 cleanup:
487 	free_rpc_bdev_nvme_attach_controller(&ctx->req);
488 	free(ctx);
489 }
490 SPDK_RPC_REGISTER("bdev_nvme_attach_controller", rpc_bdev_nvme_attach_controller,
491 		  SPDK_RPC_RUNTIME)
492 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_bdev)
493 
494 static void
495 rpc_dump_nvme_controller_info(struct nvme_ctrlr *nvme_ctrlr, void *ctx)
496 {
497 	struct spdk_json_write_ctx	*w = ctx;
498 	struct spdk_nvme_transport_id	*trid;
499 	const struct spdk_nvme_ctrlr_opts *opts;
500 
501 	trid = &nvme_ctrlr->active_path_id->trid;
502 
503 	spdk_json_write_object_begin(w);
504 	spdk_json_write_named_string(w, "name", nvme_ctrlr->nbdev_ctrlr->name);
505 
506 #ifdef SPDK_CONFIG_NVME_CUSE
507 	size_t cuse_name_size = 128;
508 	char cuse_name[cuse_name_size];
509 
510 	int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_ctrlr->ctrlr, cuse_name, &cuse_name_size);
511 	if (rc == 0) {
512 		spdk_json_write_named_string(w, "cuse_device", cuse_name);
513 	}
514 #endif
515 
516 	spdk_json_write_named_object_begin(w, "trid");
517 	nvme_bdev_dump_trid_json(trid, w);
518 	spdk_json_write_object_end(w);
519 
520 	opts = spdk_nvme_ctrlr_get_opts(nvme_ctrlr->ctrlr);
521 
522 	spdk_json_write_named_object_begin(w, "host");
523 	spdk_json_write_named_string(w, "nqn", opts->hostnqn);
524 	spdk_json_write_named_string(w, "addr", opts->src_addr);
525 	spdk_json_write_named_string(w, "svcid", opts->src_svcid);
526 	spdk_json_write_object_end(w);
527 	spdk_json_write_object_end(w);
528 }
529 
530 struct rpc_bdev_nvme_get_controllers {
531 	char *name;
532 };
533 
534 static void
535 free_rpc_bdev_nvme_get_controllers(struct rpc_bdev_nvme_get_controllers *r)
536 {
537 	free(r->name);
538 }
539 
540 static const struct spdk_json_object_decoder rpc_bdev_nvme_get_controllers_decoders[] = {
541 	{"name", offsetof(struct rpc_bdev_nvme_get_controllers, name), spdk_json_decode_string, true},
542 };
543 
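/*
 * Illustrative JSON-RPC request (example name only); "name" is optional and,
 * when omitted, every attached controller is reported:
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_get_controllers",
 *  "params": {"name": "Nvme0"}}
 */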
544 static void
545 rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request,
546 			      const struct spdk_json_val *params)
547 {
548 	struct rpc_bdev_nvme_get_controllers req = {};
549 	struct spdk_json_write_ctx *w;
550 	struct nvme_ctrlr *ctrlr = NULL;
551 
552 	if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders,
553 					      SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders),
554 					      &req)) {
555 		SPDK_ERRLOG("spdk_json_decode_object failed\n");
556 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
557 						 "spdk_json_decode_object failed");
558 		goto cleanup;
559 	}
560 
561 	if (req.name) {
562 		ctrlr = nvme_ctrlr_get_by_name(req.name);
563 		if (ctrlr == NULL) {
564 			SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name);
565 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Controller %s does not exist", req.name);
566 			goto cleanup;
567 		}
568 	}
569 
570 	w = spdk_jsonrpc_begin_result(request);
571 	spdk_json_write_array_begin(w);
572 
573 	if (ctrlr != NULL) {
574 		rpc_dump_nvme_controller_info(ctrlr, w);
575 	} else {
576 		nvme_ctrlr_for_each(rpc_dump_nvme_controller_info, w);
577 	}
578 
579 	spdk_json_write_array_end(w);
580 
581 	spdk_jsonrpc_end_result(request, w);
582 
583 cleanup:
584 	free_rpc_bdev_nvme_get_controllers(&req);
585 }
586 SPDK_RPC_REGISTER("bdev_nvme_get_controllers", rpc_bdev_nvme_get_controllers, SPDK_RPC_RUNTIME)
587 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_get_controllers, get_nvme_controllers)
588 
589 struct rpc_bdev_nvme_detach_controller {
590 	char *name;
591 	char *trtype;
592 	char *adrfam;
593 	char *traddr;
594 	char *trsvcid;
595 	char *subnqn;
596 };
597 
598 static void
599 free_rpc_bdev_nvme_detach_controller(struct rpc_bdev_nvme_detach_controller *req)
600 {
601 	free(req->name);
602 	free(req->trtype);
603 	free(req->adrfam);
604 	free(req->traddr);
605 	free(req->trsvcid);
606 	free(req->subnqn);
607 }
608 
609 static const struct spdk_json_object_decoder rpc_bdev_nvme_detach_controller_decoders[] = {
610 	{"name", offsetof(struct rpc_bdev_nvme_detach_controller, name), spdk_json_decode_string},
611 	{"trtype", offsetof(struct rpc_bdev_nvme_detach_controller, trtype), spdk_json_decode_string, true},
612 	{"traddr", offsetof(struct rpc_bdev_nvme_detach_controller, traddr), spdk_json_decode_string, true},
613 	{"adrfam", offsetof(struct rpc_bdev_nvme_detach_controller, adrfam), spdk_json_decode_string, true},
614 	{"trsvcid", offsetof(struct rpc_bdev_nvme_detach_controller, trsvcid), spdk_json_decode_string, true},
615 	{"subnqn", offsetof(struct rpc_bdev_nvme_detach_controller, subnqn), spdk_json_decode_string, true},
616 };
617 
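/*
 * Illustrative JSON-RPC request (example values only); "name" is required and
 * the transport ID fields may be supplied to identify a specific path:
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_detach_controller",
 *  "params": {"name": "Nvme0", "trtype": "TCP", "adrfam": "IPv4",
 *             "traddr": "192.168.0.10", "trsvcid": "4420"}}
 */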
618 static void
619 rpc_bdev_nvme_detach_controller(struct spdk_jsonrpc_request *request,
620 				const struct spdk_json_val *params)
621 {
622 	struct rpc_bdev_nvme_detach_controller req = {NULL};
623 	struct nvme_path_id path = {};
624 	size_t len, maxlen;
625 	int rc = 0;
626 
627 	if (spdk_json_decode_object(params, rpc_bdev_nvme_detach_controller_decoders,
628 				    SPDK_COUNTOF(rpc_bdev_nvme_detach_controller_decoders),
629 				    &req)) {
630 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
631 						 "spdk_json_decode_object failed");
632 		goto cleanup;
633 	}
634 
635 	if (req.trtype != NULL) {
636 		rc = spdk_nvme_transport_id_populate_trstring(&path.trid, req.trtype);
637 		if (rc < 0) {
638 			SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype);
639 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
640 							     req.trtype);
641 			goto cleanup;
642 		}
643 
644 		rc = spdk_nvme_transport_id_parse_trtype(&path.trid.trtype, req.trtype);
645 		if (rc < 0) {
646 			SPDK_ERRLOG("Failed to parse trtype: %s\n", req.trtype);
647 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse trtype: %s",
648 							     req.trtype);
649 			goto cleanup;
650 		}
651 	}
652 
653 	if (req.traddr != NULL) {
654 		maxlen = sizeof(path.trid.traddr);
655 		len = strnlen(req.traddr, maxlen);
656 		if (len == maxlen) {
657 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "traddr too long: %s",
658 							     req.traddr);
659 			goto cleanup;
660 		}
661 		memcpy(path.trid.traddr, req.traddr, len + 1);
662 	}
663 
664 	if (req.adrfam != NULL) {
665 		rc = spdk_nvme_transport_id_parse_adrfam(&path.trid.adrfam, req.adrfam);
666 		if (rc < 0) {
667 			SPDK_ERRLOG("Failed to parse adrfam: %s\n", req.adrfam);
668 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "Failed to parse adrfam: %s",
669 							     req.adrfam);
670 			goto cleanup;
671 		}
672 	}
673 
674 	if (req.trsvcid != NULL) {
675 		maxlen = sizeof(path.trid.trsvcid);
676 		len = strnlen(req.trsvcid, maxlen);
677 		if (len == maxlen) {
678 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "trsvcid too long: %s",
679 							     req.trsvcid);
680 			goto cleanup;
681 		}
682 		memcpy(path.trid.trsvcid, req.trsvcid, len + 1);
683 	}
684 
685 	/* Parse subnqn */
686 	if (req.subnqn != NULL) {
687 		maxlen = sizeof(path.trid.subnqn);
688 		len = strnlen(req.subnqn, maxlen);
689 		if (len == maxlen) {
690 			spdk_jsonrpc_send_error_response_fmt(request, -EINVAL, "subnqn too long: %s",
691 							     req.subnqn);
692 			goto cleanup;
693 		}
694 		memcpy(path.trid.subnqn, req.subnqn, len + 1);
695 	}
696 
697 	rc = bdev_nvme_delete(req.name, &path);
698 
699 	if (rc != 0) {
700 		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
701 		goto cleanup;
702 	}
703 
704 	spdk_jsonrpc_send_bool_response(request, true);
705 
706 cleanup:
707 	free_rpc_bdev_nvme_detach_controller(&req);
708 }
709 SPDK_RPC_REGISTER("bdev_nvme_detach_controller", rpc_bdev_nvme_detach_controller,
710 		  SPDK_RPC_RUNTIME)
711 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_detach_controller, delete_nvme_controller)
712 
713 struct rpc_apply_firmware {
714 	char *filename;
715 	char *bdev_name;
716 };
717 
718 static void
719 free_rpc_apply_firmware(struct rpc_apply_firmware *req)
720 {
721 	free(req->filename);
722 	free(req->bdev_name);
723 }
724 
725 static const struct spdk_json_object_decoder rpc_apply_firmware_decoders[] = {
726 	{"filename", offsetof(struct rpc_apply_firmware, filename), spdk_json_decode_string},
727 	{"bdev_name", offsetof(struct rpc_apply_firmware, bdev_name), spdk_json_decode_string},
728 };
729 
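/*
 * State for an in-flight firmware update: the DMA-able firmware image, the
 * current download offset and remaining size, the admin-passthru descriptor
 * and channel used to issue the NVMe commands, and the list of bdevs that were
 * opened (and must be closed again) for the duration of the update.
 */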
730 struct firmware_update_info {
731 	void				*fw_image;
732 	void				*p;
733 	unsigned int			size;
734 	unsigned int			size_remaining;
735 	unsigned int			offset;
736 	unsigned int			transfer;
737 
738 	void				*desc;
739 	struct spdk_io_channel		*ch;
740 	struct spdk_jsonrpc_request	*request;
741 	struct spdk_nvme_ctrlr		*ctrlr;
742 	open_descriptors_t		desc_head;
743 	struct rpc_apply_firmware	*req;
744 };
745 
746 static void
747 _apply_firmware_cleanup(void *ctx)
748 {
749 	struct spdk_bdev_desc *desc = ctx;
750 
751 	spdk_bdev_close(desc);
752 }
753 
754 static void
755 apply_firmware_cleanup(void *cb_arg)
756 {
757 	struct open_descriptors			*opt, *tmp;
758 	struct firmware_update_info *firm_ctx = cb_arg;
759 
760 	if (!firm_ctx) {
761 		return;
762 	}
763 
764 	if (firm_ctx->fw_image) {
765 		spdk_free(firm_ctx->fw_image);
766 	}
767 
768 	if (firm_ctx->req) {
769 		free_rpc_apply_firmware(firm_ctx->req);
770 		free(firm_ctx->req);
771 	}
772 
773 	if (firm_ctx->ch) {
774 		spdk_put_io_channel(firm_ctx->ch);
775 	}
776 
777 	TAILQ_FOREACH_SAFE(opt, &firm_ctx->desc_head, tqlst, tmp) {
778 		TAILQ_REMOVE(&firm_ctx->desc_head, opt, tqlst);
779 		/* Close the underlying bdev on its same opened thread. */
780 		if (opt->thread && opt->thread != spdk_get_thread()) {
781 			spdk_thread_send_msg(opt->thread, _apply_firmware_cleanup, opt->desc);
782 		} else {
783 			spdk_bdev_close(opt->desc);
784 		}
785 		free(opt);
786 	}
787 	free(firm_ctx);
788 }
789 
790 static void
791 apply_firmware_complete_reset(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
792 {
793 	struct spdk_json_write_ctx		*w;
794 	struct firmware_update_info *firm_ctx = cb_arg;
795 
796 	spdk_bdev_free_io(bdev_io);
797 
798 	if (!success) {
799 		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
800 						 "firmware commit failed.");
801 		apply_firmware_cleanup(firm_ctx);
802 		return;
803 	}
804 
805 	if (spdk_nvme_ctrlr_reset(firm_ctx->ctrlr) != 0) {
806 		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
807 						 "Controller reset failed.");
808 		apply_firmware_cleanup(firm_ctx);
809 		return;
810 	}
811 
812 	w = spdk_jsonrpc_begin_result(firm_ctx->request);
813 	spdk_json_write_string(w, "firmware commit succeeded. Controller reset in progress.");
814 	spdk_jsonrpc_end_result(firm_ctx->request, w);
815 	apply_firmware_cleanup(firm_ctx);
816 }
817 
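/*
 * Completion callback for each FIRMWARE_IMAGE_DOWNLOAD chunk. The image is
 * pushed in pieces of at most 4096 bytes; once size_remaining reaches zero a
 * FIRMWARE_COMMIT (replace and enable) is issued, whose completion triggers a
 * controller reset in apply_firmware_complete_reset().
 */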
818 static void
819 apply_firmware_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
820 {
821 	struct spdk_nvme_cmd			cmd = {};
822 	struct spdk_nvme_fw_commit		fw_commit;
823 	int					slot = 0;
824 	int					rc;
825 	struct firmware_update_info *firm_ctx = cb_arg;
826 	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
827 
828 	if (!success) {
829 		spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
830 						 "firmware download failed.");
831 		spdk_bdev_free_io(bdev_io);
832 		apply_firmware_cleanup(firm_ctx);
833 		return;
834 	}
835 
836 	firm_ctx->p += firm_ctx->transfer;
837 	firm_ctx->offset += firm_ctx->transfer;
838 	firm_ctx->size_remaining -= firm_ctx->transfer;
839 
840 	switch (firm_ctx->size_remaining) {
841 	case 0:
842 		/* firmware download completed. Commit firmware */
843 		memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
844 		fw_commit.fs = slot;
845 		fw_commit.ca = commit_action;
846 
847 		cmd.opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
848 		memcpy(&cmd.cdw10, &fw_commit, sizeof(uint32_t));
849 		rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, NULL, 0,
850 						   apply_firmware_complete_reset, firm_ctx);
851 		if (rc) {
852 			spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
853 							 "firmware commit failed.");
854 			spdk_bdev_free_io(bdev_io);
855 			apply_firmware_cleanup(firm_ctx);
856 			return;
857 		}
858 		break;
859 	default:
860 		firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);
861 		cmd.opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
862 
863 		cmd.cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
864 		cmd.cdw11 = firm_ctx->offset >> 2;
865 		rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, &cmd, firm_ctx->p,
866 						   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
867 		if (rc) {
868 			spdk_jsonrpc_send_error_response(firm_ctx->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
869 							 "firmware download failed.");
870 			spdk_bdev_free_io(bdev_io);
871 			apply_firmware_cleanup(firm_ctx);
872 			return;
873 		}
874 		break;
875 	}
876 }
877 
878 static void
879 apply_firmware_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
880 {
881 }
882 
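/*
 * Illustrative JSON-RPC request (example path and name only); both parameters
 * are required and the firmware image size must be a multiple of 4 bytes:
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_apply_firmware",
 *  "params": {"filename": "/path/to/firmware.img", "bdev_name": "Nvme0n1"}}
 */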
883 static void
884 rpc_bdev_nvme_apply_firmware(struct spdk_jsonrpc_request *request,
885 			     const struct spdk_json_val *params)
886 {
887 	int					rc;
888 	int					fd = -1;
889 	struct stat				fw_stat;
890 	struct spdk_nvme_ctrlr			*ctrlr;
891 	char					msg[1024];
892 	struct spdk_bdev			*bdev;
893 	struct spdk_bdev			*bdev2;
894 	struct open_descriptors			*opt;
895 	struct spdk_bdev_desc			*desc;
896 	struct spdk_nvme_cmd			*cmd;
897 	struct firmware_update_info		*firm_ctx;
898 
899 	firm_ctx = calloc(1, sizeof(struct firmware_update_info));
900 	if (!firm_ctx) {
901 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
902 						 "Memory allocation error.");
903 		return;
904 	}
905 	firm_ctx->fw_image = NULL;
906 	TAILQ_INIT(&firm_ctx->desc_head);
907 	firm_ctx->request = request;
908 
909 	firm_ctx->req = calloc(1, sizeof(struct rpc_apply_firmware));
910 	if (!firm_ctx->req) {
911 		snprintf(msg, sizeof(msg), "Memory allocation error.");
912 		goto err;
913 	}
914 
915 	if (spdk_json_decode_object(params, rpc_apply_firmware_decoders,
916 				    SPDK_COUNTOF(rpc_apply_firmware_decoders), firm_ctx->req)) {
917 		snprintf(msg, sizeof(msg), "spdk_json_decode_object failed.");
918 		goto err;
919 	}
920 
921 	if ((bdev = spdk_bdev_get_by_name(firm_ctx->req->bdev_name)) == NULL) {
922 		snprintf(msg, sizeof(msg), "bdev %s was not found", firm_ctx->req->bdev_name);
923 		goto err;
924 	}
925 
926 	if ((ctrlr = bdev_nvme_get_ctrlr(bdev)) == NULL) {
927 		snprintf(msg, sizeof(msg), "Controller information for %s was not found.",
928 			 firm_ctx->req->bdev_name);
929 		goto err;
930 	}
931 	firm_ctx->ctrlr = ctrlr;
932 
933 	for (bdev2 = spdk_bdev_first(); bdev2; bdev2 = spdk_bdev_next(bdev2)) {
934 
935 		if (bdev_nvme_get_ctrlr(bdev2) != ctrlr) {
936 			continue;
937 		}
938 
939 		if (!(opt = malloc(sizeof(struct open_descriptors)))) {
940 			snprintf(msg, sizeof(msg), "Memory allocation error.");
941 			goto err;
942 		}
943 
944 		if (spdk_bdev_open_ext(spdk_bdev_get_name(bdev2), true, apply_firmware_open_cb, NULL, &desc) != 0) {
945 			snprintf(msg, sizeof(msg), "Device %s is in use.", spdk_bdev_get_name(bdev2));
946 			free(opt);
947 			goto err;
948 		}
949 
950 		/* Save the thread where the base device is opened */
951 		opt->thread = spdk_get_thread();
952 
953 		opt->desc = desc;
954 		opt->bdev = bdev2;
955 		TAILQ_INSERT_TAIL(&firm_ctx->desc_head, opt, tqlst);
956 	}
957 
958 	/*
959 	 * find a descriptor associated with our bdev
960 	 */
961 	firm_ctx->desc = NULL;
962 	TAILQ_FOREACH(opt, &firm_ctx->desc_head, tqlst) {
963 		if (opt->bdev == bdev) {
964 			firm_ctx->desc = opt->desc;
965 			break;
966 		}
967 	}
968 
969 	if (!firm_ctx->desc) {
970 		snprintf(msg, sizeof(msg), "No descriptor was found.");
971 		goto err;
972 	}
973 
974 	firm_ctx->ch = spdk_bdev_get_io_channel(firm_ctx->desc);
975 	if (!firm_ctx->ch) {
976 		snprintf(msg, sizeof(msg), "No channels were found.");
977 		goto err;
978 	}
979 
980 	fd = open(firm_ctx->req->filename, O_RDONLY);
981 	if (fd < 0) {
982 		snprintf(msg, sizeof(msg), "open file %s failed.", firm_ctx->req->filename);
983 		goto err;
984 	}
985 
986 	rc = fstat(fd, &fw_stat);
987 	if (rc < 0) {
988 		close(fd);
989 		snprintf(msg, sizeof(msg), "fstat failed.");
990 		goto err;
991 	}
992 
993 	firm_ctx->size = fw_stat.st_size;
994 	if (fw_stat.st_size % 4) {
995 		close(fd);
996 		snprintf(msg, sizeof(msg), "Firmware image size is not multiple of 4.");
997 		goto err;
998 	}
999 
1000 	firm_ctx->fw_image = spdk_zmalloc(firm_ctx->size, 4096, NULL,
1001 					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1002 	if (!firm_ctx->fw_image) {
1003 		close(fd);
1004 		snprintf(msg, sizeof(msg), "Memory allocation error.");
1005 		goto err;
1006 	}
1007 	firm_ctx->p = firm_ctx->fw_image;
1008 
1009 	if (read(fd, firm_ctx->p, firm_ctx->size) != ((ssize_t)(firm_ctx->size))) {
1010 		close(fd);
1011 		snprintf(msg, sizeof(msg), "Read firmware image failed!");
1012 		goto err;
1013 	}
1014 	close(fd);
1015 
1016 	firm_ctx->offset = 0;
1017 	firm_ctx->size_remaining = firm_ctx->size;
1018 	firm_ctx->transfer = spdk_min(firm_ctx->size_remaining, 4096);
1019 
1020 	cmd = malloc(sizeof(struct spdk_nvme_cmd));
1021 	if (!cmd) {
1022 		snprintf(msg, sizeof(msg), "Memory allocation error.");
1023 		goto err;
1024 	}
1025 	memset(cmd, 0, sizeof(struct spdk_nvme_cmd));
1026 	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
1027 
1028 	cmd->cdw10 = spdk_nvme_bytes_to_numd(firm_ctx->transfer);
1029 	cmd->cdw11 = firm_ctx->offset >> 2;
1030 
1031 	rc = spdk_bdev_nvme_admin_passthru(firm_ctx->desc, firm_ctx->ch, cmd, firm_ctx->p,
1032 					   firm_ctx->transfer, apply_firmware_complete, firm_ctx);
1033 	if (rc == 0) {
1034 		/* normal return here. */
1035 		return;
1036 	}
1037 
1038 	free(cmd);
1039 	snprintf(msg, sizeof(msg), "firmware download failed.");
1040 err:
1041 	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, msg);
1042 	apply_firmware_cleanup(firm_ctx);
1043 }
1044 SPDK_RPC_REGISTER("bdev_nvme_apply_firmware", rpc_bdev_nvme_apply_firmware, SPDK_RPC_RUNTIME)
1045 SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_apply_firmware, apply_nvme_firmware)
1046 
1047 struct rpc_bdev_nvme_transport_stat_ctx {
1048 	struct spdk_jsonrpc_request *request;
1049 	struct spdk_json_write_ctx *w;
1050 };
1051 
1052 static void
1053 rpc_bdev_nvme_rdma_stats(struct spdk_json_write_ctx *w,
1054 			 struct spdk_nvme_transport_poll_group_stat *stat)
1055 {
1056 	struct spdk_nvme_rdma_device_stat *device_stats;
1057 	uint32_t i;
1058 
1059 	spdk_json_write_named_array_begin(w, "devices");
1060 
1061 	for (i = 0; i < stat->rdma.num_devices; i++) {
1062 		device_stats = &stat->rdma.device_stats[i];
1063 		spdk_json_write_object_begin(w);
1064 		spdk_json_write_named_string(w, "dev_name", device_stats->name);
1065 		spdk_json_write_named_uint64(w, "polls", device_stats->polls);
1066 		spdk_json_write_named_uint64(w, "idle_polls", device_stats->idle_polls);
1067 		spdk_json_write_named_uint64(w, "completions", device_stats->completions);
1068 		spdk_json_write_named_uint64(w, "queued_requests", device_stats->queued_requests);
1069 		spdk_json_write_named_uint64(w, "total_send_wrs", device_stats->total_send_wrs);
1070 		spdk_json_write_named_uint64(w, "send_doorbell_updates", device_stats->send_doorbell_updates);
1071 		spdk_json_write_named_uint64(w, "total_recv_wrs", device_stats->total_recv_wrs);
1072 		spdk_json_write_named_uint64(w, "recv_doorbell_updates", device_stats->recv_doorbell_updates);
1073 		spdk_json_write_object_end(w);
1074 	}
1075 	spdk_json_write_array_end(w);
1076 }
1077 
1078 static void
1079 rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w,
1080 			 struct spdk_nvme_transport_poll_group_stat *stat)
1081 {
1082 	spdk_json_write_named_uint64(w, "polls", stat->pcie.polls);
1083 	spdk_json_write_named_uint64(w, "idle_polls", stat->pcie.idle_polls);
1084 	spdk_json_write_named_uint64(w, "completions", stat->pcie.completions);
1085 	spdk_json_write_named_uint64(w, "cq_doorbell_updates", stat->pcie.cq_doorbell_updates);
1086 	spdk_json_write_named_uint64(w, "queued_requests", stat->pcie.queued_requests);
1087 	spdk_json_write_named_uint64(w, "submitted_requests", stat->pcie.submitted_requests);
1088 	spdk_json_write_named_uint64(w, "sq_doobell_updates", stat->pcie.sq_doobell_updates);
1089 }
1090 
1091 static void
1092 rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w,
1093 			struct spdk_nvme_transport_poll_group_stat *stat)
1094 {
1095 	spdk_json_write_named_uint64(w, "polls", stat->tcp.polls);
1096 	spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls);
1097 	spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions);
1098 	spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions);
1099 	spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests);
1100 	spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests);
1101 }
1102 
1103 static void
1104 rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
1105 {
1106 	struct rpc_bdev_nvme_transport_stat_ctx *ctx;
1107 	struct spdk_io_channel *ch;
1108 	struct nvme_poll_group *group;
1109 	struct spdk_nvme_poll_group_stat *stat;
1110 	struct spdk_nvme_transport_poll_group_stat *tr_stat;
1111 	uint32_t j;
1112 	int rc;
1113 
1114 	ctx = spdk_io_channel_iter_get_ctx(i);
1115 	ch = spdk_io_channel_iter_get_channel(i);
1116 	group = spdk_io_channel_get_ctx(ch);
1117 
1118 	rc = spdk_nvme_poll_group_get_stats(group->group, &stat);
1119 	if (rc) {
1120 		spdk_for_each_channel_continue(i, rc);
1121 		return;
1122 	}
1123 
1124 	spdk_json_write_object_begin(ctx->w);
1125 	spdk_json_write_named_string(ctx->w, "thread", spdk_thread_get_name(spdk_get_thread()));
1126 	spdk_json_write_named_array_begin(ctx->w, "transports");
1127 
1128 	for (j = 0; j < stat->num_transports; j++) {
1129 		tr_stat = stat->transport_stat[j];
1130 		spdk_json_write_object_begin(ctx->w);
1131 		spdk_json_write_named_string(ctx->w, "trname", spdk_nvme_transport_id_trtype_str(tr_stat->trtype));
1132 
1133 		switch (stat->transport_stat[j]->trtype) {
1134 		case SPDK_NVME_TRANSPORT_RDMA:
1135 			rpc_bdev_nvme_rdma_stats(ctx->w, tr_stat);
1136 			break;
1137 		case SPDK_NVME_TRANSPORT_PCIE:
1138 			rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat);
1139 			break;
1140 		case SPDK_NVME_TRANSPORT_TCP:
1141 			rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat);
1142 			break;
1143 		default:
1144 			SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype,
1145 				     spdk_nvme_transport_id_trtype_str(tr_stat->trtype));
1146 		}
1147 		spdk_json_write_object_end(ctx->w);
1148 	}
1149 	/* transports array */
1150 	spdk_json_write_array_end(ctx->w);
1151 	spdk_json_write_object_end(ctx->w);
1152 
1153 	spdk_nvme_poll_group_free_stats(group->group, stat);
1154 	spdk_for_each_channel_continue(i, 0);
1155 }
1156 
1157 static void
1158 rpc_bdev_nvme_stats_done(struct spdk_io_channel_iter *i, int status)
1159 {
1160 	struct rpc_bdev_nvme_transport_stat_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
1161 
1162 	spdk_json_write_array_end(ctx->w);
1163 	spdk_json_write_object_end(ctx->w);
1164 	spdk_jsonrpc_end_result(ctx->request, ctx->w);
1165 	free(ctx);
1166 }
1167 
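/*
 * Illustrative JSON-RPC request; this method takes no parameters:
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_get_transport_statistics"}
 *
 * The result is an object with a "poll_groups" array; each entry reports the
 * owning thread name and per-transport statistics (RDMA, PCIe or TCP).
 */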
1168 static void
1169 rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request,
1170 				       const struct spdk_json_val *params)
1171 {
1172 	struct rpc_bdev_nvme_transport_stat_ctx *ctx;
1173 
1174 	if (params) {
1175 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
1176 						 "'bdev_nvme_get_transport_statistics' requires no arguments");
1177 		return;
1178 	}
1179 
1180 	ctx = calloc(1, sizeof(*ctx));
1181 	if (!ctx) {
1182 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1183 						 "Memory allocation error");
1184 		return;
1185 	}
1186 	ctx->request = request;
1187 	ctx->w = spdk_jsonrpc_begin_result(ctx->request);
1188 	spdk_json_write_object_begin(ctx->w);
1189 	spdk_json_write_named_array_begin(ctx->w, "poll_groups");
1190 
1191 	spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
1192 			      rpc_bdev_nvme_stats_per_channel,
1193 			      ctx,
1194 			      rpc_bdev_nvme_stats_done);
1195 }
1196 SPDK_RPC_REGISTER("bdev_nvme_get_transport_statistics", rpc_bdev_nvme_get_transport_statistics,
1197 		  SPDK_RPC_RUNTIME)
1198 
1199 struct rpc_bdev_nvme_reset_controller_req {
1200 	char *name;
1201 };
1202 
1203 static void
1204 free_rpc_bdev_nvme_reset_controller_req(struct rpc_bdev_nvme_reset_controller_req *r)
1205 {
1206 	free(r->name);
1207 }
1208 
1209 static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_decoders[] = {
1210 	{"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string},
1211 };
1212 
1213 static void
1214 _rpc_bdev_nvme_reset_controller_cb(void *cb_arg, bool success)
1215 {
1216 	struct spdk_jsonrpc_request *request = cb_arg;
1217 
1218 	spdk_jsonrpc_send_bool_response(request, success);
1219 }
1220 
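/*
 * Illustrative JSON-RPC request (example name only); "name" is required:
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_reset_controller",
 *  "params": {"name": "Nvme0"}}
 */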
1221 static void
1222 rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request,
1223 			       const struct spdk_json_val *params)
1224 {
1225 	struct rpc_bdev_nvme_reset_controller_req req = {NULL};
1226 	struct nvme_ctrlr *nvme_ctrlr;
1227 	int rc;
1228 
1229 	if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders,
1230 				    SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders),
1231 				    &req)) {
1232 		SPDK_ERRLOG("spdk_json_decode_object failed\n");
1233 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL));
1234 		goto cleanup;
1235 	}
1236 
1237 	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);
1238 	if (nvme_ctrlr == NULL) {
1239 		SPDK_ERRLOG("nvme ctrlr '%s' does not exist\n", req.name);
1240 		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
1241 		goto cleanup;
1242 	}
1243 
1244 	rc = bdev_nvme_reset_rpc(nvme_ctrlr, _rpc_bdev_nvme_reset_controller_cb, request);
1245 	if (rc != 0) {
1246 		SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n");
1247 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc));
1248 	}
1249 
1250 cleanup:
1251 	free_rpc_bdev_nvme_reset_controller_req(&req);
1252 }
1253 SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME)
1254 
1255 struct rpc_get_controller_health_info {
1256 	char *name;
1257 };
1258 
1259 struct spdk_nvme_health_info_context {
1260 	struct spdk_jsonrpc_request *request;
1261 	struct spdk_nvme_ctrlr *ctrlr;
1262 	struct spdk_nvme_health_information_page health_page;
1263 };
1264 
1265 static void
1266 free_rpc_get_controller_health_info(struct rpc_get_controller_health_info *r)
1267 {
1268 	free(r->name);
1269 }
1270 
1271 static const struct spdk_json_object_decoder rpc_get_controller_health_info_decoders[] = {
1272 	{"name", offsetof(struct rpc_get_controller_health_info, name), spdk_json_decode_string, true},
1273 };
1274 
1275 static void nvme_health_info_cleanup(struct spdk_nvme_health_info_context *context, bool response)
1276 {
1277 	if (response == true) {
1278 		spdk_jsonrpc_send_error_response(context->request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1279 						 "Internal error.");
1280 	}
1281 
1282 	free(context);
1283 }
1284 
1285 static void
1286 get_health_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
1287 {
1288 	int i;
1289 	char buf[128];
1290 	struct spdk_nvme_health_info_context *context = cb_arg;
1291 	struct spdk_jsonrpc_request *request = context->request;
1292 	struct spdk_json_write_ctx *w;
1293 	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;
1294 	const struct spdk_nvme_transport_id *trid = NULL;
1295 	const struct spdk_nvme_ctrlr_data *cdata = NULL;
1296 	struct spdk_nvme_health_information_page *health_page = NULL;
1297 
1298 	if (spdk_nvme_cpl_is_error(cpl)) {
1299 		nvme_health_info_cleanup(context, true);
1300 		SPDK_ERRLOG("get log page failed\n");
1301 		return;
1302 	}
1303 
1304 	if (ctrlr == NULL) {
1305 		nvme_health_info_cleanup(context, true);
1306 		SPDK_ERRLOG("ctrlr is NULL\n");
1307 		return;
1308 	} else {
1309 		trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);
1310 		cdata = spdk_nvme_ctrlr_get_data(ctrlr);
1311 		health_page = &(context->health_page);
1312 	}
1313 
1314 	w = spdk_jsonrpc_begin_result(request);
1315 
1316 	spdk_json_write_object_begin(w);
1317 	snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn);
1318 	spdk_str_trim(buf);
1319 	spdk_json_write_named_string(w, "model_number", buf);
1320 	snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn);
1321 	spdk_str_trim(buf);
1322 	spdk_json_write_named_string(w, "serial_number", buf);
1323 	snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr);
1324 	spdk_str_trim(buf);
1325 	spdk_json_write_named_string(w, "firmware_revision", buf);
1326 	spdk_json_write_named_string(w, "traddr", trid->traddr);
1327 	spdk_json_write_named_uint64(w, "temperature_celsius", health_page->temperature - 273);
1328 	spdk_json_write_named_uint64(w, "available_spare_percentage", health_page->available_spare);
1329 	spdk_json_write_named_uint64(w, "available_spare_threshold_percentage",
1330 				     health_page->available_spare_threshold);
1331 	spdk_json_write_named_uint64(w, "percentage_used", health_page->percentage_used);
1332 	spdk_json_write_named_uint128(w, "data_units_read",
1333 				      health_page->data_units_read[0], health_page->data_units_read[1]);
1334 	spdk_json_write_named_uint128(w, "data_units_written",
1335 				      health_page->data_units_written[0], health_page->data_units_written[1]);
1336 	spdk_json_write_named_uint128(w, "host_read_commands",
1337 				      health_page->host_read_commands[0], health_page->host_read_commands[1]);
1338 	spdk_json_write_named_uint128(w, "host_write_commands",
1339 				      health_page->host_write_commands[0], health_page->host_write_commands[1]);
1340 	spdk_json_write_named_uint128(w, "controller_busy_time",
1341 				      health_page->controller_busy_time[0], health_page->controller_busy_time[1]);
1342 	spdk_json_write_named_uint128(w, "power_cycles",
1343 				      health_page->power_cycles[0], health_page->power_cycles[1]);
1344 	spdk_json_write_named_uint128(w, "power_on_hours",
1345 				      health_page->power_on_hours[0], health_page->power_on_hours[1]);
1346 	spdk_json_write_named_uint128(w, "unsafe_shutdowns",
1347 				      health_page->unsafe_shutdowns[0], health_page->unsafe_shutdowns[1]);
1348 	spdk_json_write_named_uint128(w, "media_errors",
1349 				      health_page->media_errors[0], health_page->media_errors[1]);
1350 	spdk_json_write_named_uint128(w, "num_err_log_entries",
1351 				      health_page->num_error_info_log_entries[0], health_page->num_error_info_log_entries[1]);
1352 	spdk_json_write_named_uint64(w, "warning_temperature_time_minutes", health_page->warning_temp_time);
1353 	spdk_json_write_named_uint64(w, "critical_composite_temperature_time_minutes",
1354 				     health_page->critical_temp_time);
1355 	for (i = 0; i < 8; i++) {
1356 		if (health_page->temp_sensor[i] != 0) {
1357 			spdk_json_write_named_uint64(w, "temperature_sensor_celsius", health_page->temp_sensor[i] - 273);
1358 		}
1359 	}
1360 	spdk_json_write_object_end(w);
1361 
1362 	spdk_jsonrpc_end_result(request, w);
1363 	nvme_health_info_cleanup(context, false);
1364 }
1365 
1366 static void
1367 get_health_log_page(struct spdk_nvme_health_info_context *context)
1368 {
1369 	struct spdk_nvme_ctrlr *ctrlr = context->ctrlr;
1370 
1371 	if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
1372 					     SPDK_NVME_GLOBAL_NS_TAG,
1373 					     &(context->health_page), sizeof(context->health_page), 0,
1374 					     get_health_log_page_completion, context)) {
1375 		nvme_health_info_cleanup(context, true);
1376 		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
1377 	}
1378 }
1379 
1380 static void
1381 get_temperature_threshold_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
1382 {
1383 	struct spdk_nvme_health_info_context *context = cb_arg;
1384 
1385 	if (spdk_nvme_cpl_is_error(cpl)) {
1386 		nvme_health_info_cleanup(context, true);
1387 		SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed in completion\n");
1388 	} else {
1389 		get_health_log_page(context);
1390 	}
1391 }
1392 
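/*
 * The health-info RPC chains two admin commands: a Get Features request for
 * SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD, followed (on success) by a read of the
 * SMART / Health Information log page. The JSON response is written in
 * get_health_log_page_completion().
 */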
1393 static int
1394 get_temperature_threshold_feature(struct spdk_nvme_health_info_context *context)
1395 {
1396 	struct spdk_nvme_cmd cmd = {};
1397 
1398 	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1399 	cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1400 
1401 	return spdk_nvme_ctrlr_cmd_admin_raw(context->ctrlr, &cmd, NULL, 0,
1402 					     get_temperature_threshold_feature_completion, context);
1403 }
1404 
1405 static void
1406 get_controller_health_info(struct spdk_jsonrpc_request *request, struct spdk_nvme_ctrlr *ctrlr)
1407 {
1408 	struct spdk_nvme_health_info_context *context;
1409 
1410 	context = calloc(1, sizeof(struct spdk_nvme_health_info_context));
1411 	if (!context) {
1412 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1413 						 "Memory allocation error.");
1414 		return;
1415 	}
1416 
1417 	context->request = request;
1418 	context->ctrlr = ctrlr;
1419 
1420 	if (get_temperature_threshold_feature(context)) {
1421 		nvme_health_info_cleanup(context, true);
1422 		SPDK_ERRLOG("feature SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD failed to submit\n");
1423 	}
1424 
1425 	return;
1426 }
1427 
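/*
 * Illustrative JSON-RPC request (example name only):
 *
 * {"jsonrpc": "2.0", "id": 1, "method": "bdev_nvme_get_controller_health_info",
 *  "params": {"name": "Nvme0"}}
 */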
1428 static void
1429 rpc_bdev_nvme_get_controller_health_info(struct spdk_jsonrpc_request *request,
1430 		const struct spdk_json_val *params)
1431 {
1432 	struct rpc_get_controller_health_info req = {};
1433 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1434 
1435 	if (!params) {
1436 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1437 						 "Missing device name");
1438 
1439 		return;
1440 	}
1441 	if (spdk_json_decode_object(params, rpc_get_controller_health_info_decoders,
1442 				    SPDK_COUNTOF(rpc_get_controller_health_info_decoders), &req)) {
1443 		SPDK_ERRLOG("spdk_json_decode_object failed\n");
1444 		free_rpc_get_controller_health_info(&req);
1445 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1446 						 "Invalid parameters");
1447 
1448 		return;
1449 	}
1450 
1451 	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);
1452 
1453 	if (!nvme_ctrlr) {
1454 		SPDK_ERRLOG("nvme ctrlr name '%s' does not exist\n", req.name);
1455 		free_rpc_get_controller_health_info(&req);
1456 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
1457 						 "Device not found");
1458 		return;
1459 	}
1460 
1461 	get_controller_health_info(request, nvme_ctrlr->ctrlr);
1462 	free_rpc_get_controller_health_info(&req);
1463 
1464 	return;
1465 }
1466 SPDK_RPC_REGISTER("bdev_nvme_get_controller_health_info",
1467 		  rpc_bdev_nvme_get_controller_health_info, SPDK_RPC_RUNTIME)
1468