/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "event_nvmf.h"

#include "spdk/bdev.h"
#include "spdk/thread.h"
#include "spdk/log.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_cmd.h"
#include "spdk_internal/usdt.h"

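/*
 * Lifecycle of the nvmf event subsystem: initialization walks the INIT states
 * in order and settles in NVMF_TGT_RUNNING; shutdown walks the FINI states and
 * settles in NVMF_TGT_STOPPED. NVMF_TGT_ERROR is entered on any failure and
 * routes shutdown directly to target destruction.
 */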
enum nvmf_tgt_state {
	NVMF_TGT_INIT_NONE = 0,
	NVMF_TGT_INIT_CREATE_TARGET,
	NVMF_TGT_INIT_CREATE_POLL_GROUPS,
	NVMF_TGT_INIT_START_SUBSYSTEMS,
	NVMF_TGT_RUNNING,
	NVMF_TGT_FINI_STOP_LISTEN,
	NVMF_TGT_FINI_STOP_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_POLL_GROUPS,
	NVMF_TGT_FINI_DESTROY_TARGET,
	NVMF_TGT_STOPPED,
	NVMF_TGT_ERROR,
};

struct nvmf_tgt_poll_group {
	struct spdk_nvmf_poll_group		*group;
	struct spdk_thread			*thread;
	TAILQ_ENTRY(nvmf_tgt_poll_group)	link;
};

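/* Default sets of DH-HMAC-CHAP digests and DH groups offered by the target. */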
#define NVMF_TGT_DEFAULT_DIGESTS (SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) | \
				  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) | \
				  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512))

#define NVMF_TGT_DEFAULT_DHGROUPS (SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192))

struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf = {
	.opts = {
		.size = SPDK_SIZEOF(&g_spdk_nvmf_tgt_conf.opts, dhchap_dhgroups),
		.name = "nvmf_tgt",
		.max_subsystems = 0,
		.crdt = { 0, 0, 0 },
		.discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
		.dhchap_digests = NVMF_TGT_DEFAULT_DIGESTS,
		.dhchap_dhgroups = NVMF_TGT_DEFAULT_DHGROUPS,
	},
	.admin_passthru.identify_ctrlr = false
};

struct spdk_cpuset *g_poll_groups_mask = NULL;
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;

static enum nvmf_tgt_state g_tgt_state;

static struct spdk_thread *g_tgt_init_thread = NULL;
static struct spdk_thread *g_tgt_fini_thread = NULL;

static TAILQ_HEAD(, nvmf_tgt_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
static size_t g_num_poll_groups = 0;

static void nvmf_tgt_advance_state(void);

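/*
 * Begin shutdown. If initialization is still in progress, requeue this message
 * until the target reaches a steady state; if shutdown has already started,
 * ignore the repeated request.
 */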
static void
nvmf_shutdown_cb(void *arg1)
{
	/* Still in an initialization state, defer the shutdown operation */
	if (g_tgt_state < NVMF_TGT_RUNNING) {
		spdk_thread_send_msg(spdk_get_thread(), nvmf_shutdown_cb, NULL);
		return;
	} else if (g_tgt_state != NVMF_TGT_RUNNING && g_tgt_state != NVMF_TGT_ERROR) {
		/* Shutdown is already in progress, ignore the repeated signal */
		return;
	}

	if (g_tgt_state == NVMF_TGT_ERROR) {
		/* A configuration error occurred, skip straight to target destruction */
		g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
	} else {
		g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
	}
	nvmf_tgt_advance_state();
}

static void
nvmf_subsystem_fini(void)
{
	nvmf_shutdown_cb(NULL);
}

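/*
 * Runs on the fini thread each time a poll group has been destroyed; once the
 * last one is gone, move on to destroying the target itself.
 */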
static void
_nvmf_tgt_destroy_poll_group_done(void *ctx)
{
	assert(g_num_poll_groups > 0);

	if (--g_num_poll_groups == 0) {
		g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
		nvmf_tgt_advance_state();
	}
}

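/* Runs on the poll group's own thread: free the context, notify the fini
 * thread, and exit this thread. */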
static void
nvmf_tgt_destroy_poll_group_done(void *cb_arg, int status)
{
	struct nvmf_tgt_poll_group *pg = cb_arg;

	free(pg);

	spdk_thread_send_msg(g_tgt_fini_thread, _nvmf_tgt_destroy_poll_group_done, NULL);

	spdk_thread_exit(spdk_get_thread());
}

static void
nvmf_tgt_destroy_poll_group(void *ctx)
{
	struct nvmf_tgt_poll_group *pg = ctx;

	spdk_nvmf_poll_group_destroy(pg->group, nvmf_tgt_destroy_poll_group_done, pg);
}

static void
nvmf_tgt_destroy_poll_groups(void)
{
	struct nvmf_tgt_poll_group *pg, *tpg;

	g_tgt_fini_thread = spdk_get_thread();
	assert(g_tgt_fini_thread != NULL);

	TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tpg) {
		TAILQ_REMOVE(&g_poll_groups, pg, link);
		spdk_thread_send_msg(pg->thread, nvmf_tgt_destroy_poll_group, pg);
	}
}

static uint32_t
nvmf_get_cpuset_count(void)
{
	if (g_poll_groups_mask) {
		return spdk_cpuset_count(g_poll_groups_mask);
	} else {
		return spdk_env_get_core_count();
	}
}

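/*
 * Runs on the init thread for every poll group creation attempt. A failed
 * attempt flips the state to error, but we still wait for all outstanding
 * threads to report back before advancing the state machine.
 */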
static void
nvmf_tgt_create_poll_group_done(void *ctx)
{
	struct nvmf_tgt_poll_group *pg = ctx;

	assert(pg);

	if (!pg->group) {
		SPDK_ERRLOG("Failed to create nvmf poll group\n");
		/* Change the state to error but wait for completions from all other threads */
		g_tgt_state = NVMF_TGT_ERROR;
	}

	TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);

	assert(g_num_poll_groups < nvmf_get_cpuset_count());

	if (++g_num_poll_groups == nvmf_get_cpuset_count()) {
		if (g_tgt_state != NVMF_TGT_ERROR) {
			g_tgt_state = NVMF_TGT_INIT_START_SUBSYSTEMS;
		}
		nvmf_tgt_advance_state();
	}
}

static void
nvmf_tgt_create_poll_group(void *ctx)
{
	struct nvmf_tgt_poll_group *pg;

	pg = calloc(1, sizeof(*pg));
	if (!pg) {
		SPDK_ERRLOG("Not enough memory to allocate poll groups\n");
		g_tgt_state = NVMF_TGT_ERROR;
		nvmf_tgt_advance_state();
		return;
	}

	pg->thread = spdk_get_thread();
	pg->group = spdk_nvmf_poll_group_create(g_spdk_nvmf_tgt);

	spdk_thread_send_msg(g_tgt_init_thread, nvmf_tgt_create_poll_group_done, pg);
}

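/*
 * Spawn one SPDK thread per core selected by g_poll_groups_mask (or every
 * core if no mask is set) and ask each thread to create its poll group.
 */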
static void
nvmf_tgt_create_poll_groups(void)
{
	uint32_t cpu, count = 0;
	char thread_name[32];
	struct spdk_thread *thread;

	g_tgt_init_thread = spdk_get_thread();
	assert(g_tgt_init_thread != NULL);

	SPDK_ENV_FOREACH_CORE(cpu) {
		if (g_poll_groups_mask && !spdk_cpuset_get_cpu(g_poll_groups_mask, cpu)) {
			continue;
		}
		snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%03u", count++);

		thread = spdk_thread_create(thread_name, g_poll_groups_mask);
		assert(thread != NULL);

		spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
	}
}

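/*
 * Completion callback for spdk_nvmf_subsystem_start(): start the next
 * subsystem in the list, or mark the target running when none remain.
 */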
static void
nvmf_tgt_subsystem_started(struct spdk_nvmf_subsystem *subsystem,
			   void *cb_arg, int status)
{
	int rc;

	subsystem = spdk_nvmf_subsystem_get_next(subsystem);

	if (subsystem) {
		rc = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
		if (rc) {
			g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
			SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
			nvmf_tgt_advance_state();
		}
		return;
	}

	g_tgt_state = NVMF_TGT_RUNNING;
	nvmf_tgt_advance_state();
}

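/*
 * Completion callback for spdk_nvmf_subsystem_stop(): stop the next subsystem
 * in the list, or advance to subsystem destruction when none remain.
 */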
static void
nvmf_tgt_subsystem_stopped(struct spdk_nvmf_subsystem *subsystem,
			   void *cb_arg, int status)
{
	int rc;

	subsystem = spdk_nvmf_subsystem_get_next(subsystem);

	if (subsystem) {
		rc = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
		if (rc) {
			SPDK_ERRLOG("Unable to stop NVMe-oF subsystem %s with rc %d. Trying others.\n",
				    spdk_nvmf_subsystem_get_nqn(subsystem), rc);
			nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
		}
		return;
	}

	g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
	nvmf_tgt_advance_state();
}

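/*
 * Stop accepting new connections on every listener of every subsystem.
 * Failures are logged but do not abort the shutdown sequence.
 */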
static void
nvmf_tgt_stop_listen(void)
{
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_subsystem_listener *listener;
	const struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_transport *transport;
	int rc;

	for (subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);
	     subsystem != NULL;
	     subsystem = spdk_nvmf_subsystem_get_next(subsystem)) {
		for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem);
		     listener != NULL;
		     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
			trid = spdk_nvmf_subsystem_listener_get_trid(listener);
			transport = spdk_nvmf_tgt_get_transport(g_spdk_nvmf_tgt, trid->trstring);
			rc = spdk_nvmf_transport_stop_listen(transport, trid);
			if (rc != 0) {
				SPDK_ERRLOG("Unable to stop subsystem %s listener %s:%s, rc %d. Trying others.\n",
					    spdk_nvmf_subsystem_get_nqn(subsystem), trid->traddr, trid->trsvcid, rc);
				continue;
			}
		}
	}

	g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
}

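/*
 * Destroy all subsystems. spdk_nvmf_subsystem_destroy() may complete
 * asynchronously (-EINPROGRESS), in which case this function is re-entered
 * as its own completion callback to continue with the remaining subsystems.
 */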
static void
_nvmf_tgt_subsystem_destroy(void *cb_arg)
{
	struct spdk_nvmf_subsystem *subsystem, *next_subsystem;
	int rc;

	subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

	while (subsystem != NULL) {
		next_subsystem = spdk_nvmf_subsystem_get_next(subsystem);
		rc = spdk_nvmf_subsystem_destroy(subsystem, _nvmf_tgt_subsystem_destroy, NULL);
		if (rc) {
			if (rc == -EINPROGRESS) {
				/* If rc is -EINPROGRESS, _nvmf_tgt_subsystem_destroy will be called again
				 * once this subsystem is destroyed and will continue destroying any
				 * remaining subsystems */
				return;
			} else {
				SPDK_ERRLOG("Unable to destroy subsystem %s, rc %d. Trying others.\n",
					    spdk_nvmf_subsystem_get_nqn(subsystem), rc);
			}
		}
		subsystem = next_subsystem;
	}

	g_tgt_state = NVMF_TGT_FINI_DESTROY_POLL_GROUPS;
	nvmf_tgt_advance_state();
}

static void
nvmf_tgt_destroy_done(void *ctx, int status)
{
	g_tgt_state = NVMF_TGT_STOPPED;

	nvmf_tgt_advance_state();
}

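/*
 * Every NVMe-oF target exposes a discovery subsystem at the well-known
 * discovery NQN; create it and allow any host to connect to it.
 */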
static int
nvmf_add_discovery_subsystem(void)
{
	struct spdk_nvmf_subsystem *subsystem;

	subsystem = spdk_nvmf_subsystem_create(g_spdk_nvmf_tgt, SPDK_NVMF_DISCOVERY_NQN,
					       SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT, 0);
	if (subsystem == NULL) {
		SPDK_ERRLOG("Failed creating discovery nvmf library subsystem\n");
		return -1;
	}

	spdk_nvmf_subsystem_set_allow_any_host(subsystem, true);

	return 0;
}

static int
nvmf_tgt_create_target(void)
{
	g_spdk_nvmf_tgt = spdk_nvmf_tgt_create(&g_spdk_nvmf_tgt_conf.opts);
	if (!g_spdk_nvmf_tgt) {
		SPDK_ERRLOG("spdk_nvmf_tgt_create() failed\n");
		return -1;
	}

	if (nvmf_add_discovery_subsystem() != 0) {
		SPDK_ERRLOG("nvmf_add_discovery_subsystem failed\n");
		return -1;
	}

	return 0;
}

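/*
 * Completion callback for the passthru IDENTIFY CONTROLLER command: merge
 * selected fields of the backing NVMe drive's identify data (SN, MN, FR,
 * IEEE) into the NVMe-oF controller's identify data before responding.
 */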
static void
fixup_identify_ctrlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_ctrlr_data nvme_cdata = {};
	struct spdk_nvme_ctrlr_data nvmf_cdata = {};
	struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
	size_t datalen;
	int rc;

	/* This is the identify data from the NVMe drive */
	datalen = spdk_nvmf_request_copy_to_buf(req, &nvme_cdata,
						sizeof(nvme_cdata));

	/* Get the NVMF identify data */
	rc = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, &nvmf_cdata);
	if (rc != SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return;
	}

	/* Fixup NVMF identify data with NVMe identify data */

	/* Serial Number (SN) */
	memcpy(&nvmf_cdata.sn[0], &nvme_cdata.sn[0], sizeof(nvmf_cdata.sn));
	/* Model Number (MN) */
	memcpy(&nvmf_cdata.mn[0], &nvme_cdata.mn[0], sizeof(nvmf_cdata.mn));
	/* Firmware Revision (FR) */
	memcpy(&nvmf_cdata.fr[0], &nvme_cdata.fr[0], sizeof(nvmf_cdata.fr));
	/* IEEE OUI Identifier (IEEE) */
	memcpy(&nvmf_cdata.ieee[0], &nvme_cdata.ieee[0], sizeof(nvmf_cdata.ieee));
	/* FRU Globally Unique Identifier (FGUID) */

	/* Copy the fixed up data back to the response */
	spdk_nvmf_request_copy_from_buf(req, &nvmf_cdata, datalen);
}

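/*
 * Custom admin command handler for IDENTIFY CONTROLLER. Returns a negative
 * value to fall back to the generic handler unless the subsystem has exactly
 * one namespace whose bdev supports NVMe admin passthru.
 */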
static int
nvmf_custom_identify_hdlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem *subsys;
	int rc;

	if (cmd->cdw10_bits.identify.cns != SPDK_NVME_IDENTIFY_CTRLR) {
		return -1; /* continue */
	}

	subsys = spdk_nvmf_request_get_subsystem(req);
	if (subsys == NULL) {
		return -1;
	}

	/* Only process this request if the subsystem has exactly one namespace */
	if (spdk_nvmf_subsystem_get_max_nsid(subsys) != 1) {
		return -1;
	}

	/* Forward to the first namespace if its bdev supports NVMe admin commands */
	rc = spdk_nvmf_request_get_bdev(1, req, &bdev, &desc, &ch);
	if (rc) {
		/* No bdev found for this namespace. Continue. */
		return -1;
	}

	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return -1;
	}

	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, fixup_identify_ctrlr);
}

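/*
 * Drive the target state machine: keep processing states until one of them
 * leaves the state unchanged (i.e. is waiting for an asynchronous completion)
 * or the function returns explicitly.
 */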
static void
nvmf_tgt_advance_state(void)
{
	enum nvmf_tgt_state prev_state;
	int rc = -1;
	int ret;

	do {
		SPDK_DTRACE_PROBE1(nvmf_tgt_state, g_tgt_state);
		prev_state = g_tgt_state;

		switch (g_tgt_state) {
		case NVMF_TGT_INIT_NONE: {
			g_tgt_state = NVMF_TGT_INIT_CREATE_TARGET;
			break;
		}
		case NVMF_TGT_INIT_CREATE_TARGET:
			ret = nvmf_tgt_create_target();
			g_tgt_state = (ret == 0) ? NVMF_TGT_INIT_CREATE_POLL_GROUPS : NVMF_TGT_ERROR;
			break;
		case NVMF_TGT_INIT_CREATE_POLL_GROUPS:
			if (g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr) {
				SPDK_NOTICELOG("Custom identify ctrlr handler enabled\n");
				spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY, nvmf_custom_identify_hdlr);
			}
			/* Create a thread for each poll group, and send each thread a
			 * message to create its poll group.
			 */
			nvmf_tgt_create_poll_groups();
			break;
		case NVMF_TGT_INIT_START_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				ret = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
				if (ret) {
					SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
					g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
				}
			} else {
				g_tgt_state = NVMF_TGT_RUNNING;
			}
			break;
		}
		case NVMF_TGT_RUNNING:
			spdk_subsystem_init_next(0);
			break;
		case NVMF_TGT_FINI_STOP_LISTEN:
			nvmf_tgt_stop_listen();
			break;
		case NVMF_TGT_FINI_STOP_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				ret = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
				if (ret) {
					nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
				}
			} else {
				g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
			}
			break;
		}
		case NVMF_TGT_FINI_DESTROY_SUBSYSTEMS:
			_nvmf_tgt_subsystem_destroy(NULL);
			/* The function above may complete asynchronously; it calls
			 * nvmf_tgt_advance_state() when done, so just return here. */
			return;
		case NVMF_TGT_FINI_DESTROY_POLL_GROUPS:
			/* Send a message to each poll group thread, and terminate the thread */
			nvmf_tgt_destroy_poll_groups();
			break;
		case NVMF_TGT_FINI_DESTROY_TARGET:
			spdk_nvmf_tgt_destroy(g_spdk_nvmf_tgt, nvmf_tgt_destroy_done, NULL);
			break;
		case NVMF_TGT_STOPPED:
			spdk_subsystem_fini_next();
			return;
		case NVMF_TGT_ERROR:
			spdk_subsystem_init_next(rc);
			return;
		}

	} while (g_tgt_state != prev_state);
}

static void
nvmf_subsystem_init(void)
{
	g_tgt_state = NVMF_TGT_INIT_NONE;
	nvmf_tgt_advance_state();
}

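/*
 * Translate the discovery filter bitmask into its comma-separated string
 * form for the nvmf_set_config RPC; the table below is indexed directly by
 * the bitmask value.
 */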
static void
nvmf_subsystem_dump_discover_filter(struct spdk_json_write_ctx *w)
{
	static char const *const answers[] = {
		"match_any",
		"transport",
		"address",
		"transport,address",
		"svcid",
		"transport,svcid",
		"address,svcid",
		"transport,address,svcid"
	};

	if ((g_spdk_nvmf_tgt_conf.opts.discovery_filter & ~(SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_TYPE |
			SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_ADDRESS |
			SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_SVCID)) != 0) {
		SPDK_ERRLOG("Incorrect discovery filter %d\n", g_spdk_nvmf_tgt_conf.opts.discovery_filter);
		assert(0);
		return;
	}

	spdk_json_write_named_string(w, "discovery_filter",
				     answers[g_spdk_nvmf_tgt_conf.opts.discovery_filter]);
}

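/*
 * Emit this module's configuration as an nvmf_set_config RPC call, then let
 * the nvmf library append the rest of the target configuration.
 */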
static void
nvmf_subsystem_write_config_json(struct spdk_json_write_ctx *w)
{
	int i;

	spdk_json_write_array_begin(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_config");

	spdk_json_write_named_object_begin(w, "params");
	nvmf_subsystem_dump_discover_filter(w);
	spdk_json_write_named_object_begin(w, "admin_cmd_passthru");
	spdk_json_write_named_bool(w, "identify_ctrlr",
				   g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr);
	spdk_json_write_object_end(w);
	if (g_poll_groups_mask) {
		spdk_json_write_named_string(w, "poll_groups_mask", spdk_cpuset_fmt(g_poll_groups_mask));
	}
	spdk_json_write_named_array_begin(w, "dhchap_digests");
	for (i = 0; i < 32; ++i) {
		if (g_spdk_nvmf_tgt_conf.opts.dhchap_digests & SPDK_BIT(i)) {
			spdk_json_write_string(w, spdk_nvme_dhchap_get_digest_name(i));
		}
	}
	spdk_json_write_array_end(w);
	spdk_json_write_named_array_begin(w, "dhchap_dhgroups");
	for (i = 0; i < 32; ++i) {
		if (g_spdk_nvmf_tgt_conf.opts.dhchap_dhgroups & SPDK_BIT(i)) {
			spdk_json_write_string(w, spdk_nvme_dhchap_get_dhgroup_name(i));
		}
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	spdk_nvmf_tgt_write_config_json(w, g_spdk_nvmf_tgt);
	spdk_json_write_array_end(w);
}

static struct spdk_subsystem g_spdk_subsystem_nvmf = {
	.name = "nvmf",
	.init = nvmf_subsystem_init,
	.fini = nvmf_subsystem_fini,
	.write_config_json = nvmf_subsystem_write_config_json,
};

SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_nvmf)
SPDK_SUBSYSTEM_DEPEND(nvmf, bdev)
SPDK_SUBSYSTEM_DEPEND(nvmf, keyring)
SPDK_SUBSYSTEM_DEPEND(nvmf, sock)