xref: /spdk/module/event/subsystems/nvmf/nvmf_tgt.c (revision 16d862d0380886f6fc765f68a87e240bb4295595)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "event_nvmf.h"
8 
9 #include "spdk/bdev.h"
10 #include "spdk/thread.h"
11 #include "spdk/log.h"
12 #include "spdk/nvme.h"
13 #include "spdk/nvmf_cmd.h"
14 #include "spdk_internal/usdt.h"
15 
/* States of the nvmf target init/fini state machine driven by
 * nvmf_tgt_advance_state(). Initialization walks the INIT_* states forward
 * to RUNNING; shutdown walks the FINI_* states forward to STOPPED.
 * ERROR is entered when target or poll group creation fails.
 * NOTE: ordering is significant - nvmf_shutdown_cb() compares states with
 * '<' to detect "still initializing".
 */
enum nvmf_tgt_state {
	NVMF_TGT_INIT_NONE = 0,
	NVMF_TGT_INIT_CREATE_TARGET,
	NVMF_TGT_INIT_CREATE_POLL_GROUPS,
	NVMF_TGT_INIT_START_SUBSYSTEMS,
	NVMF_TGT_RUNNING,
	NVMF_TGT_FINI_STOP_LISTEN,
	NVMF_TGT_FINI_STOP_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_POLL_GROUPS,
	NVMF_TGT_FINI_DESTROY_TARGET,
	NVMF_TGT_STOPPED,
	NVMF_TGT_ERROR,
};
30 
/* Bookkeeping for one nvmf poll group: the group itself, the SPDK thread
 * that owns it (all group operations are sent to that thread), and linkage
 * into the global g_poll_groups list.
 */
struct nvmf_tgt_poll_group {
	struct spdk_nvmf_poll_group		*group;
	struct spdk_thread			*thread;
	TAILQ_ENTRY(nvmf_tgt_poll_group)	link;
};
36 
/* Global target configuration with built-in defaults; replayed/overridden via
 * the "nvmf_set_config" RPC (see nvmf_subsystem_write_config_json).
 */
struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf = {
	.opts = {
		/* Size up to and including the last field we initialize, for ABI
		 * compatibility with older/newer spdk_nvmf_target_opts layouts. */
		.size = SPDK_SIZEOF(&g_spdk_nvmf_tgt_conf.opts, dhchap_dhgroups),
		.name = "nvmf_tgt",
		.max_subsystems = 0,
		.crdt = { 0, 0, 0 },
		.discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
		/* NOTE(review): UINT32_MAX appears to enable every DH-HMAC-CHAP
		 * digest/dhgroup bit - confirm against spdk_nvmf_target_opts docs. */
		.dhchap_digests = UINT32_MAX,
		.dhchap_dhgroups = UINT32_MAX,
	},
	.admin_passthru.identify_ctrlr = false
};
49 
/* Optional CPU mask restricting which cores host nvmf poll group threads;
 * NULL means one poll group per reactor core. */
struct spdk_cpuset *g_poll_groups_mask = NULL;
/* The single global nvmf target instance managed by this subsystem. */
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;

/* Current position in the init/fini state machine. */
static enum nvmf_tgt_state g_tgt_state;

/* Threads that initiated init/fini; poll group threads report completions
 * back to these via spdk_thread_send_msg(). */
static struct spdk_thread *g_tgt_init_thread = NULL;
static struct spdk_thread *g_tgt_fini_thread = NULL;

/* All created poll groups and their count. */
static TAILQ_HEAD(, nvmf_tgt_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
static size_t g_num_poll_groups = 0;

static void nvmf_tgt_advance_state(void);
62 
63 static void
64 nvmf_shutdown_cb(void *arg1)
65 {
66 	/* Still in initialization state, defer shutdown operation */
67 	if (g_tgt_state < NVMF_TGT_RUNNING) {
68 		spdk_thread_send_msg(spdk_get_thread(), nvmf_shutdown_cb, NULL);
69 		return;
70 	} else if (g_tgt_state != NVMF_TGT_RUNNING && g_tgt_state != NVMF_TGT_ERROR) {
71 		/* Already in Shutdown status, ignore the signal */
72 		return;
73 	}
74 
75 	if (g_tgt_state == NVMF_TGT_ERROR) {
76 		/* Parse configuration error */
77 		g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
78 	} else {
79 		g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
80 	}
81 	nvmf_tgt_advance_state();
82 }
83 
/* Framework fini hook: routes teardown through the common shutdown path. */
static void
nvmf_subsystem_fini(void)
{
	nvmf_shutdown_cb(NULL);
}
89 
90 static void
91 _nvmf_tgt_destroy_poll_group_done(void *ctx)
92 {
93 	assert(g_num_poll_groups > 0);
94 
95 	if (--g_num_poll_groups == 0) {
96 		g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
97 		nvmf_tgt_advance_state();
98 	}
99 }
100 
101 static void
102 nvmf_tgt_destroy_poll_group_done(void *cb_arg, int status)
103 {
104 	struct nvmf_tgt_poll_group *pg = cb_arg;
105 
106 	free(pg);
107 
108 	spdk_thread_send_msg(g_tgt_fini_thread, _nvmf_tgt_destroy_poll_group_done, NULL);
109 
110 	spdk_thread_exit(spdk_get_thread());
111 }
112 
113 static void
114 nvmf_tgt_destroy_poll_group(void *ctx)
115 {
116 	struct nvmf_tgt_poll_group *pg = ctx;
117 
118 	spdk_nvmf_poll_group_destroy(pg->group, nvmf_tgt_destroy_poll_group_done, pg);
119 }
120 
121 static void
122 nvmf_tgt_destroy_poll_groups(void)
123 {
124 	struct nvmf_tgt_poll_group *pg, *tpg;
125 
126 	g_tgt_fini_thread = spdk_get_thread();
127 	assert(g_tgt_fini_thread != NULL);
128 
129 	TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tpg) {
130 		TAILQ_REMOVE(&g_poll_groups, pg, link);
131 		spdk_thread_send_msg(pg->thread, nvmf_tgt_destroy_poll_group, pg);
132 	}
133 }
134 
135 static uint32_t
136 nvmf_get_cpuset_count(void)
137 {
138 	if (g_poll_groups_mask) {
139 		return spdk_cpuset_count(g_poll_groups_mask);
140 	} else {
141 		return spdk_env_get_core_count();
142 	}
143 }
144 
145 static void
146 nvmf_tgt_create_poll_group_done(void *ctx)
147 {
148 	struct nvmf_tgt_poll_group *pg = ctx;
149 
150 	assert(pg);
151 
152 	if (!pg->group) {
153 		SPDK_ERRLOG("Failed to create nvmf poll group\n");
154 		/* Change the state to error but wait for completions from all other threads */
155 		g_tgt_state = NVMF_TGT_ERROR;
156 	}
157 
158 	TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);
159 
160 	assert(g_num_poll_groups < nvmf_get_cpuset_count());
161 
162 	if (++g_num_poll_groups == nvmf_get_cpuset_count()) {
163 		if (g_tgt_state != NVMF_TGT_ERROR) {
164 			g_tgt_state = NVMF_TGT_INIT_START_SUBSYSTEMS;
165 		}
166 		nvmf_tgt_advance_state();
167 	}
168 }
169 
170 static void
171 nvmf_tgt_create_poll_group(void *ctx)
172 {
173 	struct nvmf_tgt_poll_group *pg;
174 
175 	pg = calloc(1, sizeof(*pg));
176 	if (!pg) {
177 		SPDK_ERRLOG("Not enough memory to allocate poll groups\n");
178 		g_tgt_state = NVMF_TGT_ERROR;
179 		nvmf_tgt_advance_state();
180 		return;
181 	}
182 
183 	pg->thread = spdk_get_thread();
184 	pg->group = spdk_nvmf_poll_group_create(g_spdk_nvmf_tgt);
185 
186 	spdk_thread_send_msg(g_tgt_init_thread, nvmf_tgt_create_poll_group_done, pg);
187 }
188 
189 static void
190 nvmf_tgt_create_poll_groups(void)
191 {
192 	uint32_t cpu, count = 0;
193 	char thread_name[32];
194 	struct spdk_thread *thread;
195 
196 	g_tgt_init_thread = spdk_get_thread();
197 	assert(g_tgt_init_thread != NULL);
198 
199 	SPDK_ENV_FOREACH_CORE(cpu) {
200 		if (g_poll_groups_mask && !spdk_cpuset_get_cpu(g_poll_groups_mask, cpu)) {
201 			continue;
202 		}
203 		snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%03u", count++);
204 
205 		thread = spdk_thread_create(thread_name, g_poll_groups_mask);
206 		assert(thread != NULL);
207 
208 		spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
209 	}
210 }
211 
212 static void
213 nvmf_tgt_subsystem_started(struct spdk_nvmf_subsystem *subsystem,
214 			   void *cb_arg, int status)
215 {
216 	subsystem = spdk_nvmf_subsystem_get_next(subsystem);
217 	int rc;
218 
219 	if (subsystem) {
220 		rc = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
221 		if (rc) {
222 			g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
223 			SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
224 			nvmf_tgt_advance_state();
225 		}
226 		return;
227 	}
228 
229 	g_tgt_state = NVMF_TGT_RUNNING;
230 	nvmf_tgt_advance_state();
231 }
232 
233 static void
234 nvmf_tgt_subsystem_stopped(struct spdk_nvmf_subsystem *subsystem,
235 			   void *cb_arg, int status)
236 {
237 	subsystem = spdk_nvmf_subsystem_get_next(subsystem);
238 	int rc;
239 
240 	if (subsystem) {
241 		rc = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
242 		if (rc) {
243 			SPDK_ERRLOG("Unable to stop NVMe-oF subsystem %s with rc %d, Trying others.\n",
244 				    spdk_nvmf_subsystem_get_nqn(subsystem), rc);
245 			nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
246 		}
247 		return;
248 	}
249 
250 	g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
251 	nvmf_tgt_advance_state();
252 }
253 
254 static void
255 nvmf_tgt_stop_listen(void)
256 {
257 	struct spdk_nvmf_subsystem *subsystem;
258 	struct spdk_nvmf_subsystem_listener *listener;
259 	const struct spdk_nvme_transport_id *trid;
260 	struct spdk_nvmf_transport *transport;
261 	int rc;
262 
263 	for (subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);
264 	     subsystem != NULL;
265 	     subsystem = spdk_nvmf_subsystem_get_next(subsystem)) {
266 		for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem);
267 		     listener != NULL;
268 		     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
269 			trid = spdk_nvmf_subsystem_listener_get_trid(listener);
270 			transport = spdk_nvmf_tgt_get_transport(g_spdk_nvmf_tgt, trid->trstring);
271 			rc = spdk_nvmf_transport_stop_listen(transport, trid);
272 			if (rc != 0) {
273 				SPDK_ERRLOG("Unable to stop subsystem %s listener %s:%s, rc %d. Trying others.\n",
274 					    spdk_nvmf_subsystem_get_nqn(subsystem), trid->traddr, trid->trsvcid, rc);
275 				continue;
276 			}
277 		}
278 	}
279 
280 	g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
281 }
282 
/* Destroy all subsystems in the target. Also registered as the completion
 * callback for spdk_nvmf_subsystem_destroy(): if a destroy completes
 * asynchronously (-EINPROGRESS), this function is re-invoked when it
 * finishes and resumes from the head of the (now shorter) list. When the
 * list is empty, advances to destroying the poll groups.
 */
static void
_nvmf_tgt_subsystem_destroy(void *cb_arg)
{
	struct spdk_nvmf_subsystem *subsystem, *next_subsystem;
	int rc;

	subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

	while (subsystem != NULL) {
		/* Grab the next pointer before the destroy can invalidate the
		 * current subsystem. */
		next_subsystem = spdk_nvmf_subsystem_get_next(subsystem);
		rc = spdk_nvmf_subsystem_destroy(subsystem, _nvmf_tgt_subsystem_destroy, NULL);
		if (rc) {
			if (rc == -EINPROGRESS) {
				/* Asynchronous completion: this function is the destroy callback
				 * and will be called again once the subsystem is destroyed,
				 * continuing with any remaining subsystems. */
				return;
			} else {
				SPDK_ERRLOG("Unable to destroy subsystem %s, rc %d. Trying others.\n",
					    spdk_nvmf_subsystem_get_nqn(subsystem), rc);
			}
		}
		subsystem = next_subsystem;
	}

	g_tgt_state = NVMF_TGT_FINI_DESTROY_POLL_GROUPS;
	nvmf_tgt_advance_state();
}
310 
/* Completion callback for spdk_nvmf_tgt_destroy(): the target is gone, so
 * mark the state machine stopped and let it notify the framework.
 */
static void
nvmf_tgt_destroy_done(void *ctx, int status)
{
	g_tgt_state = NVMF_TGT_STOPPED;

	nvmf_tgt_advance_state();
}
318 
319 static int
320 nvmf_add_discovery_subsystem(void)
321 {
322 	struct spdk_nvmf_subsystem *subsystem;
323 
324 	subsystem = spdk_nvmf_subsystem_create(g_spdk_nvmf_tgt, SPDK_NVMF_DISCOVERY_NQN,
325 					       SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT, 0);
326 	if (subsystem == NULL) {
327 		SPDK_ERRLOG("Failed creating discovery nvmf library subsystem\n");
328 		return -1;
329 	}
330 
331 	spdk_nvmf_subsystem_set_allow_any_host(subsystem, true);
332 
333 	return 0;
334 }
335 
336 static int
337 nvmf_tgt_create_target(void)
338 {
339 	g_spdk_nvmf_tgt = spdk_nvmf_tgt_create(&g_spdk_nvmf_tgt_conf.opts);
340 	if (!g_spdk_nvmf_tgt) {
341 		SPDK_ERRLOG("spdk_nvmf_tgt_create() failed\n");
342 		return -1;
343 	}
344 
345 	if (nvmf_add_discovery_subsystem() != 0) {
346 		SPDK_ERRLOG("nvmf_add_discovery_subsystem failed\n");
347 		return -1;
348 	}
349 
350 	return 0;
351 }
352 
/* Passthru fixup for Identify Controller: merge identity fields (SN, MN, FR,
 * IEEE OUI) from the backing NVMe drive's identify data into the NVMF
 * controller's identify data, then write the merged result back into the
 * request buffer. On an internal failure the response status is set to
 * INTERNAL_DEVICE_ERROR and the buffer is left untouched.
 */
static void
fixup_identify_ctrlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_ctrlr_data nvme_cdata = {};
	struct spdk_nvme_ctrlr_data nvmf_cdata = {};
	struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
	size_t datalen;
	int rc;

	/* This is the identify data from the NVMe drive */
	datalen = spdk_nvmf_request_copy_to_buf(req, &nvme_cdata,
						sizeof(nvme_cdata));

	/* Get the NVMF identify data */
	rc = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, &nvmf_cdata);
	if (rc != SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return;
	}

	/* Fixup NVMF identify data with NVMe identify data */

	/* Serial Number (SN) */
	memcpy(&nvmf_cdata.sn[0], &nvme_cdata.sn[0], sizeof(nvmf_cdata.sn));
	/* Model Number (MN) */
	memcpy(&nvmf_cdata.mn[0], &nvme_cdata.mn[0], sizeof(nvmf_cdata.mn));
	/* Firmware Revision (FR) */
	memcpy(&nvmf_cdata.fr[0], &nvme_cdata.fr[0], sizeof(nvmf_cdata.fr));
	/* IEEE OUI Identifier (IEEE) */
	memcpy(&nvmf_cdata.ieee[0], &nvme_cdata.ieee[0], sizeof(nvmf_cdata.ieee));
	/* FRU Globally Unique Identifier (FGUID) - intentionally not copied here */

	/* Copy the fixed up data back to the response */
	spdk_nvmf_request_copy_from_buf(req, &nvmf_cdata, datalen);
}
390 
/* Custom admin command handler for Identify. For IDENTIFY_CTRLR on a
 * single-namespace subsystem whose bdev supports NVMe admin passthru, the
 * command is forwarded to the underlying drive and the result is merged by
 * fixup_identify_ctrlr(). Returns -1 ("continue") to fall back to the
 * standard handler in all other cases.
 */
static int
nvmf_custom_identify_hdlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem *subsys;
	int rc;

	if (cmd->cdw10_bits.identify.cns != SPDK_NVME_IDENTIFY_CTRLR) {
		return -1; /* continue */
	}

	subsys = spdk_nvmf_request_get_subsystem(req);
	if (subsys == NULL) {
		return -1;
	}

	/* Only process this request if it has exactly one namespace */
	if (spdk_nvmf_subsystem_get_max_nsid(subsys) != 1) {
		return -1;
	}

	/* Forward to first namespace if it supports NVME admin commands */
	rc = spdk_nvmf_request_get_bdev(1, req, &bdev, &desc, &ch);
	if (rc) {
		/* No bdev found for this namespace. Continue. */
		return -1;
	}

	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return -1;
	}

	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, fixup_identify_ctrlr);
}
428 
/* Drive the target's init/fini state machine. Loops until the state stops
 * changing; states with asynchronous work either break (the state is
 * unchanged, ending the loop) or return, and their completion callbacks
 * re-enter this function.
 */
static void
nvmf_tgt_advance_state(void)
{
	enum nvmf_tgt_state prev_state;
	int rc = -1;	/* error code reported to the framework in NVMF_TGT_ERROR */
	int ret;

	do {
		SPDK_DTRACE_PROBE1(nvmf_tgt_state, g_tgt_state);
		prev_state = g_tgt_state;

		switch (g_tgt_state) {
		case NVMF_TGT_INIT_NONE: {
			g_tgt_state = NVMF_TGT_INIT_CREATE_TARGET;
			break;
		}
		case NVMF_TGT_INIT_CREATE_TARGET:
			ret = nvmf_tgt_create_target();
			g_tgt_state = (ret == 0) ? NVMF_TGT_INIT_CREATE_POLL_GROUPS : NVMF_TGT_ERROR;
			break;
		case NVMF_TGT_INIT_CREATE_POLL_GROUPS:
			if (g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr) {
				SPDK_NOTICELOG("Custom identify ctrlr handler enabled\n");
				spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY, nvmf_custom_identify_hdlr);
			}
			/* Create poll group threads, and send a message to each thread
			 * and create a poll group. Completions advance the state.
			 */
			nvmf_tgt_create_poll_groups();
			break;
		case NVMF_TGT_INIT_START_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				/* Start the first subsystem; nvmf_tgt_subsystem_started()
				 * chains through the rest. */
				ret = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
				if (ret) {
					SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
					g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
				}
			} else {
				g_tgt_state = NVMF_TGT_RUNNING;
			}
			break;
		}
		case NVMF_TGT_RUNNING:
			/* Initialization complete - hand control back to the framework. */
			spdk_subsystem_init_next(0);
			break;
		case NVMF_TGT_FINI_STOP_LISTEN:
			nvmf_tgt_stop_listen();
			break;
		case NVMF_TGT_FINI_STOP_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				/* Stop the first subsystem; nvmf_tgt_subsystem_stopped()
				 * chains through the rest, skipping failures. */
				ret = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
				if (ret) {
					nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
				}
			} else {
				g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
			}
			break;
		}
		case NVMF_TGT_FINI_DESTROY_SUBSYSTEMS:
			_nvmf_tgt_subsystem_destroy(NULL);
			/* Function above can be asynchronous, it will call nvmf_tgt_advance_state() once done.
			 * So just return here */
			return;
		case NVMF_TGT_FINI_DESTROY_POLL_GROUPS:
			/* Send a message to each poll group thread, and terminate the thread */
			nvmf_tgt_destroy_poll_groups();
			break;
		case NVMF_TGT_FINI_DESTROY_TARGET:
			spdk_nvmf_tgt_destroy(g_spdk_nvmf_tgt, nvmf_tgt_destroy_done, NULL);
			break;
		case NVMF_TGT_STOPPED:
			spdk_subsystem_fini_next();
			return;
		case NVMF_TGT_ERROR:
			/* Report initialization failure (rc == -1) to the framework. */
			spdk_subsystem_init_next(rc);
			return;
		}

	} while (g_tgt_state != prev_state);
}
518 
/* Framework init hook: reset the state machine and run it until it either
 * blocks on an asynchronous step or reaches RUNNING.
 */
static void
nvmf_subsystem_init(void)
{
	g_tgt_state = NVMF_TGT_INIT_NONE;
	nvmf_tgt_advance_state();
}
525 
/* Write the configured discovery filter as a comma-separated string. The
 * filter's flag bits index directly into the answers[] table, so the mask
 * check below guards against values outside the table before indexing.
 */
static void
nvmf_subsystem_dump_discover_filter(struct spdk_json_write_ctx *w)
{
	static char const *const answers[] = {
		"match_any",
		"transport",
		"address",
		"transport,address",
		"svcid",
		"transport,svcid",
		"address,svcid",
		"transport,address,svcid"
	};

	if ((g_spdk_nvmf_tgt_conf.opts.discovery_filter & ~(SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_TYPE |
			SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_ADDRESS |
			SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_SVCID)) != 0) {
		SPDK_ERRLOG("Incorrect discovery filter %d\n", g_spdk_nvmf_tgt_conf.opts.discovery_filter);
		assert(0);
		return;
	}

	spdk_json_write_named_string(w, "discovery_filter",
				     answers[g_spdk_nvmf_tgt_conf.opts.discovery_filter]);
}
551 
/* Emit this subsystem's configuration as a JSON array: an "nvmf_set_config"
 * RPC object reproducing the settings in g_spdk_nvmf_tgt_conf, followed by
 * the nvmf library's own configuration for the target.
 */
static void
nvmf_subsystem_write_config_json(struct spdk_json_write_ctx *w)
{
	spdk_json_write_array_begin(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_config");

	spdk_json_write_named_object_begin(w, "params");
	nvmf_subsystem_dump_discover_filter(w);
	spdk_json_write_named_object_begin(w, "admin_cmd_passthru");
	spdk_json_write_named_bool(w, "identify_ctrlr",
				   g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr);
	spdk_json_write_object_end(w);
	if (g_poll_groups_mask) {
		/* Only persisted when the user configured an explicit mask. */
		spdk_json_write_named_string(w, "poll_groups_mask", spdk_cpuset_fmt(g_poll_groups_mask));
	}
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	spdk_nvmf_tgt_write_config_json(w, g_spdk_nvmf_tgt);
	spdk_json_write_array_end(w);
}
575 
/* Register "nvmf" with the SPDK event framework, wiring init/fini and config
 * serialization to the handlers above, and declare its dependencies on the
 * bdev, keyring and sock subsystems.
 */
static struct spdk_subsystem g_spdk_subsystem_nvmf = {
	.name = "nvmf",
	.init = nvmf_subsystem_init,
	.fini = nvmf_subsystem_fini,
	.write_config_json = nvmf_subsystem_write_config_json,
};

SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_nvmf)
SPDK_SUBSYSTEM_DEPEND(nvmf, bdev)
SPDK_SUBSYSTEM_DEPEND(nvmf, keyring)
SPDK_SUBSYSTEM_DEPEND(nvmf, sock)
587