xref: /spdk/lib/nvmf/nvmf.c (revision cc8bb51b24a156c4a0c20d3ca74066efc72c17bc)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/bdev.h"
37 #include "spdk/conf.h"
38 #include "spdk/io_channel.h"
39 #include "spdk/nvmf.h"
40 #include "spdk/trace.h"
41 
42 #include "spdk_internal/log.h"
43 
44 #include "nvmf_internal.h"
45 #include "transport.h"
46 
47 SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
48 
49 #define SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH 128
50 #define SPDK_NVMF_DEFAULT_MAX_QPAIRS_PER_CTRLR 64
51 #define SPDK_NVMF_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
52 #define SPDK_NVMF_DEFAULT_MAX_IO_SIZE 131072
53 #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024
54 
55 void
56 spdk_nvmf_tgt_opts_init(struct spdk_nvmf_tgt_opts *opts)
57 {
58 	opts->max_queue_depth = SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH;
59 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_DEFAULT_MAX_QPAIRS_PER_CTRLR;
60 	opts->in_capsule_data_size = SPDK_NVMF_DEFAULT_IN_CAPSULE_DATA_SIZE;
61 	opts->max_io_size = SPDK_NVMF_DEFAULT_MAX_IO_SIZE;
62 	opts->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
63 }
64 
65 static int
66 spdk_nvmf_poll_group_poll(void *ctx)
67 {
68 	struct spdk_nvmf_poll_group *group = ctx;
69 	int rc;
70 	int count = 0;
71 	struct spdk_nvmf_transport_poll_group *tgroup;
72 
73 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
74 		rc = spdk_nvmf_transport_poll_group_poll(tgroup);
75 		if (rc < 0) {
76 			return -1;
77 		}
78 		count += rc;
79 	}
80 
81 	return count;
82 }
83 
84 static int
85 spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
86 {
87 	struct spdk_nvmf_tgt *tgt = io_device;
88 	struct spdk_nvmf_poll_group *group = ctx_buf;
89 	struct spdk_nvmf_transport *transport;
90 	uint32_t sid;
91 
92 	TAILQ_INIT(&group->tgroups);
93 
94 	TAILQ_FOREACH(transport, &tgt->transports, link) {
95 		spdk_nvmf_poll_group_add_transport(group, transport);
96 	}
97 
98 	group->num_sgroups = tgt->opts.max_subsystems;
99 	group->sgroups = calloc(tgt->opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
100 	if (!group->sgroups) {
101 		return -1;
102 	}
103 
104 	for (sid = 0; sid < tgt->opts.max_subsystems; sid++) {
105 		struct spdk_nvmf_subsystem *subsystem;
106 
107 		subsystem = tgt->subsystems[sid];
108 		if (!subsystem) {
109 			continue;
110 		}
111 
112 		spdk_nvmf_poll_group_add_subsystem(group, subsystem);
113 	}
114 
115 	group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);
116 	group->thread = spdk_get_thread();
117 
118 	return 0;
119 }
120 
121 static void
122 spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
123 {
124 	struct spdk_nvmf_poll_group *group = ctx_buf;
125 	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
126 	struct spdk_nvmf_subsystem_poll_group *sgroup;
127 	uint32_t sid, nsid;
128 
129 	spdk_poller_unregister(&group->poller);
130 
131 	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
132 		TAILQ_REMOVE(&group->tgroups, tgroup, link);
133 		spdk_nvmf_transport_poll_group_destroy(tgroup);
134 	}
135 
136 	for (sid = 0; sid < group->num_sgroups; sid++) {
137 		sgroup = &group->sgroups[sid];
138 
139 		for (nsid = 0; nsid < sgroup->num_channels; nsid++) {
140 			if (sgroup->channels[nsid]) {
141 				spdk_put_io_channel(sgroup->channels[nsid]);
142 				sgroup->channels[nsid] = NULL;
143 			}
144 		}
145 
146 		free(sgroup->channels);
147 	}
148 
149 	free(group->sgroups);
150 }
151 
152 struct spdk_nvmf_tgt *
153 spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
154 {
155 	struct spdk_nvmf_tgt *tgt;
156 
157 	tgt = calloc(1, sizeof(*tgt));
158 	if (!tgt) {
159 		return NULL;
160 	}
161 
162 	if (!opts) {
163 		spdk_nvmf_tgt_opts_init(&tgt->opts);
164 	} else {
165 		tgt->opts = *opts;
166 	}
167 
168 	tgt->discovery_genctr = 0;
169 	tgt->discovery_log_page = NULL;
170 	tgt->discovery_log_page_size = 0;
171 	TAILQ_INIT(&tgt->transports);
172 
173 	tgt->subsystems = calloc(tgt->opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
174 	if (!tgt->subsystems) {
175 		free(tgt);
176 		return NULL;
177 	}
178 
179 	spdk_io_device_register(tgt,
180 				spdk_nvmf_tgt_create_poll_group,
181 				spdk_nvmf_tgt_destroy_poll_group,
182 				sizeof(struct spdk_nvmf_poll_group));
183 
184 	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max Queue Pairs Per Controller: %d\n",
185 		      tgt->opts.max_qpairs_per_ctrlr);
186 	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max Queue Depth: %d\n", tgt->opts.max_queue_depth);
187 	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max In Capsule Data: %d bytes\n",
188 		      tgt->opts.in_capsule_data_size);
189 	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max I/O Size: %d bytes\n", tgt->opts.max_io_size);
190 
191 	return tgt;
192 }
193 
194 void
195 spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt)
196 {
197 	struct spdk_nvmf_transport *transport, *transport_tmp;
198 	uint32_t i;
199 
200 	if (tgt->discovery_log_page) {
201 		free(tgt->discovery_log_page);
202 	}
203 
204 	if (tgt->subsystems) {
205 		for (i = 0; i < tgt->opts.max_subsystems; i++) {
206 			if (tgt->subsystems[i]) {
207 				spdk_nvmf_subsystem_destroy(tgt->subsystems[i]);
208 			}
209 		}
210 		free(tgt->subsystems);
211 	}
212 
213 	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, transport_tmp) {
214 		TAILQ_REMOVE(&tgt->transports, transport, link);
215 		spdk_nvmf_transport_destroy(transport);
216 	}
217 
218 	free(tgt);
219 }
220 
/* Context carried through spdk_for_each_channel() while a newly created
 * transport is being propagated to every poll group during
 * spdk_nvmf_tgt_listen(). Freed in spdk_nvmf_tgt_listen_done(). */
struct spdk_nvmf_tgt_listen_ctx {
	struct spdk_nvmf_tgt *tgt;
	struct spdk_nvmf_transport *transport;	/* transport being added to each poll group */
	struct spdk_nvme_transport_id trid;	/* copy of the listen address */

	spdk_nvmf_tgt_listen_done_fn cb_fn;	/* user completion callback */
	void *cb_arg;				/* opaque argument passed to cb_fn */
};
229 
230 static void
231 spdk_nvmf_tgt_listen_done(struct spdk_io_channel_iter *i, int status)
232 {
233 	struct spdk_nvmf_tgt_listen_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
234 
235 	ctx->cb_fn(ctx->cb_arg, status);
236 
237 	free(ctx);
238 }
239 
240 static void
241 spdk_nvmf_tgt_listen_add_transport(struct spdk_io_channel_iter *i)
242 {
243 	struct spdk_nvmf_tgt_listen_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
244 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
245 	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
246 	int rc;
247 
248 	rc = spdk_nvmf_poll_group_add_transport(group, ctx->transport);
249 	spdk_for_each_channel_continue(i, rc);
250 }
251 
/*
 * Begin accepting new connections at the address described by trid.
 *
 * If no transport of trid's type exists yet, one is created, inserted
 * into the target, and asynchronously propagated to every poll group
 * before cb_fn(cb_arg, status) is invoked. If the transport already
 * exists, cb_fn is invoked synchronously. On any failure cb_fn receives
 * a negative errno-style status.
 */
void
spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
		     struct spdk_nvme_transport_id *trid,
		     spdk_nvmf_tgt_listen_done_fn cb_fn,
		     void *cb_arg)
{
	struct spdk_nvmf_transport *transport;
	int rc;
	bool propagate = false;

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype);
	if (!transport) {
		/* First listener of this transport type: create the transport
		 * and remember to push it out to all poll groups below. */
		transport = spdk_nvmf_transport_create(tgt, trid->trtype);
		if (!transport) {
			SPDK_ERRLOG("Transport initialization failed\n");
			cb_fn(cb_arg, -EINVAL);
			return;
		}
		TAILQ_INSERT_TAIL(&tgt->transports, transport, link);

		propagate = true;
	}

	rc = spdk_nvmf_transport_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
		cb_fn(cb_arg, rc);
		return;
	}

	/* A new listen address changes the discovery log contents. */
	tgt->discovery_genctr++;

	if (propagate) {
		struct spdk_nvmf_tgt_listen_ctx *ctx;

		ctx = calloc(1, sizeof(*ctx));
		if (!ctx) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		ctx->tgt = tgt;
		ctx->transport = transport;
		ctx->trid = *trid;
		ctx->cb_fn = cb_fn;
		ctx->cb_arg = cb_arg;

		/* Visit every poll group on its own thread; cb_fn fires from
		 * spdk_nvmf_tgt_listen_done() when the iteration completes. */
		spdk_for_each_channel(tgt,
				      spdk_nvmf_tgt_listen_add_transport,
				      ctx,
				      spdk_nvmf_tgt_listen_done);
	} else {
		cb_fn(cb_arg, 0);
	}
}
307 
308 struct spdk_nvmf_subsystem *
309 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
310 {
311 	struct spdk_nvmf_subsystem	*subsystem;
312 	uint32_t sid;
313 
314 	if (!subnqn) {
315 		return NULL;
316 	}
317 
318 	for (sid = 0; sid < tgt->opts.max_subsystems; sid++) {
319 		subsystem = tgt->subsystems[sid];
320 		if (subsystem == NULL) {
321 			continue;
322 		}
323 
324 		if (strcmp(subnqn, subsystem->subnqn) == 0) {
325 			return subsystem;
326 		}
327 	}
328 
329 	return NULL;
330 }
331 
332 struct spdk_nvmf_transport *
333 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_type type)
334 {
335 	struct spdk_nvmf_transport *transport;
336 
337 	TAILQ_FOREACH(transport, &tgt->transports, link) {
338 		if (transport->ops->type == type) {
339 			return transport;
340 		}
341 	}
342 
343 	return NULL;
344 }
345 
346 void
347 spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn)
348 {
349 	struct spdk_nvmf_transport *transport, *tmp;
350 
351 	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
352 		spdk_nvmf_transport_accept(transport, cb_fn);
353 	}
354 }
355 
356 struct spdk_nvmf_poll_group *
357 spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
358 {
359 	struct spdk_io_channel *ch;
360 
361 	ch = spdk_get_io_channel(tgt);
362 	if (!ch) {
363 		SPDK_ERRLOG("Unable to get I/O channel for target\n");
364 		return NULL;
365 	}
366 
367 	return spdk_io_channel_get_ctx(ch);
368 }
369 
/* Release a poll group obtained from spdk_nvmf_poll_group_create().
 * Dropping the channel reference triggers the destroy callback once the
 * last reference is gone. */
void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
	spdk_put_io_channel(spdk_io_channel_from_ctx(group));
}
378 
379 int
380 spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
381 			 struct spdk_nvmf_qpair *qpair)
382 {
383 	int rc = -1;
384 	struct spdk_nvmf_transport_poll_group *tgroup;
385 
386 	qpair->group = group;
387 
388 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
389 		if (tgroup->transport == qpair->transport) {
390 			rc = spdk_nvmf_transport_poll_group_add(tgroup, qpair);
391 			break;
392 		}
393 	}
394 
395 	return rc;
396 }
397 
398 int
399 spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
400 			    struct spdk_nvmf_qpair *qpair)
401 {
402 	int rc = -1;
403 	struct spdk_nvmf_transport_poll_group *tgroup;
404 
405 	qpair->group = NULL;
406 
407 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
408 		if (tgroup->transport == qpair->transport) {
409 			rc = spdk_nvmf_transport_poll_group_remove(tgroup, qpair);
410 			break;
411 		}
412 	}
413 
414 	return rc;
415 }
416 
417 int
418 spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
419 				   struct spdk_nvmf_transport *transport)
420 {
421 	struct spdk_nvmf_transport_poll_group *tgroup;
422 
423 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
424 		if (tgroup->transport == transport) {
425 			/* Transport already in the poll group */
426 			return 0;
427 		}
428 	}
429 
430 	tgroup = spdk_nvmf_transport_poll_group_create(transport);
431 	if (!tgroup) {
432 		SPDK_ERRLOG("Unable to create poll group for transport\n");
433 		return -1;
434 	}
435 
436 	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
437 
438 	return 0;
439 }
440 
/*
 * Synchronize a poll group's per-subsystem channel array with the
 * subsystem's current set of namespaces.
 *
 * The channel array is indexed by nsid - 1 and sized to
 * subsystem->max_nsid; it is grown, shrunk, or freed as needed, then each
 * slot is reconciled against subsystem->ns[] — channels are released for
 * removed namespaces and acquired for new ones.
 *
 * Returns 0 on success, -ENOMEM if the subsystem id is out of range for
 * this group or an allocation fails.
 */
static int
poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
			    struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t new_num_channels, old_num_channels;
	uint32_t i;
	struct spdk_nvmf_ns *ns;

	/* Make sure our poll group has memory for this subsystem allocated */
	if (subsystem->id >= group->num_sgroups) {
		return -ENOMEM;
	}

	sgroup = &group->sgroups[subsystem->id];

	/* Make sure the array of channels is the correct size */
	new_num_channels = subsystem->max_nsid;
	old_num_channels = sgroup->num_channels;

	if (old_num_channels == 0) {
		if (new_num_channels > 0) {
			/* First allocation */
			sgroup->channels = calloc(new_num_channels, sizeof(sgroup->channels[0]));
			if (!sgroup->channels) {
				return -ENOMEM;
			}
		}
	} else if (new_num_channels > old_num_channels) {
		void *buf;

		/* Make the array larger */
		buf = realloc(sgroup->channels, new_num_channels * sizeof(sgroup->channels[0]));
		if (!buf) {
			/* Original array is still valid; sgroup->num_channels unchanged. */
			return -ENOMEM;
		}

		sgroup->channels = buf;

		/* Null out the new channels slots */
		for (i = old_num_channels; i < new_num_channels; i++) {
			sgroup->channels[i] = NULL;
		}
	} else if (new_num_channels < old_num_channels) {
		void *buf;

		/* Free the extra I/O channels */
		for (i = new_num_channels; i < old_num_channels; i++) {
			if (sgroup->channels[i]) {
				spdk_put_io_channel(sgroup->channels[i]);
				sgroup->channels[i] = NULL;
			}
		}

		/* Make the array smaller */
		if (new_num_channels > 0) {
			buf = realloc(sgroup->channels, new_num_channels * sizeof(sgroup->channels[0]));
			if (!buf) {
				/* Shrinking realloc failed: channels past new_num_channels
				 * were already released above, so bail out. */
				return -ENOMEM;
			}
			sgroup->channels = buf;
		} else {
			free(sgroup->channels);
			sgroup->channels = NULL;
		}
	}

	sgroup->num_channels = new_num_channels;

	/* Detect bdevs that were added or removed */
	for (i = 0; i < sgroup->num_channels; i++) {
		ns = subsystem->ns[i];
		if (ns == NULL && sgroup->channels[i] == NULL) {
			/* Both NULL. Leave empty */
		} else if (ns == NULL && sgroup->channels[i] != NULL) {
			/* There was a channel here, but the namespace is gone. */
			spdk_put_io_channel(sgroup->channels[i]);
			sgroup->channels[i] = NULL;
		} else if (ns != NULL && sgroup->channels[i] == NULL) {
			/* A namespace appeared but there is no channel yet */
			sgroup->channels[i] = spdk_bdev_get_io_channel(ns->desc);
		} else {
			/* A namespace was present before and didn't change. */
		}
	}

	return 0;
}
529 
/* Public wrapper: resize this poll group's channel array to match the
 * subsystem's current namespaces. Returns 0 on success or -ENOMEM. */
int
spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem)
{
	return poll_group_update_subsystem(group, subsystem);
}
536 
537 int
538 spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
539 				   struct spdk_nvmf_subsystem *subsystem)
540 {
541 	struct spdk_nvmf_subsystem_poll_group *sgroup;
542 	int rc;
543 
544 	rc = poll_group_update_subsystem(group, subsystem);
545 	if (rc) {
546 		return rc;
547 	}
548 
549 	sgroup = &group->sgroups[subsystem->id];
550 	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
551 	TAILQ_INIT(&sgroup->queued);
552 	TAILQ_INIT(&sgroup->outstanding);
553 
554 	return 0;
555 }
556 
557 int
558 spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
559 				      struct spdk_nvmf_subsystem *subsystem)
560 {
561 	struct spdk_nvmf_subsystem_poll_group *sgroup;
562 	uint32_t nsid;
563 
564 	sgroup = &group->sgroups[subsystem->id];
565 	sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
566 
567 	for (nsid = 0; nsid < sgroup->num_channels; nsid++) {
568 		if (sgroup->channels[nsid]) {
569 			spdk_put_io_channel(sgroup->channels[nsid]);
570 			sgroup->channels[nsid] = NULL;
571 		}
572 	}
573 
574 	sgroup->num_channels = 0;
575 	free(sgroup->channels);
576 	sgroup->channels = NULL;
577 
578 	return 0;
579 }
580 
581 int
582 spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
583 				     struct spdk_nvmf_subsystem *subsystem)
584 {
585 	struct spdk_nvmf_subsystem_poll_group *sgroup;
586 
587 	if (subsystem->id >= group->num_sgroups) {
588 		return -1;
589 	}
590 
591 	sgroup = &group->sgroups[subsystem->id];
592 	if (sgroup == NULL) {
593 		return -1;
594 	}
595 
596 	assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
597 	/* TODO: This currently does not quiesce I/O */
598 	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
599 
600 	return 0;
601 }
602 
603 int
604 spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
605 				      struct spdk_nvmf_subsystem *subsystem)
606 {
607 	struct spdk_nvmf_request *req, *tmp;
608 	struct spdk_nvmf_subsystem_poll_group *sgroup;
609 	int rc;
610 
611 	if (subsystem->id >= group->num_sgroups) {
612 		return -1;
613 	}
614 
615 	sgroup = &group->sgroups[subsystem->id];
616 
617 	assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED);
618 
619 	rc = poll_group_update_subsystem(group, subsystem);
620 	if (rc) {
621 		return rc;
622 	}
623 
624 	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
625 
626 	/* Release all queued requests */
627 	TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
628 		TAILQ_REMOVE(&sgroup->queued, req, link);
629 		spdk_nvmf_request_exec(req);
630 	}
631 
632 	return 0;
633 }
634 
/* Register the NVMe-oF trace object and the descriptions for each trace
 * point used by the nvmf library and its RDMA transport. The string names
 * are part of the trace output format and must not change. */
SPDK_TRACE_REGISTER_FN(nvmf_trace)
{
	spdk_trace_register_object(OBJECT_NVMF_IO, 'r');
	spdk_trace_register_description("NVMF_IO_START", "", TRACE_NVMF_IO_START,
					OWNER_NONE, OBJECT_NVMF_IO, 1, 0, 0, "");
	spdk_trace_register_description("NVMF_RDMA_READ_START", "", TRACE_RDMA_READ_START,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_RDMA_WRITE_START", "", TRACE_RDMA_WRITE_START,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_RDMA_READ_COMPLETE", "", TRACE_RDMA_READ_COMPLETE,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_RDMA_WRITE_COMPLETE", "", TRACE_RDMA_WRITE_COMPLETE,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_LIB_READ_START", "", TRACE_NVMF_LIB_READ_START,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_LIB_WRITE_START", "", TRACE_NVMF_LIB_WRITE_START,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_LIB_COMPLETE", "", TRACE_NVMF_LIB_COMPLETE,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
	spdk_trace_register_description("NVMF_IO_COMPLETION_DONE", "", TRACE_NVMF_IO_COMPLETE,
					OWNER_NONE, OBJECT_NVMF_IO, 0, 0, 0, "");
}
657