xref: /spdk/lib/bdev/bdev.c (revision f869197b76ff6981e901b6d9a05789e1b993494a)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/bdev.h"
10 
11 #include "spdk/config.h"
12 #include "spdk/env.h"
13 #include "spdk/thread.h"
14 #include "spdk/likely.h"
15 #include "spdk/queue.h"
16 #include "spdk/nvme_spec.h"
17 #include "spdk/scsi_spec.h"
18 #include "spdk/notify.h"
19 #include "spdk/util.h"
20 #include "spdk/trace.h"
21 #include "spdk/dma.h"
22 
23 #include "spdk/bdev_module.h"
24 #include "spdk/log.h"
25 #include "spdk/string.h"
26 
27 #include "bdev_internal.h"
28 #include "spdk_internal/trace_defs.h"
29 
30 #ifdef SPDK_CONFIG_VTUNE
31 #include "ittnotify.h"
32 #include "ittnotify_types.h"
33 int __itt_init_ittlib(const char *, __itt_group_id);
34 #endif
35 
36 #define SPDK_BDEV_IO_POOL_SIZE			(64 * 1024 - 1)
37 #define SPDK_BDEV_IO_CACHE_SIZE			256
38 #define SPDK_BDEV_AUTO_EXAMINE			true
39 #define BUF_SMALL_POOL_SIZE			8191
40 #define BUF_LARGE_POOL_SIZE			1023
41 #define NOMEM_THRESHOLD_COUNT			8
42 
43 #define SPDK_BDEV_QOS_TIMESLICE_IN_USEC		1000
44 #define SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE	1
45 #define SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE	512
46 #define SPDK_BDEV_QOS_MIN_IOS_PER_SEC		1000
47 #define SPDK_BDEV_QOS_MIN_BYTES_PER_SEC		(1024 * 1024)
48 #define SPDK_BDEV_QOS_LIMIT_NOT_DEFINED		UINT64_MAX
49 #define SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC	1000
50 
51 #define SPDK_BDEV_POOL_ALIGNMENT 512
52 
53 /* The maximum number of children requests for a UNMAP or WRITE ZEROES command
54  * when splitting into children requests at a time.
55  */
56 #define SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS (8)
57 
58 static const char *qos_rpc_type[] = {"rw_ios_per_sec",
59 				     "rw_mbytes_per_sec", "r_mbytes_per_sec", "w_mbytes_per_sec"
60 				    };
61 
62 TAILQ_HEAD(spdk_bdev_list, spdk_bdev);
63 
64 RB_HEAD(bdev_name_tree, spdk_bdev_name);
65 
66 static int
67 bdev_name_cmp(struct spdk_bdev_name *name1, struct spdk_bdev_name *name2)
68 {
69 	return strcmp(name1->name, name2->name);
70 }
71 
72 RB_GENERATE_STATIC(bdev_name_tree, spdk_bdev_name, node, bdev_name_cmp);
73 
74 struct spdk_bdev_mgr {
75 	struct spdk_mempool *bdev_io_pool;
76 
77 	struct spdk_mempool *buf_small_pool;
78 	struct spdk_mempool *buf_large_pool;
79 
80 	void *zero_buffer;
81 
82 	TAILQ_HEAD(bdev_module_list, spdk_bdev_module) bdev_modules;
83 
84 	struct spdk_bdev_list bdevs;
85 	struct bdev_name_tree bdev_names;
86 
87 	bool init_complete;
88 	bool module_init_complete;
89 
90 	pthread_mutex_t mutex;
91 
92 #ifdef SPDK_CONFIG_VTUNE
93 	__itt_domain	*domain;
94 #endif
95 };
96 
97 static struct spdk_bdev_mgr g_bdev_mgr = {
98 	.bdev_modules = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdev_modules),
99 	.bdevs = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdevs),
100 	.bdev_names = RB_INITIALIZER(g_bdev_mgr.bdev_names),
101 	.init_complete = false,
102 	.module_init_complete = false,
103 	.mutex = PTHREAD_MUTEX_INITIALIZER,
104 };
105 
106 typedef void (*lock_range_cb)(void *ctx, int status);
107 
108 typedef void (*bdev_copy_bounce_buffer_cpl)(void *ctx, int rc);
109 
110 struct lba_range {
111 	uint64_t			offset;
112 	uint64_t			length;
113 	void				*locked_ctx;
114 	struct spdk_bdev_channel	*owner_ch;
115 	TAILQ_ENTRY(lba_range)		tailq;
116 };
117 
118 static struct spdk_bdev_opts	g_bdev_opts = {
119 	.bdev_io_pool_size = SPDK_BDEV_IO_POOL_SIZE,
120 	.bdev_io_cache_size = SPDK_BDEV_IO_CACHE_SIZE,
121 	.bdev_auto_examine = SPDK_BDEV_AUTO_EXAMINE,
122 	.small_buf_pool_size = BUF_SMALL_POOL_SIZE,
123 	.large_buf_pool_size = BUF_LARGE_POOL_SIZE,
124 };
125 
126 static spdk_bdev_init_cb	g_init_cb_fn = NULL;
127 static void			*g_init_cb_arg = NULL;
128 
129 static spdk_bdev_fini_cb	g_fini_cb_fn = NULL;
130 static void			*g_fini_cb_arg = NULL;
131 static struct spdk_thread	*g_fini_thread = NULL;
132 
133 struct spdk_bdev_qos_limit {
134 	/** IOs or bytes allowed per second (i.e., 1s). */
135 	uint64_t limit;
136 
137 	/** Remaining IOs or bytes allowed in current timeslice (e.g., 1ms).
138 	 *  For remaining bytes, allowed to run negative if an I/O is submitted when
139 	 *  some bytes are remaining, but the I/O is bigger than that amount. The
140 	 *  excess will be deducted from the next timeslice.
141 	 */
142 	int64_t remaining_this_timeslice;
143 
144 	/** Minimum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
145 	uint32_t min_per_timeslice;
146 
147 	/** Maximum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
148 	uint32_t max_per_timeslice;
149 
150 	/** Function to check whether to queue the IO. */
151 	bool (*queue_io)(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
152 
153 	/** Function to update for the submitted IO. */
154 	void (*update_quota)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
155 };
156 
157 struct spdk_bdev_qos {
158 	/** Types of structure of rate limits. */
159 	struct spdk_bdev_qos_limit rate_limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
160 
161 	/** The channel that all I/O are funneled through. */
162 	struct spdk_bdev_channel *ch;
163 
164 	/** The thread on which the poller is running. */
165 	struct spdk_thread *thread;
166 
167 	/** Queue of I/O waiting to be issued. */
168 	bdev_io_tailq_t queued;
169 
170 	/** Size of a timeslice in tsc ticks. */
171 	uint64_t timeslice_size;
172 
173 	/** Timestamp of start of last timeslice. */
174 	uint64_t last_timeslice;
175 
176 	/** Poller that processes queued I/O commands each time slice. */
177 	struct spdk_poller *poller;
178 };
179 
180 struct spdk_bdev_mgmt_channel {
181 	bdev_io_stailq_t need_buf_small;
182 	bdev_io_stailq_t need_buf_large;
183 
184 	/*
185 	 * Each thread keeps a cache of bdev_io - this allows
186 	 *  bdev threads which are *not* DPDK threads to still
187 	 *  benefit from a per-thread bdev_io cache.  Without
188 	 *  this, non-DPDK threads fetching from the mempool
189 	 *  incur a cmpxchg on get and put.
190 	 */
191 	bdev_io_stailq_t per_thread_cache;
192 	uint32_t	per_thread_cache_count;
193 	uint32_t	bdev_io_cache_size;
194 
195 	TAILQ_HEAD(, spdk_bdev_shared_resource)	shared_resources;
196 	TAILQ_HEAD(, spdk_bdev_io_wait_entry)	io_wait_queue;
197 };
198 
199 /*
200  * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
201  * will queue here their IO that awaits retry. It makes it possible to retry sending
202  * IO to one bdev after IO from other bdev completes.
203  */
204 struct spdk_bdev_shared_resource {
205 	/* The bdev management channel */
206 	struct spdk_bdev_mgmt_channel *mgmt_ch;
207 
208 	/*
209 	 * Count of I/O submitted to bdev module and waiting for completion.
210 	 * Incremented before submit_request() is called on an spdk_bdev_io.
211 	 */
212 	uint64_t		io_outstanding;
213 
214 	/*
215 	 * Queue of IO awaiting retry because of a previous NOMEM status returned
216 	 *  on this channel.
217 	 */
218 	bdev_io_tailq_t		nomem_io;
219 
220 	/*
221 	 * Threshold which io_outstanding must drop to before retrying nomem_io.
222 	 */
223 	uint64_t		nomem_threshold;
224 
225 	/* I/O channel allocated by a bdev module */
226 	struct spdk_io_channel	*shared_ch;
227 
228 	/* Refcount of bdev channels using this resource */
229 	uint32_t		ref;
230 
231 	TAILQ_ENTRY(spdk_bdev_shared_resource) link;
232 };
233 
234 #define BDEV_CH_RESET_IN_PROGRESS	(1 << 0)
235 #define BDEV_CH_QOS_ENABLED		(1 << 1)
236 
237 struct spdk_bdev_channel {
238 	struct spdk_bdev	*bdev;
239 
240 	/* The channel for the underlying device */
241 	struct spdk_io_channel	*channel;
242 
243 	/* Per io_device per thread data */
244 	struct spdk_bdev_shared_resource *shared_resource;
245 
246 	struct spdk_bdev_io_stat stat;
247 
248 	/*
249 	 * Count of I/O submitted to the underlying dev module through this channel
250 	 * and waiting for completion.
251 	 */
252 	uint64_t		io_outstanding;
253 
254 	/*
255 	 * List of all submitted I/Os including I/O that are generated via splitting.
256 	 */
257 	bdev_io_tailq_t		io_submitted;
258 
259 	/*
260 	 * List of spdk_bdev_io that are currently queued because they write to a locked
261 	 * LBA range.
262 	 */
263 	bdev_io_tailq_t		io_locked;
264 
265 	uint32_t		flags;
266 
267 	struct spdk_histogram_data *histogram;
268 
269 #ifdef SPDK_CONFIG_VTUNE
270 	uint64_t		start_tsc;
271 	uint64_t		interval_tsc;
272 	__itt_string_handle	*handle;
273 	struct spdk_bdev_io_stat prev_stat;
274 #endif
275 
276 	bdev_io_tailq_t		queued_resets;
277 
278 	lba_range_tailq_t	locked_ranges;
279 };
280 
281 struct media_event_entry {
282 	struct spdk_bdev_media_event	event;
283 	TAILQ_ENTRY(media_event_entry)	tailq;
284 };
285 
286 #define MEDIA_EVENT_POOL_SIZE 64
287 
288 struct spdk_bdev_desc {
289 	struct spdk_bdev		*bdev;
290 	struct spdk_thread		*thread;
291 	struct {
292 		spdk_bdev_event_cb_t event_fn;
293 		void *ctx;
294 	}				callback;
295 	bool				closed;
296 	bool				write;
297 	bool				memory_domains_supported;
298 	pthread_mutex_t			mutex;
299 	uint32_t			refs;
300 	TAILQ_HEAD(, media_event_entry)	pending_media_events;
301 	TAILQ_HEAD(, media_event_entry)	free_media_events;
302 	struct media_event_entry	*media_events_buffer;
303 	TAILQ_ENTRY(spdk_bdev_desc)	link;
304 
305 	uint64_t		timeout_in_sec;
306 	spdk_bdev_io_timeout_cb	cb_fn;
307 	void			*cb_arg;
308 	struct spdk_poller	*io_timeout_poller;
309 };
310 
311 struct spdk_bdev_iostat_ctx {
312 	struct spdk_bdev_io_stat *stat;
313 	spdk_bdev_get_device_stat_cb cb;
314 	void *cb_arg;
315 };
316 
317 struct set_qos_limit_ctx {
318 	void (*cb_fn)(void *cb_arg, int status);
319 	void *cb_arg;
320 	struct spdk_bdev *bdev;
321 };
322 
323 #define __bdev_to_io_dev(bdev)		(((char *)bdev) + 1)
324 #define __bdev_from_io_dev(io_dev)	((struct spdk_bdev *)(((char *)io_dev) - 1))
325 
326 static inline void bdev_io_complete(void *ctx);
327 
328 static void bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
329 static void bdev_write_zero_buffer_next(void *_bdev_io);
330 
331 static void bdev_enable_qos_msg(struct spdk_io_channel_iter *i);
332 static void bdev_enable_qos_done(struct spdk_io_channel_iter *i, int status);
333 
334 static int bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
335 				     struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
336 				     uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
337 				     struct spdk_bdev_ext_io_opts *opts, bool copy_opts);
338 static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
339 				      struct iovec *iov, int iovcnt, void *md_buf,
340 				      uint64_t offset_blocks, uint64_t num_blocks,
341 				      spdk_bdev_io_completion_cb cb, void *cb_arg,
342 				      struct spdk_bdev_ext_io_opts *opts, bool copy_opts);
343 
344 static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
345 			       uint64_t offset, uint64_t length,
346 			       lock_range_cb cb_fn, void *cb_arg);
347 
348 static int bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
349 				 uint64_t offset, uint64_t length,
350 				 lock_range_cb cb_fn, void *cb_arg);
351 
352 static inline void bdev_io_complete(void *ctx);
353 
354 static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
355 static bool bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort);
356 
357 void
358 spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size)
359 {
360 	if (!opts) {
361 		SPDK_ERRLOG("opts should not be NULL\n");
362 		return;
363 	}
364 
365 	if (!opts_size) {
366 		SPDK_ERRLOG("opts_size should not be zero value\n");
367 		return;
368 	}
369 
370 	opts->opts_size = opts_size;
371 
372 #define SET_FIELD(field) \
373 	if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts_size) { \
374 		opts->field = g_bdev_opts.field; \
375 	} \
376 
377 	SET_FIELD(bdev_io_pool_size);
378 	SET_FIELD(bdev_io_cache_size);
379 	SET_FIELD(bdev_auto_examine);
380 	SET_FIELD(small_buf_pool_size);
381 	SET_FIELD(large_buf_pool_size);
382 
383 	/* Do not remove this statement, you should always update this statement when you adding a new field,
384 	 * and do not forget to add the SET_FIELD statement for your added field. */
385 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_opts) == 32, "Incorrect size");
386 
387 #undef SET_FIELD
388 }
389 
390 int
391 spdk_bdev_set_opts(struct spdk_bdev_opts *opts)
392 {
393 	uint32_t min_pool_size;
394 
395 	if (!opts) {
396 		SPDK_ERRLOG("opts cannot be NULL\n");
397 		return -1;
398 	}
399 
400 	if (!opts->opts_size) {
401 		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
402 		return -1;
403 	}
404 
405 	/*
406 	 * Add 1 to the thread count to account for the extra mgmt_ch that gets created during subsystem
407 	 *  initialization.  A second mgmt_ch will be created on the same thread when the application starts
408 	 *  but before the deferred put_io_channel event is executed for the first mgmt_ch.
409 	 */
410 	min_pool_size = opts->bdev_io_cache_size * (spdk_thread_get_count() + 1);
411 	if (opts->bdev_io_pool_size < min_pool_size) {
412 		SPDK_ERRLOG("bdev_io_pool_size %" PRIu32 " is not compatible with bdev_io_cache_size %" PRIu32
413 			    " and %" PRIu32 " threads\n", opts->bdev_io_pool_size, opts->bdev_io_cache_size,
414 			    spdk_thread_get_count());
415 		SPDK_ERRLOG("bdev_io_pool_size must be at least %" PRIu32 "\n", min_pool_size);
416 		return -1;
417 	}
418 
419 	if (opts->small_buf_pool_size < BUF_SMALL_POOL_SIZE) {
420 		SPDK_ERRLOG("small_buf_pool_size must be at least %" PRIu32 "\n", BUF_SMALL_POOL_SIZE);
421 		return -1;
422 	}
423 
424 	if (opts->large_buf_pool_size < BUF_LARGE_POOL_SIZE) {
425 		SPDK_ERRLOG("large_buf_pool_size must be at least %" PRIu32 "\n", BUF_LARGE_POOL_SIZE);
426 		return -1;
427 	}
428 
429 #define SET_FIELD(field) \
430         if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
431                 g_bdev_opts.field = opts->field; \
432         } \
433 
434 	SET_FIELD(bdev_io_pool_size);
435 	SET_FIELD(bdev_io_cache_size);
436 	SET_FIELD(bdev_auto_examine);
437 	SET_FIELD(small_buf_pool_size);
438 	SET_FIELD(large_buf_pool_size);
439 
440 	g_bdev_opts.opts_size = opts->opts_size;
441 
442 #undef SET_FIELD
443 
444 	return 0;
445 }
446 
447 static struct spdk_bdev *
448 bdev_get_by_name(const char *bdev_name)
449 {
450 	struct spdk_bdev_name find;
451 	struct spdk_bdev_name *res;
452 
453 	find.name = (char *)bdev_name;
454 	res = RB_FIND(bdev_name_tree, &g_bdev_mgr.bdev_names, &find);
455 	if (res != NULL) {
456 		return res->bdev;
457 	}
458 
459 	return NULL;
460 }
461 
462 struct spdk_bdev *
463 spdk_bdev_get_by_name(const char *bdev_name)
464 {
465 	struct spdk_bdev *bdev;
466 
467 	pthread_mutex_lock(&g_bdev_mgr.mutex);
468 	bdev = bdev_get_by_name(bdev_name);
469 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
470 
471 	return bdev;
472 }
473 
474 struct spdk_bdev_wait_for_examine_ctx {
475 	struct spdk_poller              *poller;
476 	spdk_bdev_wait_for_examine_cb	cb_fn;
477 	void				*cb_arg;
478 };
479 
480 static bool bdev_module_all_actions_completed(void);
481 
482 static int
483 bdev_wait_for_examine_cb(void *arg)
484 {
485 	struct spdk_bdev_wait_for_examine_ctx *ctx = arg;
486 
487 	if (!bdev_module_all_actions_completed()) {
488 		return SPDK_POLLER_IDLE;
489 	}
490 
491 	spdk_poller_unregister(&ctx->poller);
492 	ctx->cb_fn(ctx->cb_arg);
493 	free(ctx);
494 
495 	return SPDK_POLLER_BUSY;
496 }
497 
498 int
499 spdk_bdev_wait_for_examine(spdk_bdev_wait_for_examine_cb cb_fn, void *cb_arg)
500 {
501 	struct spdk_bdev_wait_for_examine_ctx *ctx;
502 
503 	ctx = calloc(1, sizeof(*ctx));
504 	if (ctx == NULL) {
505 		return -ENOMEM;
506 	}
507 	ctx->cb_fn = cb_fn;
508 	ctx->cb_arg = cb_arg;
509 	ctx->poller = SPDK_POLLER_REGISTER(bdev_wait_for_examine_cb, ctx, 0);
510 
511 	return 0;
512 }
513 
514 struct spdk_bdev_examine_item {
515 	char *name;
516 	TAILQ_ENTRY(spdk_bdev_examine_item) link;
517 };
518 
519 TAILQ_HEAD(spdk_bdev_examine_allowlist, spdk_bdev_examine_item);
520 
521 struct spdk_bdev_examine_allowlist g_bdev_examine_allowlist = TAILQ_HEAD_INITIALIZER(
522 			g_bdev_examine_allowlist);
523 
524 static inline bool
525 bdev_examine_allowlist_check(const char *name)
526 {
527 	struct spdk_bdev_examine_item *item;
528 	TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
529 		if (strcmp(name, item->name) == 0) {
530 			return true;
531 		}
532 	}
533 	return false;
534 }
535 
536 static inline void
537 bdev_examine_allowlist_free(void)
538 {
539 	struct spdk_bdev_examine_item *item;
540 	while (!TAILQ_EMPTY(&g_bdev_examine_allowlist)) {
541 		item = TAILQ_FIRST(&g_bdev_examine_allowlist);
542 		TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
543 		free(item->name);
544 		free(item);
545 	}
546 }
547 
548 static inline bool
549 bdev_in_examine_allowlist(struct spdk_bdev *bdev)
550 {
551 	struct spdk_bdev_alias *tmp;
552 	if (bdev_examine_allowlist_check(bdev->name)) {
553 		return true;
554 	}
555 	TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
556 		if (bdev_examine_allowlist_check(tmp->alias.name)) {
557 			return true;
558 		}
559 	}
560 	return false;
561 }
562 
563 static inline bool
564 bdev_ok_to_examine(struct spdk_bdev *bdev)
565 {
566 	if (g_bdev_opts.bdev_auto_examine) {
567 		return true;
568 	} else {
569 		return bdev_in_examine_allowlist(bdev);
570 	}
571 }
572 
573 static void
574 bdev_examine(struct spdk_bdev *bdev)
575 {
576 	struct spdk_bdev_module *module;
577 	uint32_t action;
578 
579 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
580 		if (module->examine_config && bdev_ok_to_examine(bdev)) {
581 			action = module->internal.action_in_progress;
582 			module->internal.action_in_progress++;
583 			module->examine_config(bdev);
584 			if (action != module->internal.action_in_progress) {
585 				SPDK_ERRLOG("examine_config for module %s did not call spdk_bdev_module_examine_done()\n",
586 					    module->name);
587 			}
588 		}
589 	}
590 
591 	if (bdev->internal.claim_module && bdev_ok_to_examine(bdev)) {
592 		if (bdev->internal.claim_module->examine_disk) {
593 			bdev->internal.claim_module->internal.action_in_progress++;
594 			bdev->internal.claim_module->examine_disk(bdev);
595 		}
596 		return;
597 	}
598 
599 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
600 		if (module->examine_disk && bdev_ok_to_examine(bdev)) {
601 			module->internal.action_in_progress++;
602 			module->examine_disk(bdev);
603 		}
604 	}
605 }
606 
607 int
608 spdk_bdev_examine(const char *name)
609 {
610 	struct spdk_bdev *bdev;
611 	struct spdk_bdev_examine_item *item;
612 
613 	if (g_bdev_opts.bdev_auto_examine) {
614 		SPDK_ERRLOG("Manual examine is not allowed if auto examine is enabled");
615 		return -EINVAL;
616 	}
617 
618 	if (bdev_examine_allowlist_check(name)) {
619 		SPDK_ERRLOG("Duplicate bdev name for manual examine: %s\n", name);
620 		return -EEXIST;
621 	}
622 
623 	item = calloc(1, sizeof(*item));
624 	if (!item) {
625 		return -ENOMEM;
626 	}
627 	item->name = strdup(name);
628 	if (!item->name) {
629 		free(item);
630 		return -ENOMEM;
631 	}
632 	TAILQ_INSERT_TAIL(&g_bdev_examine_allowlist, item, link);
633 
634 	bdev = spdk_bdev_get_by_name(name);
635 	if (bdev) {
636 		bdev_examine(bdev);
637 	}
638 	return 0;
639 }
640 
641 static inline void
642 bdev_examine_allowlist_config_json(struct spdk_json_write_ctx *w)
643 {
644 	struct spdk_bdev_examine_item *item;
645 	TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
646 		spdk_json_write_object_begin(w);
647 		spdk_json_write_named_string(w, "method", "bdev_examine");
648 		spdk_json_write_named_object_begin(w, "params");
649 		spdk_json_write_named_string(w, "name", item->name);
650 		spdk_json_write_object_end(w);
651 		spdk_json_write_object_end(w);
652 	}
653 }
654 
655 struct spdk_bdev *
656 spdk_bdev_first(void)
657 {
658 	struct spdk_bdev *bdev;
659 
660 	bdev = TAILQ_FIRST(&g_bdev_mgr.bdevs);
661 	if (bdev) {
662 		SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
663 	}
664 
665 	return bdev;
666 }
667 
668 struct spdk_bdev *
669 spdk_bdev_next(struct spdk_bdev *prev)
670 {
671 	struct spdk_bdev *bdev;
672 
673 	bdev = TAILQ_NEXT(prev, internal.link);
674 	if (bdev) {
675 		SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
676 	}
677 
678 	return bdev;
679 }
680 
681 static struct spdk_bdev *
682 _bdev_next_leaf(struct spdk_bdev *bdev)
683 {
684 	while (bdev != NULL) {
685 		if (bdev->internal.claim_module == NULL) {
686 			return bdev;
687 		} else {
688 			bdev = TAILQ_NEXT(bdev, internal.link);
689 		}
690 	}
691 
692 	return bdev;
693 }
694 
695 struct spdk_bdev *
696 spdk_bdev_first_leaf(void)
697 {
698 	struct spdk_bdev *bdev;
699 
700 	bdev = _bdev_next_leaf(TAILQ_FIRST(&g_bdev_mgr.bdevs));
701 
702 	if (bdev) {
703 		SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
704 	}
705 
706 	return bdev;
707 }
708 
709 struct spdk_bdev *
710 spdk_bdev_next_leaf(struct spdk_bdev *prev)
711 {
712 	struct spdk_bdev *bdev;
713 
714 	bdev = _bdev_next_leaf(TAILQ_NEXT(prev, internal.link));
715 
716 	if (bdev) {
717 		SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
718 	}
719 
720 	return bdev;
721 }
722 
723 static inline bool
724 bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
725 {
726 	return bdev_io->internal.ext_opts && bdev_io->internal.ext_opts->memory_domain;
727 }
728 
729 void
730 spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
731 {
732 	struct iovec *iovs;
733 
734 	if (bdev_io->u.bdev.iovs == NULL) {
735 		bdev_io->u.bdev.iovs = &bdev_io->iov;
736 		bdev_io->u.bdev.iovcnt = 1;
737 	}
738 
739 	iovs = bdev_io->u.bdev.iovs;
740 
741 	assert(iovs != NULL);
742 	assert(bdev_io->u.bdev.iovcnt >= 1);
743 
744 	iovs[0].iov_base = buf;
745 	iovs[0].iov_len = len;
746 }
747 
748 void
749 spdk_bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
750 {
751 	assert((len / spdk_bdev_get_md_size(bdev_io->bdev)) >= bdev_io->u.bdev.num_blocks);
752 	bdev_io->u.bdev.md_buf = md_buf;
753 }
754 
755 static bool
756 _is_buf_allocated(const struct iovec *iovs)
757 {
758 	if (iovs == NULL) {
759 		return false;
760 	}
761 
762 	return iovs[0].iov_base != NULL;
763 }
764 
765 static bool
766 _are_iovs_aligned(struct iovec *iovs, int iovcnt, uint32_t alignment)
767 {
768 	int i;
769 	uintptr_t iov_base;
770 
771 	if (spdk_likely(alignment == 1)) {
772 		return true;
773 	}
774 
775 	for (i = 0; i < iovcnt; i++) {
776 		iov_base = (uintptr_t)iovs[i].iov_base;
777 		if ((iov_base & (alignment - 1)) != 0) {
778 			return false;
779 		}
780 	}
781 
782 	return true;
783 }
784 
785 static void
786 bdev_io_get_buf_complete(struct spdk_bdev_io *bdev_io, bool status)
787 {
788 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
789 	void *buf;
790 
791 	if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
792 		buf = bdev_io->internal.buf;
793 		bdev_io->internal.buf = NULL;
794 		bdev_io->internal.get_aux_buf_cb(ch, bdev_io, buf);
795 		bdev_io->internal.get_aux_buf_cb = NULL;
796 	} else {
797 		assert(bdev_io->internal.get_buf_cb != NULL);
798 		bdev_io->internal.get_buf_cb(ch, bdev_io, status);
799 		bdev_io->internal.get_buf_cb = NULL;
800 	}
801 }
802 
803 static void
804 _bdev_io_pull_buffer_cpl(void *ctx, int rc)
805 {
806 	struct spdk_bdev_io *bdev_io = ctx;
807 
808 	if (rc) {
809 		SPDK_ERRLOG("Set bounce buffer failed with rc %d\n", rc);
810 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
811 	}
812 	bdev_io_get_buf_complete(bdev_io, !rc);
813 }
814 
815 static void
816 _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
817 {
818 	int rc = 0;
819 
820 	/* save original md_buf */
821 	bdev_io->internal.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
822 	bdev_io->internal.orig_md_iov.iov_len = len;
823 	bdev_io->internal.bounce_md_iov.iov_base = md_buf;
824 	bdev_io->internal.bounce_md_iov.iov_len = len;
825 	/* set bounce md_buf */
826 	bdev_io->u.bdev.md_buf = md_buf;
827 
828 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
829 		if (bdev_io_use_memory_domain(bdev_io)) {
830 			rc = spdk_memory_domain_pull_data(bdev_io->internal.ext_opts->memory_domain,
831 							  bdev_io->internal.ext_opts->memory_domain_ctx,
832 							  &bdev_io->internal.orig_md_iov, 1,
833 							  &bdev_io->internal.bounce_md_iov, 1,
834 							  bdev_io->internal.data_transfer_cpl,
835 							  bdev_io);
836 			if (rc == 0) {
837 				/* Continue to submit IO in completion callback */
838 				return;
839 			}
840 			SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
841 				    spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain), rc);
842 		} else {
843 			memcpy(md_buf, bdev_io->internal.orig_md_iov.iov_base, bdev_io->internal.orig_md_iov.iov_len);
844 		}
845 	}
846 
847 	assert(bdev_io->internal.data_transfer_cpl);
848 	bdev_io->internal.data_transfer_cpl(bdev_io, rc);
849 }
850 
851 static void
852 _bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io)
853 {
854 	struct spdk_bdev *bdev = bdev_io->bdev;
855 	uint64_t md_len;
856 	void *buf;
857 
858 	if (spdk_bdev_is_md_separate(bdev)) {
859 		buf = (char *)bdev_io->u.bdev.iovs[0].iov_base + bdev_io->u.bdev.iovs[0].iov_len;
860 		md_len = bdev_io->u.bdev.num_blocks * bdev->md_len;
861 
862 		assert(((uintptr_t)buf & (spdk_bdev_get_buf_align(bdev) - 1)) == 0);
863 
864 		if (bdev_io->u.bdev.md_buf != NULL) {
865 			_bdev_io_pull_bounce_md_buf(bdev_io, buf, md_len);
866 			return;
867 		} else {
868 			spdk_bdev_io_set_md_buf(bdev_io, buf, md_len);
869 		}
870 	}
871 
872 	bdev_io_get_buf_complete(bdev_io, true);
873 }
874 
875 static void
876 _bdev_io_pull_bounce_data_buf_done(void *ctx, int rc)
877 {
878 	struct spdk_bdev_io *bdev_io = ctx;
879 
880 	if (rc) {
881 		SPDK_ERRLOG("Failed to get data buffer\n");
882 		assert(bdev_io->internal.data_transfer_cpl);
883 		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
884 		return;
885 	}
886 
887 	_bdev_io_set_md_buf(bdev_io);
888 }
889 
890 static void
891 _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
892 			      bdev_copy_bounce_buffer_cpl cpl_cb)
893 {
894 	int rc = 0;
895 
896 	bdev_io->internal.data_transfer_cpl = cpl_cb;
897 	/* save original iovec */
898 	bdev_io->internal.orig_iovs = bdev_io->u.bdev.iovs;
899 	bdev_io->internal.orig_iovcnt = bdev_io->u.bdev.iovcnt;
900 	/* set bounce iov */
901 	bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_iov;
902 	bdev_io->u.bdev.iovcnt = 1;
903 	/* set bounce buffer for this operation */
904 	bdev_io->u.bdev.iovs[0].iov_base = buf;
905 	bdev_io->u.bdev.iovs[0].iov_len = len;
906 	/* if this is write path, copy data from original buffer to bounce buffer */
907 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
908 		if (bdev_io_use_memory_domain(bdev_io)) {
909 			rc = spdk_memory_domain_pull_data(bdev_io->internal.ext_opts->memory_domain,
910 							  bdev_io->internal.ext_opts->memory_domain_ctx,
911 							  bdev_io->internal.orig_iovs,
912 							  (uint32_t) bdev_io->internal.orig_iovcnt,
913 							  bdev_io->u.bdev.iovs, 1,
914 							  _bdev_io_pull_bounce_data_buf_done,
915 							  bdev_io);
916 			if (rc == 0) {
917 				/* Continue to submit IO in completion callback */
918 				return;
919 			}
920 			SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
921 				    spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain));
922 		} else {
923 			spdk_copy_iovs_to_buf(buf, len, bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovcnt);
924 		}
925 	}
926 
927 	_bdev_io_pull_bounce_data_buf_done(bdev_io, rc);
928 }
929 
930 static void
931 _bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t len)
932 {
933 	struct spdk_bdev *bdev = bdev_io->bdev;
934 	bool buf_allocated;
935 	uint64_t alignment;
936 	void *aligned_buf;
937 
938 	bdev_io->internal.buf = buf;
939 
940 	if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
941 		bdev_io_get_buf_complete(bdev_io, true);
942 		return;
943 	}
944 
945 	alignment = spdk_bdev_get_buf_align(bdev);
946 	buf_allocated = _is_buf_allocated(bdev_io->u.bdev.iovs);
947 	aligned_buf = (void *)(((uintptr_t)buf + (alignment - 1)) & ~(alignment - 1));
948 
949 	if (buf_allocated) {
950 		_bdev_io_pull_bounce_data_buf(bdev_io, aligned_buf, len, _bdev_io_pull_buffer_cpl);
951 		/* Continue in completion callback */
952 		return;
953 	} else {
954 		spdk_bdev_io_set_buf(bdev_io, aligned_buf, len);
955 	}
956 
957 	_bdev_io_set_md_buf(bdev_io);
958 }
959 
960 static void
961 _bdev_io_put_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t buf_len)
962 {
963 	struct spdk_bdev *bdev = bdev_io->bdev;
964 	struct spdk_mempool *pool;
965 	struct spdk_bdev_io *tmp;
966 	bdev_io_stailq_t *stailq;
967 	struct spdk_bdev_mgmt_channel *ch;
968 	uint64_t md_len, alignment;
969 
970 	md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
971 	alignment = spdk_bdev_get_buf_align(bdev);
972 	ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
973 
974 	if (buf_len + alignment + md_len <= SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_SMALL_BUF_MAX_SIZE) +
975 	    SPDK_BDEV_POOL_ALIGNMENT) {
976 		pool = g_bdev_mgr.buf_small_pool;
977 		stailq = &ch->need_buf_small;
978 	} else {
979 		pool = g_bdev_mgr.buf_large_pool;
980 		stailq = &ch->need_buf_large;
981 	}
982 
983 	if (STAILQ_EMPTY(stailq)) {
984 		spdk_mempool_put(pool, buf);
985 	} else {
986 		tmp = STAILQ_FIRST(stailq);
987 		STAILQ_REMOVE_HEAD(stailq, internal.buf_link);
988 		_bdev_io_set_buf(tmp, buf, tmp->internal.buf_len);
989 	}
990 }
991 
992 static void
993 bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
994 {
995 	assert(bdev_io->internal.buf != NULL);
996 	_bdev_io_put_buf(bdev_io, bdev_io->internal.buf, bdev_io->internal.buf_len);
997 	bdev_io->internal.buf = NULL;
998 }
999 
1000 void
1001 spdk_bdev_io_put_aux_buf(struct spdk_bdev_io *bdev_io, void *buf)
1002 {
1003 	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1004 
1005 	assert(buf != NULL);
1006 	_bdev_io_put_buf(bdev_io, buf, len);
1007 }
1008 
1009 static void
1010 bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
1011 {
1012 	struct spdk_bdev *bdev = bdev_ch->bdev;
1013 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1014 	struct spdk_bdev_io *bdev_io;
1015 
1016 	if (shared_resource->io_outstanding > shared_resource->nomem_threshold) {
1017 		/*
1018 		 * Allow some more I/O to complete before retrying the nomem_io queue.
1019 		 *  Some drivers (such as nvme) cannot immediately take a new I/O in
1020 		 *  the context of a completion, because the resources for the I/O are
1021 		 *  not released until control returns to the bdev poller.  Also, we
1022 		 *  may require several small I/O to complete before a larger I/O
1023 		 *  (that requires splitting) can be submitted.
1024 		 */
1025 		return;
1026 	}
1027 
1028 	while (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1029 		bdev_io = TAILQ_FIRST(&shared_resource->nomem_io);
1030 		TAILQ_REMOVE(&shared_resource->nomem_io, bdev_io, internal.link);
1031 		bdev_io->internal.ch->io_outstanding++;
1032 		shared_resource->io_outstanding++;
1033 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
1034 		bdev_io->internal.error.nvme.cdw0 = 0;
1035 		bdev_io->num_retries++;
1036 		bdev->fn_table->submit_request(spdk_bdev_io_get_io_channel(bdev_io), bdev_io);
1037 		if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
1038 			break;
1039 		}
1040 	}
1041 }
1042 
1043 static inline void
1044 _bdev_io_decrement_outstanding(struct spdk_bdev_channel *bdev_ch,
1045 			       struct spdk_bdev_shared_resource *shared_resource)
1046 {
1047 	assert(bdev_ch->io_outstanding > 0);
1048 	assert(shared_resource->io_outstanding > 0);
1049 	bdev_ch->io_outstanding--;
1050 	shared_resource->io_outstanding--;
1051 }
1052 
1053 static inline bool
1054 _bdev_io_handle_no_mem(struct spdk_bdev_io *bdev_io)
1055 {
1056 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
1057 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1058 
1059 	if (spdk_unlikely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM)) {
1060 		TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
1061 		/*
1062 		 * Wait for some of the outstanding I/O to complete before we
1063 		 *  retry any of the nomem_io.  Normally we will wait for
1064 		 *  NOMEM_THRESHOLD_COUNT I/O to complete but for low queue
1065 		 *  depth channels we will instead wait for half to complete.
1066 		 */
1067 		shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
1068 						   (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
1069 		return true;
1070 	}
1071 
1072 	if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1073 		bdev_ch_retry_io(bdev_ch);
1074 	}
1075 
1076 	return false;
1077 }
1078 
1079 static void
1080 _bdev_io_complete_push_bounce_done(void *ctx, int rc)
1081 {
1082 	struct spdk_bdev_io *bdev_io = ctx;
1083 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
1084 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1085 
1086 	if (rc) {
1087 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1088 	}
1089 	/* We want to free the bounce buffer here since we know we're done with it (as opposed
1090 	 * to waiting for the conditional free of internal.buf in spdk_bdev_free_io()).
1091 	 */
1092 	bdev_io_put_buf(bdev_io);
1093 
1094 	/* Continue with IO completion flow */
1095 	_bdev_io_decrement_outstanding(bdev_ch, shared_resource);
1096 	if (spdk_unlikely(_bdev_io_handle_no_mem(bdev_io))) {
1097 		return;
1098 	}
1099 
1100 	bdev_io_complete(bdev_io);
1101 }
1102 
1103 static inline void
1104 _bdev_io_push_bounce_md_buffer(struct spdk_bdev_io *bdev_io)
1105 {
1106 	int rc = 0;
1107 
1108 	/* do the same for metadata buffer */
1109 	if (spdk_unlikely(bdev_io->internal.orig_md_iov.iov_base != NULL)) {
1110 		assert(spdk_bdev_is_md_separate(bdev_io->bdev));
1111 
1112 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
1113 		    bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
1114 			if (bdev_io_use_memory_domain(bdev_io)) {
1115 				/* If memory domain is used then we need to call async push function */
1116 				rc = spdk_memory_domain_push_data(bdev_io->internal.ext_opts->memory_domain,
1117 								  bdev_io->internal.ext_opts->memory_domain_ctx,
1118 								  &bdev_io->internal.orig_md_iov,
1119 								  (uint32_t)bdev_io->internal.orig_iovcnt,
1120 								  &bdev_io->internal.bounce_md_iov, 1,
1121 								  bdev_io->internal.data_transfer_cpl,
1122 								  bdev_io);
1123 				if (rc == 0) {
1124 					/* Continue IO completion in async callback */
1125 					return;
1126 				}
1127 				SPDK_ERRLOG("Failed to push md to memory domain %s\n",
1128 					    spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain));
1129 			} else {
1130 				memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
1131 				       bdev_io->internal.orig_md_iov.iov_len);
1132 			}
1133 		}
1134 	}
1135 
1136 	assert(bdev_io->internal.data_transfer_cpl);
1137 	bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1138 }
1139 
1140 static void
1141 _bdev_io_push_bounce_data_buffer_done(void *ctx, int rc)
1142 {
1143 	struct spdk_bdev_io *bdev_io = ctx;
1144 
1145 	assert(bdev_io->internal.data_transfer_cpl);
1146 
1147 	if (rc) {
1148 		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1149 		return;
1150 	}
1151 
1152 	/* set original buffer for this io */
1153 	bdev_io->u.bdev.iovcnt = bdev_io->internal.orig_iovcnt;
1154 	bdev_io->u.bdev.iovs = bdev_io->internal.orig_iovs;
1155 	/* disable bouncing buffer for this io */
1156 	bdev_io->internal.orig_iovcnt = 0;
1157 	bdev_io->internal.orig_iovs = NULL;
1158 
1159 	_bdev_io_push_bounce_md_buffer(bdev_io);
1160 }
1161 
1162 static inline void
1163 _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_buffer_cpl cpl_cb)
1164 {
1165 	int rc = 0;
1166 
1167 	bdev_io->internal.data_transfer_cpl = cpl_cb;
1168 
1169 	/* if this is read path, copy data from bounce buffer to original buffer */
1170 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
1171 	    bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
1172 		if (bdev_io_use_memory_domain(bdev_io)) {
1173 			/* If memory domain is used then we need to call async push function */
1174 			rc = spdk_memory_domain_push_data(bdev_io->internal.ext_opts->memory_domain,
1175 							  bdev_io->internal.ext_opts->memory_domain_ctx,
1176 							  bdev_io->internal.orig_iovs,
1177 							  (uint32_t)bdev_io->internal.orig_iovcnt,
1178 							  &bdev_io->internal.bounce_iov, 1,
1179 							  _bdev_io_push_bounce_data_buffer_done,
1180 							  bdev_io);
1181 			if (rc == 0) {
1182 				/* Continue IO completion in async callback */
1183 				return;
1184 			}
1185 			SPDK_ERRLOG("Failed to push data to memory domain %s\n",
1186 				    spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain));
1187 		} else {
1188 			spdk_copy_buf_to_iovs(bdev_io->internal.orig_iovs,
1189 					      bdev_io->internal.orig_iovcnt,
1190 					      bdev_io->internal.bounce_iov.iov_base,
1191 					      bdev_io->internal.bounce_iov.iov_len);
1192 		}
1193 	}
1194 
1195 	_bdev_io_push_bounce_data_buffer_done(bdev_io, rc);
1196 }
1197 
1198 static void
1199 bdev_io_get_buf(struct spdk_bdev_io *bdev_io, uint64_t len)
1200 {
1201 	struct spdk_bdev *bdev = bdev_io->bdev;
1202 	struct spdk_mempool *pool;
1203 	bdev_io_stailq_t *stailq;
1204 	struct spdk_bdev_mgmt_channel *mgmt_ch;
1205 	uint64_t alignment, md_len;
1206 	void *buf;
1207 
1208 	alignment = spdk_bdev_get_buf_align(bdev);
1209 	md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
1210 
1211 	if (len + alignment + md_len > SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_LARGE_BUF_MAX_SIZE) +
1212 	    SPDK_BDEV_POOL_ALIGNMENT) {
1213 		SPDK_ERRLOG("Length + alignment %" PRIu64 " is larger than allowed\n",
1214 			    len + alignment);
1215 		bdev_io_get_buf_complete(bdev_io, false);
1216 		return;
1217 	}
1218 
1219 	mgmt_ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1220 
1221 	bdev_io->internal.buf_len = len;
1222 
1223 	if (len + alignment + md_len <= SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_SMALL_BUF_MAX_SIZE) +
1224 	    SPDK_BDEV_POOL_ALIGNMENT) {
1225 		pool = g_bdev_mgr.buf_small_pool;
1226 		stailq = &mgmt_ch->need_buf_small;
1227 	} else {
1228 		pool = g_bdev_mgr.buf_large_pool;
1229 		stailq = &mgmt_ch->need_buf_large;
1230 	}
1231 
1232 	buf = spdk_mempool_get(pool);
1233 	if (!buf) {
1234 		STAILQ_INSERT_TAIL(stailq, bdev_io, internal.buf_link);
1235 	} else {
1236 		_bdev_io_set_buf(bdev_io, buf, len);
1237 	}
1238 }
1239 
1240 void
1241 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1242 {
1243 	struct spdk_bdev *bdev = bdev_io->bdev;
1244 	uint64_t alignment;
1245 
1246 	assert(cb != NULL);
1247 	bdev_io->internal.get_buf_cb = cb;
1248 
1249 	alignment = spdk_bdev_get_buf_align(bdev);
1250 
1251 	if (_is_buf_allocated(bdev_io->u.bdev.iovs) &&
1252 	    _are_iovs_aligned(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, alignment)) {
1253 		/* Buffer already present and aligned */
1254 		cb(spdk_bdev_io_get_io_channel(bdev_io), bdev_io, true);
1255 		return;
1256 	}
1257 
1258 	bdev_io_get_buf(bdev_io, len);
1259 }
1260 
1261 static void
1262 _bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1263 			      bool success)
1264 {
1265 	if (!success) {
1266 		SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
1267 		bdev_io_complete(bdev_io);
1268 	} else {
1269 		bdev_io_submit(bdev_io);
1270 	}
1271 }
1272 
1273 static void
1274 _bdev_memory_domain_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
1275 			       uint64_t len)
1276 {
1277 	assert(cb != NULL);
1278 	bdev_io->internal.get_buf_cb = cb;
1279 
1280 	bdev_io_get_buf(bdev_io, len);
1281 }
1282 
1283 void
1284 spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
1285 {
1286 	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1287 
1288 	assert(cb != NULL);
1289 	assert(bdev_io->internal.get_aux_buf_cb == NULL);
1290 	bdev_io->internal.get_aux_buf_cb = cb;
1291 	bdev_io_get_buf(bdev_io, len);
1292 }
1293 
1294 static int
1295 bdev_module_get_max_ctx_size(void)
1296 {
1297 	struct spdk_bdev_module *bdev_module;
1298 	int max_bdev_module_size = 0;
1299 
1300 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1301 		if (bdev_module->get_ctx_size && bdev_module->get_ctx_size() > max_bdev_module_size) {
1302 			max_bdev_module_size = bdev_module->get_ctx_size();
1303 		}
1304 	}
1305 
1306 	return max_bdev_module_size;
1307 }
1308 
1309 static void
1310 bdev_qos_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
1311 {
1312 	int i;
1313 	struct spdk_bdev_qos *qos = bdev->internal.qos;
1314 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
1315 
1316 	if (!qos) {
1317 		return;
1318 	}
1319 
1320 	spdk_bdev_get_qos_rate_limits(bdev, limits);
1321 
1322 	spdk_json_write_object_begin(w);
1323 	spdk_json_write_named_string(w, "method", "bdev_set_qos_limit");
1324 
1325 	spdk_json_write_named_object_begin(w, "params");
1326 	spdk_json_write_named_string(w, "name", bdev->name);
1327 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1328 		if (limits[i] > 0) {
1329 			spdk_json_write_named_uint64(w, qos_rpc_type[i], limits[i]);
1330 		}
1331 	}
1332 	spdk_json_write_object_end(w);
1333 
1334 	spdk_json_write_object_end(w);
1335 }
1336 
1337 void
1338 spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
1339 {
1340 	struct spdk_bdev_module *bdev_module;
1341 	struct spdk_bdev *bdev;
1342 
1343 	assert(w != NULL);
1344 
1345 	spdk_json_write_array_begin(w);
1346 
1347 	spdk_json_write_object_begin(w);
1348 	spdk_json_write_named_string(w, "method", "bdev_set_options");
1349 	spdk_json_write_named_object_begin(w, "params");
1350 	spdk_json_write_named_uint32(w, "bdev_io_pool_size", g_bdev_opts.bdev_io_pool_size);
1351 	spdk_json_write_named_uint32(w, "bdev_io_cache_size", g_bdev_opts.bdev_io_cache_size);
1352 	spdk_json_write_named_bool(w, "bdev_auto_examine", g_bdev_opts.bdev_auto_examine);
1353 	spdk_json_write_object_end(w);
1354 	spdk_json_write_object_end(w);
1355 
1356 	bdev_examine_allowlist_config_json(w);
1357 
1358 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1359 		if (bdev_module->config_json) {
1360 			bdev_module->config_json(w);
1361 		}
1362 	}
1363 
1364 	pthread_mutex_lock(&g_bdev_mgr.mutex);
1365 
1366 	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
1367 		if (bdev->fn_table->write_config_json) {
1368 			bdev->fn_table->write_config_json(bdev, w);
1369 		}
1370 
1371 		bdev_qos_config_json(bdev, w);
1372 	}
1373 
1374 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
1375 
1376 	/* This has to be last RPC in array to make sure all bdevs finished examine */
1377 	spdk_json_write_object_begin(w);
1378 	spdk_json_write_named_string(w, "method", "bdev_wait_for_examine");
1379 	spdk_json_write_object_end(w);
1380 
1381 	spdk_json_write_array_end(w);
1382 }
1383 
1384 static int
1385 bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
1386 {
1387 	struct spdk_bdev_mgmt_channel *ch = ctx_buf;
1388 	struct spdk_bdev_io *bdev_io;
1389 	uint32_t i;
1390 
1391 	STAILQ_INIT(&ch->need_buf_small);
1392 	STAILQ_INIT(&ch->need_buf_large);
1393 
1394 	STAILQ_INIT(&ch->per_thread_cache);
1395 	ch->bdev_io_cache_size = g_bdev_opts.bdev_io_cache_size;
1396 
1397 	/* Pre-populate bdev_io cache to ensure this thread cannot be starved. */
1398 	ch->per_thread_cache_count = 0;
1399 	for (i = 0; i < ch->bdev_io_cache_size; i++) {
1400 		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
1401 		assert(bdev_io != NULL);
1402 		ch->per_thread_cache_count++;
1403 		STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
1404 	}
1405 
1406 	TAILQ_INIT(&ch->shared_resources);
1407 	TAILQ_INIT(&ch->io_wait_queue);
1408 
1409 	return 0;
1410 }
1411 
1412 static void
1413 bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
1414 {
1415 	struct spdk_bdev_mgmt_channel *ch = ctx_buf;
1416 	struct spdk_bdev_io *bdev_io;
1417 
1418 	if (!STAILQ_EMPTY(&ch->need_buf_small) || !STAILQ_EMPTY(&ch->need_buf_large)) {
1419 		SPDK_ERRLOG("Pending I/O list wasn't empty on mgmt channel free\n");
1420 	}
1421 
1422 	if (!TAILQ_EMPTY(&ch->shared_resources)) {
1423 		SPDK_ERRLOG("Module channel list wasn't empty on mgmt channel free\n");
1424 	}
1425 
1426 	while (!STAILQ_EMPTY(&ch->per_thread_cache)) {
1427 		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
1428 		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
1429 		ch->per_thread_cache_count--;
1430 		spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
1431 	}
1432 
1433 	assert(ch->per_thread_cache_count == 0);
1434 }
1435 
1436 static void
1437 bdev_init_complete(int rc)
1438 {
1439 	spdk_bdev_init_cb cb_fn = g_init_cb_fn;
1440 	void *cb_arg = g_init_cb_arg;
1441 	struct spdk_bdev_module *m;
1442 
1443 	g_bdev_mgr.init_complete = true;
1444 	g_init_cb_fn = NULL;
1445 	g_init_cb_arg = NULL;
1446 
1447 	/*
1448 	 * For modules that need to know when subsystem init is complete,
1449 	 * inform them now.
1450 	 */
1451 	if (rc == 0) {
1452 		TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
1453 			if (m->init_complete) {
1454 				m->init_complete();
1455 			}
1456 		}
1457 	}
1458 
1459 	cb_fn(cb_arg, rc);
1460 }
1461 
1462 static bool
1463 bdev_module_all_actions_completed(void)
1464 {
1465 	struct spdk_bdev_module *m;
1466 
1467 	TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
1468 		if (m->internal.action_in_progress > 0) {
1469 			return false;
1470 		}
1471 	}
1472 	return true;
1473 }
1474 
1475 static void
1476 bdev_module_action_complete(void)
1477 {
1478 	/*
1479 	 * Don't finish bdev subsystem initialization if
1480 	 * module pre-initialization is still in progress, or
1481 	 * the subsystem been already initialized.
1482 	 */
1483 	if (!g_bdev_mgr.module_init_complete || g_bdev_mgr.init_complete) {
1484 		return;
1485 	}
1486 
1487 	/*
1488 	 * Check all bdev modules for inits/examinations in progress. If any
1489 	 * exist, return immediately since we cannot finish bdev subsystem
1490 	 * initialization until all are completed.
1491 	 */
1492 	if (!bdev_module_all_actions_completed()) {
1493 		return;
1494 	}
1495 
1496 	/*
1497 	 * Modules already finished initialization - now that all
1498 	 * the bdev modules have finished their asynchronous I/O
1499 	 * processing, the entire bdev layer can be marked as complete.
1500 	 */
1501 	bdev_init_complete(0);
1502 }
1503 
1504 static void
1505 bdev_module_action_done(struct spdk_bdev_module *module)
1506 {
1507 	assert(module->internal.action_in_progress > 0);
1508 	module->internal.action_in_progress--;
1509 	bdev_module_action_complete();
1510 }
1511 
1512 void
1513 spdk_bdev_module_init_done(struct spdk_bdev_module *module)
1514 {
1515 	bdev_module_action_done(module);
1516 }
1517 
1518 void
1519 spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
1520 {
1521 	bdev_module_action_done(module);
1522 }
1523 
1524 /** The last initialized bdev module */
1525 static struct spdk_bdev_module *g_resume_bdev_module = NULL;
1526 
1527 static void
1528 bdev_init_failed(void *cb_arg)
1529 {
1530 	struct spdk_bdev_module *module = cb_arg;
1531 
1532 	module->internal.action_in_progress--;
1533 	bdev_init_complete(-1);
1534 }
1535 
1536 static int
1537 bdev_modules_init(void)
1538 {
1539 	struct spdk_bdev_module *module;
1540 	int rc = 0;
1541 
1542 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1543 		g_resume_bdev_module = module;
1544 		if (module->async_init) {
1545 			module->internal.action_in_progress = 1;
1546 		}
1547 		rc = module->module_init();
1548 		if (rc != 0) {
1549 			/* Bump action_in_progress to prevent other modules from completion of modules_init
1550 			 * Send message to defer application shutdown until resources are cleaned up */
1551 			module->internal.action_in_progress = 1;
1552 			spdk_thread_send_msg(spdk_get_thread(), bdev_init_failed, module);
1553 			return rc;
1554 		}
1555 	}
1556 
1557 	g_resume_bdev_module = NULL;
1558 	return 0;
1559 }
1560 
1561 void
1562 spdk_bdev_initialize(spdk_bdev_init_cb cb_fn, void *cb_arg)
1563 {
1564 	int cache_size;
1565 	int rc = 0;
1566 	char mempool_name[32];
1567 
1568 	assert(cb_fn != NULL);
1569 
1570 	g_init_cb_fn = cb_fn;
1571 	g_init_cb_arg = cb_arg;
1572 
1573 	spdk_notify_type_register("bdev_register");
1574 	spdk_notify_type_register("bdev_unregister");
1575 
1576 	snprintf(mempool_name, sizeof(mempool_name), "bdev_io_%d", getpid());
1577 
1578 	g_bdev_mgr.bdev_io_pool = spdk_mempool_create(mempool_name,
1579 				  g_bdev_opts.bdev_io_pool_size,
1580 				  sizeof(struct spdk_bdev_io) +
1581 				  bdev_module_get_max_ctx_size(),
1582 				  0,
1583 				  SPDK_ENV_SOCKET_ID_ANY);
1584 
1585 	if (g_bdev_mgr.bdev_io_pool == NULL) {
1586 		SPDK_ERRLOG("could not allocate spdk_bdev_io pool\n");
1587 		bdev_init_complete(-1);
1588 		return;
1589 	}
1590 
1591 	/**
1592 	 * Ensure no more than half of the total buffers end up local caches, by
1593 	 *   using spdk_env_get_core_count() to determine how many local caches we need
1594 	 *   to account for.
1595 	 */
1596 	cache_size = BUF_SMALL_POOL_SIZE / (2 * spdk_env_get_core_count());
1597 	snprintf(mempool_name, sizeof(mempool_name), "buf_small_pool_%d", getpid());
1598 
1599 	g_bdev_mgr.buf_small_pool = spdk_mempool_create(mempool_name,
1600 				    g_bdev_opts.small_buf_pool_size,
1601 				    SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_SMALL_BUF_MAX_SIZE) +
1602 				    SPDK_BDEV_POOL_ALIGNMENT,
1603 				    cache_size,
1604 				    SPDK_ENV_SOCKET_ID_ANY);
1605 	if (!g_bdev_mgr.buf_small_pool) {
1606 		SPDK_ERRLOG("create rbuf small pool failed\n");
1607 		bdev_init_complete(-1);
1608 		return;
1609 	}
1610 
1611 	cache_size = BUF_LARGE_POOL_SIZE / (2 * spdk_env_get_core_count());
1612 	snprintf(mempool_name, sizeof(mempool_name), "buf_large_pool_%d", getpid());
1613 
1614 	g_bdev_mgr.buf_large_pool = spdk_mempool_create(mempool_name,
1615 				    g_bdev_opts.large_buf_pool_size,
1616 				    SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_LARGE_BUF_MAX_SIZE) +
1617 				    SPDK_BDEV_POOL_ALIGNMENT,
1618 				    cache_size,
1619 				    SPDK_ENV_SOCKET_ID_ANY);
1620 	if (!g_bdev_mgr.buf_large_pool) {
1621 		SPDK_ERRLOG("create rbuf large pool failed\n");
1622 		bdev_init_complete(-1);
1623 		return;
1624 	}
1625 
1626 	g_bdev_mgr.zero_buffer = spdk_zmalloc(ZERO_BUFFER_SIZE, ZERO_BUFFER_SIZE,
1627 					      NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1628 	if (!g_bdev_mgr.zero_buffer) {
1629 		SPDK_ERRLOG("create bdev zero buffer failed\n");
1630 		bdev_init_complete(-1);
1631 		return;
1632 	}
1633 
1634 #ifdef SPDK_CONFIG_VTUNE
1635 	g_bdev_mgr.domain = __itt_domain_create("spdk_bdev");
1636 #endif
1637 
1638 	spdk_io_device_register(&g_bdev_mgr, bdev_mgmt_channel_create,
1639 				bdev_mgmt_channel_destroy,
1640 				sizeof(struct spdk_bdev_mgmt_channel),
1641 				"bdev_mgr");
1642 
1643 	rc = bdev_modules_init();
1644 	g_bdev_mgr.module_init_complete = true;
1645 	if (rc != 0) {
1646 		SPDK_ERRLOG("bdev modules init failed\n");
1647 		return;
1648 	}
1649 
1650 	bdev_module_action_complete();
1651 }
1652 
1653 static void
1654 bdev_mgr_unregister_cb(void *io_device)
1655 {
1656 	spdk_bdev_fini_cb cb_fn = g_fini_cb_fn;
1657 
1658 	if (g_bdev_mgr.bdev_io_pool) {
1659 		if (spdk_mempool_count(g_bdev_mgr.bdev_io_pool) != g_bdev_opts.bdev_io_pool_size) {
1660 			SPDK_ERRLOG("bdev IO pool count is %zu but should be %u\n",
1661 				    spdk_mempool_count(g_bdev_mgr.bdev_io_pool),
1662 				    g_bdev_opts.bdev_io_pool_size);
1663 		}
1664 
1665 		spdk_mempool_free(g_bdev_mgr.bdev_io_pool);
1666 	}
1667 
1668 	if (g_bdev_mgr.buf_small_pool) {
1669 		if (spdk_mempool_count(g_bdev_mgr.buf_small_pool) != g_bdev_opts.small_buf_pool_size) {
1670 			SPDK_ERRLOG("Small buffer pool count is %zu but should be %u\n",
1671 				    spdk_mempool_count(g_bdev_mgr.buf_small_pool),
1672 				    g_bdev_opts.small_buf_pool_size);
1673 			assert(false);
1674 		}
1675 
1676 		spdk_mempool_free(g_bdev_mgr.buf_small_pool);
1677 	}
1678 
1679 	if (g_bdev_mgr.buf_large_pool) {
1680 		if (spdk_mempool_count(g_bdev_mgr.buf_large_pool) != g_bdev_opts.large_buf_pool_size) {
1681 			SPDK_ERRLOG("Large buffer pool count is %zu but should be %u\n",
1682 				    spdk_mempool_count(g_bdev_mgr.buf_large_pool),
1683 				    g_bdev_opts.large_buf_pool_size);
1684 			assert(false);
1685 		}
1686 
1687 		spdk_mempool_free(g_bdev_mgr.buf_large_pool);
1688 	}
1689 
1690 	spdk_free(g_bdev_mgr.zero_buffer);
1691 
1692 	bdev_examine_allowlist_free();
1693 
1694 	cb_fn(g_fini_cb_arg);
1695 	g_fini_cb_fn = NULL;
1696 	g_fini_cb_arg = NULL;
1697 	g_bdev_mgr.init_complete = false;
1698 	g_bdev_mgr.module_init_complete = false;
1699 }
1700 
1701 static void
1702 bdev_module_fini_iter(void *arg)
1703 {
1704 	struct spdk_bdev_module *bdev_module;
1705 
1706 	/* FIXME: Handling initialization failures is broken now,
1707 	 * so we won't even try cleaning up after successfully
1708 	 * initialized modules. if module_init_complete is false,
1709 	 * just call spdk_bdev_mgr_unregister_cb
1710 	 */
1711 	if (!g_bdev_mgr.module_init_complete) {
1712 		bdev_mgr_unregister_cb(NULL);
1713 		return;
1714 	}
1715 
1716 	/* Start iterating from the last touched module */
1717 	if (!g_resume_bdev_module) {
1718 		bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
1719 	} else {
1720 		bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list,
1721 					 internal.tailq);
1722 	}
1723 
1724 	while (bdev_module) {
1725 		if (bdev_module->async_fini) {
1726 			/* Save our place so we can resume later. We must
1727 			 * save the variable here, before calling module_fini()
1728 			 * below, because in some cases the module may immediately
1729 			 * call spdk_bdev_module_fini_done() and re-enter
1730 			 * this function to continue iterating. */
1731 			g_resume_bdev_module = bdev_module;
1732 		}
1733 
1734 		if (bdev_module->module_fini) {
1735 			bdev_module->module_fini();
1736 		}
1737 
1738 		if (bdev_module->async_fini) {
1739 			return;
1740 		}
1741 
1742 		bdev_module = TAILQ_PREV(bdev_module, bdev_module_list,
1743 					 internal.tailq);
1744 	}
1745 
1746 	g_resume_bdev_module = NULL;
1747 	spdk_io_device_unregister(&g_bdev_mgr, bdev_mgr_unregister_cb);
1748 }
1749 
1750 void
1751 spdk_bdev_module_fini_done(void)
1752 {
1753 	if (spdk_get_thread() != g_fini_thread) {
1754 		spdk_thread_send_msg(g_fini_thread, bdev_module_fini_iter, NULL);
1755 	} else {
1756 		bdev_module_fini_iter(NULL);
1757 	}
1758 }
1759 
1760 static void
1761 bdev_finish_unregister_bdevs_iter(void *cb_arg, int bdeverrno)
1762 {
1763 	struct spdk_bdev *bdev = cb_arg;
1764 
1765 	if (bdeverrno && bdev) {
1766 		SPDK_WARNLOG("Unable to unregister bdev '%s' during spdk_bdev_finish()\n",
1767 			     bdev->name);
1768 
1769 		/*
1770 		 * Since the call to spdk_bdev_unregister() failed, we have no way to free this
1771 		 *  bdev; try to continue by manually removing this bdev from the list and continue
1772 		 *  with the next bdev in the list.
1773 		 */
1774 		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
1775 	}
1776 
1777 	if (TAILQ_EMPTY(&g_bdev_mgr.bdevs)) {
1778 		SPDK_DEBUGLOG(bdev, "Done unregistering bdevs\n");
1779 		/*
1780 		 * Bdev module finish need to be deferred as we might be in the middle of some context
1781 		 * (like bdev part free) that will use this bdev (or private bdev driver ctx data)
1782 		 * after returning.
1783 		 */
1784 		spdk_thread_send_msg(spdk_get_thread(), bdev_module_fini_iter, NULL);
1785 		return;
1786 	}
1787 
1788 	/*
1789 	 * Unregister last unclaimed bdev in the list, to ensure that bdev subsystem
1790 	 * shutdown proceeds top-down. The goal is to give virtual bdevs an opportunity
1791 	 * to detect clean shutdown as opposed to run-time hot removal of the underlying
1792 	 * base bdevs.
1793 	 *
1794 	 * Also, walk the list in the reverse order.
1795 	 */
1796 	for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
1797 	     bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
1798 		if (bdev->internal.claim_module != NULL) {
1799 			SPDK_DEBUGLOG(bdev, "Skipping claimed bdev '%s'(<-'%s').\n",
1800 				      bdev->name, bdev->internal.claim_module->name);
1801 			continue;
1802 		}
1803 
1804 		SPDK_DEBUGLOG(bdev, "Unregistering bdev '%s'\n", bdev->name);
1805 		spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
1806 		return;
1807 	}
1808 
1809 	/*
1810 	 * If any bdev fails to unclaim underlying bdev properly, we may face the
1811 	 * case of bdev list consisting of claimed bdevs only (if claims are managed
1812 	 * correctly, this would mean there's a loop in the claims graph which is
1813 	 * clearly impossible). Warn and unregister last bdev on the list then.
1814 	 */
1815 	for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
1816 	     bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
1817 		SPDK_WARNLOG("Unregistering claimed bdev '%s'!\n", bdev->name);
1818 		spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
1819 		return;
1820 	}
1821 }
1822 
1823 static void
1824 bdev_module_fini_start_iter(void *arg)
1825 {
1826 	struct spdk_bdev_module *bdev_module;
1827 
1828 	if (!g_resume_bdev_module) {
1829 		bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
1830 	} else {
1831 		bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list, internal.tailq);
1832 	}
1833 
1834 	while (bdev_module) {
1835 		if (bdev_module->async_fini_start) {
1836 			/* Save our place so we can resume later. We must
1837 			 * save the variable here, before calling fini_start()
1838 			 * below, because in some cases the module may immediately
1839 			 * call spdk_bdev_module_fini_start_done() and re-enter
1840 			 * this function to continue iterating. */
1841 			g_resume_bdev_module = bdev_module;
1842 		}
1843 
1844 		if (bdev_module->fini_start) {
1845 			bdev_module->fini_start();
1846 		}
1847 
1848 		if (bdev_module->async_fini_start) {
1849 			return;
1850 		}
1851 
1852 		bdev_module = TAILQ_PREV(bdev_module, bdev_module_list, internal.tailq);
1853 	}
1854 
1855 	g_resume_bdev_module = NULL;
1856 
1857 	bdev_finish_unregister_bdevs_iter(NULL, 0);
1858 }
1859 
1860 void
1861 spdk_bdev_module_fini_start_done(void)
1862 {
1863 	if (spdk_get_thread() != g_fini_thread) {
1864 		spdk_thread_send_msg(g_fini_thread, bdev_module_fini_start_iter, NULL);
1865 	} else {
1866 		bdev_module_fini_start_iter(NULL);
1867 	}
1868 }
1869 
1870 static void
1871 bdev_finish_wait_for_examine_done(void *cb_arg)
1872 {
1873 	bdev_module_fini_start_iter(NULL);
1874 }
1875 
1876 void
1877 spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
1878 {
1879 	int rc;
1880 
1881 	assert(cb_fn != NULL);
1882 
1883 	g_fini_thread = spdk_get_thread();
1884 
1885 	g_fini_cb_fn = cb_fn;
1886 	g_fini_cb_arg = cb_arg;
1887 
1888 	rc = spdk_bdev_wait_for_examine(bdev_finish_wait_for_examine_done, NULL);
1889 	if (rc != 0) {
1890 		SPDK_ERRLOG("wait_for_examine failed: %s\n", spdk_strerror(-rc));
1891 		bdev_finish_wait_for_examine_done(NULL);
1892 	}
1893 }
1894 
1895 struct spdk_bdev_io *
1896 bdev_channel_get_io(struct spdk_bdev_channel *channel)
1897 {
1898 	struct spdk_bdev_mgmt_channel *ch = channel->shared_resource->mgmt_ch;
1899 	struct spdk_bdev_io *bdev_io;
1900 
1901 	if (ch->per_thread_cache_count > 0) {
1902 		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
1903 		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
1904 		ch->per_thread_cache_count--;
1905 	} else if (spdk_unlikely(!TAILQ_EMPTY(&ch->io_wait_queue))) {
1906 		/*
1907 		 * Don't try to look for bdev_ios in the global pool if there are
1908 		 * waiters on bdev_ios - we don't want this caller to jump the line.
1909 		 */
1910 		bdev_io = NULL;
1911 	} else {
1912 		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
1913 	}
1914 
1915 	return bdev_io;
1916 }
1917 
1918 void
1919 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
1920 {
1921 	struct spdk_bdev_mgmt_channel *ch;
1922 
1923 	assert(bdev_io != NULL);
1924 	assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING);
1925 
1926 	ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1927 
1928 	if (bdev_io->internal.buf != NULL) {
1929 		bdev_io_put_buf(bdev_io);
1930 	}
1931 
1932 	if (ch->per_thread_cache_count < ch->bdev_io_cache_size) {
1933 		ch->per_thread_cache_count++;
1934 		STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
1935 		while (ch->per_thread_cache_count > 0 && !TAILQ_EMPTY(&ch->io_wait_queue)) {
1936 			struct spdk_bdev_io_wait_entry *entry;
1937 
1938 			entry = TAILQ_FIRST(&ch->io_wait_queue);
1939 			TAILQ_REMOVE(&ch->io_wait_queue, entry, link);
1940 			entry->cb_fn(entry->cb_arg);
1941 		}
1942 	} else {
1943 		/* We should never have a full cache with entries on the io wait queue. */
1944 		assert(TAILQ_EMPTY(&ch->io_wait_queue));
1945 		spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
1946 	}
1947 }
1948 
1949 static bool
1950 bdev_qos_is_iops_rate_limit(enum spdk_bdev_qos_rate_limit_type limit)
1951 {
1952 	assert(limit != SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
1953 
1954 	switch (limit) {
1955 	case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
1956 		return true;
1957 	case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
1958 	case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
1959 	case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
1960 		return false;
1961 	case SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES:
1962 	default:
1963 		return false;
1964 	}
1965 }
1966 
1967 static bool
1968 bdev_qos_io_to_limit(struct spdk_bdev_io *bdev_io)
1969 {
1970 	switch (bdev_io->type) {
1971 	case SPDK_BDEV_IO_TYPE_NVME_IO:
1972 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
1973 	case SPDK_BDEV_IO_TYPE_READ:
1974 	case SPDK_BDEV_IO_TYPE_WRITE:
1975 		return true;
1976 	case SPDK_BDEV_IO_TYPE_ZCOPY:
1977 		if (bdev_io->u.bdev.zcopy.start) {
1978 			return true;
1979 		} else {
1980 			return false;
1981 		}
1982 	default:
1983 		return false;
1984 	}
1985 }
1986 
1987 static bool
1988 bdev_is_read_io(struct spdk_bdev_io *bdev_io)
1989 {
1990 	switch (bdev_io->type) {
1991 	case SPDK_BDEV_IO_TYPE_NVME_IO:
1992 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
1993 		/* Bit 1 (0x2) set for read operation */
1994 		if (bdev_io->u.nvme_passthru.cmd.opc & SPDK_NVME_OPC_READ) {
1995 			return true;
1996 		} else {
1997 			return false;
1998 		}
1999 	case SPDK_BDEV_IO_TYPE_READ:
2000 		return true;
2001 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2002 		/* Populate to read from disk */
2003 		if (bdev_io->u.bdev.zcopy.populate) {
2004 			return true;
2005 		} else {
2006 			return false;
2007 		}
2008 	default:
2009 		return false;
2010 	}
2011 }
2012 
2013 static uint64_t
2014 bdev_get_io_size_in_byte(struct spdk_bdev_io *bdev_io)
2015 {
2016 	struct spdk_bdev	*bdev = bdev_io->bdev;
2017 
2018 	switch (bdev_io->type) {
2019 	case SPDK_BDEV_IO_TYPE_NVME_IO:
2020 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2021 		return bdev_io->u.nvme_passthru.nbytes;
2022 	case SPDK_BDEV_IO_TYPE_READ:
2023 	case SPDK_BDEV_IO_TYPE_WRITE:
2024 		return bdev_io->u.bdev.num_blocks * bdev->blocklen;
2025 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2026 		/* Track the data in the start phase only */
2027 		if (bdev_io->u.bdev.zcopy.start) {
2028 			return bdev_io->u.bdev.num_blocks * bdev->blocklen;
2029 		} else {
2030 			return 0;
2031 		}
2032 	default:
2033 		return 0;
2034 	}
2035 }
2036 
2037 static bool
2038 bdev_qos_rw_queue_io(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2039 {
2040 	if (limit->max_per_timeslice > 0 && limit->remaining_this_timeslice <= 0) {
2041 		return true;
2042 	} else {
2043 		return false;
2044 	}
2045 }
2046 
2047 static bool
2048 bdev_qos_r_queue_io(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2049 {
2050 	if (bdev_is_read_io(io) == false) {
2051 		return false;
2052 	}
2053 
2054 	return bdev_qos_rw_queue_io(limit, io);
2055 }
2056 
2057 static bool
2058 bdev_qos_w_queue_io(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2059 {
2060 	if (bdev_is_read_io(io) == true) {
2061 		return false;
2062 	}
2063 
2064 	return bdev_qos_rw_queue_io(limit, io);
2065 }
2066 
2067 static void
2068 bdev_qos_rw_iops_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2069 {
2070 	limit->remaining_this_timeslice--;
2071 }
2072 
2073 static void
2074 bdev_qos_rw_bps_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2075 {
2076 	limit->remaining_this_timeslice -= bdev_get_io_size_in_byte(io);
2077 }
2078 
2079 static void
2080 bdev_qos_r_bps_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2081 {
2082 	if (bdev_is_read_io(io) == false) {
2083 		return;
2084 	}
2085 
2086 	return bdev_qos_rw_bps_update_quota(limit, io);
2087 }
2088 
2089 static void
2090 bdev_qos_w_bps_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2091 {
2092 	if (bdev_is_read_io(io) == true) {
2093 		return;
2094 	}
2095 
2096 	return bdev_qos_rw_bps_update_quota(limit, io);
2097 }
2098 
2099 static void
2100 bdev_qos_set_ops(struct spdk_bdev_qos *qos)
2101 {
2102 	int i;
2103 
2104 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2105 		if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
2106 			qos->rate_limits[i].queue_io = NULL;
2107 			qos->rate_limits[i].update_quota = NULL;
2108 			continue;
2109 		}
2110 
2111 		switch (i) {
2112 		case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2113 			qos->rate_limits[i].queue_io = bdev_qos_rw_queue_io;
2114 			qos->rate_limits[i].update_quota = bdev_qos_rw_iops_update_quota;
2115 			break;
2116 		case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2117 			qos->rate_limits[i].queue_io = bdev_qos_rw_queue_io;
2118 			qos->rate_limits[i].update_quota = bdev_qos_rw_bps_update_quota;
2119 			break;
2120 		case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2121 			qos->rate_limits[i].queue_io = bdev_qos_r_queue_io;
2122 			qos->rate_limits[i].update_quota = bdev_qos_r_bps_update_quota;
2123 			break;
2124 		case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2125 			qos->rate_limits[i].queue_io = bdev_qos_w_queue_io;
2126 			qos->rate_limits[i].update_quota = bdev_qos_w_bps_update_quota;
2127 			break;
2128 		default:
2129 			break;
2130 		}
2131 	}
2132 }
2133 
2134 static void
2135 _bdev_io_complete_in_submit(struct spdk_bdev_channel *bdev_ch,
2136 			    struct spdk_bdev_io *bdev_io,
2137 			    enum spdk_bdev_io_status status)
2138 {
2139 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
2140 
2141 	bdev_io->internal.in_submit_request = true;
2142 	bdev_ch->io_outstanding++;
2143 	shared_resource->io_outstanding++;
2144 	spdk_bdev_io_complete(bdev_io, status);
2145 	bdev_io->internal.in_submit_request = false;
2146 }
2147 
2148 static inline void
2149 bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_io)
2150 {
2151 	struct spdk_bdev *bdev = bdev_io->bdev;
2152 	struct spdk_io_channel *ch = bdev_ch->channel;
2153 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
2154 
2155 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
2156 		struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
2157 		struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;
2158 
2159 		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
2160 		    bdev_abort_buf_io(&mgmt_channel->need_buf_small, bio_to_abort) ||
2161 		    bdev_abort_buf_io(&mgmt_channel->need_buf_large, bio_to_abort)) {
2162 			_bdev_io_complete_in_submit(bdev_ch, bdev_io,
2163 						    SPDK_BDEV_IO_STATUS_SUCCESS);
2164 			return;
2165 		}
2166 	}
2167 
2168 	if (spdk_likely(TAILQ_EMPTY(&shared_resource->nomem_io))) {
2169 		bdev_ch->io_outstanding++;
2170 		shared_resource->io_outstanding++;
2171 		bdev_io->internal.in_submit_request = true;
2172 		bdev->fn_table->submit_request(ch, bdev_io);
2173 		bdev_io->internal.in_submit_request = false;
2174 	} else {
2175 		TAILQ_INSERT_TAIL(&shared_resource->nomem_io, bdev_io, internal.link);
2176 	}
2177 }
2178 
2179 static bool
2180 bdev_qos_queue_io(struct spdk_bdev_qos *qos, struct spdk_bdev_io *bdev_io)
2181 {
2182 	int i;
2183 
2184 	if (bdev_qos_io_to_limit(bdev_io) == true) {
2185 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2186 			if (!qos->rate_limits[i].queue_io) {
2187 				continue;
2188 			}
2189 
2190 			if (qos->rate_limits[i].queue_io(&qos->rate_limits[i],
2191 							 bdev_io) == true) {
2192 				return true;
2193 			}
2194 		}
2195 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2196 			if (!qos->rate_limits[i].update_quota) {
2197 				continue;
2198 			}
2199 
2200 			qos->rate_limits[i].update_quota(&qos->rate_limits[i], bdev_io);
2201 		}
2202 	}
2203 
2204 	return false;
2205 }
2206 
2207 static int
2208 bdev_qos_io_submit(struct spdk_bdev_channel *ch, struct spdk_bdev_qos *qos)
2209 {
2210 	struct spdk_bdev_io		*bdev_io = NULL, *tmp = NULL;
2211 	int				submitted_ios = 0;
2212 
2213 	TAILQ_FOREACH_SAFE(bdev_io, &qos->queued, internal.link, tmp) {
2214 		if (!bdev_qos_queue_io(qos, bdev_io)) {
2215 			TAILQ_REMOVE(&qos->queued, bdev_io, internal.link);
2216 			bdev_io_do_submit(ch, bdev_io);
2217 			submitted_ios++;
2218 		}
2219 	}
2220 
2221 	return submitted_ios;
2222 }
2223 
2224 static void
2225 bdev_queue_io_wait_with_cb(struct spdk_bdev_io *bdev_io, spdk_bdev_io_wait_cb cb_fn)
2226 {
2227 	int rc;
2228 
2229 	bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
2230 	bdev_io->internal.waitq_entry.cb_fn = cb_fn;
2231 	bdev_io->internal.waitq_entry.cb_arg = bdev_io;
2232 	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
2233 				     &bdev_io->internal.waitq_entry);
2234 	if (rc != 0) {
2235 		SPDK_ERRLOG("Queue IO failed, rc=%d\n", rc);
2236 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2237 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2238 	}
2239 }
2240 
2241 static bool
2242 bdev_rw_should_split(struct spdk_bdev_io *bdev_io)
2243 {
2244 	uint32_t io_boundary = bdev_io->bdev->optimal_io_boundary;
2245 	uint32_t max_size = bdev_io->bdev->max_segment_size;
2246 	int max_segs = bdev_io->bdev->max_num_segments;
2247 
2248 	io_boundary = bdev_io->bdev->split_on_optimal_io_boundary ? io_boundary : 0;
2249 
2250 	if (spdk_likely(!io_boundary && !max_segs && !max_size)) {
2251 		return false;
2252 	}
2253 
2254 	if (io_boundary) {
2255 		uint64_t start_stripe, end_stripe;
2256 
2257 		start_stripe = bdev_io->u.bdev.offset_blocks;
2258 		end_stripe = start_stripe + bdev_io->u.bdev.num_blocks - 1;
2259 		/* Avoid expensive div operations if possible.  These spdk_u32 functions are very cheap. */
2260 		if (spdk_likely(spdk_u32_is_pow2(io_boundary))) {
2261 			start_stripe >>= spdk_u32log2(io_boundary);
2262 			end_stripe >>= spdk_u32log2(io_boundary);
2263 		} else {
2264 			start_stripe /= io_boundary;
2265 			end_stripe /= io_boundary;
2266 		}
2267 
2268 		if (start_stripe != end_stripe) {
2269 			return true;
2270 		}
2271 	}
2272 
2273 	if (max_segs) {
2274 		if (bdev_io->u.bdev.iovcnt > max_segs) {
2275 			return true;
2276 		}
2277 	}
2278 
2279 	if (max_size) {
2280 		for (int i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
2281 			if (bdev_io->u.bdev.iovs[i].iov_len > max_size) {
2282 				return true;
2283 			}
2284 		}
2285 	}
2286 
2287 	return false;
2288 }
2289 
2290 static bool
2291 bdev_unmap_should_split(struct spdk_bdev_io *bdev_io)
2292 {
2293 	uint32_t num_unmap_segments;
2294 
2295 	if (!bdev_io->bdev->max_unmap || !bdev_io->bdev->max_unmap_segments) {
2296 		return false;
2297 	}
2298 	num_unmap_segments = spdk_divide_round_up(bdev_io->u.bdev.num_blocks, bdev_io->bdev->max_unmap);
2299 	if (num_unmap_segments > bdev_io->bdev->max_unmap_segments) {
2300 		return true;
2301 	}
2302 
2303 	return false;
2304 }
2305 
2306 static bool
2307 bdev_write_zeroes_should_split(struct spdk_bdev_io *bdev_io)
2308 {
2309 	if (!bdev_io->bdev->max_write_zeroes) {
2310 		return false;
2311 	}
2312 
2313 	if (bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_write_zeroes) {
2314 		return true;
2315 	}
2316 
2317 	return false;
2318 }
2319 
2320 static bool
2321 bdev_io_should_split(struct spdk_bdev_io *bdev_io)
2322 {
2323 	switch (bdev_io->type) {
2324 	case SPDK_BDEV_IO_TYPE_READ:
2325 	case SPDK_BDEV_IO_TYPE_WRITE:
2326 		return bdev_rw_should_split(bdev_io);
2327 	case SPDK_BDEV_IO_TYPE_UNMAP:
2328 		return bdev_unmap_should_split(bdev_io);
2329 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2330 		return bdev_write_zeroes_should_split(bdev_io);
2331 	default:
2332 		return false;
2333 	}
2334 }
2335 
2336 static uint32_t
2337 _to_next_boundary(uint64_t offset, uint32_t boundary)
2338 {
2339 	return (boundary - (offset % boundary));
2340 }
2341 
2342 static void bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
2343 
2344 static void _bdev_rw_split(void *_bdev_io);
2345 
2346 static void bdev_unmap_split(struct spdk_bdev_io *bdev_io);
2347 
2348 static void
2349 _bdev_unmap_split(void *_bdev_io)
2350 {
2351 	return bdev_unmap_split((struct spdk_bdev_io *)_bdev_io);
2352 }
2353 
2354 static void bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io);
2355 
2356 static void
2357 _bdev_write_zeroes_split(void *_bdev_io)
2358 {
2359 	return bdev_write_zeroes_split((struct spdk_bdev_io *)_bdev_io);
2360 }
2361 
2362 static int
2363 bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt, void *md_buf,
2364 		     uint64_t num_blocks, uint64_t *offset, uint64_t *remaining)
2365 {
2366 	int rc;
2367 	uint64_t current_offset, current_remaining;
2368 	spdk_bdev_io_wait_cb io_wait_fn;
2369 
2370 	current_offset = *offset;
2371 	current_remaining = *remaining;
2372 
2373 	bdev_io->u.bdev.split_outstanding++;
2374 
2375 	io_wait_fn = _bdev_rw_split;
2376 	switch (bdev_io->type) {
2377 	case SPDK_BDEV_IO_TYPE_READ:
2378 		rc = bdev_readv_blocks_with_md(bdev_io->internal.desc,
2379 					       spdk_io_channel_from_ctx(bdev_io->internal.ch),
2380 					       iov, iovcnt, md_buf, current_offset,
2381 					       num_blocks,
2382 					       bdev_io_split_done, bdev_io,
2383 					       bdev_io->internal.ext_opts, true);
2384 		break;
2385 	case SPDK_BDEV_IO_TYPE_WRITE:
2386 		rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
2387 						spdk_io_channel_from_ctx(bdev_io->internal.ch),
2388 						iov, iovcnt, md_buf, current_offset,
2389 						num_blocks,
2390 						bdev_io_split_done, bdev_io,
2391 						bdev_io->internal.ext_opts, true);
2392 		break;
2393 	case SPDK_BDEV_IO_TYPE_UNMAP:
2394 		io_wait_fn = _bdev_unmap_split;
2395 		rc = spdk_bdev_unmap_blocks(bdev_io->internal.desc,
2396 					    spdk_io_channel_from_ctx(bdev_io->internal.ch),
2397 					    current_offset, num_blocks,
2398 					    bdev_io_split_done, bdev_io);
2399 		break;
2400 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2401 		io_wait_fn = _bdev_write_zeroes_split;
2402 		rc = spdk_bdev_write_zeroes_blocks(bdev_io->internal.desc,
2403 						   spdk_io_channel_from_ctx(bdev_io->internal.ch),
2404 						   current_offset, num_blocks,
2405 						   bdev_io_split_done, bdev_io);
2406 		break;
2407 	default:
2408 		assert(false);
2409 		rc = -EINVAL;
2410 		break;
2411 	}
2412 
2413 	if (rc == 0) {
2414 		current_offset += num_blocks;
2415 		current_remaining -= num_blocks;
2416 		bdev_io->u.bdev.split_current_offset_blocks = current_offset;
2417 		bdev_io->u.bdev.split_remaining_num_blocks = current_remaining;
2418 		*offset = current_offset;
2419 		*remaining = current_remaining;
2420 	} else {
2421 		bdev_io->u.bdev.split_outstanding--;
2422 		if (rc == -ENOMEM) {
2423 			if (bdev_io->u.bdev.split_outstanding == 0) {
2424 				/* No I/O is outstanding. Hence we should wait here. */
2425 				bdev_queue_io_wait_with_cb(bdev_io, io_wait_fn);
2426 			}
2427 		} else {
2428 			bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2429 			if (bdev_io->u.bdev.split_outstanding == 0) {
2430 				spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx);
2431 				TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
2432 				bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2433 			}
2434 		}
2435 	}
2436 
2437 	return rc;
2438 }
2439 
2440 static void
2441 _bdev_rw_split(void *_bdev_io)
2442 {
2443 	struct iovec *parent_iov, *iov;
2444 	struct spdk_bdev_io *bdev_io = _bdev_io;
2445 	struct spdk_bdev *bdev = bdev_io->bdev;
2446 	uint64_t parent_offset, current_offset, remaining;
2447 	uint32_t parent_iov_offset, parent_iovcnt, parent_iovpos, child_iovcnt;
2448 	uint32_t to_next_boundary, to_next_boundary_bytes, to_last_block_bytes;
2449 	uint32_t iovcnt, iov_len, child_iovsize;
2450 	uint32_t blocklen = bdev->blocklen;
2451 	uint32_t io_boundary = bdev->optimal_io_boundary;
2452 	uint32_t max_segment_size = bdev->max_segment_size;
2453 	uint32_t max_child_iovcnt = bdev->max_num_segments;
2454 	void *md_buf = NULL;
2455 	int rc;
2456 
2457 	max_segment_size = max_segment_size ? max_segment_size : UINT32_MAX;
2458 	max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, BDEV_IO_NUM_CHILD_IOV) :
2459 			   BDEV_IO_NUM_CHILD_IOV;
2460 	io_boundary = bdev->split_on_optimal_io_boundary ? io_boundary : UINT32_MAX;
2461 
2462 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
2463 	current_offset = bdev_io->u.bdev.split_current_offset_blocks;
2464 	parent_offset = bdev_io->u.bdev.offset_blocks;
2465 	parent_iov_offset = (current_offset - parent_offset) * blocklen;
2466 	parent_iovcnt = bdev_io->u.bdev.iovcnt;
2467 
2468 	for (parent_iovpos = 0; parent_iovpos < parent_iovcnt; parent_iovpos++) {
2469 		parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
2470 		if (parent_iov_offset < parent_iov->iov_len) {
2471 			break;
2472 		}
2473 		parent_iov_offset -= parent_iov->iov_len;
2474 	}
2475 
2476 	child_iovcnt = 0;
2477 	while (remaining > 0 && parent_iovpos < parent_iovcnt && child_iovcnt < BDEV_IO_NUM_CHILD_IOV) {
2478 		to_next_boundary = _to_next_boundary(current_offset, io_boundary);
2479 		to_next_boundary = spdk_min(remaining, to_next_boundary);
2480 		to_next_boundary_bytes = to_next_boundary * blocklen;
2481 
2482 		iov = &bdev_io->child_iov[child_iovcnt];
2483 		iovcnt = 0;
2484 
2485 		if (bdev_io->u.bdev.md_buf) {
2486 			md_buf = (char *)bdev_io->u.bdev.md_buf +
2487 				 (current_offset - parent_offset) * spdk_bdev_get_md_size(bdev);
2488 		}
2489 
2490 		child_iovsize = spdk_min(BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
2491 		while (to_next_boundary_bytes > 0 && parent_iovpos < parent_iovcnt &&
2492 		       iovcnt < child_iovsize) {
2493 			parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
2494 			iov_len = parent_iov->iov_len - parent_iov_offset;
2495 
2496 			iov_len = spdk_min(iov_len, max_segment_size);
2497 			iov_len = spdk_min(iov_len, to_next_boundary_bytes);
2498 			to_next_boundary_bytes -= iov_len;
2499 
2500 			bdev_io->child_iov[child_iovcnt].iov_base = parent_iov->iov_base + parent_iov_offset;
2501 			bdev_io->child_iov[child_iovcnt].iov_len = iov_len;
2502 
2503 			if (iov_len < parent_iov->iov_len - parent_iov_offset) {
2504 				parent_iov_offset += iov_len;
2505 			} else {
2506 				parent_iovpos++;
2507 				parent_iov_offset = 0;
2508 			}
2509 			child_iovcnt++;
2510 			iovcnt++;
2511 		}
2512 
2513 		if (to_next_boundary_bytes > 0) {
2514 			/* We had to stop this child I/O early because we ran out of
2515 			 * child_iov space or were limited by max_num_segments.
2516 			 * Ensure the iovs to be aligned with block size and
2517 			 * then adjust to_next_boundary before starting the
2518 			 * child I/O.
2519 			 */
2520 			assert(child_iovcnt == BDEV_IO_NUM_CHILD_IOV ||
2521 			       iovcnt == child_iovsize);
2522 			to_last_block_bytes = to_next_boundary_bytes % blocklen;
2523 			if (to_last_block_bytes != 0) {
2524 				uint32_t child_iovpos = child_iovcnt - 1;
2525 				/* don't decrease child_iovcnt when it equals to BDEV_IO_NUM_CHILD_IOV
2526 				 * so the loop will naturally end
2527 				 */
2528 
2529 				to_last_block_bytes = blocklen - to_last_block_bytes;
2530 				to_next_boundary_bytes += to_last_block_bytes;
2531 				while (to_last_block_bytes > 0 && iovcnt > 0) {
2532 					iov_len = spdk_min(to_last_block_bytes,
2533 							   bdev_io->child_iov[child_iovpos].iov_len);
2534 					bdev_io->child_iov[child_iovpos].iov_len -= iov_len;
2535 					if (bdev_io->child_iov[child_iovpos].iov_len == 0) {
2536 						child_iovpos--;
2537 						if (--iovcnt == 0) {
2538 							/* If the child IO is less than a block size just return.
2539 							 * If the first child IO of any split round is less than
2540 							 * a block size, an error exit.
2541 							 */
2542 							if (bdev_io->u.bdev.split_outstanding == 0) {
2543 								SPDK_ERRLOG("The first child io was less than a block size\n");
2544 								bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2545 								spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx);
2546 								TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
2547 								bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2548 							}
2549 
2550 							return;
2551 						}
2552 					}
2553 
2554 					to_last_block_bytes -= iov_len;
2555 
2556 					if (parent_iov_offset == 0) {
2557 						parent_iovpos--;
2558 						parent_iov_offset = bdev_io->u.bdev.iovs[parent_iovpos].iov_len;
2559 					}
2560 					parent_iov_offset -= iov_len;
2561 				}
2562 
2563 				assert(to_last_block_bytes == 0);
2564 			}
2565 			to_next_boundary -= to_next_boundary_bytes / blocklen;
2566 		}
2567 
2568 		rc = bdev_io_split_submit(bdev_io, iov, iovcnt, md_buf, to_next_boundary,
2569 					  &current_offset, &remaining);
2570 		if (spdk_unlikely(rc)) {
2571 			return;
2572 		}
2573 	}
2574 }
2575 
2576 static void
2577 bdev_unmap_split(struct spdk_bdev_io *bdev_io)
2578 {
2579 	uint64_t offset, unmap_blocks, remaining, max_unmap_blocks;
2580 	uint32_t num_children_reqs = 0;
2581 	int rc;
2582 
2583 	offset = bdev_io->u.bdev.split_current_offset_blocks;
2584 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
2585 	max_unmap_blocks = bdev_io->bdev->max_unmap * bdev_io->bdev->max_unmap_segments;
2586 
2587 	while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
2588 		unmap_blocks = spdk_min(remaining, max_unmap_blocks);
2589 
2590 		rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, unmap_blocks,
2591 					  &offset, &remaining);
2592 		if (spdk_likely(rc == 0)) {
2593 			num_children_reqs++;
2594 		} else {
2595 			return;
2596 		}
2597 	}
2598 }
2599 
2600 static void
2601 bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io)
2602 {
2603 	uint64_t offset, write_zeroes_blocks, remaining;
2604 	uint32_t num_children_reqs = 0;
2605 	int rc;
2606 
2607 	offset = bdev_io->u.bdev.split_current_offset_blocks;
2608 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
2609 
2610 	while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
2611 		write_zeroes_blocks = spdk_min(remaining, bdev_io->bdev->max_write_zeroes);
2612 
2613 		rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, write_zeroes_blocks,
2614 					  &offset, &remaining);
2615 		if (spdk_likely(rc == 0)) {
2616 			num_children_reqs++;
2617 		} else {
2618 			return;
2619 		}
2620 	}
2621 }
2622 
2623 static void
2624 parent_bdev_io_complete(void *ctx, int rc)
2625 {
2626 	struct spdk_bdev_io *parent_io = ctx;
2627 
2628 	if (rc) {
2629 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2630 	}
2631 
2632 	parent_io->internal.cb(parent_io, parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
2633 			       parent_io->internal.caller_ctx);
2634 }
2635 
2636 static void
2637 bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2638 {
2639 	struct spdk_bdev_io *parent_io = cb_arg;
2640 
2641 	spdk_bdev_free_io(bdev_io);
2642 
2643 	if (!success) {
2644 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2645 		/* If any child I/O failed, stop further splitting process. */
2646 		parent_io->u.bdev.split_current_offset_blocks += parent_io->u.bdev.split_remaining_num_blocks;
2647 		parent_io->u.bdev.split_remaining_num_blocks = 0;
2648 	}
2649 	parent_io->u.bdev.split_outstanding--;
2650 	if (parent_io->u.bdev.split_outstanding != 0) {
2651 		return;
2652 	}
2653 
2654 	/*
2655 	 * Parent I/O finishes when all blocks are consumed.
2656 	 */
2657 	if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
2658 		assert(parent_io->internal.cb != bdev_io_split_done);
2659 		spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)parent_io, bdev_io->internal.caller_ctx);
2660 		TAILQ_REMOVE(&parent_io->internal.ch->io_submitted, parent_io, internal.ch_link);
2661 
2662 		if (parent_io->internal.orig_iovcnt != 0) {
2663 			_bdev_io_push_bounce_data_buffer(parent_io, parent_bdev_io_complete);
2664 			/* bdev IO will be completed in the callback */
2665 		} else {
2666 			parent_bdev_io_complete(parent_io, 0);
2667 		}
2668 		return;
2669 	}
2670 
2671 	/*
2672 	 * Continue with the splitting process.  This function will complete the parent I/O if the
2673 	 * splitting is done.
2674 	 */
2675 	switch (parent_io->type) {
2676 	case SPDK_BDEV_IO_TYPE_READ:
2677 	case SPDK_BDEV_IO_TYPE_WRITE:
2678 		_bdev_rw_split(parent_io);
2679 		break;
2680 	case SPDK_BDEV_IO_TYPE_UNMAP:
2681 		bdev_unmap_split(parent_io);
2682 		break;
2683 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2684 		bdev_write_zeroes_split(parent_io);
2685 		break;
2686 	default:
2687 		assert(false);
2688 		break;
2689 	}
2690 }
2691 
2692 static void bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2693 				     bool success);
2694 
2695 static void
2696 bdev_io_split(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2697 {
2698 	bdev_io->u.bdev.split_current_offset_blocks = bdev_io->u.bdev.offset_blocks;
2699 	bdev_io->u.bdev.split_remaining_num_blocks = bdev_io->u.bdev.num_blocks;
2700 	bdev_io->u.bdev.split_outstanding = 0;
2701 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
2702 
2703 	switch (bdev_io->type) {
2704 	case SPDK_BDEV_IO_TYPE_READ:
2705 	case SPDK_BDEV_IO_TYPE_WRITE:
2706 		if (_is_buf_allocated(bdev_io->u.bdev.iovs)) {
2707 			_bdev_rw_split(bdev_io);
2708 		} else {
2709 			assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
2710 			spdk_bdev_io_get_buf(bdev_io, bdev_rw_split_get_buf_cb,
2711 					     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
2712 		}
2713 		break;
2714 	case SPDK_BDEV_IO_TYPE_UNMAP:
2715 		bdev_unmap_split(bdev_io);
2716 		break;
2717 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2718 		bdev_write_zeroes_split(bdev_io);
2719 		break;
2720 	default:
2721 		assert(false);
2722 		break;
2723 	}
2724 }
2725 
2726 static void
2727 bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
2728 {
2729 	if (!success) {
2730 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2731 		return;
2732 	}
2733 
2734 	_bdev_rw_split(bdev_io);
2735 }
2736 
2737 /* Explicitly mark this inline, since it's used as a function pointer and otherwise won't
2738  *  be inlined, at least on some compilers.
2739  */
2740 static inline void
2741 _bdev_io_submit(void *ctx)
2742 {
2743 	struct spdk_bdev_io *bdev_io = ctx;
2744 	struct spdk_bdev *bdev = bdev_io->bdev;
2745 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
2746 	uint64_t tsc;
2747 
2748 	tsc = spdk_get_ticks();
2749 	bdev_io->internal.submit_tsc = tsc;
2750 	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_START, 0, 0, (uintptr_t)bdev_io,
2751 			      (uint64_t)bdev_io->type, bdev_io->internal.caller_ctx,
2752 			      bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);
2753 
2754 	if (spdk_likely(bdev_ch->flags == 0)) {
2755 		bdev_io_do_submit(bdev_ch, bdev_io);
2756 		return;
2757 	}
2758 
2759 	if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
2760 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
2761 	} else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
2762 		if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) &&
2763 		    bdev_abort_queued_io(&bdev->internal.qos->queued, bdev_io->u.abort.bio_to_abort)) {
2764 			_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
2765 		} else {
2766 			TAILQ_INSERT_TAIL(&bdev->internal.qos->queued, bdev_io, internal.link);
2767 			bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
2768 		}
2769 	} else {
2770 		SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
2771 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2772 	}
2773 }
2774 
2775 bool bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2);
2776 
2777 bool
2778 bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2)
2779 {
2780 	if (range1->length == 0 || range2->length == 0) {
2781 		return false;
2782 	}
2783 
2784 	if (range1->offset + range1->length <= range2->offset) {
2785 		return false;
2786 	}
2787 
2788 	if (range2->offset + range2->length <= range1->offset) {
2789 		return false;
2790 	}
2791 
2792 	return true;
2793 }
2794 
2795 static bool
2796 bdev_io_range_is_locked(struct spdk_bdev_io *bdev_io, struct lba_range *range)
2797 {
2798 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
2799 	struct lba_range r;
2800 
2801 	switch (bdev_io->type) {
2802 	case SPDK_BDEV_IO_TYPE_NVME_IO:
2803 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2804 		/* Don't try to decode the NVMe command - just assume worst-case and that
2805 		 * it overlaps a locked range.
2806 		 */
2807 		return true;
2808 	case SPDK_BDEV_IO_TYPE_WRITE:
2809 	case SPDK_BDEV_IO_TYPE_UNMAP:
2810 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2811 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2812 		r.offset = bdev_io->u.bdev.offset_blocks;
2813 		r.length = bdev_io->u.bdev.num_blocks;
2814 		if (!bdev_lba_range_overlapped(range, &r)) {
2815 			/* This I/O doesn't overlap the specified LBA range. */
2816 			return false;
2817 		} else if (range->owner_ch == ch && range->locked_ctx == bdev_io->internal.caller_ctx) {
2818 			/* This I/O overlaps, but the I/O is on the same channel that locked this
2819 			 * range, and the caller_ctx is the same as the locked_ctx.  This means
2820 			 * that this I/O is associated with the lock, and is allowed to execute.
2821 			 */
2822 			return false;
2823 		} else {
2824 			return true;
2825 		}
2826 	default:
2827 		return false;
2828 	}
2829 }
2830 
2831 void
2832 bdev_io_submit(struct spdk_bdev_io *bdev_io)
2833 {
2834 	struct spdk_bdev *bdev = bdev_io->bdev;
2835 	struct spdk_thread *thread = spdk_bdev_io_get_thread(bdev_io);
2836 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
2837 
2838 	assert(thread != NULL);
2839 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
2840 
2841 	if (!TAILQ_EMPTY(&ch->locked_ranges)) {
2842 		struct lba_range *range;
2843 
2844 		TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
2845 			if (bdev_io_range_is_locked(bdev_io, range)) {
2846 				TAILQ_INSERT_TAIL(&ch->io_locked, bdev_io, internal.ch_link);
2847 				return;
2848 			}
2849 		}
2850 	}
2851 
2852 	TAILQ_INSERT_TAIL(&ch->io_submitted, bdev_io, internal.ch_link);
2853 
2854 	if (bdev_io_should_split(bdev_io)) {
2855 		bdev_io->internal.submit_tsc = spdk_get_ticks();
2856 		spdk_trace_record_tsc(bdev_io->internal.submit_tsc, TRACE_BDEV_IO_START, 0, 0,
2857 				      (uintptr_t)bdev_io, (uint64_t)bdev_io->type, bdev_io->internal.caller_ctx,
2858 				      bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);
2859 		bdev_io_split(NULL, bdev_io);
2860 		return;
2861 	}
2862 
2863 	if (ch->flags & BDEV_CH_QOS_ENABLED) {
2864 		if ((thread == bdev->internal.qos->thread) || !bdev->internal.qos->thread) {
2865 			_bdev_io_submit(bdev_io);
2866 		} else {
2867 			bdev_io->internal.io_submit_ch = ch;
2868 			bdev_io->internal.ch = bdev->internal.qos->ch;
2869 			spdk_thread_send_msg(bdev->internal.qos->thread, _bdev_io_submit, bdev_io);
2870 		}
2871 	} else {
2872 		_bdev_io_submit(bdev_io);
2873 	}
2874 }
2875 
2876 static inline void
2877 _bdev_io_copy_ext_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
2878 {
2879 	struct spdk_bdev_ext_io_opts *opts_copy = &bdev_io->internal.ext_opts_copy;
2880 
2881 	/* Zero part we don't copy */
2882 	memset(((char *)opts_copy) + opts->size, 0, sizeof(*opts) - opts->size);
2883 	memcpy(opts_copy, opts, opts->size);
2884 	opts_copy->size = sizeof(*opts_copy);
2885 	opts_copy->metadata = bdev_io->u.bdev.md_buf;
2886 	/* Save pointer to the copied ext_opts which will be used by bdev modules */
2887 	bdev_io->u.bdev.ext_opts = opts_copy;
2888 }
2889 
2890 static inline void
2891 _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
2892 {
2893 	/* bdev doesn't support memory domains, thereby buffers in this IO request can't
2894 	 * be accessed directly. It is needed to allocate buffers before issuing IO operation.
2895 	 * For write operation we need to pull buffers from memory domain before submitting IO.
2896 	 * Once read operation completes, we need to use memory_domain push functionality to
2897 	 * update data in original memory domain IO buffer
2898 	 * This IO request will go through a regular IO flow, so clear memory domains pointers in
2899 	 * the copied ext_opts */
2900 	bdev_io->internal.ext_opts_copy.memory_domain = NULL;
2901 	bdev_io->internal.ext_opts_copy.memory_domain_ctx = NULL;
2902 	_bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb,
2903 				       bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
2904 }
2905 
2906 static inline void
2907 _bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io,
2908 		    struct spdk_bdev_ext_io_opts *opts, bool copy_opts)
2909 {
2910 	if (opts) {
2911 		bool use_pull_push = opts->memory_domain && !desc->memory_domains_supported;
2912 		assert(opts->size <= sizeof(*opts));
2913 		/*
2914 		 * copy if size is smaller than opts struct to avoid having to check size
2915 		 * on every access to bdev_io->u.bdev.ext_opts
2916 		 */
2917 		if (copy_opts || use_pull_push || opts->size < sizeof(*opts)) {
2918 			_bdev_io_copy_ext_opts(bdev_io, opts);
2919 			if (use_pull_push) {
2920 				_bdev_io_ext_use_bounce_buffer(bdev_io);
2921 				return;
2922 			}
2923 		}
2924 	}
2925 	bdev_io_submit(bdev_io);
2926 }
2927 
2928 static void
2929 bdev_io_submit_reset(struct spdk_bdev_io *bdev_io)
2930 {
2931 	struct spdk_bdev *bdev = bdev_io->bdev;
2932 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
2933 	struct spdk_io_channel *ch = bdev_ch->channel;
2934 
2935 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
2936 
2937 	bdev_io->internal.in_submit_request = true;
2938 	bdev->fn_table->submit_request(ch, bdev_io);
2939 	bdev_io->internal.in_submit_request = false;
2940 }
2941 
2942 void
2943 bdev_io_init(struct spdk_bdev_io *bdev_io,
2944 	     struct spdk_bdev *bdev, void *cb_arg,
2945 	     spdk_bdev_io_completion_cb cb)
2946 {
2947 	bdev_io->bdev = bdev;
2948 	bdev_io->internal.caller_ctx = cb_arg;
2949 	bdev_io->internal.cb = cb;
2950 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
2951 	bdev_io->internal.in_submit_request = false;
2952 	bdev_io->internal.buf = NULL;
2953 	bdev_io->internal.io_submit_ch = NULL;
2954 	bdev_io->internal.orig_iovs = NULL;
2955 	bdev_io->internal.orig_iovcnt = 0;
2956 	bdev_io->internal.orig_md_iov.iov_base = NULL;
2957 	bdev_io->internal.error.nvme.cdw0 = 0;
2958 	bdev_io->num_retries = 0;
2959 	bdev_io->internal.get_buf_cb = NULL;
2960 	bdev_io->internal.get_aux_buf_cb = NULL;
2961 	bdev_io->internal.ext_opts = NULL;
2962 	bdev_io->internal.data_transfer_cpl = NULL;
2963 }
2964 
2965 static bool
2966 bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
2967 {
2968 	return bdev->fn_table->io_type_supported(bdev->ctxt, io_type);
2969 }
2970 
2971 bool
2972 spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
2973 {
2974 	bool supported;
2975 
2976 	supported = bdev_io_type_supported(bdev, io_type);
2977 
2978 	if (!supported) {
2979 		switch (io_type) {
2980 		case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2981 			/* The bdev layer will emulate write zeroes as long as write is supported. */
2982 			supported = bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE);
2983 			break;
2984 		default:
2985 			break;
2986 		}
2987 	}
2988 
2989 	return supported;
2990 }
2991 
2992 int
2993 spdk_bdev_dump_info_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
2994 {
2995 	if (bdev->fn_table->dump_info_json) {
2996 		return bdev->fn_table->dump_info_json(bdev->ctxt, w);
2997 	}
2998 
2999 	return 0;
3000 }
3001 
3002 static void
3003 bdev_qos_update_max_quota_per_timeslice(struct spdk_bdev_qos *qos)
3004 {
3005 	uint32_t max_per_timeslice = 0;
3006 	int i;
3007 
3008 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3009 		if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
3010 			qos->rate_limits[i].max_per_timeslice = 0;
3011 			continue;
3012 		}
3013 
3014 		max_per_timeslice = qos->rate_limits[i].limit *
3015 				    SPDK_BDEV_QOS_TIMESLICE_IN_USEC / SPDK_SEC_TO_USEC;
3016 
3017 		qos->rate_limits[i].max_per_timeslice = spdk_max(max_per_timeslice,
3018 							qos->rate_limits[i].min_per_timeslice);
3019 
3020 		qos->rate_limits[i].remaining_this_timeslice = qos->rate_limits[i].max_per_timeslice;
3021 	}
3022 
3023 	bdev_qos_set_ops(qos);
3024 }
3025 
3026 static int
3027 bdev_channel_poll_qos(void *arg)
3028 {
3029 	struct spdk_bdev_qos *qos = arg;
3030 	uint64_t now = spdk_get_ticks();
3031 	int i;
3032 
3033 	if (now < (qos->last_timeslice + qos->timeslice_size)) {
3034 		/* We received our callback earlier than expected - return
3035 		 *  immediately and wait to do accounting until at least one
3036 		 *  timeslice has actually expired.  This should never happen
3037 		 *  with a well-behaved timer implementation.
3038 		 */
3039 		return SPDK_POLLER_IDLE;
3040 	}
3041 
3042 	/* Reset for next round of rate limiting */
3043 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3044 		/* We may have allowed the IOs or bytes to slightly overrun in the last
3045 		 * timeslice. remaining_this_timeslice is signed, so if it's negative
3046 		 * here, we'll account for the overrun so that the next timeslice will
3047 		 * be appropriately reduced.
3048 		 */
3049 		if (qos->rate_limits[i].remaining_this_timeslice > 0) {
3050 			qos->rate_limits[i].remaining_this_timeslice = 0;
3051 		}
3052 	}
3053 
3054 	while (now >= (qos->last_timeslice + qos->timeslice_size)) {
3055 		qos->last_timeslice += qos->timeslice_size;
3056 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3057 			qos->rate_limits[i].remaining_this_timeslice +=
3058 				qos->rate_limits[i].max_per_timeslice;
3059 		}
3060 	}
3061 
3062 	return bdev_qos_io_submit(qos->ch, qos);
3063 }
3064 
3065 static void
3066 bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
3067 {
3068 	struct spdk_bdev_shared_resource *shared_resource;
3069 	struct lba_range *range;
3070 
3071 	while (!TAILQ_EMPTY(&ch->locked_ranges)) {
3072 		range = TAILQ_FIRST(&ch->locked_ranges);
3073 		TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
3074 		free(range);
3075 	}
3076 
3077 	spdk_put_io_channel(ch->channel);
3078 
3079 	shared_resource = ch->shared_resource;
3080 
3081 	assert(TAILQ_EMPTY(&ch->io_locked));
3082 	assert(TAILQ_EMPTY(&ch->io_submitted));
3083 	assert(ch->io_outstanding == 0);
3084 	assert(shared_resource->ref > 0);
3085 	shared_resource->ref--;
3086 	if (shared_resource->ref == 0) {
3087 		assert(shared_resource->io_outstanding == 0);
3088 		TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
3089 		spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
3090 		free(shared_resource);
3091 	}
3092 }
3093 
3094 /* Caller must hold bdev->internal.mutex. */
3095 static void
3096 bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
3097 {
3098 	struct spdk_bdev_qos	*qos = bdev->internal.qos;
3099 	int			i;
3100 
3101 	/* Rate limiting on this bdev enabled */
3102 	if (qos) {
3103 		if (qos->ch == NULL) {
3104 			struct spdk_io_channel *io_ch;
3105 
3106 			SPDK_DEBUGLOG(bdev, "Selecting channel %p as QoS channel for bdev %s on thread %p\n", ch,
3107 				      bdev->name, spdk_get_thread());
3108 
3109 			/* No qos channel has been selected, so set one up */
3110 
3111 			/* Take another reference to ch */
3112 			io_ch = spdk_get_io_channel(__bdev_to_io_dev(bdev));
3113 			assert(io_ch != NULL);
3114 			qos->ch = ch;
3115 
3116 			qos->thread = spdk_io_channel_get_thread(io_ch);
3117 
3118 			TAILQ_INIT(&qos->queued);
3119 
3120 			for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3121 				if (bdev_qos_is_iops_rate_limit(i) == true) {
3122 					qos->rate_limits[i].min_per_timeslice =
3123 						SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE;
3124 				} else {
3125 					qos->rate_limits[i].min_per_timeslice =
3126 						SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE;
3127 				}
3128 
3129 				if (qos->rate_limits[i].limit == 0) {
3130 					qos->rate_limits[i].limit = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
3131 				}
3132 			}
3133 			bdev_qos_update_max_quota_per_timeslice(qos);
3134 			qos->timeslice_size =
3135 				SPDK_BDEV_QOS_TIMESLICE_IN_USEC * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
3136 			qos->last_timeslice = spdk_get_ticks();
3137 			qos->poller = SPDK_POLLER_REGISTER(bdev_channel_poll_qos,
3138 							   qos,
3139 							   SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
3140 		}
3141 
3142 		ch->flags |= BDEV_CH_QOS_ENABLED;
3143 	}
3144 }
3145 
3146 struct poll_timeout_ctx {
3147 	struct spdk_bdev_desc	*desc;
3148 	uint64_t		timeout_in_sec;
3149 	spdk_bdev_io_timeout_cb	cb_fn;
3150 	void			*cb_arg;
3151 };
3152 
3153 static void
3154 bdev_desc_free(struct spdk_bdev_desc *desc)
3155 {
3156 	pthread_mutex_destroy(&desc->mutex);
3157 	free(desc->media_events_buffer);
3158 	free(desc);
3159 }
3160 
3161 static void
3162 bdev_channel_poll_timeout_io_done(struct spdk_io_channel_iter *i, int status)
3163 {
3164 	struct poll_timeout_ctx *ctx  = spdk_io_channel_iter_get_ctx(i);
3165 	struct spdk_bdev_desc *desc = ctx->desc;
3166 
3167 	free(ctx);
3168 
3169 	pthread_mutex_lock(&desc->mutex);
3170 	desc->refs--;
3171 	if (desc->closed == true && desc->refs == 0) {
3172 		pthread_mutex_unlock(&desc->mutex);
3173 		bdev_desc_free(desc);
3174 		return;
3175 	}
3176 	pthread_mutex_unlock(&desc->mutex);
3177 }
3178 
3179 static void
3180 bdev_channel_poll_timeout_io(struct spdk_io_channel_iter *i)
3181 {
3182 	struct poll_timeout_ctx *ctx  = spdk_io_channel_iter_get_ctx(i);
3183 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
3184 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(io_ch);
3185 	struct spdk_bdev_desc *desc = ctx->desc;
3186 	struct spdk_bdev_io *bdev_io;
3187 	uint64_t now;
3188 
3189 	pthread_mutex_lock(&desc->mutex);
3190 	if (desc->closed == true) {
3191 		pthread_mutex_unlock(&desc->mutex);
3192 		spdk_for_each_channel_continue(i, -1);
3193 		return;
3194 	}
3195 	pthread_mutex_unlock(&desc->mutex);
3196 
3197 	now = spdk_get_ticks();
3198 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
3199 		/* Exclude any I/O that are generated via splitting. */
3200 		if (bdev_io->internal.cb == bdev_io_split_done) {
3201 			continue;
3202 		}
3203 
3204 		/* Once we find an I/O that has not timed out, we can immediately
3205 		 * exit the loop.
3206 		 */
3207 		if (now < (bdev_io->internal.submit_tsc +
3208 			   ctx->timeout_in_sec * spdk_get_ticks_hz())) {
3209 			goto end;
3210 		}
3211 
3212 		if (bdev_io->internal.desc == desc) {
3213 			ctx->cb_fn(ctx->cb_arg, bdev_io);
3214 		}
3215 	}
3216 
3217 end:
3218 	spdk_for_each_channel_continue(i, 0);
3219 }
3220 
3221 static int
3222 bdev_poll_timeout_io(void *arg)
3223 {
3224 	struct spdk_bdev_desc *desc = arg;
3225 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3226 	struct poll_timeout_ctx *ctx;
3227 
3228 	ctx = calloc(1, sizeof(struct poll_timeout_ctx));
3229 	if (!ctx) {
3230 		SPDK_ERRLOG("failed to allocate memory\n");
3231 		return SPDK_POLLER_BUSY;
3232 	}
3233 	ctx->desc = desc;
3234 	ctx->cb_arg = desc->cb_arg;
3235 	ctx->cb_fn = desc->cb_fn;
3236 	ctx->timeout_in_sec = desc->timeout_in_sec;
3237 
3238 	/* Take a ref on the descriptor in case it gets closed while we are checking
3239 	 * all of the channels.
3240 	 */
3241 	pthread_mutex_lock(&desc->mutex);
3242 	desc->refs++;
3243 	pthread_mutex_unlock(&desc->mutex);
3244 
3245 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
3246 			      bdev_channel_poll_timeout_io,
3247 			      ctx,
3248 			      bdev_channel_poll_timeout_io_done);
3249 
3250 	return SPDK_POLLER_BUSY;
3251 }
3252 
3253 int
3254 spdk_bdev_set_timeout(struct spdk_bdev_desc *desc, uint64_t timeout_in_sec,
3255 		      spdk_bdev_io_timeout_cb cb_fn, void *cb_arg)
3256 {
3257 	assert(desc->thread == spdk_get_thread());
3258 
3259 	spdk_poller_unregister(&desc->io_timeout_poller);
3260 
3261 	if (timeout_in_sec) {
3262 		assert(cb_fn != NULL);
3263 		desc->io_timeout_poller = SPDK_POLLER_REGISTER(bdev_poll_timeout_io,
3264 					  desc,
3265 					  SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * SPDK_SEC_TO_USEC /
3266 					  1000);
3267 		if (desc->io_timeout_poller == NULL) {
3268 			SPDK_ERRLOG("can not register the desc timeout IO poller\n");
3269 			return -1;
3270 		}
3271 	}
3272 
3273 	desc->cb_fn = cb_fn;
3274 	desc->cb_arg = cb_arg;
3275 	desc->timeout_in_sec = timeout_in_sec;
3276 
3277 	return 0;
3278 }
3279 
3280 static int
3281 bdev_channel_create(void *io_device, void *ctx_buf)
3282 {
3283 	struct spdk_bdev		*bdev = __bdev_from_io_dev(io_device);
3284 	struct spdk_bdev_channel	*ch = ctx_buf;
3285 	struct spdk_io_channel		*mgmt_io_ch;
3286 	struct spdk_bdev_mgmt_channel	*mgmt_ch;
3287 	struct spdk_bdev_shared_resource *shared_resource;
3288 	struct lba_range		*range;
3289 
3290 	ch->bdev = bdev;
3291 	ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
3292 	if (!ch->channel) {
3293 		return -1;
3294 	}
3295 
3296 	spdk_trace_record(TRACE_BDEV_IOCH_CREATE, 0, 0, 0, ch->bdev->name,
3297 			  spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
3298 
3299 	assert(ch->histogram == NULL);
3300 	if (bdev->internal.histogram_enabled) {
3301 		ch->histogram = spdk_histogram_data_alloc();
3302 		if (ch->histogram == NULL) {
3303 			SPDK_ERRLOG("Could not allocate histogram\n");
3304 		}
3305 	}
3306 
3307 	mgmt_io_ch = spdk_get_io_channel(&g_bdev_mgr);
3308 	if (!mgmt_io_ch) {
3309 		spdk_put_io_channel(ch->channel);
3310 		return -1;
3311 	}
3312 
3313 	mgmt_ch = spdk_io_channel_get_ctx(mgmt_io_ch);
3314 	TAILQ_FOREACH(shared_resource, &mgmt_ch->shared_resources, link) {
3315 		if (shared_resource->shared_ch == ch->channel) {
3316 			spdk_put_io_channel(mgmt_io_ch);
3317 			shared_resource->ref++;
3318 			break;
3319 		}
3320 	}
3321 
3322 	if (shared_resource == NULL) {
3323 		shared_resource = calloc(1, sizeof(*shared_resource));
3324 		if (shared_resource == NULL) {
3325 			spdk_put_io_channel(ch->channel);
3326 			spdk_put_io_channel(mgmt_io_ch);
3327 			return -1;
3328 		}
3329 
3330 		shared_resource->mgmt_ch = mgmt_ch;
3331 		shared_resource->io_outstanding = 0;
3332 		TAILQ_INIT(&shared_resource->nomem_io);
3333 		shared_resource->nomem_threshold = 0;
3334 		shared_resource->shared_ch = ch->channel;
3335 		shared_resource->ref = 1;
3336 		TAILQ_INSERT_TAIL(&mgmt_ch->shared_resources, shared_resource, link);
3337 	}
3338 
3339 	memset(&ch->stat, 0, sizeof(ch->stat));
3340 	ch->stat.ticks_rate = spdk_get_ticks_hz();
3341 	ch->io_outstanding = 0;
3342 	TAILQ_INIT(&ch->queued_resets);
3343 	TAILQ_INIT(&ch->locked_ranges);
3344 	ch->flags = 0;
3345 	ch->shared_resource = shared_resource;
3346 
3347 	TAILQ_INIT(&ch->io_submitted);
3348 	TAILQ_INIT(&ch->io_locked);
3349 
3350 #ifdef SPDK_CONFIG_VTUNE
3351 	{
3352 		char *name;
3353 		__itt_init_ittlib(NULL, 0);
3354 		name = spdk_sprintf_alloc("spdk_bdev_%s_%p", ch->bdev->name, ch);
3355 		if (!name) {
3356 			bdev_channel_destroy_resource(ch);
3357 			return -1;
3358 		}
3359 		ch->handle = __itt_string_handle_create(name);
3360 		free(name);
3361 		ch->start_tsc = spdk_get_ticks();
3362 		ch->interval_tsc = spdk_get_ticks_hz() / 100;
3363 		memset(&ch->prev_stat, 0, sizeof(ch->prev_stat));
3364 	}
3365 #endif
3366 
3367 	pthread_mutex_lock(&bdev->internal.mutex);
3368 	bdev_enable_qos(bdev, ch);
3369 
3370 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
3371 		struct lba_range *new_range;
3372 
3373 		new_range = calloc(1, sizeof(*new_range));
3374 		if (new_range == NULL) {
3375 			pthread_mutex_unlock(&bdev->internal.mutex);
3376 			bdev_channel_destroy_resource(ch);
3377 			return -1;
3378 		}
3379 		new_range->length = range->length;
3380 		new_range->offset = range->offset;
3381 		new_range->locked_ctx = range->locked_ctx;
3382 		TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
3383 	}
3384 
3385 	pthread_mutex_unlock(&bdev->internal.mutex);
3386 
3387 	return 0;
3388 }
3389 
3390 /*
3391  * Abort I/O that are waiting on a data buffer.  These types of I/O are
3392  *  linked using the spdk_bdev_io internal.buf_link TAILQ_ENTRY.
3393  */
3394 static void
3395 bdev_abort_all_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_channel *ch)
3396 {
3397 	bdev_io_stailq_t tmp;
3398 	struct spdk_bdev_io *bdev_io;
3399 
3400 	STAILQ_INIT(&tmp);
3401 
3402 	while (!STAILQ_EMPTY(queue)) {
3403 		bdev_io = STAILQ_FIRST(queue);
3404 		STAILQ_REMOVE_HEAD(queue, internal.buf_link);
3405 		if (bdev_io->internal.ch == ch) {
3406 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
3407 		} else {
3408 			STAILQ_INSERT_TAIL(&tmp, bdev_io, internal.buf_link);
3409 		}
3410 	}
3411 
3412 	STAILQ_SWAP(&tmp, queue, spdk_bdev_io);
3413 }
3414 
3415 /*
3416  * Abort I/O that are queued waiting for submission.  These types of I/O are
3417  *  linked using the spdk_bdev_io link TAILQ_ENTRY.
3418  */
3419 static void
3420 bdev_abort_all_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
3421 {
3422 	struct spdk_bdev_io *bdev_io, *tmp;
3423 
3424 	TAILQ_FOREACH_SAFE(bdev_io, queue, internal.link, tmp) {
3425 		if (bdev_io->internal.ch == ch) {
3426 			TAILQ_REMOVE(queue, bdev_io, internal.link);
3427 			/*
3428 			 * spdk_bdev_io_complete() assumes that the completed I/O had
3429 			 *  been submitted to the bdev module.  Since in this case it
3430 			 *  hadn't, bump io_outstanding to account for the decrement
3431 			 *  that spdk_bdev_io_complete() will do.
3432 			 */
3433 			if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
3434 				ch->io_outstanding++;
3435 				ch->shared_resource->io_outstanding++;
3436 			}
3437 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
3438 		}
3439 	}
3440 }
3441 
3442 static bool
3443 bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
3444 {
3445 	struct spdk_bdev_io *bdev_io;
3446 
3447 	TAILQ_FOREACH(bdev_io, queue, internal.link) {
3448 		if (bdev_io == bio_to_abort) {
3449 			TAILQ_REMOVE(queue, bio_to_abort, internal.link);
3450 			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
3451 			return true;
3452 		}
3453 	}
3454 
3455 	return false;
3456 }
3457 
3458 static bool
3459 bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort)
3460 {
3461 	struct spdk_bdev_io *bdev_io;
3462 
3463 	STAILQ_FOREACH(bdev_io, queue, internal.buf_link) {
3464 		if (bdev_io == bio_to_abort) {
3465 			STAILQ_REMOVE(queue, bio_to_abort, spdk_bdev_io, internal.buf_link);
3466 			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
3467 			return true;
3468 		}
3469 	}
3470 
3471 	return false;
3472 }
3473 
3474 static void
3475 bdev_qos_channel_destroy(void *cb_arg)
3476 {
3477 	struct spdk_bdev_qos *qos = cb_arg;
3478 
3479 	spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
3480 	spdk_poller_unregister(&qos->poller);
3481 
3482 	SPDK_DEBUGLOG(bdev, "Free QoS %p.\n", qos);
3483 
3484 	free(qos);
3485 }
3486 
3487 static int
3488 bdev_qos_destroy(struct spdk_bdev *bdev)
3489 {
3490 	int i;
3491 
3492 	/*
3493 	 * Cleanly shutting down the QoS poller is tricky, because
3494 	 * during the asynchronous operation the user could open
3495 	 * a new descriptor and create a new channel, spawning
3496 	 * a new QoS poller.
3497 	 *
3498 	 * The strategy is to create a new QoS structure here and swap it
3499 	 * in. The shutdown path then continues to refer to the old one
3500 	 * until it completes and then releases it.
3501 	 */
3502 	struct spdk_bdev_qos *new_qos, *old_qos;
3503 
3504 	old_qos = bdev->internal.qos;
3505 
3506 	new_qos = calloc(1, sizeof(*new_qos));
3507 	if (!new_qos) {
3508 		SPDK_ERRLOG("Unable to allocate memory to shut down QoS.\n");
3509 		return -ENOMEM;
3510 	}
3511 
3512 	/* Copy the old QoS data into the newly allocated structure */
3513 	memcpy(new_qos, old_qos, sizeof(*new_qos));
3514 
3515 	/* Zero out the key parts of the QoS structure */
3516 	new_qos->ch = NULL;
3517 	new_qos->thread = NULL;
3518 	new_qos->poller = NULL;
3519 	TAILQ_INIT(&new_qos->queued);
3520 	/*
3521 	 * The limit member of spdk_bdev_qos_limit structure is not zeroed.
3522 	 * It will be used later for the new QoS structure.
3523 	 */
3524 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3525 		new_qos->rate_limits[i].remaining_this_timeslice = 0;
3526 		new_qos->rate_limits[i].min_per_timeslice = 0;
3527 		new_qos->rate_limits[i].max_per_timeslice = 0;
3528 	}
3529 
3530 	bdev->internal.qos = new_qos;
3531 
3532 	if (old_qos->thread == NULL) {
3533 		free(old_qos);
3534 	} else {
3535 		spdk_thread_send_msg(old_qos->thread, bdev_qos_channel_destroy, old_qos);
3536 	}
3537 
3538 	/* It is safe to continue with destroying the bdev even though the QoS channel hasn't
3539 	 * been destroyed yet. The destruction path will end up waiting for the final
3540 	 * channel to be put before it releases resources. */
3541 
3542 	return 0;
3543 }
3544 
3545 static void
3546 bdev_io_stat_add(struct spdk_bdev_io_stat *total, struct spdk_bdev_io_stat *add)
3547 {
3548 	total->bytes_read += add->bytes_read;
3549 	total->num_read_ops += add->num_read_ops;
3550 	total->bytes_written += add->bytes_written;
3551 	total->num_write_ops += add->num_write_ops;
3552 	total->bytes_unmapped += add->bytes_unmapped;
3553 	total->num_unmap_ops += add->num_unmap_ops;
3554 	total->read_latency_ticks += add->read_latency_ticks;
3555 	total->write_latency_ticks += add->write_latency_ticks;
3556 	total->unmap_latency_ticks += add->unmap_latency_ticks;
3557 }
3558 
3559 static void
3560 bdev_channel_abort_queued_ios(struct spdk_bdev_channel *ch)
3561 {
3562 	struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
3563 	struct spdk_bdev_mgmt_channel *mgmt_ch = shared_resource->mgmt_ch;
3564 
3565 	bdev_abort_all_queued_io(&shared_resource->nomem_io, ch);
3566 	bdev_abort_all_buf_io(&mgmt_ch->need_buf_small, ch);
3567 	bdev_abort_all_buf_io(&mgmt_ch->need_buf_large, ch);
3568 }
3569 
3570 static void
3571 bdev_channel_destroy(void *io_device, void *ctx_buf)
3572 {
3573 	struct spdk_bdev_channel *ch = ctx_buf;
3574 
3575 	SPDK_DEBUGLOG(bdev, "Destroying channel %p for bdev %s on thread %p\n", ch, ch->bdev->name,
3576 		      spdk_get_thread());
3577 
3578 	spdk_trace_record(TRACE_BDEV_IOCH_DESTROY, 0, 0, 0, ch->bdev->name,
3579 			  spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
3580 
3581 	/* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
3582 	pthread_mutex_lock(&ch->bdev->internal.mutex);
3583 	bdev_io_stat_add(&ch->bdev->internal.stat, &ch->stat);
3584 	pthread_mutex_unlock(&ch->bdev->internal.mutex);
3585 
3586 	bdev_abort_all_queued_io(&ch->queued_resets, ch);
3587 
3588 	bdev_channel_abort_queued_ios(ch);
3589 
3590 	if (ch->histogram) {
3591 		spdk_histogram_data_free(ch->histogram);
3592 	}
3593 
3594 	bdev_channel_destroy_resource(ch);
3595 }
3596 
3597 /*
3598  * If the name already exists in the global bdev name tree, RB_INSERT() returns a pointer
3599  * to it. Hence we do not have to call bdev_get_by_name() when using this function.
3600  */
3601 static int
3602 bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const char *name)
3603 {
3604 	struct spdk_bdev_name *tmp;
3605 
3606 	bdev_name->name = strdup(name);
3607 	if (bdev_name->name == NULL) {
3608 		SPDK_ERRLOG("Unable to allocate bdev name\n");
3609 		return -ENOMEM;
3610 	}
3611 
3612 	bdev_name->bdev = bdev;
3613 
3614 	pthread_mutex_lock(&g_bdev_mgr.mutex);
3615 	tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
3616 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
3617 
3618 	if (tmp != NULL) {
3619 		SPDK_ERRLOG("Bdev name %s already exists\n", name);
3620 		free(bdev_name->name);
3621 		return -EEXIST;
3622 	}
3623 
3624 	return 0;
3625 }
3626 
3627 static void
3628 bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name)
3629 {
3630 	RB_REMOVE(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
3631 	free(bdev_name->name);
3632 }
3633 
3634 static void
3635 bdev_name_del(struct spdk_bdev_name *bdev_name)
3636 {
3637 	pthread_mutex_lock(&g_bdev_mgr.mutex);
3638 	bdev_name_del_unsafe(bdev_name);
3639 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
3640 }
3641 
3642 int
3643 spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
3644 {
3645 	struct spdk_bdev_alias *tmp;
3646 	int ret;
3647 
3648 	if (alias == NULL) {
3649 		SPDK_ERRLOG("Empty alias passed\n");
3650 		return -EINVAL;
3651 	}
3652 
3653 	tmp = calloc(1, sizeof(*tmp));
3654 	if (tmp == NULL) {
3655 		SPDK_ERRLOG("Unable to allocate alias\n");
3656 		return -ENOMEM;
3657 	}
3658 
3659 	ret = bdev_name_add(&tmp->alias, bdev, alias);
3660 	if (ret != 0) {
3661 		free(tmp);
3662 		return ret;
3663 	}
3664 
3665 	TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
3666 
3667 	return 0;
3668 }
3669 
3670 static int
3671 bdev_alias_del(struct spdk_bdev *bdev, const char *alias,
3672 	       void (*alias_del_fn)(struct spdk_bdev_name *n))
3673 {
3674 	struct spdk_bdev_alias *tmp;
3675 
3676 	TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
3677 		if (strcmp(alias, tmp->alias.name) == 0) {
3678 			TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
3679 			alias_del_fn(&tmp->alias);
3680 			free(tmp);
3681 			return 0;
3682 		}
3683 	}
3684 
3685 	return -ENOENT;
3686 }
3687 
3688 int
3689 spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
3690 {
3691 	int rc;
3692 
3693 	rc = bdev_alias_del(bdev, alias, bdev_name_del);
3694 	if (rc == -ENOENT) {
3695 		SPDK_INFOLOG(bdev, "Alias %s does not exist\n", alias);
3696 	}
3697 
3698 	return rc;
3699 }
3700 
3701 void
3702 spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
3703 {
3704 	struct spdk_bdev_alias *p, *tmp;
3705 
3706 	TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
3707 		TAILQ_REMOVE(&bdev->aliases, p, tailq);
3708 		bdev_name_del(&p->alias);
3709 		free(p);
3710 	}
3711 }
3712 
3713 struct spdk_io_channel *
3714 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
3715 {
3716 	return spdk_get_io_channel(__bdev_to_io_dev(spdk_bdev_desc_get_bdev(desc)));
3717 }
3718 
3719 void *
3720 spdk_bdev_get_module_ctx(struct spdk_bdev_desc *desc)
3721 {
3722 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3723 	void *ctx = NULL;
3724 
3725 	if (bdev->fn_table->get_module_ctx) {
3726 		ctx = bdev->fn_table->get_module_ctx(bdev->ctxt);
3727 	}
3728 
3729 	return ctx;
3730 }
3731 
3732 const char *
3733 spdk_bdev_get_module_name(const struct spdk_bdev *bdev)
3734 {
3735 	return bdev->module->name;
3736 }
3737 
3738 const char *
3739 spdk_bdev_get_name(const struct spdk_bdev *bdev)
3740 {
3741 	return bdev->name;
3742 }
3743 
3744 const char *
3745 spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
3746 {
3747 	return bdev->product_name;
3748 }
3749 
3750 const struct spdk_bdev_aliases_list *
3751 spdk_bdev_get_aliases(const struct spdk_bdev *bdev)
3752 {
3753 	return &bdev->aliases;
3754 }
3755 
3756 uint32_t
3757 spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
3758 {
3759 	return bdev->blocklen;
3760 }
3761 
3762 uint32_t
3763 spdk_bdev_get_write_unit_size(const struct spdk_bdev *bdev)
3764 {
3765 	return bdev->write_unit_size;
3766 }
3767 
3768 uint64_t
3769 spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
3770 {
3771 	return bdev->blockcnt;
3772 }
3773 
3774 const char *
3775 spdk_bdev_get_qos_rpc_type(enum spdk_bdev_qos_rate_limit_type type)
3776 {
3777 	return qos_rpc_type[type];
3778 }
3779 
3780 void
3781 spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
3782 {
3783 	int i;
3784 
3785 	memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
3786 
3787 	pthread_mutex_lock(&bdev->internal.mutex);
3788 	if (bdev->internal.qos) {
3789 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3790 			if (bdev->internal.qos->rate_limits[i].limit !=
3791 			    SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
3792 				limits[i] = bdev->internal.qos->rate_limits[i].limit;
3793 				if (bdev_qos_is_iops_rate_limit(i) == false) {
3794 					/* Change from Byte to Megabyte which is user visible. */
3795 					limits[i] = limits[i] / 1024 / 1024;
3796 				}
3797 			}
3798 		}
3799 	}
3800 	pthread_mutex_unlock(&bdev->internal.mutex);
3801 }
3802 
3803 size_t
3804 spdk_bdev_get_buf_align(const struct spdk_bdev *bdev)
3805 {
3806 	return 1 << bdev->required_alignment;
3807 }
3808 
3809 uint32_t
3810 spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
3811 {
3812 	return bdev->optimal_io_boundary;
3813 }
3814 
3815 bool
3816 spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
3817 {
3818 	return bdev->write_cache;
3819 }
3820 
3821 const struct spdk_uuid *
3822 spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
3823 {
3824 	return &bdev->uuid;
3825 }
3826 
3827 uint16_t
3828 spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
3829 {
3830 	return bdev->acwu;
3831 }
3832 
3833 uint32_t
3834 spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
3835 {
3836 	return bdev->md_len;
3837 }
3838 
3839 bool
3840 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
3841 {
3842 	return (bdev->md_len != 0) && bdev->md_interleave;
3843 }
3844 
3845 bool
3846 spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
3847 {
3848 	return (bdev->md_len != 0) && !bdev->md_interleave;
3849 }
3850 
3851 bool
3852 spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
3853 {
3854 	return bdev->zoned;
3855 }
3856 
3857 uint32_t
3858 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
3859 {
3860 	if (spdk_bdev_is_md_interleaved(bdev)) {
3861 		return bdev->blocklen - bdev->md_len;
3862 	} else {
3863 		return bdev->blocklen;
3864 	}
3865 }
3866 
3867 uint32_t
3868 spdk_bdev_get_physical_block_size(const struct spdk_bdev *bdev)
3869 {
3870 	return bdev->phys_blocklen;
3871 }
3872 
3873 static uint32_t
3874 _bdev_get_block_size_with_md(const struct spdk_bdev *bdev)
3875 {
3876 	if (!spdk_bdev_is_md_interleaved(bdev)) {
3877 		return bdev->blocklen + bdev->md_len;
3878 	} else {
3879 		return bdev->blocklen;
3880 	}
3881 }
3882 
3883 /* We have to use the typedef in the function declaration to appease astyle. */
3884 typedef enum spdk_dif_type spdk_dif_type_t;
3885 
3886 spdk_dif_type_t
3887 spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
3888 {
3889 	if (bdev->md_len != 0) {
3890 		return bdev->dif_type;
3891 	} else {
3892 		return SPDK_DIF_DISABLE;
3893 	}
3894 }
3895 
3896 bool
3897 spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
3898 {
3899 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
3900 		return bdev->dif_is_head_of_md;
3901 	} else {
3902 		return false;
3903 	}
3904 }
3905 
3906 bool
3907 spdk_bdev_is_dif_check_enabled(const struct spdk_bdev *bdev,
3908 			       enum spdk_dif_check_type check_type)
3909 {
3910 	if (spdk_bdev_get_dif_type(bdev) == SPDK_DIF_DISABLE) {
3911 		return false;
3912 	}
3913 
3914 	switch (check_type) {
3915 	case SPDK_DIF_CHECK_TYPE_REFTAG:
3916 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) != 0;
3917 	case SPDK_DIF_CHECK_TYPE_APPTAG:
3918 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) != 0;
3919 	case SPDK_DIF_CHECK_TYPE_GUARD:
3920 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_GUARD_CHECK) != 0;
3921 	default:
3922 		return false;
3923 	}
3924 }
3925 
3926 uint64_t
3927 spdk_bdev_get_qd(const struct spdk_bdev *bdev)
3928 {
3929 	return bdev->internal.measured_queue_depth;
3930 }
3931 
3932 uint64_t
3933 spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
3934 {
3935 	return bdev->internal.period;
3936 }
3937 
3938 uint64_t
3939 spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev)
3940 {
3941 	return bdev->internal.weighted_io_time;
3942 }
3943 
3944 uint64_t
3945 spdk_bdev_get_io_time(const struct spdk_bdev *bdev)
3946 {
3947 	return bdev->internal.io_time;
3948 }
3949 
3950 static void bdev_update_qd_sampling_period(void *ctx);
3951 
3952 static void
3953 _calculate_measured_qd_cpl(struct spdk_io_channel_iter *i, int status)
3954 {
3955 	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
3956 
3957 	bdev->internal.measured_queue_depth = bdev->internal.temporary_queue_depth;
3958 
3959 	if (bdev->internal.measured_queue_depth) {
3960 		bdev->internal.io_time += bdev->internal.period;
3961 		bdev->internal.weighted_io_time += bdev->internal.period * bdev->internal.measured_queue_depth;
3962 	}
3963 
3964 	bdev->internal.qd_poll_in_progress = false;
3965 
3966 	bdev_update_qd_sampling_period(bdev);
3967 }
3968 
3969 static void
3970 _calculate_measured_qd(struct spdk_io_channel_iter *i)
3971 {
3972 	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
3973 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
3974 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(io_ch);
3975 
3976 	bdev->internal.temporary_queue_depth += ch->io_outstanding;
3977 	spdk_for_each_channel_continue(i, 0);
3978 }
3979 
3980 static int
3981 bdev_calculate_measured_queue_depth(void *ctx)
3982 {
3983 	struct spdk_bdev *bdev = ctx;
3984 
3985 	bdev->internal.qd_poll_in_progress = true;
3986 	bdev->internal.temporary_queue_depth = 0;
3987 	spdk_for_each_channel(__bdev_to_io_dev(bdev), _calculate_measured_qd, bdev,
3988 			      _calculate_measured_qd_cpl);
3989 	return SPDK_POLLER_BUSY;
3990 }
3991 
3992 static void
3993 bdev_update_qd_sampling_period(void *ctx)
3994 {
3995 	struct spdk_bdev *bdev = ctx;
3996 
3997 	if (bdev->internal.period == bdev->internal.new_period) {
3998 		return;
3999 	}
4000 
4001 	if (bdev->internal.qd_poll_in_progress) {
4002 		return;
4003 	}
4004 
4005 	bdev->internal.period = bdev->internal.new_period;
4006 
4007 	spdk_poller_unregister(&bdev->internal.qd_poller);
4008 	if (bdev->internal.period != 0) {
4009 		bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
4010 					   bdev, bdev->internal.period);
4011 	} else {
4012 		spdk_bdev_close(bdev->internal.qd_desc);
4013 		bdev->internal.qd_desc = NULL;
4014 	}
4015 }
4016 
4017 static void
4018 _tmp_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
4019 {
4020 	SPDK_NOTICELOG("Unexpected event type: %d\n", type);
4021 }
4022 
4023 void
4024 spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
4025 {
4026 	int rc;
4027 
4028 	if (bdev->internal.new_period == period) {
4029 		return;
4030 	}
4031 
4032 	bdev->internal.new_period = period;
4033 
4034 	if (bdev->internal.qd_desc != NULL) {
4035 		assert(bdev->internal.period != 0);
4036 
4037 		spdk_thread_send_msg(bdev->internal.qd_desc->thread,
4038 				     bdev_update_qd_sampling_period, bdev);
4039 		return;
4040 	}
4041 
4042 	assert(bdev->internal.period == 0);
4043 
4044 	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), false, _tmp_bdev_event_cb,
4045 				NULL, &bdev->internal.qd_desc);
4046 	if (rc != 0) {
4047 		return;
4048 	}
4049 
4050 	bdev->internal.period = period;
4051 	bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
4052 				   bdev, period);
4053 }
4054 
4055 struct bdev_get_current_qd_ctx {
4056 	uint64_t current_qd;
4057 	spdk_bdev_get_current_qd_cb cb_fn;
4058 	void *cb_arg;
4059 };
4060 
4061 static void
4062 bdev_get_current_qd_done(struct spdk_io_channel_iter *i, int status)
4063 {
4064 	struct bdev_get_current_qd_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
4065 	void *io_dev = spdk_io_channel_iter_get_io_device(i);
4066 
4067 	ctx->cb_fn(__bdev_from_io_dev(io_dev), ctx->current_qd, ctx->cb_arg, 0);
4068 
4069 	free(ctx);
4070 }
4071 
4072 static void
4073 bdev_get_current_qd(struct spdk_io_channel_iter *i)
4074 {
4075 	struct bdev_get_current_qd_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
4076 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
4077 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(io_ch);
4078 
4079 	ctx->current_qd += bdev_ch->io_outstanding;
4080 
4081 	spdk_for_each_channel_continue(i, 0);
4082 }
4083 
4084 void
4085 spdk_bdev_get_current_qd(struct spdk_bdev *bdev, spdk_bdev_get_current_qd_cb cb_fn,
4086 			 void *cb_arg)
4087 {
4088 	struct bdev_get_current_qd_ctx *ctx;
4089 
4090 	assert(cb_fn != NULL);
4091 
4092 	ctx = calloc(1, sizeof(*ctx));
4093 	if (ctx == NULL) {
4094 		cb_fn(bdev, 0, cb_arg, -ENOMEM);
4095 		return;
4096 	}
4097 
4098 	ctx->cb_fn = cb_fn;
4099 	ctx->cb_arg = cb_arg;
4100 
4101 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
4102 			      bdev_get_current_qd,
4103 			      ctx,
4104 			      bdev_get_current_qd_done);
4105 }
4106 
4107 static void
4108 _resize_notify(void *arg)
4109 {
4110 	struct spdk_bdev_desc *desc = arg;
4111 
4112 	pthread_mutex_lock(&desc->mutex);
4113 	desc->refs--;
4114 	if (!desc->closed) {
4115 		pthread_mutex_unlock(&desc->mutex);
4116 		desc->callback.event_fn(SPDK_BDEV_EVENT_RESIZE,
4117 					desc->bdev,
4118 					desc->callback.ctx);
4119 		return;
4120 	} else if (0 == desc->refs) {
4121 		/* This descriptor was closed after this resize_notify message was sent.
4122 		 * spdk_bdev_close() could not free the descriptor since this message was
4123 		 * in flight, so we free it now using bdev_desc_free().
4124 		 */
4125 		pthread_mutex_unlock(&desc->mutex);
4126 		bdev_desc_free(desc);
4127 		return;
4128 	}
4129 	pthread_mutex_unlock(&desc->mutex);
4130 }
4131 
4132 int
4133 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
4134 {
4135 	struct spdk_bdev_desc *desc;
4136 	int ret;
4137 
4138 	if (size == bdev->blockcnt) {
4139 		return 0;
4140 	}
4141 
4142 	pthread_mutex_lock(&bdev->internal.mutex);
4143 
4144 	/* bdev has open descriptors */
4145 	if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
4146 	    bdev->blockcnt > size) {
4147 		ret = -EBUSY;
4148 	} else {
4149 		bdev->blockcnt = size;
4150 		TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
4151 			pthread_mutex_lock(&desc->mutex);
4152 			if (!desc->closed) {
4153 				desc->refs++;
4154 				spdk_thread_send_msg(desc->thread, _resize_notify, desc);
4155 			}
4156 			pthread_mutex_unlock(&desc->mutex);
4157 		}
4158 		ret = 0;
4159 	}
4160 
4161 	pthread_mutex_unlock(&bdev->internal.mutex);
4162 
4163 	return ret;
4164 }
4165 
4166 /*
4167  * Convert I/O offset and length from bytes to blocks.
4168  *
4169  * Returns zero on success or non-zero if the byte parameters aren't divisible by the block size.
4170  */
4171 static uint64_t
4172 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t offset_bytes, uint64_t *offset_blocks,
4173 		     uint64_t num_bytes, uint64_t *num_blocks)
4174 {
4175 	uint32_t block_size = bdev->blocklen;
4176 	uint8_t shift_cnt;
4177 
4178 	/* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
4179 	if (spdk_likely(spdk_u32_is_pow2(block_size))) {
4180 		shift_cnt = spdk_u32log2(block_size);
4181 		*offset_blocks = offset_bytes >> shift_cnt;
4182 		*num_blocks = num_bytes >> shift_cnt;
4183 		return (offset_bytes - (*offset_blocks << shift_cnt)) |
4184 		       (num_bytes - (*num_blocks << shift_cnt));
4185 	} else {
4186 		*offset_blocks = offset_bytes / block_size;
4187 		*num_blocks = num_bytes / block_size;
4188 		return (offset_bytes % block_size) | (num_bytes % block_size);
4189 	}
4190 }
4191 
4192 static bool
4193 bdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t offset_blocks, uint64_t num_blocks)
4194 {
4195 	/* Return failure if offset_blocks + num_blocks is less than offset_blocks; indicates there
4196 	 * has been an overflow and hence the offset has been wrapped around */
4197 	if (offset_blocks + num_blocks < offset_blocks) {
4198 		return false;
4199 	}
4200 
4201 	/* Return failure if offset_blocks + num_blocks exceeds the size of the bdev */
4202 	if (offset_blocks + num_blocks > bdev->blockcnt) {
4203 		return false;
4204 	}
4205 
4206 	return true;
4207 }
4208 
4209 static int
4210 bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
4211 			 void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4212 			 spdk_bdev_io_completion_cb cb, void *cb_arg)
4213 {
4214 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4215 	struct spdk_bdev_io *bdev_io;
4216 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4217 
4218 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4219 		return -EINVAL;
4220 	}
4221 
4222 	bdev_io = bdev_channel_get_io(channel);
4223 	if (!bdev_io) {
4224 		return -ENOMEM;
4225 	}
4226 
4227 	bdev_io->internal.ch = channel;
4228 	bdev_io->internal.desc = desc;
4229 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
4230 	bdev_io->u.bdev.iovs = &bdev_io->iov;
4231 	bdev_io->u.bdev.iovs[0].iov_base = buf;
4232 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
4233 	bdev_io->u.bdev.iovcnt = 1;
4234 	bdev_io->u.bdev.md_buf = md_buf;
4235 	bdev_io->u.bdev.num_blocks = num_blocks;
4236 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4237 	bdev_io->u.bdev.ext_opts = NULL;
4238 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4239 
4240 	bdev_io_submit(bdev_io);
4241 	return 0;
4242 }
4243 
4244 int
4245 spdk_bdev_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4246 	       void *buf, uint64_t offset, uint64_t nbytes,
4247 	       spdk_bdev_io_completion_cb cb, void *cb_arg)
4248 {
4249 	uint64_t offset_blocks, num_blocks;
4250 
4251 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4252 				 nbytes, &num_blocks) != 0) {
4253 		return -EINVAL;
4254 	}
4255 
4256 	return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
4257 }
4258 
4259 int
4260 spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4261 		      void *buf, uint64_t offset_blocks, uint64_t num_blocks,
4262 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
4263 {
4264 	return bdev_read_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks, cb, cb_arg);
4265 }
4266 
4267 int
4268 spdk_bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4269 			      void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4270 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
4271 {
4272 	struct iovec iov = {
4273 		.iov_base = buf,
4274 	};
4275 
4276 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4277 		return -EINVAL;
4278 	}
4279 
4280 	if (md_buf && !_is_buf_allocated(&iov)) {
4281 		return -EINVAL;
4282 	}
4283 
4284 	return bdev_read_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
4285 					cb, cb_arg);
4286 }
4287 
4288 int
4289 spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4290 		struct iovec *iov, int iovcnt,
4291 		uint64_t offset, uint64_t nbytes,
4292 		spdk_bdev_io_completion_cb cb, void *cb_arg)
4293 {
4294 	uint64_t offset_blocks, num_blocks;
4295 
4296 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4297 				 nbytes, &num_blocks) != 0) {
4298 		return -EINVAL;
4299 	}
4300 
4301 	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
4302 }
4303 
4304 static int
4305 bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4306 			  struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
4307 			  uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
4308 			  struct spdk_bdev_ext_io_opts *opts, bool copy_opts)
4309 {
4310 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4311 	struct spdk_bdev_io *bdev_io;
4312 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4313 
4314 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4315 		return -EINVAL;
4316 	}
4317 
4318 	bdev_io = bdev_channel_get_io(channel);
4319 	if (!bdev_io) {
4320 		return -ENOMEM;
4321 	}
4322 
4323 	bdev_io->internal.ch = channel;
4324 	bdev_io->internal.desc = desc;
4325 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
4326 	bdev_io->u.bdev.iovs = iov;
4327 	bdev_io->u.bdev.iovcnt = iovcnt;
4328 	bdev_io->u.bdev.md_buf = md_buf;
4329 	bdev_io->u.bdev.num_blocks = num_blocks;
4330 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4331 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4332 	bdev_io->internal.ext_opts = opts;
4333 	bdev_io->u.bdev.ext_opts = opts;
4334 
4335 	_bdev_io_submit_ext(desc, bdev_io, opts, copy_opts);
4336 
4337 	return 0;
4338 }
4339 
4340 int
4341 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4342 		       struct iovec *iov, int iovcnt,
4343 		       uint64_t offset_blocks, uint64_t num_blocks,
4344 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
4345 {
4346 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
4347 					 num_blocks, cb, cb_arg, NULL, false);
4348 }
4349 
4350 int
4351 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4352 			       struct iovec *iov, int iovcnt, void *md_buf,
4353 			       uint64_t offset_blocks, uint64_t num_blocks,
4354 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
4355 {
4356 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4357 		return -EINVAL;
4358 	}
4359 
4360 	if (md_buf && !_is_buf_allocated(iov)) {
4361 		return -EINVAL;
4362 	}
4363 
4364 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
4365 					 num_blocks, cb, cb_arg, NULL, false);
4366 }
4367 
4368 static inline bool
4369 _bdev_io_check_opts(struct spdk_bdev_ext_io_opts *opts, struct iovec *iov)
4370 {
4371 	/*
4372 	 * We check if opts size is at least of size when we first introduced
4373 	 * spdk_bdev_ext_io_opts (ac6f2bdd8d) since access to those members
4374 	 * are not checked internal.
4375 	 */
4376 	return opts->size >= offsetof(struct spdk_bdev_ext_io_opts, metadata) +
4377 	       sizeof(opts->metadata) &&
4378 	       opts->size <= sizeof(*opts) &&
4379 	       /* When memory domain is used, the user must provide data buffers */
4380 	       (!opts->memory_domain || (iov && iov[0].iov_base));
4381 }
4382 
4383 int
4384 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4385 			   struct iovec *iov, int iovcnt,
4386 			   uint64_t offset_blocks, uint64_t num_blocks,
4387 			   spdk_bdev_io_completion_cb cb, void *cb_arg,
4388 			   struct spdk_bdev_ext_io_opts *opts)
4389 {
4390 	void *md = NULL;
4391 
4392 	if (opts) {
4393 		if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
4394 			return -EINVAL;
4395 		}
4396 		md = opts->metadata;
4397 	}
4398 
4399 	if (md && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4400 		return -EINVAL;
4401 	}
4402 
4403 	if (md && !_is_buf_allocated(iov)) {
4404 		return -EINVAL;
4405 	}
4406 
4407 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
4408 					 num_blocks, cb, cb_arg, opts, false);
4409 }
4410 
4411 static int
4412 bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4413 			  void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4414 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
4415 {
4416 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4417 	struct spdk_bdev_io *bdev_io;
4418 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4419 
4420 	if (!desc->write) {
4421 		return -EBADF;
4422 	}
4423 
4424 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4425 		return -EINVAL;
4426 	}
4427 
4428 	bdev_io = bdev_channel_get_io(channel);
4429 	if (!bdev_io) {
4430 		return -ENOMEM;
4431 	}
4432 
4433 	bdev_io->internal.ch = channel;
4434 	bdev_io->internal.desc = desc;
4435 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
4436 	bdev_io->u.bdev.iovs = &bdev_io->iov;
4437 	bdev_io->u.bdev.iovs[0].iov_base = buf;
4438 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
4439 	bdev_io->u.bdev.iovcnt = 1;
4440 	bdev_io->u.bdev.md_buf = md_buf;
4441 	bdev_io->u.bdev.num_blocks = num_blocks;
4442 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4443 	bdev_io->u.bdev.ext_opts = NULL;
4444 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4445 
4446 	bdev_io_submit(bdev_io);
4447 	return 0;
4448 }
4449 
4450 int
4451 spdk_bdev_write(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4452 		void *buf, uint64_t offset, uint64_t nbytes,
4453 		spdk_bdev_io_completion_cb cb, void *cb_arg)
4454 {
4455 	uint64_t offset_blocks, num_blocks;
4456 
4457 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4458 				 nbytes, &num_blocks) != 0) {
4459 		return -EINVAL;
4460 	}
4461 
4462 	return spdk_bdev_write_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
4463 }
4464 
4465 int
4466 spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4467 		       void *buf, uint64_t offset_blocks, uint64_t num_blocks,
4468 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
4469 {
4470 	return bdev_write_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
4471 					 cb, cb_arg);
4472 }
4473 
4474 int
4475 spdk_bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4476 			       void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4477 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
4478 {
4479 	struct iovec iov = {
4480 		.iov_base = buf,
4481 	};
4482 
4483 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4484 		return -EINVAL;
4485 	}
4486 
4487 	if (md_buf && !_is_buf_allocated(&iov)) {
4488 		return -EINVAL;
4489 	}
4490 
4491 	return bdev_write_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
4492 					 cb, cb_arg);
4493 }
4494 
4495 static int
4496 bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4497 			   struct iovec *iov, int iovcnt, void *md_buf,
4498 			   uint64_t offset_blocks, uint64_t num_blocks,
4499 			   spdk_bdev_io_completion_cb cb, void *cb_arg,
4500 			   struct spdk_bdev_ext_io_opts *opts, bool copy_opts)
4501 {
4502 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4503 	struct spdk_bdev_io *bdev_io;
4504 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4505 
4506 	if (!desc->write) {
4507 		return -EBADF;
4508 	}
4509 
4510 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4511 		return -EINVAL;
4512 	}
4513 
4514 	bdev_io = bdev_channel_get_io(channel);
4515 	if (!bdev_io) {
4516 		return -ENOMEM;
4517 	}
4518 
4519 	bdev_io->internal.ch = channel;
4520 	bdev_io->internal.desc = desc;
4521 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
4522 	bdev_io->u.bdev.iovs = iov;
4523 	bdev_io->u.bdev.iovcnt = iovcnt;
4524 	bdev_io->u.bdev.md_buf = md_buf;
4525 	bdev_io->u.bdev.num_blocks = num_blocks;
4526 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4527 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4528 	bdev_io->internal.ext_opts = opts;
4529 	bdev_io->u.bdev.ext_opts = opts;
4530 
4531 	_bdev_io_submit_ext(desc, bdev_io, opts, copy_opts);
4532 
4533 	return 0;
4534 }
4535 
4536 int
4537 spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4538 		 struct iovec *iov, int iovcnt,
4539 		 uint64_t offset, uint64_t len,
4540 		 spdk_bdev_io_completion_cb cb, void *cb_arg)
4541 {
4542 	uint64_t offset_blocks, num_blocks;
4543 
4544 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4545 				 len, &num_blocks) != 0) {
4546 		return -EINVAL;
4547 	}
4548 
4549 	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
4550 }
4551 
4552 int
4553 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4554 			struct iovec *iov, int iovcnt,
4555 			uint64_t offset_blocks, uint64_t num_blocks,
4556 			spdk_bdev_io_completion_cb cb, void *cb_arg)
4557 {
4558 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
4559 					  num_blocks, cb, cb_arg, NULL, false);
4560 }
4561 
4562 int
4563 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4564 				struct iovec *iov, int iovcnt, void *md_buf,
4565 				uint64_t offset_blocks, uint64_t num_blocks,
4566 				spdk_bdev_io_completion_cb cb, void *cb_arg)
4567 {
4568 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4569 		return -EINVAL;
4570 	}
4571 
4572 	if (md_buf && !_is_buf_allocated(iov)) {
4573 		return -EINVAL;
4574 	}
4575 
4576 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
4577 					  num_blocks, cb, cb_arg, NULL, false);
4578 }
4579 
4580 int
4581 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4582 			    struct iovec *iov, int iovcnt,
4583 			    uint64_t offset_blocks, uint64_t num_blocks,
4584 			    spdk_bdev_io_completion_cb cb, void *cb_arg,
4585 			    struct spdk_bdev_ext_io_opts *opts)
4586 {
4587 	void *md = NULL;
4588 
4589 	if (opts) {
4590 		if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
4591 			return -EINVAL;
4592 		}
4593 		md = opts->metadata;
4594 	}
4595 
4596 	if (md && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4597 		return -EINVAL;
4598 	}
4599 
4600 	if (md && !_is_buf_allocated(iov)) {
4601 		return -EINVAL;
4602 	}
4603 
4604 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
4605 					  num_blocks, cb, cb_arg, opts, false);
4606 }
4607 
4608 static void
4609 bdev_compare_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
4610 {
4611 	struct spdk_bdev_io *parent_io = cb_arg;
4612 	struct spdk_bdev *bdev = parent_io->bdev;
4613 	uint8_t *read_buf = bdev_io->u.bdev.iovs[0].iov_base;
4614 	int i, rc = 0;
4615 
4616 	if (!success) {
4617 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4618 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
4619 		spdk_bdev_free_io(bdev_io);
4620 		return;
4621 	}
4622 
4623 	for (i = 0; i < parent_io->u.bdev.iovcnt; i++) {
4624 		rc = memcmp(read_buf,
4625 			    parent_io->u.bdev.iovs[i].iov_base,
4626 			    parent_io->u.bdev.iovs[i].iov_len);
4627 		if (rc) {
4628 			break;
4629 		}
4630 		read_buf += parent_io->u.bdev.iovs[i].iov_len;
4631 	}
4632 
4633 	if (rc == 0 && parent_io->u.bdev.md_buf && spdk_bdev_is_md_separate(bdev)) {
4634 		rc = memcmp(bdev_io->u.bdev.md_buf,
4635 			    parent_io->u.bdev.md_buf,
4636 			    spdk_bdev_get_md_size(bdev));
4637 	}
4638 
4639 	spdk_bdev_free_io(bdev_io);
4640 
4641 	if (rc == 0) {
4642 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
4643 		parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
4644 	} else {
4645 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
4646 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
4647 	}
4648 }
4649 
4650 static void
4651 bdev_compare_do_read(void *_bdev_io)
4652 {
4653 	struct spdk_bdev_io *bdev_io = _bdev_io;
4654 	int rc;
4655 
4656 	rc = spdk_bdev_read_blocks(bdev_io->internal.desc,
4657 				   spdk_io_channel_from_ctx(bdev_io->internal.ch), NULL,
4658 				   bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4659 				   bdev_compare_do_read_done, bdev_io);
4660 
4661 	if (rc == -ENOMEM) {
4662 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_do_read);
4663 	} else if (rc != 0) {
4664 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4665 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
4666 	}
4667 }
4668 
4669 static int
4670 bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4671 			     struct iovec *iov, int iovcnt, void *md_buf,
4672 			     uint64_t offset_blocks, uint64_t num_blocks,
4673 			     spdk_bdev_io_completion_cb cb, void *cb_arg)
4674 {
4675 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4676 	struct spdk_bdev_io *bdev_io;
4677 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4678 
4679 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4680 		return -EINVAL;
4681 	}
4682 
4683 	bdev_io = bdev_channel_get_io(channel);
4684 	if (!bdev_io) {
4685 		return -ENOMEM;
4686 	}
4687 
4688 	bdev_io->internal.ch = channel;
4689 	bdev_io->internal.desc = desc;
4690 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
4691 	bdev_io->u.bdev.iovs = iov;
4692 	bdev_io->u.bdev.iovcnt = iovcnt;
4693 	bdev_io->u.bdev.md_buf = md_buf;
4694 	bdev_io->u.bdev.num_blocks = num_blocks;
4695 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4696 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4697 	bdev_io->u.bdev.ext_opts = NULL;
4698 
4699 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
4700 		bdev_io_submit(bdev_io);
4701 		return 0;
4702 	}
4703 
4704 	bdev_compare_do_read(bdev_io);
4705 
4706 	return 0;
4707 }
4708 
4709 int
4710 spdk_bdev_comparev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4711 			  struct iovec *iov, int iovcnt,
4712 			  uint64_t offset_blocks, uint64_t num_blocks,
4713 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
4714 {
4715 	return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
4716 					    num_blocks, cb, cb_arg);
4717 }
4718 
4719 int
4720 spdk_bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4721 				  struct iovec *iov, int iovcnt, void *md_buf,
4722 				  uint64_t offset_blocks, uint64_t num_blocks,
4723 				  spdk_bdev_io_completion_cb cb, void *cb_arg)
4724 {
4725 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4726 		return -EINVAL;
4727 	}
4728 
4729 	if (md_buf && !_is_buf_allocated(iov)) {
4730 		return -EINVAL;
4731 	}
4732 
4733 	return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
4734 					    num_blocks, cb, cb_arg);
4735 }
4736 
4737 static int
4738 bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4739 			    void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4740 			    spdk_bdev_io_completion_cb cb, void *cb_arg)
4741 {
4742 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4743 	struct spdk_bdev_io *bdev_io;
4744 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4745 
4746 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4747 		return -EINVAL;
4748 	}
4749 
4750 	bdev_io = bdev_channel_get_io(channel);
4751 	if (!bdev_io) {
4752 		return -ENOMEM;
4753 	}
4754 
4755 	bdev_io->internal.ch = channel;
4756 	bdev_io->internal.desc = desc;
4757 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
4758 	bdev_io->u.bdev.iovs = &bdev_io->iov;
4759 	bdev_io->u.bdev.iovs[0].iov_base = buf;
4760 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
4761 	bdev_io->u.bdev.iovcnt = 1;
4762 	bdev_io->u.bdev.md_buf = md_buf;
4763 	bdev_io->u.bdev.num_blocks = num_blocks;
4764 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4765 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4766 	bdev_io->u.bdev.ext_opts = NULL;
4767 
4768 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
4769 		bdev_io_submit(bdev_io);
4770 		return 0;
4771 	}
4772 
4773 	bdev_compare_do_read(bdev_io);
4774 
4775 	return 0;
4776 }
4777 
4778 int
4779 spdk_bdev_compare_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4780 			 void *buf, uint64_t offset_blocks, uint64_t num_blocks,
4781 			 spdk_bdev_io_completion_cb cb, void *cb_arg)
4782 {
4783 	return bdev_compare_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
4784 					   cb, cb_arg);
4785 }
4786 
4787 int
4788 spdk_bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4789 				 void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4790 				 spdk_bdev_io_completion_cb cb, void *cb_arg)
4791 {
4792 	struct iovec iov = {
4793 		.iov_base = buf,
4794 	};
4795 
4796 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4797 		return -EINVAL;
4798 	}
4799 
4800 	if (md_buf && !_is_buf_allocated(&iov)) {
4801 		return -EINVAL;
4802 	}
4803 
4804 	return bdev_compare_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
4805 					   cb, cb_arg);
4806 }
4807 
4808 static void
4809 bdev_comparev_and_writev_blocks_unlocked(void *ctx, int unlock_status)
4810 {
4811 	struct spdk_bdev_io *bdev_io = ctx;
4812 
4813 	if (unlock_status) {
4814 		SPDK_ERRLOG("LBA range unlock failed\n");
4815 	}
4816 
4817 	bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS ? true :
4818 			     false, bdev_io->internal.caller_ctx);
4819 }
4820 
4821 static void
4822 bdev_comparev_and_writev_blocks_unlock(struct spdk_bdev_io *bdev_io, int status)
4823 {
4824 	bdev_io->internal.status = status;
4825 
4826 	bdev_unlock_lba_range(bdev_io->internal.desc, spdk_io_channel_from_ctx(bdev_io->internal.ch),
4827 			      bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4828 			      bdev_comparev_and_writev_blocks_unlocked, bdev_io);
4829 }
4830 
4831 static void
4832 bdev_compare_and_write_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
4833 {
4834 	struct spdk_bdev_io *parent_io = cb_arg;
4835 
4836 	if (!success) {
4837 		SPDK_ERRLOG("Compare and write operation failed\n");
4838 	}
4839 
4840 	spdk_bdev_free_io(bdev_io);
4841 
4842 	bdev_comparev_and_writev_blocks_unlock(parent_io,
4843 					       success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
4844 }
4845 
4846 static void
4847 bdev_compare_and_write_do_write(void *_bdev_io)
4848 {
4849 	struct spdk_bdev_io *bdev_io = _bdev_io;
4850 	int rc;
4851 
4852 	rc = spdk_bdev_writev_blocks(bdev_io->internal.desc,
4853 				     spdk_io_channel_from_ctx(bdev_io->internal.ch),
4854 				     bdev_io->u.bdev.fused_iovs, bdev_io->u.bdev.fused_iovcnt,
4855 				     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4856 				     bdev_compare_and_write_do_write_done, bdev_io);
4857 
4858 
4859 	if (rc == -ENOMEM) {
4860 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_write);
4861 	} else if (rc != 0) {
4862 		bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
4863 	}
4864 }
4865 
4866 static void
4867 bdev_compare_and_write_do_compare_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
4868 {
4869 	struct spdk_bdev_io *parent_io = cb_arg;
4870 
4871 	spdk_bdev_free_io(bdev_io);
4872 
4873 	if (!success) {
4874 		bdev_comparev_and_writev_blocks_unlock(parent_io, SPDK_BDEV_IO_STATUS_MISCOMPARE);
4875 		return;
4876 	}
4877 
4878 	bdev_compare_and_write_do_write(parent_io);
4879 }
4880 
4881 static void
4882 bdev_compare_and_write_do_compare(void *_bdev_io)
4883 {
4884 	struct spdk_bdev_io *bdev_io = _bdev_io;
4885 	int rc;
4886 
4887 	rc = spdk_bdev_comparev_blocks(bdev_io->internal.desc,
4888 				       spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs,
4889 				       bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4890 				       bdev_compare_and_write_do_compare_done, bdev_io);
4891 
4892 	if (rc == -ENOMEM) {
4893 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_compare);
4894 	} else if (rc != 0) {
4895 		bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED);
4896 	}
4897 }
4898 
4899 static void
4900 bdev_comparev_and_writev_blocks_locked(void *ctx, int status)
4901 {
4902 	struct spdk_bdev_io *bdev_io = ctx;
4903 
4904 	if (status) {
4905 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED;
4906 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
4907 		return;
4908 	}
4909 
4910 	bdev_compare_and_write_do_compare(bdev_io);
4911 }
4912 
4913 int
4914 spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4915 				     struct iovec *compare_iov, int compare_iovcnt,
4916 				     struct iovec *write_iov, int write_iovcnt,
4917 				     uint64_t offset_blocks, uint64_t num_blocks,
4918 				     spdk_bdev_io_completion_cb cb, void *cb_arg)
4919 {
4920 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4921 	struct spdk_bdev_io *bdev_io;
4922 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4923 
4924 	if (!desc->write) {
4925 		return -EBADF;
4926 	}
4927 
4928 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4929 		return -EINVAL;
4930 	}
4931 
4932 	if (num_blocks > bdev->acwu) {
4933 		return -EINVAL;
4934 	}
4935 
4936 	bdev_io = bdev_channel_get_io(channel);
4937 	if (!bdev_io) {
4938 		return -ENOMEM;
4939 	}
4940 
4941 	bdev_io->internal.ch = channel;
4942 	bdev_io->internal.desc = desc;
4943 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
4944 	bdev_io->u.bdev.iovs = compare_iov;
4945 	bdev_io->u.bdev.iovcnt = compare_iovcnt;
4946 	bdev_io->u.bdev.fused_iovs = write_iov;
4947 	bdev_io->u.bdev.fused_iovcnt = write_iovcnt;
4948 	bdev_io->u.bdev.md_buf = NULL;
4949 	bdev_io->u.bdev.num_blocks = num_blocks;
4950 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4951 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4952 	bdev_io->u.bdev.ext_opts = NULL;
4953 
4954 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
4955 		bdev_io_submit(bdev_io);
4956 		return 0;
4957 	}
4958 
4959 	return bdev_lock_lba_range(desc, ch, offset_blocks, num_blocks,
4960 				   bdev_comparev_and_writev_blocks_locked, bdev_io);
4961 }
4962 
4963 int
4964 spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4965 		      struct iovec *iov, int iovcnt,
4966 		      uint64_t offset_blocks, uint64_t num_blocks,
4967 		      bool populate,
4968 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
4969 {
4970 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4971 	struct spdk_bdev_io *bdev_io;
4972 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4973 
4974 	if (!desc->write) {
4975 		return -EBADF;
4976 	}
4977 
4978 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4979 		return -EINVAL;
4980 	}
4981 
4982 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
4983 		return -ENOTSUP;
4984 	}
4985 
4986 	bdev_io = bdev_channel_get_io(channel);
4987 	if (!bdev_io) {
4988 		return -ENOMEM;
4989 	}
4990 
4991 	bdev_io->internal.ch = channel;
4992 	bdev_io->internal.desc = desc;
4993 	bdev_io->type = SPDK_BDEV_IO_TYPE_ZCOPY;
4994 	bdev_io->u.bdev.num_blocks = num_blocks;
4995 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4996 	bdev_io->u.bdev.iovs = iov;
4997 	bdev_io->u.bdev.iovcnt = iovcnt;
4998 	bdev_io->u.bdev.md_buf = NULL;
4999 	bdev_io->u.bdev.zcopy.populate = populate ? 1 : 0;
5000 	bdev_io->u.bdev.zcopy.commit = 0;
5001 	bdev_io->u.bdev.zcopy.start = 1;
5002 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5003 	bdev_io->u.bdev.ext_opts = NULL;
5004 
5005 	bdev_io_submit(bdev_io);
5006 
5007 	return 0;
5008 }
5009 
5010 int
5011 spdk_bdev_zcopy_end(struct spdk_bdev_io *bdev_io, bool commit,
5012 		    spdk_bdev_io_completion_cb cb, void *cb_arg)
5013 {
5014 	if (bdev_io->type != SPDK_BDEV_IO_TYPE_ZCOPY) {
5015 		return -EINVAL;
5016 	}
5017 
5018 	bdev_io->u.bdev.zcopy.commit = commit ? 1 : 0;
5019 	bdev_io->u.bdev.zcopy.start = 0;
5020 	bdev_io->internal.caller_ctx = cb_arg;
5021 	bdev_io->internal.cb = cb;
5022 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
5023 
5024 	bdev_io_submit(bdev_io);
5025 
5026 	return 0;
5027 }
5028 
5029 int
5030 spdk_bdev_write_zeroes(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5031 		       uint64_t offset, uint64_t len,
5032 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
5033 {
5034 	uint64_t offset_blocks, num_blocks;
5035 
5036 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5037 				 len, &num_blocks) != 0) {
5038 		return -EINVAL;
5039 	}
5040 
5041 	return spdk_bdev_write_zeroes_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
5042 }
5043 
5044 int
5045 spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5046 			      uint64_t offset_blocks, uint64_t num_blocks,
5047 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
5048 {
5049 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5050 	struct spdk_bdev_io *bdev_io;
5051 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5052 
5053 	if (!desc->write) {
5054 		return -EBADF;
5055 	}
5056 
5057 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5058 		return -EINVAL;
5059 	}
5060 
5061 	if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) &&
5062 	    !bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
5063 		return -ENOTSUP;
5064 	}
5065 
5066 	bdev_io = bdev_channel_get_io(channel);
5067 
5068 	if (!bdev_io) {
5069 		return -ENOMEM;
5070 	}
5071 
5072 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
5073 	bdev_io->internal.ch = channel;
5074 	bdev_io->internal.desc = desc;
5075 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5076 	bdev_io->u.bdev.num_blocks = num_blocks;
5077 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5078 	bdev_io->u.bdev.ext_opts = NULL;
5079 
5080 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
5081 		bdev_io_submit(bdev_io);
5082 		return 0;
5083 	}
5084 
5085 	assert(bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE));
5086 	assert(_bdev_get_block_size_with_md(bdev) <= ZERO_BUFFER_SIZE);
5087 	bdev_io->u.bdev.split_remaining_num_blocks = num_blocks;
5088 	bdev_io->u.bdev.split_current_offset_blocks = offset_blocks;
5089 	bdev_write_zero_buffer_next(bdev_io);
5090 
5091 	return 0;
5092 }
5093 
5094 int
5095 spdk_bdev_unmap(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5096 		uint64_t offset, uint64_t nbytes,
5097 		spdk_bdev_io_completion_cb cb, void *cb_arg)
5098 {
5099 	uint64_t offset_blocks, num_blocks;
5100 
5101 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5102 				 nbytes, &num_blocks) != 0) {
5103 		return -EINVAL;
5104 	}
5105 
5106 	return spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
5107 }
5108 
5109 int
5110 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5111 		       uint64_t offset_blocks, uint64_t num_blocks,
5112 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
5113 {
5114 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5115 	struct spdk_bdev_io *bdev_io;
5116 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5117 
5118 	if (!desc->write) {
5119 		return -EBADF;
5120 	}
5121 
5122 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5123 		return -EINVAL;
5124 	}
5125 
5126 	if (num_blocks == 0) {
5127 		SPDK_ERRLOG("Can't unmap 0 bytes\n");
5128 		return -EINVAL;
5129 	}
5130 
5131 	bdev_io = bdev_channel_get_io(channel);
5132 	if (!bdev_io) {
5133 		return -ENOMEM;
5134 	}
5135 
5136 	bdev_io->internal.ch = channel;
5137 	bdev_io->internal.desc = desc;
5138 	bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
5139 
5140 	bdev_io->u.bdev.iovs = &bdev_io->iov;
5141 	bdev_io->u.bdev.iovs[0].iov_base = NULL;
5142 	bdev_io->u.bdev.iovs[0].iov_len = 0;
5143 	bdev_io->u.bdev.iovcnt = 1;
5144 
5145 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5146 	bdev_io->u.bdev.num_blocks = num_blocks;
5147 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5148 	bdev_io->u.bdev.ext_opts = NULL;
5149 
5150 	bdev_io_submit(bdev_io);
5151 	return 0;
5152 }
5153 
5154 int
5155 spdk_bdev_flush(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5156 		uint64_t offset, uint64_t length,
5157 		spdk_bdev_io_completion_cb cb, void *cb_arg)
5158 {
5159 	uint64_t offset_blocks, num_blocks;
5160 
5161 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5162 				 length, &num_blocks) != 0) {
5163 		return -EINVAL;
5164 	}
5165 
5166 	return spdk_bdev_flush_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
5167 }
5168 
5169 int
5170 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5171 		       uint64_t offset_blocks, uint64_t num_blocks,
5172 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
5173 {
5174 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5175 	struct spdk_bdev_io *bdev_io;
5176 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5177 
5178 	if (!desc->write) {
5179 		return -EBADF;
5180 	}
5181 
5182 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5183 		return -EINVAL;
5184 	}
5185 
5186 	bdev_io = bdev_channel_get_io(channel);
5187 	if (!bdev_io) {
5188 		return -ENOMEM;
5189 	}
5190 
5191 	bdev_io->internal.ch = channel;
5192 	bdev_io->internal.desc = desc;
5193 	bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
5194 	bdev_io->u.bdev.iovs = NULL;
5195 	bdev_io->u.bdev.iovcnt = 0;
5196 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5197 	bdev_io->u.bdev.num_blocks = num_blocks;
5198 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5199 
5200 	bdev_io_submit(bdev_io);
5201 	return 0;
5202 }
5203 
5204 static void
5205 bdev_reset_dev(struct spdk_io_channel_iter *i, int status)
5206 {
5207 	struct spdk_bdev_channel *ch = spdk_io_channel_iter_get_ctx(i);
5208 	struct spdk_bdev_io *bdev_io;
5209 
5210 	bdev_io = TAILQ_FIRST(&ch->queued_resets);
5211 	TAILQ_REMOVE(&ch->queued_resets, bdev_io, internal.link);
5212 	bdev_io_submit_reset(bdev_io);
5213 }
5214 
5215 static void
5216 bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
5217 {
5218 	struct spdk_io_channel		*ch;
5219 	struct spdk_bdev_channel	*channel;
5220 	struct spdk_bdev_mgmt_channel	*mgmt_channel;
5221 	struct spdk_bdev_shared_resource *shared_resource;
5222 	bdev_io_tailq_t			tmp_queued;
5223 
5224 	TAILQ_INIT(&tmp_queued);
5225 
5226 	ch = spdk_io_channel_iter_get_channel(i);
5227 	channel = spdk_io_channel_get_ctx(ch);
5228 	shared_resource = channel->shared_resource;
5229 	mgmt_channel = shared_resource->mgmt_ch;
5230 
5231 	channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
5232 
5233 	if ((channel->flags & BDEV_CH_QOS_ENABLED) != 0) {
5234 		/* The QoS object is always valid and readable while
5235 		 * the channel flag is set, so the lock here should not
5236 		 * be necessary. We're not in the fast path though, so
5237 		 * just take it anyway. */
5238 		pthread_mutex_lock(&channel->bdev->internal.mutex);
5239 		if (channel->bdev->internal.qos->ch == channel) {
5240 			TAILQ_SWAP(&channel->bdev->internal.qos->queued, &tmp_queued, spdk_bdev_io, internal.link);
5241 		}
5242 		pthread_mutex_unlock(&channel->bdev->internal.mutex);
5243 	}
5244 
5245 	bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
5246 	bdev_abort_all_buf_io(&mgmt_channel->need_buf_small, channel);
5247 	bdev_abort_all_buf_io(&mgmt_channel->need_buf_large, channel);
5248 	bdev_abort_all_queued_io(&tmp_queued, channel);
5249 
5250 	spdk_for_each_channel_continue(i, 0);
5251 }
5252 
5253 static void
5254 bdev_start_reset(void *ctx)
5255 {
5256 	struct spdk_bdev_channel *ch = ctx;
5257 
5258 	spdk_for_each_channel(__bdev_to_io_dev(ch->bdev), bdev_reset_freeze_channel,
5259 			      ch, bdev_reset_dev);
5260 }
5261 
5262 static void
5263 bdev_channel_start_reset(struct spdk_bdev_channel *ch)
5264 {
5265 	struct spdk_bdev *bdev = ch->bdev;
5266 
5267 	assert(!TAILQ_EMPTY(&ch->queued_resets));
5268 
5269 	pthread_mutex_lock(&bdev->internal.mutex);
5270 	if (bdev->internal.reset_in_progress == NULL) {
5271 		bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
5272 		/*
5273 		 * Take a channel reference for the target bdev for the life of this
5274 		 *  reset.  This guards against the channel getting destroyed while
5275 		 *  spdk_for_each_channel() calls related to this reset IO are in
5276 		 *  progress.  We will release the reference when this reset is
5277 		 *  completed.
5278 		 */
5279 		bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
5280 		bdev_start_reset(ch);
5281 	}
5282 	pthread_mutex_unlock(&bdev->internal.mutex);
5283 }
5284 
5285 int
5286 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5287 		spdk_bdev_io_completion_cb cb, void *cb_arg)
5288 {
5289 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5290 	struct spdk_bdev_io *bdev_io;
5291 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5292 
5293 	bdev_io = bdev_channel_get_io(channel);
5294 	if (!bdev_io) {
5295 		return -ENOMEM;
5296 	}
5297 
5298 	bdev_io->internal.ch = channel;
5299 	bdev_io->internal.desc = desc;
5300 	bdev_io->internal.submit_tsc = spdk_get_ticks();
5301 	bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
5302 	bdev_io->u.reset.ch_ref = NULL;
5303 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5304 
5305 	pthread_mutex_lock(&bdev->internal.mutex);
5306 	TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link);
5307 	pthread_mutex_unlock(&bdev->internal.mutex);
5308 
5309 	TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io,
5310 			  internal.ch_link);
5311 
5312 	bdev_channel_start_reset(channel);
5313 
5314 	return 0;
5315 }
5316 
5317 void
5318 spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
5319 		      struct spdk_bdev_io_stat *stat)
5320 {
5321 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5322 
5323 	*stat = channel->stat;
5324 }
5325 
5326 static void
5327 bdev_get_device_stat_done(struct spdk_io_channel_iter *i, int status)
5328 {
5329 	void *io_device = spdk_io_channel_iter_get_io_device(i);
5330 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = spdk_io_channel_iter_get_ctx(i);
5331 
5332 	bdev_iostat_ctx->cb(__bdev_from_io_dev(io_device), bdev_iostat_ctx->stat,
5333 			    bdev_iostat_ctx->cb_arg, 0);
5334 	free(bdev_iostat_ctx);
5335 }
5336 
5337 static void
5338 bdev_get_each_channel_stat(struct spdk_io_channel_iter *i)
5339 {
5340 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = spdk_io_channel_iter_get_ctx(i);
5341 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
5342 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5343 
5344 	bdev_io_stat_add(bdev_iostat_ctx->stat, &channel->stat);
5345 	spdk_for_each_channel_continue(i, 0);
5346 }
5347 
5348 void
5349 spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
5350 			  spdk_bdev_get_device_stat_cb cb, void *cb_arg)
5351 {
5352 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx;
5353 
5354 	assert(bdev != NULL);
5355 	assert(stat != NULL);
5356 	assert(cb != NULL);
5357 
5358 	bdev_iostat_ctx = calloc(1, sizeof(struct spdk_bdev_iostat_ctx));
5359 	if (bdev_iostat_ctx == NULL) {
5360 		SPDK_ERRLOG("Unable to allocate memory for spdk_bdev_iostat_ctx\n");
5361 		cb(bdev, stat, cb_arg, -ENOMEM);
5362 		return;
5363 	}
5364 
5365 	bdev_iostat_ctx->stat = stat;
5366 	bdev_iostat_ctx->cb = cb;
5367 	bdev_iostat_ctx->cb_arg = cb_arg;
5368 
5369 	/* Start with the statistics from previously deleted channels. */
5370 	pthread_mutex_lock(&bdev->internal.mutex);
5371 	bdev_io_stat_add(bdev_iostat_ctx->stat, &bdev->internal.stat);
5372 	pthread_mutex_unlock(&bdev->internal.mutex);
5373 
5374 	/* Then iterate and add the statistics from each existing channel. */
5375 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
5376 			      bdev_get_each_channel_stat,
5377 			      bdev_iostat_ctx,
5378 			      bdev_get_device_stat_done);
5379 }
5380 
5381 int
5382 spdk_bdev_nvme_admin_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5383 			      const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
5384 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
5385 {
5386 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5387 	struct spdk_bdev_io *bdev_io;
5388 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5389 
5390 	if (!desc->write) {
5391 		return -EBADF;
5392 	}
5393 
5394 	if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN))) {
5395 		return -ENOTSUP;
5396 	}
5397 
5398 	bdev_io = bdev_channel_get_io(channel);
5399 	if (!bdev_io) {
5400 		return -ENOMEM;
5401 	}
5402 
5403 	bdev_io->internal.ch = channel;
5404 	bdev_io->internal.desc = desc;
5405 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
5406 	bdev_io->u.nvme_passthru.cmd = *cmd;
5407 	bdev_io->u.nvme_passthru.buf = buf;
5408 	bdev_io->u.nvme_passthru.nbytes = nbytes;
5409 	bdev_io->u.nvme_passthru.md_buf = NULL;
5410 	bdev_io->u.nvme_passthru.md_len = 0;
5411 
5412 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5413 
5414 	bdev_io_submit(bdev_io);
5415 	return 0;
5416 }
5417 
5418 int
5419 spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5420 			   const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
5421 			   spdk_bdev_io_completion_cb cb, void *cb_arg)
5422 {
5423 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5424 	struct spdk_bdev_io *bdev_io;
5425 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5426 
5427 	if (!desc->write) {
5428 		/*
5429 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
5430 		 *  to easily determine if the command is a read or write, but for now just
5431 		 *  do not allow io_passthru with a read-only descriptor.
5432 		 */
5433 		return -EBADF;
5434 	}
5435 
5436 	if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
5437 		return -ENOTSUP;
5438 	}
5439 
5440 	bdev_io = bdev_channel_get_io(channel);
5441 	if (!bdev_io) {
5442 		return -ENOMEM;
5443 	}
5444 
5445 	bdev_io->internal.ch = channel;
5446 	bdev_io->internal.desc = desc;
5447 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO;
5448 	bdev_io->u.nvme_passthru.cmd = *cmd;
5449 	bdev_io->u.nvme_passthru.buf = buf;
5450 	bdev_io->u.nvme_passthru.nbytes = nbytes;
5451 	bdev_io->u.nvme_passthru.md_buf = NULL;
5452 	bdev_io->u.nvme_passthru.md_len = 0;
5453 
5454 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5455 
5456 	bdev_io_submit(bdev_io);
5457 	return 0;
5458 }
5459 
5460 int
5461 spdk_bdev_nvme_io_passthru_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5462 			      const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len,
5463 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
5464 {
5465 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5466 	struct spdk_bdev_io *bdev_io;
5467 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5468 
5469 	if (!desc->write) {
5470 		/*
5471 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
5472 		 *  to easily determine if the command is a read or write, but for now just
5473 		 *  do not allow io_passthru with a read-only descriptor.
5474 		 */
5475 		return -EBADF;
5476 	}
5477 
5478 	if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
5479 		return -ENOTSUP;
5480 	}
5481 
5482 	bdev_io = bdev_channel_get_io(channel);
5483 	if (!bdev_io) {
5484 		return -ENOMEM;
5485 	}
5486 
5487 	bdev_io->internal.ch = channel;
5488 	bdev_io->internal.desc = desc;
5489 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO_MD;
5490 	bdev_io->u.nvme_passthru.cmd = *cmd;
5491 	bdev_io->u.nvme_passthru.buf = buf;
5492 	bdev_io->u.nvme_passthru.nbytes = nbytes;
5493 	bdev_io->u.nvme_passthru.md_buf = md_buf;
5494 	bdev_io->u.nvme_passthru.md_len = md_len;
5495 
5496 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5497 
5498 	bdev_io_submit(bdev_io);
5499 	return 0;
5500 }
5501 
5502 static void bdev_abort_retry(void *ctx);
5503 static void bdev_abort(struct spdk_bdev_io *parent_io);
5504 
5505 static void
5506 bdev_abort_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5507 {
5508 	struct spdk_bdev_channel *channel = bdev_io->internal.ch;
5509 	struct spdk_bdev_io *parent_io = cb_arg;
5510 	struct spdk_bdev_io *bio_to_abort, *tmp_io;
5511 
5512 	bio_to_abort = bdev_io->u.abort.bio_to_abort;
5513 
5514 	spdk_bdev_free_io(bdev_io);
5515 
5516 	if (!success) {
5517 		/* Check if the target I/O completed in the meantime. */
5518 		TAILQ_FOREACH(tmp_io, &channel->io_submitted, internal.ch_link) {
5519 			if (tmp_io == bio_to_abort) {
5520 				break;
5521 			}
5522 		}
5523 
5524 		/* If the target I/O still exists, set the parent to failed. */
5525 		if (tmp_io != NULL) {
5526 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5527 		}
5528 	}
5529 
5530 	parent_io->u.bdev.split_outstanding--;
5531 	if (parent_io->u.bdev.split_outstanding == 0) {
5532 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
5533 			bdev_abort_retry(parent_io);
5534 		} else {
5535 			bdev_io_complete(parent_io);
5536 		}
5537 	}
5538 }
5539 
5540 static int
5541 bdev_abort_io(struct spdk_bdev_desc *desc, struct spdk_bdev_channel *channel,
5542 	      struct spdk_bdev_io *bio_to_abort,
5543 	      spdk_bdev_io_completion_cb cb, void *cb_arg)
5544 {
5545 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5546 	struct spdk_bdev_io *bdev_io;
5547 
5548 	if (bio_to_abort->type == SPDK_BDEV_IO_TYPE_ABORT ||
5549 	    bio_to_abort->type == SPDK_BDEV_IO_TYPE_RESET) {
5550 		/* TODO: Abort reset or abort request. */
5551 		return -ENOTSUP;
5552 	}
5553 
5554 	bdev_io = bdev_channel_get_io(channel);
5555 	if (bdev_io == NULL) {
5556 		return -ENOMEM;
5557 	}
5558 
5559 	bdev_io->internal.ch = channel;
5560 	bdev_io->internal.desc = desc;
5561 	bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
5562 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5563 
5564 	if (bdev->split_on_optimal_io_boundary && bdev_io_should_split(bio_to_abort)) {
5565 		bdev_io->u.bdev.abort.bio_cb_arg = bio_to_abort;
5566 
5567 		/* Parent abort request is not submitted directly, but to manage its
5568 		 * execution add it to the submitted list here.
5569 		 */
5570 		bdev_io->internal.submit_tsc = spdk_get_ticks();
5571 		TAILQ_INSERT_TAIL(&channel->io_submitted, bdev_io, internal.ch_link);
5572 
5573 		bdev_abort(bdev_io);
5574 
5575 		return 0;
5576 	}
5577 
5578 	bdev_io->u.abort.bio_to_abort = bio_to_abort;
5579 
5580 	/* Submit the abort request to the underlying bdev module. */
5581 	bdev_io_submit(bdev_io);
5582 
5583 	return 0;
5584 }
5585 
5586 static uint32_t
5587 _bdev_abort(struct spdk_bdev_io *parent_io)
5588 {
5589 	struct spdk_bdev_desc *desc = parent_io->internal.desc;
5590 	struct spdk_bdev_channel *channel = parent_io->internal.ch;
5591 	void *bio_cb_arg;
5592 	struct spdk_bdev_io *bio_to_abort;
5593 	uint32_t matched_ios;
5594 	int rc;
5595 
5596 	bio_cb_arg = parent_io->u.bdev.abort.bio_cb_arg;
5597 
5598 	/* matched_ios is returned and will be kept by the caller.
5599 	 *
5600 	 * This funcion will be used for two cases, 1) the same cb_arg is used for
5601 	 * multiple I/Os, 2) a single large I/O is split into smaller ones.
5602 	 * Incrementing split_outstanding directly here may confuse readers especially
5603 	 * for the 1st case.
5604 	 *
5605 	 * Completion of I/O abort is processed after stack unwinding. Hence this trick
5606 	 * works as expected.
5607 	 */
5608 	matched_ios = 0;
5609 	parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5610 
5611 	TAILQ_FOREACH(bio_to_abort, &channel->io_submitted, internal.ch_link) {
5612 		if (bio_to_abort->internal.caller_ctx != bio_cb_arg) {
5613 			continue;
5614 		}
5615 
5616 		if (bio_to_abort->internal.submit_tsc > parent_io->internal.submit_tsc) {
5617 			/* Any I/O which was submitted after this abort command should be excluded. */
5618 			continue;
5619 		}
5620 
5621 		rc = bdev_abort_io(desc, channel, bio_to_abort, bdev_abort_io_done, parent_io);
5622 		if (rc != 0) {
5623 			if (rc == -ENOMEM) {
5624 				parent_io->internal.status = SPDK_BDEV_IO_STATUS_NOMEM;
5625 			} else {
5626 				parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5627 			}
5628 			break;
5629 		}
5630 		matched_ios++;
5631 	}
5632 
5633 	return matched_ios;
5634 }
5635 
5636 static void
5637 bdev_abort_retry(void *ctx)
5638 {
5639 	struct spdk_bdev_io *parent_io = ctx;
5640 	uint32_t matched_ios;
5641 
5642 	matched_ios = _bdev_abort(parent_io);
5643 
5644 	if (matched_ios == 0) {
5645 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
5646 			bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
5647 		} else {
5648 			/* For retry, the case that no target I/O was found is success
5649 			 * because it means target I/Os completed in the meantime.
5650 			 */
5651 			bdev_io_complete(parent_io);
5652 		}
5653 		return;
5654 	}
5655 
5656 	/* Use split_outstanding to manage the progress of aborting I/Os. */
5657 	parent_io->u.bdev.split_outstanding = matched_ios;
5658 }
5659 
5660 static void
5661 bdev_abort(struct spdk_bdev_io *parent_io)
5662 {
5663 	uint32_t matched_ios;
5664 
5665 	matched_ios = _bdev_abort(parent_io);
5666 
5667 	if (matched_ios == 0) {
5668 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
5669 			bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
5670 		} else {
5671 			/* The case the no target I/O was found is failure. */
5672 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5673 			bdev_io_complete(parent_io);
5674 		}
5675 		return;
5676 	}
5677 
5678 	/* Use split_outstanding to manage the progress of aborting I/Os. */
5679 	parent_io->u.bdev.split_outstanding = matched_ios;
5680 }
5681 
5682 int
5683 spdk_bdev_abort(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5684 		void *bio_cb_arg,
5685 		spdk_bdev_io_completion_cb cb, void *cb_arg)
5686 {
5687 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5688 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5689 	struct spdk_bdev_io *bdev_io;
5690 
5691 	if (bio_cb_arg == NULL) {
5692 		return -EINVAL;
5693 	}
5694 
5695 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ABORT)) {
5696 		return -ENOTSUP;
5697 	}
5698 
5699 	bdev_io = bdev_channel_get_io(channel);
5700 	if (bdev_io == NULL) {
5701 		return -ENOMEM;
5702 	}
5703 
5704 	bdev_io->internal.ch = channel;
5705 	bdev_io->internal.desc = desc;
5706 	bdev_io->internal.submit_tsc = spdk_get_ticks();
5707 	bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
5708 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5709 
5710 	bdev_io->u.bdev.abort.bio_cb_arg = bio_cb_arg;
5711 
5712 	/* Parent abort request is not submitted directly, but to manage its execution,
5713 	 * add it to the submitted list here.
5714 	 */
5715 	TAILQ_INSERT_TAIL(&channel->io_submitted, bdev_io, internal.ch_link);
5716 
5717 	bdev_abort(bdev_io);
5718 
5719 	return 0;
5720 }
5721 
5722 int
5723 spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
5724 			struct spdk_bdev_io_wait_entry *entry)
5725 {
5726 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
5727 	struct spdk_bdev_mgmt_channel *mgmt_ch = channel->shared_resource->mgmt_ch;
5728 
5729 	if (bdev != entry->bdev) {
5730 		SPDK_ERRLOG("bdevs do not match\n");
5731 		return -EINVAL;
5732 	}
5733 
5734 	if (mgmt_ch->per_thread_cache_count > 0) {
5735 		SPDK_ERRLOG("Cannot queue io_wait if spdk_bdev_io available in per-thread cache\n");
5736 		return -EINVAL;
5737 	}
5738 
5739 	TAILQ_INSERT_TAIL(&mgmt_ch->io_wait_queue, entry, link);
5740 	return 0;
5741 }
5742 
5743 static inline void
5744 bdev_io_complete(void *ctx)
5745 {
5746 	struct spdk_bdev_io *bdev_io = ctx;
5747 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
5748 	uint64_t tsc, tsc_diff;
5749 
5750 	if (spdk_unlikely(bdev_io->internal.in_submit_request || bdev_io->internal.io_submit_ch)) {
5751 		/*
5752 		 * Send the completion to the thread that originally submitted the I/O,
5753 		 * which may not be the current thread in the case of QoS.
5754 		 */
5755 		if (bdev_io->internal.io_submit_ch) {
5756 			bdev_io->internal.ch = bdev_io->internal.io_submit_ch;
5757 			bdev_io->internal.io_submit_ch = NULL;
5758 		}
5759 
5760 		/*
5761 		 * Defer completion to avoid potential infinite recursion if the
5762 		 * user's completion callback issues a new I/O.
5763 		 */
5764 		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
5765 				     bdev_io_complete, bdev_io);
5766 		return;
5767 	}
5768 
5769 	tsc = spdk_get_ticks();
5770 	tsc_diff = tsc - bdev_io->internal.submit_tsc;
5771 	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io,
5772 			      bdev_io->internal.caller_ctx);
5773 
5774 	TAILQ_REMOVE(&bdev_ch->io_submitted, bdev_io, internal.ch_link);
5775 
5776 	if (bdev_io->internal.ch->histogram) {
5777 		spdk_histogram_data_tally(bdev_io->internal.ch->histogram, tsc_diff);
5778 	}
5779 
5780 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
5781 		switch (bdev_io->type) {
5782 		case SPDK_BDEV_IO_TYPE_READ:
5783 			bdev_io->internal.ch->stat.bytes_read += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5784 			bdev_io->internal.ch->stat.num_read_ops++;
5785 			bdev_io->internal.ch->stat.read_latency_ticks += tsc_diff;
5786 			break;
5787 		case SPDK_BDEV_IO_TYPE_WRITE:
5788 			bdev_io->internal.ch->stat.bytes_written += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5789 			bdev_io->internal.ch->stat.num_write_ops++;
5790 			bdev_io->internal.ch->stat.write_latency_ticks += tsc_diff;
5791 			break;
5792 		case SPDK_BDEV_IO_TYPE_UNMAP:
5793 			bdev_io->internal.ch->stat.bytes_unmapped += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5794 			bdev_io->internal.ch->stat.num_unmap_ops++;
5795 			bdev_io->internal.ch->stat.unmap_latency_ticks += tsc_diff;
5796 			break;
5797 		case SPDK_BDEV_IO_TYPE_ZCOPY:
5798 			/* Track the data in the start phase only */
5799 			if (bdev_io->u.bdev.zcopy.start) {
5800 				if (bdev_io->u.bdev.zcopy.populate) {
5801 					bdev_io->internal.ch->stat.bytes_read +=
5802 						bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5803 					bdev_io->internal.ch->stat.num_read_ops++;
5804 					bdev_io->internal.ch->stat.read_latency_ticks += tsc_diff;
5805 				} else {
5806 					bdev_io->internal.ch->stat.bytes_written +=
5807 						bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5808 					bdev_io->internal.ch->stat.num_write_ops++;
5809 					bdev_io->internal.ch->stat.write_latency_ticks += tsc_diff;
5810 				}
5811 			}
5812 			break;
5813 		default:
5814 			break;
5815 		}
5816 	}
5817 
5818 #ifdef SPDK_CONFIG_VTUNE
5819 	uint64_t now_tsc = spdk_get_ticks();
5820 	if (now_tsc > (bdev_io->internal.ch->start_tsc + bdev_io->internal.ch->interval_tsc)) {
5821 		uint64_t data[5];
5822 
5823 		data[0] = bdev_io->internal.ch->stat.num_read_ops - bdev_io->internal.ch->prev_stat.num_read_ops;
5824 		data[1] = bdev_io->internal.ch->stat.bytes_read - bdev_io->internal.ch->prev_stat.bytes_read;
5825 		data[2] = bdev_io->internal.ch->stat.num_write_ops - bdev_io->internal.ch->prev_stat.num_write_ops;
5826 		data[3] = bdev_io->internal.ch->stat.bytes_written - bdev_io->internal.ch->prev_stat.bytes_written;
5827 		data[4] = bdev_io->bdev->fn_table->get_spin_time ?
5828 			  bdev_io->bdev->fn_table->get_spin_time(spdk_bdev_io_get_io_channel(bdev_io)) : 0;
5829 
5830 		__itt_metadata_add(g_bdev_mgr.domain, __itt_null, bdev_io->internal.ch->handle,
5831 				   __itt_metadata_u64, 5, data);
5832 
5833 		bdev_io->internal.ch->prev_stat = bdev_io->internal.ch->stat;
5834 		bdev_io->internal.ch->start_tsc = now_tsc;
5835 	}
5836 #endif
5837 
5838 	assert(bdev_io->internal.cb != NULL);
5839 	assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
5840 
5841 	bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
5842 			     bdev_io->internal.caller_ctx);
5843 }
5844 
5845 static void bdev_destroy_cb(void *io_device);
5846 
5847 static void
5848 bdev_reset_complete(struct spdk_io_channel_iter *i, int status)
5849 {
5850 	struct spdk_bdev_io *bdev_io = spdk_io_channel_iter_get_ctx(i);
5851 	struct spdk_bdev *bdev = bdev_io->bdev;
5852 
5853 	if (bdev_io->u.reset.ch_ref != NULL) {
5854 		spdk_put_io_channel(bdev_io->u.reset.ch_ref);
5855 		bdev_io->u.reset.ch_ref = NULL;
5856 	}
5857 
5858 	bdev_io_complete(bdev_io);
5859 
5860 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING &&
5861 	    TAILQ_EMPTY(&bdev->internal.open_descs)) {
5862 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
5863 	}
5864 }
5865 
5866 static void
5867 bdev_unfreeze_channel(struct spdk_io_channel_iter *i)
5868 {
5869 	struct spdk_bdev_io *bdev_io = spdk_io_channel_iter_get_ctx(i);
5870 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
5871 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
5872 	struct spdk_bdev_io *queued_reset;
5873 
5874 	ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
5875 	while (!TAILQ_EMPTY(&ch->queued_resets)) {
5876 		queued_reset = TAILQ_FIRST(&ch->queued_resets);
5877 		TAILQ_REMOVE(&ch->queued_resets, queued_reset, internal.link);
5878 		spdk_bdev_io_complete(queued_reset, bdev_io->internal.status);
5879 	}
5880 
5881 	spdk_for_each_channel_continue(i, 0);
5882 }
5883 
5884 void
5885 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
5886 {
5887 	struct spdk_bdev *bdev = bdev_io->bdev;
5888 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
5889 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
5890 
5891 	bdev_io->internal.status = status;
5892 
5893 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) {
5894 		bool unlock_channels = false;
5895 
5896 		if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
5897 			SPDK_ERRLOG("NOMEM returned for reset\n");
5898 		}
5899 		pthread_mutex_lock(&bdev->internal.mutex);
5900 		if (bdev_io == bdev->internal.reset_in_progress) {
5901 			bdev->internal.reset_in_progress = NULL;
5902 			unlock_channels = true;
5903 		}
5904 		pthread_mutex_unlock(&bdev->internal.mutex);
5905 
5906 		if (unlock_channels) {
5907 			spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_unfreeze_channel,
5908 					      bdev_io, bdev_reset_complete);
5909 			return;
5910 		}
5911 	} else {
5912 		if (spdk_unlikely(bdev_io->internal.orig_iovcnt != 0)) {
5913 			_bdev_io_push_bounce_data_buffer(bdev_io, _bdev_io_complete_push_bounce_done);
5914 			/* bdev IO will be completed in the callback */
5915 			return;
5916 		}
5917 
5918 		_bdev_io_decrement_outstanding(bdev_ch, shared_resource);
5919 		if (spdk_unlikely(_bdev_io_handle_no_mem(bdev_io))) {
5920 			return;
5921 		}
5922 	}
5923 
5924 	bdev_io_complete(bdev_io);
5925 }
5926 
5927 void
5928 spdk_bdev_io_complete_scsi_status(struct spdk_bdev_io *bdev_io, enum spdk_scsi_status sc,
5929 				  enum spdk_scsi_sense sk, uint8_t asc, uint8_t ascq)
5930 {
5931 	if (sc == SPDK_SCSI_STATUS_GOOD) {
5932 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5933 	} else {
5934 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
5935 		bdev_io->internal.error.scsi.sc = sc;
5936 		bdev_io->internal.error.scsi.sk = sk;
5937 		bdev_io->internal.error.scsi.asc = asc;
5938 		bdev_io->internal.error.scsi.ascq = ascq;
5939 	}
5940 
5941 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
5942 }
5943 
5944 void
5945 spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
5946 			     int *sc, int *sk, int *asc, int *ascq)
5947 {
5948 	assert(sc != NULL);
5949 	assert(sk != NULL);
5950 	assert(asc != NULL);
5951 	assert(ascq != NULL);
5952 
5953 	switch (bdev_io->internal.status) {
5954 	case SPDK_BDEV_IO_STATUS_SUCCESS:
5955 		*sc = SPDK_SCSI_STATUS_GOOD;
5956 		*sk = SPDK_SCSI_SENSE_NO_SENSE;
5957 		*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
5958 		*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
5959 		break;
5960 	case SPDK_BDEV_IO_STATUS_NVME_ERROR:
5961 		spdk_scsi_nvme_translate(bdev_io, sc, sk, asc, ascq);
5962 		break;
5963 	case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
5964 		*sc = bdev_io->internal.error.scsi.sc;
5965 		*sk = bdev_io->internal.error.scsi.sk;
5966 		*asc = bdev_io->internal.error.scsi.asc;
5967 		*ascq = bdev_io->internal.error.scsi.ascq;
5968 		break;
5969 	default:
5970 		*sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
5971 		*sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
5972 		*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
5973 		*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
5974 		break;
5975 	}
5976 }
5977 
5978 void
5979 spdk_bdev_io_complete_aio_status(struct spdk_bdev_io *bdev_io, int aio_result)
5980 {
5981 	if (aio_result == 0) {
5982 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5983 	} else {
5984 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_AIO_ERROR;
5985 	}
5986 
5987 	bdev_io->internal.error.aio_result = aio_result;
5988 
5989 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
5990 }
5991 
5992 void
5993 spdk_bdev_io_get_aio_status(const struct spdk_bdev_io *bdev_io, int *aio_result)
5994 {
5995 	assert(aio_result != NULL);
5996 
5997 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_AIO_ERROR) {
5998 		*aio_result = bdev_io->internal.error.aio_result;
5999 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
6000 		*aio_result = 0;
6001 	} else {
6002 		*aio_result = -EIO;
6003 	}
6004 }
6005 
6006 void
6007 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
6008 {
6009 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
6010 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6011 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
6012 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
6013 	} else {
6014 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
6015 	}
6016 
6017 	bdev_io->internal.error.nvme.cdw0 = cdw0;
6018 	bdev_io->internal.error.nvme.sct = sct;
6019 	bdev_io->internal.error.nvme.sc = sc;
6020 
6021 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
6022 }
6023 
6024 void
6025 spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc)
6026 {
6027 	assert(sct != NULL);
6028 	assert(sc != NULL);
6029 	assert(cdw0 != NULL);
6030 
6031 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
6032 		*sct = SPDK_NVME_SCT_GENERIC;
6033 		*sc = SPDK_NVME_SC_SUCCESS;
6034 		if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
6035 			*cdw0 = 0;
6036 		} else {
6037 			*cdw0 = 1U;
6038 		}
6039 		return;
6040 	}
6041 
6042 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
6043 		*sct = bdev_io->internal.error.nvme.sct;
6044 		*sc = bdev_io->internal.error.nvme.sc;
6045 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
6046 		*sct = SPDK_NVME_SCT_GENERIC;
6047 		*sc = SPDK_NVME_SC_SUCCESS;
6048 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
6049 		*sct = SPDK_NVME_SCT_GENERIC;
6050 		*sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
6051 	} else {
6052 		*sct = SPDK_NVME_SCT_GENERIC;
6053 		*sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
6054 	}
6055 
6056 	*cdw0 = bdev_io->internal.error.nvme.cdw0;
6057 }
6058 
6059 void
6060 spdk_bdev_io_get_nvme_fused_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
6061 				   int *first_sct, int *first_sc, int *second_sct, int *second_sc)
6062 {
6063 	assert(first_sct != NULL);
6064 	assert(first_sc != NULL);
6065 	assert(second_sct != NULL);
6066 	assert(second_sc != NULL);
6067 	assert(cdw0 != NULL);
6068 
6069 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
6070 		if (bdev_io->internal.error.nvme.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
6071 		    bdev_io->internal.error.nvme.sc == SPDK_NVME_SC_COMPARE_FAILURE) {
6072 			*first_sct = bdev_io->internal.error.nvme.sct;
6073 			*first_sc = bdev_io->internal.error.nvme.sc;
6074 			*second_sct = SPDK_NVME_SCT_GENERIC;
6075 			*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
6076 		} else {
6077 			*first_sct = SPDK_NVME_SCT_GENERIC;
6078 			*first_sc = SPDK_NVME_SC_SUCCESS;
6079 			*second_sct = bdev_io->internal.error.nvme.sct;
6080 			*second_sc = bdev_io->internal.error.nvme.sc;
6081 		}
6082 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
6083 		*first_sct = SPDK_NVME_SCT_GENERIC;
6084 		*first_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
6085 		*second_sct = SPDK_NVME_SCT_GENERIC;
6086 		*second_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
6087 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
6088 		*first_sct = SPDK_NVME_SCT_GENERIC;
6089 		*first_sc = SPDK_NVME_SC_SUCCESS;
6090 		*second_sct = SPDK_NVME_SCT_GENERIC;
6091 		*second_sc = SPDK_NVME_SC_SUCCESS;
6092 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED) {
6093 		*first_sct = SPDK_NVME_SCT_GENERIC;
6094 		*first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
6095 		*second_sct = SPDK_NVME_SCT_GENERIC;
6096 		*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
6097 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_MISCOMPARE) {
6098 		*first_sct = SPDK_NVME_SCT_MEDIA_ERROR;
6099 		*first_sc = SPDK_NVME_SC_COMPARE_FAILURE;
6100 		*second_sct = SPDK_NVME_SCT_GENERIC;
6101 		*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
6102 	} else {
6103 		*first_sct = SPDK_NVME_SCT_GENERIC;
6104 		*first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
6105 		*second_sct = SPDK_NVME_SCT_GENERIC;
6106 		*second_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
6107 	}
6108 
6109 	*cdw0 = bdev_io->internal.error.nvme.cdw0;
6110 }
6111 
6112 struct spdk_thread *
6113 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
6114 {
6115 	return spdk_io_channel_get_thread(bdev_io->internal.ch->channel);
6116 }
6117 
6118 struct spdk_io_channel *
6119 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
6120 {
6121 	return bdev_io->internal.ch->channel;
6122 }
6123 
6124 static int
6125 bdev_register(struct spdk_bdev *bdev)
6126 {
6127 	char *bdev_name;
6128 	char uuid[SPDK_UUID_STRING_LEN];
6129 	int ret;
6130 
6131 	assert(bdev->module != NULL);
6132 
6133 	if (!bdev->name) {
6134 		SPDK_ERRLOG("Bdev name is NULL\n");
6135 		return -EINVAL;
6136 	}
6137 
6138 	if (!strlen(bdev->name)) {
6139 		SPDK_ERRLOG("Bdev name must not be an empty string\n");
6140 		return -EINVAL;
6141 	}
6142 
6143 	/* Users often register their own I/O devices using the bdev name. In
6144 	 * order to avoid conflicts, prepend bdev_. */
6145 	bdev_name = spdk_sprintf_alloc("bdev_%s", bdev->name);
6146 	if (!bdev_name) {
6147 		SPDK_ERRLOG("Unable to allocate memory for internal bdev name.\n");
6148 		return -ENOMEM;
6149 	}
6150 
6151 	bdev->internal.status = SPDK_BDEV_STATUS_READY;
6152 	bdev->internal.measured_queue_depth = UINT64_MAX;
6153 	bdev->internal.claim_module = NULL;
6154 	bdev->internal.qd_poller = NULL;
6155 	bdev->internal.qos = NULL;
6156 
6157 	TAILQ_INIT(&bdev->internal.open_descs);
6158 	TAILQ_INIT(&bdev->internal.locked_ranges);
6159 	TAILQ_INIT(&bdev->internal.pending_locked_ranges);
6160 	TAILQ_INIT(&bdev->aliases);
6161 
6162 	ret = bdev_name_add(&bdev->internal.bdev_name, bdev, bdev->name);
6163 	if (ret != 0) {
6164 		free(bdev_name);
6165 		return ret;
6166 	}
6167 
6168 	/* If the user didn't specify a uuid, generate one. */
6169 	if (spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))) {
6170 		spdk_uuid_generate(&bdev->uuid);
6171 	}
6172 
6173 	/* Add the UUID alias only if it's different than the name */
6174 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
6175 	if (strcmp(bdev->name, uuid) != 0) {
6176 		ret = spdk_bdev_alias_add(bdev, uuid);
6177 		if (ret != 0) {
6178 			SPDK_ERRLOG("Unable to add uuid:%s alias for bdev %s\n", uuid, bdev->name);
6179 			bdev_name_del(&bdev->internal.bdev_name);
6180 			free(bdev_name);
6181 			return ret;
6182 		}
6183 	}
6184 
6185 	if (spdk_bdev_get_buf_align(bdev) > 1) {
6186 		if (bdev->split_on_optimal_io_boundary) {
6187 			bdev->optimal_io_boundary = spdk_min(bdev->optimal_io_boundary,
6188 							     SPDK_BDEV_LARGE_BUF_MAX_SIZE / bdev->blocklen);
6189 		} else {
6190 			bdev->split_on_optimal_io_boundary = true;
6191 			bdev->optimal_io_boundary = SPDK_BDEV_LARGE_BUF_MAX_SIZE / bdev->blocklen;
6192 		}
6193 	}
6194 
6195 	/* If the user didn't specify a write unit size, set it to one. */
6196 	if (bdev->write_unit_size == 0) {
6197 		bdev->write_unit_size = 1;
6198 	}
6199 
6200 	/* Set ACWU value to 1 if bdev module did not set it (does not support it natively) */
6201 	if (bdev->acwu == 0) {
6202 		bdev->acwu = 1;
6203 	}
6204 
6205 	if (bdev->phys_blocklen == 0) {
6206 		bdev->phys_blocklen = spdk_bdev_get_data_block_size(bdev);
6207 	}
6208 
6209 	bdev->internal.reset_in_progress = NULL;
6210 	bdev->internal.qd_poll_in_progress = false;
6211 	bdev->internal.period = 0;
6212 	bdev->internal.new_period = 0;
6213 
6214 	spdk_io_device_register(__bdev_to_io_dev(bdev),
6215 				bdev_channel_create, bdev_channel_destroy,
6216 				sizeof(struct spdk_bdev_channel),
6217 				bdev_name);
6218 
6219 	free(bdev_name);
6220 
6221 	pthread_mutex_init(&bdev->internal.mutex, NULL);
6222 
6223 	SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
6224 	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
6225 
6226 	return 0;
6227 }
6228 
6229 static void
6230 bdev_destroy_cb(void *io_device)
6231 {
6232 	int			rc;
6233 	struct spdk_bdev	*bdev;
6234 	spdk_bdev_unregister_cb	cb_fn;
6235 	void			*cb_arg;
6236 
6237 	bdev = __bdev_from_io_dev(io_device);
6238 	cb_fn = bdev->internal.unregister_cb;
6239 	cb_arg = bdev->internal.unregister_ctx;
6240 
6241 	pthread_mutex_destroy(&bdev->internal.mutex);
6242 	free(bdev->internal.qos);
6243 
6244 	rc = bdev->fn_table->destruct(bdev->ctxt);
6245 	if (rc < 0) {
6246 		SPDK_ERRLOG("destruct failed\n");
6247 	}
6248 	if (rc <= 0 && cb_fn != NULL) {
6249 		cb_fn(cb_arg, rc);
6250 	}
6251 }
6252 
6253 void
6254 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
6255 {
6256 	if (bdev->internal.unregister_cb != NULL) {
6257 		bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
6258 	}
6259 }
6260 
6261 static void
6262 _remove_notify(void *arg)
6263 {
6264 	struct spdk_bdev_desc *desc = arg;
6265 
6266 	pthread_mutex_lock(&desc->mutex);
6267 	desc->refs--;
6268 
6269 	if (!desc->closed) {
6270 		pthread_mutex_unlock(&desc->mutex);
6271 		desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx);
6272 		return;
6273 	} else if (0 == desc->refs) {
6274 		/* This descriptor was closed after this remove_notify message was sent.
6275 		 * spdk_bdev_close() could not free the descriptor since this message was
6276 		 * in flight, so we free it now using bdev_desc_free().
6277 		 */
6278 		pthread_mutex_unlock(&desc->mutex);
6279 		bdev_desc_free(desc);
6280 		return;
6281 	}
6282 	pthread_mutex_unlock(&desc->mutex);
6283 }
6284 
6285 /* Must be called while holding g_bdev_mgr.mutex and bdev->internal.mutex.
6286  * returns: 0 - bdev removed and ready to be destructed.
6287  *          -EBUSY - bdev can't be destructed yet.  */
6288 static int
6289 bdev_unregister_unsafe(struct spdk_bdev *bdev)
6290 {
6291 	struct spdk_bdev_desc	*desc, *tmp;
6292 	int			rc = 0;
6293 	char			uuid[SPDK_UUID_STRING_LEN];
6294 
6295 	/* Notify each descriptor about hotremoval */
6296 	TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
6297 		rc = -EBUSY;
6298 		pthread_mutex_lock(&desc->mutex);
6299 		/*
6300 		 * Defer invocation of the event_cb to a separate message that will
6301 		 *  run later on its thread.  This ensures this context unwinds and
6302 		 *  we don't recursively unregister this bdev again if the event_cb
6303 		 *  immediately closes its descriptor.
6304 		 */
6305 		desc->refs++;
6306 		spdk_thread_send_msg(desc->thread, _remove_notify, desc);
6307 		pthread_mutex_unlock(&desc->mutex);
6308 	}
6309 
6310 	/* If there are no descriptors, proceed removing the bdev */
6311 	if (rc == 0) {
6312 		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
6313 		SPDK_DEBUGLOG(bdev, "Removing bdev %s from list done\n", bdev->name);
6314 
6315 		/* Delete the name and the UUID alias */
6316 		spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
6317 		bdev_name_del_unsafe(&bdev->internal.bdev_name);
6318 		bdev_alias_del(bdev, uuid, bdev_name_del_unsafe);
6319 
6320 		spdk_notify_send("bdev_unregister", spdk_bdev_get_name(bdev));
6321 
6322 		if (bdev->internal.reset_in_progress != NULL) {
6323 			/* If reset is in progress, let the completion callback for reset
6324 			 * unregister the bdev.
6325 			 */
6326 			rc = -EBUSY;
6327 		}
6328 	}
6329 
6330 	return rc;
6331 }
6332 
6333 static void
6334 bdev_unregister_abort_channel(struct spdk_io_channel_iter *i)
6335 {
6336 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
6337 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(io_ch);
6338 
6339 	bdev_channel_abort_queued_ios(bdev_ch);
6340 	spdk_for_each_channel_continue(i, 0);
6341 }
6342 
6343 static void
6344 bdev_unregister(struct spdk_io_channel_iter *i, int status)
6345 {
6346 	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
6347 	int rc;
6348 
6349 	pthread_mutex_lock(&g_bdev_mgr.mutex);
6350 	pthread_mutex_lock(&bdev->internal.mutex);
6351 	/*
6352 	 * Set the status to REMOVING after completing to abort channels. Otherwise,
6353 	 * the last spdk_bdev_close() may call spdk_io_device_unregister() while
6354 	 * spdk_for_each_channel() is executed and spdk_io_device_unregister() may fail.
6355 	 */
6356 	bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
6357 	rc = bdev_unregister_unsafe(bdev);
6358 	pthread_mutex_unlock(&bdev->internal.mutex);
6359 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
6360 
6361 	if (rc == 0) {
6362 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
6363 	}
6364 }
6365 
6366 void
6367 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
6368 {
6369 	struct spdk_thread	*thread;
6370 
6371 	SPDK_DEBUGLOG(bdev, "Removing bdev %s from list\n", bdev->name);
6372 
6373 	thread = spdk_get_thread();
6374 	if (!thread) {
6375 		/* The user called this from a non-SPDK thread. */
6376 		if (cb_fn != NULL) {
6377 			cb_fn(cb_arg, -ENOTSUP);
6378 		}
6379 		return;
6380 	}
6381 
6382 	pthread_mutex_lock(&g_bdev_mgr.mutex);
6383 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
6384 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
6385 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
6386 		if (cb_fn) {
6387 			cb_fn(cb_arg, -EBUSY);
6388 		}
6389 		return;
6390 	}
6391 
6392 	pthread_mutex_lock(&bdev->internal.mutex);
6393 	bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING;
6394 	bdev->internal.unregister_cb = cb_fn;
6395 	bdev->internal.unregister_ctx = cb_arg;
6396 	pthread_mutex_unlock(&bdev->internal.mutex);
6397 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
6398 
6399 	spdk_bdev_set_qd_sampling_period(bdev, 0);
6400 
6401 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
6402 			      bdev_unregister_abort_channel,
6403 			      bdev,
6404 			      bdev_unregister);
6405 }
6406 
6407 int
6408 spdk_bdev_unregister_by_name(const char *bdev_name, struct spdk_bdev_module *module,
6409 			     spdk_bdev_unregister_cb cb_fn, void *cb_arg)
6410 {
6411 	struct spdk_bdev_desc *desc;
6412 	struct spdk_bdev *bdev;
6413 	int rc;
6414 
6415 	rc = spdk_bdev_open_ext(bdev_name, false, _tmp_bdev_event_cb, NULL, &desc);
6416 	if (rc != 0) {
6417 		SPDK_ERRLOG("Failed to open bdev with name: %s\n", bdev_name);
6418 		return rc;
6419 	}
6420 
6421 	bdev = spdk_bdev_desc_get_bdev(desc);
6422 
6423 	if (bdev->module != module) {
6424 		spdk_bdev_close(desc);
6425 		SPDK_ERRLOG("Bdev %s was not registered by the specified module.\n",
6426 			    bdev_name);
6427 		return -ENODEV;
6428 	}
6429 
6430 	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
6431 
6432 	spdk_bdev_close(desc);
6433 
6434 	return 0;
6435 }
6436 
6437 static int
6438 bdev_start_qos(struct spdk_bdev *bdev)
6439 {
6440 	struct set_qos_limit_ctx *ctx;
6441 
6442 	/* Enable QoS */
6443 	if (bdev->internal.qos && bdev->internal.qos->thread == NULL) {
6444 		ctx = calloc(1, sizeof(*ctx));
6445 		if (ctx == NULL) {
6446 			SPDK_ERRLOG("Failed to allocate memory for QoS context\n");
6447 			return -ENOMEM;
6448 		}
6449 		ctx->bdev = bdev;
6450 		spdk_for_each_channel(__bdev_to_io_dev(bdev),
6451 				      bdev_enable_qos_msg, ctx,
6452 				      bdev_enable_qos_done);
6453 	}
6454 
6455 	return 0;
6456 }
6457 
6458 static int
6459 bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
6460 {
6461 	struct spdk_thread *thread;
6462 	int rc = 0;
6463 
6464 	thread = spdk_get_thread();
6465 	if (!thread) {
6466 		SPDK_ERRLOG("Cannot open bdev from non-SPDK thread.\n");
6467 		return -ENOTSUP;
6468 	}
6469 
6470 	SPDK_DEBUGLOG(bdev, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
6471 		      spdk_get_thread());
6472 
6473 	desc->bdev = bdev;
6474 	desc->thread = thread;
6475 	desc->write = write;
6476 
6477 	pthread_mutex_lock(&bdev->internal.mutex);
6478 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
6479 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
6480 		pthread_mutex_unlock(&bdev->internal.mutex);
6481 		return -ENODEV;
6482 	}
6483 
6484 	if (write && bdev->internal.claim_module) {
6485 		SPDK_ERRLOG("Could not open %s - %s module already claimed it\n",
6486 			    bdev->name, bdev->internal.claim_module->name);
6487 		pthread_mutex_unlock(&bdev->internal.mutex);
6488 		return -EPERM;
6489 	}
6490 
6491 	rc = bdev_start_qos(bdev);
6492 	if (rc != 0) {
6493 		SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
6494 		pthread_mutex_unlock(&bdev->internal.mutex);
6495 		return rc;
6496 	}
6497 
6498 	TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
6499 
6500 	pthread_mutex_unlock(&bdev->internal.mutex);
6501 
6502 	return 0;
6503 }
6504 
6505 static int
6506 bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *event_ctx,
6507 		struct spdk_bdev_desc **_desc)
6508 {
6509 	struct spdk_bdev_desc *desc;
6510 	unsigned int event_id;
6511 
6512 	desc = calloc(1, sizeof(*desc));
6513 	if (desc == NULL) {
6514 		SPDK_ERRLOG("Failed to allocate memory for bdev descriptor\n");
6515 		return -ENOMEM;
6516 	}
6517 
6518 	TAILQ_INIT(&desc->pending_media_events);
6519 	TAILQ_INIT(&desc->free_media_events);
6520 
6521 	desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
6522 	desc->callback.event_fn = event_cb;
6523 	desc->callback.ctx = event_ctx;
6524 	pthread_mutex_init(&desc->mutex, NULL);
6525 
6526 	if (bdev->media_events) {
6527 		desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
6528 						   sizeof(*desc->media_events_buffer));
6529 		if (desc->media_events_buffer == NULL) {
6530 			SPDK_ERRLOG("Failed to initialize media event pool\n");
6531 			bdev_desc_free(desc);
6532 			return -ENOMEM;
6533 		}
6534 
6535 		for (event_id = 0; event_id < MEDIA_EVENT_POOL_SIZE; ++event_id) {
6536 			TAILQ_INSERT_TAIL(&desc->free_media_events,
6537 					  &desc->media_events_buffer[event_id], tailq);
6538 		}
6539 	}
6540 
6541 	*_desc = desc;
6542 
6543 	return 0;
6544 }
6545 
6546 int
6547 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
6548 		   void *event_ctx, struct spdk_bdev_desc **_desc)
6549 {
6550 	struct spdk_bdev_desc *desc;
6551 	struct spdk_bdev *bdev;
6552 	int rc;
6553 
6554 	if (event_cb == NULL) {
6555 		SPDK_ERRLOG("Missing event callback function\n");
6556 		return -EINVAL;
6557 	}
6558 
6559 	pthread_mutex_lock(&g_bdev_mgr.mutex);
6560 
6561 	bdev = bdev_get_by_name(bdev_name);
6562 
6563 	if (bdev == NULL) {
6564 		SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
6565 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
6566 		return -ENODEV;
6567 	}
6568 
6569 	rc = bdev_desc_alloc(bdev, event_cb, event_ctx, &desc);
6570 	if (rc != 0) {
6571 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
6572 		return rc;
6573 	}
6574 
6575 	rc = bdev_open(bdev, write, desc);
6576 	if (rc != 0) {
6577 		bdev_desc_free(desc);
6578 		desc = NULL;
6579 	}
6580 
6581 	*_desc = desc;
6582 
6583 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
6584 
6585 	return rc;
6586 }
6587 
6588 static void
6589 bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
6590 {
6591 	int rc;
6592 
6593 	pthread_mutex_lock(&bdev->internal.mutex);
6594 	pthread_mutex_lock(&desc->mutex);
6595 
6596 	TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
6597 
6598 	desc->closed = true;
6599 
6600 	if (0 == desc->refs) {
6601 		pthread_mutex_unlock(&desc->mutex);
6602 		bdev_desc_free(desc);
6603 	} else {
6604 		pthread_mutex_unlock(&desc->mutex);
6605 	}
6606 
6607 	/* If no more descriptors, kill QoS channel */
6608 	if (bdev->internal.qos && TAILQ_EMPTY(&bdev->internal.open_descs)) {
6609 		SPDK_DEBUGLOG(bdev, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
6610 			      bdev->name, spdk_get_thread());
6611 
6612 		if (bdev_qos_destroy(bdev)) {
6613 			/* There isn't anything we can do to recover here. Just let the
6614 			 * old QoS poller keep running. The QoS handling won't change
6615 			 * cores when the user allocates a new channel, but it won't break. */
6616 			SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
6617 		}
6618 	}
6619 
6620 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
6621 		rc = bdev_unregister_unsafe(bdev);
6622 		pthread_mutex_unlock(&bdev->internal.mutex);
6623 
6624 		if (rc == 0) {
6625 			spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
6626 		}
6627 	} else {
6628 		pthread_mutex_unlock(&bdev->internal.mutex);
6629 	}
6630 }
6631 
6632 void
6633 spdk_bdev_close(struct spdk_bdev_desc *desc)
6634 {
6635 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6636 
6637 	SPDK_DEBUGLOG(bdev, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
6638 		      spdk_get_thread());
6639 
6640 	assert(desc->thread == spdk_get_thread());
6641 
6642 	spdk_poller_unregister(&desc->io_timeout_poller);
6643 
6644 	pthread_mutex_lock(&g_bdev_mgr.mutex);
6645 
6646 	bdev_close(bdev, desc);
6647 
6648 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
6649 }
6650 
6651 static void
6652 bdev_register_finished(void *arg)
6653 {
6654 	struct spdk_bdev_desc *desc = arg;
6655 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6656 
6657 	spdk_notify_send("bdev_register", spdk_bdev_get_name(bdev));
6658 
6659 	bdev_close(bdev, desc);
6660 }
6661 
6662 int
6663 spdk_bdev_register(struct spdk_bdev *bdev)
6664 {
6665 	struct spdk_bdev_desc *desc;
6666 	int rc;
6667 
6668 	rc = bdev_register(bdev);
6669 	if (rc != 0) {
6670 		return rc;
6671 	}
6672 
6673 	/* A descriptor is opened to prevent bdev deletion during examination */
6674 	rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
6675 	if (rc != 0) {
6676 		spdk_bdev_unregister(bdev, NULL, NULL);
6677 		return rc;
6678 	}
6679 
6680 	rc = bdev_open(bdev, false, desc);
6681 	if (rc != 0) {
6682 		bdev_desc_free(desc);
6683 		spdk_bdev_unregister(bdev, NULL, NULL);
6684 		return rc;
6685 	}
6686 
6687 	/* Examine configuration before initializing I/O */
6688 	bdev_examine(bdev);
6689 
6690 	rc = spdk_bdev_wait_for_examine(bdev_register_finished, desc);
6691 	if (rc != 0) {
6692 		bdev_close(bdev, desc);
6693 		spdk_bdev_unregister(bdev, NULL, NULL);
6694 	}
6695 
6696 	return rc;
6697 }
6698 
6699 int
6700 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
6701 			    struct spdk_bdev_module *module)
6702 {
6703 	if (bdev->internal.claim_module != NULL) {
6704 		SPDK_ERRLOG("bdev %s already claimed by module %s\n", bdev->name,
6705 			    bdev->internal.claim_module->name);
6706 		return -EPERM;
6707 	}
6708 
6709 	if (desc && !desc->write) {
6710 		desc->write = true;
6711 	}
6712 
6713 	bdev->internal.claim_module = module;
6714 	return 0;
6715 }
6716 
6717 void
6718 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
6719 {
6720 	assert(bdev->internal.claim_module != NULL);
6721 	bdev->internal.claim_module = NULL;
6722 }
6723 
6724 struct spdk_bdev *
6725 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
6726 {
6727 	assert(desc != NULL);
6728 	return desc->bdev;
6729 }
6730 
6731 int
6732 spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
6733 {
6734 	struct spdk_bdev *bdev, *tmp;
6735 	struct spdk_bdev_desc *desc;
6736 	int rc = 0;
6737 
6738 	assert(fn != NULL);
6739 
6740 	pthread_mutex_lock(&g_bdev_mgr.mutex);
6741 	bdev = spdk_bdev_first();
6742 	while (bdev != NULL) {
6743 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
6744 		if (rc != 0) {
6745 			break;
6746 		}
6747 		rc = bdev_open(bdev, false, desc);
6748 		if (rc != 0) {
6749 			bdev_desc_free(desc);
6750 			break;
6751 		}
6752 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
6753 
6754 		rc = fn(ctx, bdev);
6755 
6756 		pthread_mutex_lock(&g_bdev_mgr.mutex);
6757 		tmp = spdk_bdev_next(bdev);
6758 		bdev_close(bdev, desc);
6759 		if (rc != 0) {
6760 			break;
6761 		}
6762 		bdev = tmp;
6763 	}
6764 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
6765 
6766 	return rc;
6767 }
6768 
6769 int
6770 spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
6771 {
6772 	struct spdk_bdev *bdev, *tmp;
6773 	struct spdk_bdev_desc *desc;
6774 	int rc = 0;
6775 
6776 	assert(fn != NULL);
6777 
6778 	pthread_mutex_lock(&g_bdev_mgr.mutex);
6779 	bdev = spdk_bdev_first_leaf();
6780 	while (bdev != NULL) {
6781 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
6782 		if (rc != 0) {
6783 			break;
6784 		}
6785 		rc = bdev_open(bdev, false, desc);
6786 		if (rc != 0) {
6787 			bdev_desc_free(desc);
6788 			break;
6789 		}
6790 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
6791 
6792 		rc = fn(ctx, bdev);
6793 
6794 		pthread_mutex_lock(&g_bdev_mgr.mutex);
6795 		tmp = spdk_bdev_next_leaf(bdev);
6796 		bdev_close(bdev, desc);
6797 		if (rc != 0) {
6798 			break;
6799 		}
6800 		bdev = tmp;
6801 	}
6802 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
6803 
6804 	return rc;
6805 }
6806 
6807 void
6808 spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
6809 {
6810 	struct iovec *iovs;
6811 	int iovcnt;
6812 
6813 	if (bdev_io == NULL) {
6814 		return;
6815 	}
6816 
6817 	switch (bdev_io->type) {
6818 	case SPDK_BDEV_IO_TYPE_READ:
6819 	case SPDK_BDEV_IO_TYPE_WRITE:
6820 	case SPDK_BDEV_IO_TYPE_ZCOPY:
6821 		iovs = bdev_io->u.bdev.iovs;
6822 		iovcnt = bdev_io->u.bdev.iovcnt;
6823 		break;
6824 	default:
6825 		iovs = NULL;
6826 		iovcnt = 0;
6827 		break;
6828 	}
6829 
6830 	if (iovp) {
6831 		*iovp = iovs;
6832 	}
6833 	if (iovcntp) {
6834 		*iovcntp = iovcnt;
6835 	}
6836 }
6837 
6838 void *
6839 spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
6840 {
6841 	if (bdev_io == NULL) {
6842 		return NULL;
6843 	}
6844 
6845 	if (!spdk_bdev_is_md_separate(bdev_io->bdev)) {
6846 		return NULL;
6847 	}
6848 
6849 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
6850 	    bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
6851 		return bdev_io->u.bdev.md_buf;
6852 	}
6853 
6854 	return NULL;
6855 }
6856 
6857 void *
6858 spdk_bdev_io_get_cb_arg(struct spdk_bdev_io *bdev_io)
6859 {
6860 	if (bdev_io == NULL) {
6861 		assert(false);
6862 		return NULL;
6863 	}
6864 
6865 	return bdev_io->internal.caller_ctx;
6866 }
6867 
6868 void
6869 spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
6870 {
6871 
6872 	if (spdk_bdev_module_list_find(bdev_module->name)) {
6873 		SPDK_ERRLOG("ERROR: module '%s' already registered.\n", bdev_module->name);
6874 		assert(false);
6875 	}
6876 
6877 	/*
6878 	 * Modules with examine callbacks must be initialized first, so they are
6879 	 *  ready to handle examine callbacks from later modules that will
6880 	 *  register physical bdevs.
6881 	 */
6882 	if (bdev_module->examine_config != NULL || bdev_module->examine_disk != NULL) {
6883 		TAILQ_INSERT_HEAD(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
6884 	} else {
6885 		TAILQ_INSERT_TAIL(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
6886 	}
6887 }
6888 
6889 struct spdk_bdev_module *
6890 spdk_bdev_module_list_find(const char *name)
6891 {
6892 	struct spdk_bdev_module *bdev_module;
6893 
6894 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
6895 		if (strcmp(name, bdev_module->name) == 0) {
6896 			break;
6897 		}
6898 	}
6899 
6900 	return bdev_module;
6901 }
6902 
6903 static void
6904 bdev_write_zero_buffer_next(void *_bdev_io)
6905 {
6906 	struct spdk_bdev_io *bdev_io = _bdev_io;
6907 	uint64_t num_bytes, num_blocks;
6908 	void *md_buf = NULL;
6909 	int rc;
6910 
6911 	num_bytes = spdk_min(_bdev_get_block_size_with_md(bdev_io->bdev) *
6912 			     bdev_io->u.bdev.split_remaining_num_blocks,
6913 			     ZERO_BUFFER_SIZE);
6914 	num_blocks = num_bytes / _bdev_get_block_size_with_md(bdev_io->bdev);
6915 
6916 	if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
6917 		md_buf = (char *)g_bdev_mgr.zero_buffer +
6918 			 spdk_bdev_get_block_size(bdev_io->bdev) * num_blocks;
6919 	}
6920 
6921 	rc = bdev_write_blocks_with_md(bdev_io->internal.desc,
6922 				       spdk_io_channel_from_ctx(bdev_io->internal.ch),
6923 				       g_bdev_mgr.zero_buffer, md_buf,
6924 				       bdev_io->u.bdev.split_current_offset_blocks, num_blocks,
6925 				       bdev_write_zero_buffer_done, bdev_io);
6926 	if (rc == 0) {
6927 		bdev_io->u.bdev.split_remaining_num_blocks -= num_blocks;
6928 		bdev_io->u.bdev.split_current_offset_blocks += num_blocks;
6929 	} else if (rc == -ENOMEM) {
6930 		bdev_queue_io_wait_with_cb(bdev_io, bdev_write_zero_buffer_next);
6931 	} else {
6932 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6933 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
6934 	}
6935 }
6936 
6937 static void
6938 bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6939 {
6940 	struct spdk_bdev_io *parent_io = cb_arg;
6941 
6942 	spdk_bdev_free_io(bdev_io);
6943 
6944 	if (!success) {
6945 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6946 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
6947 		return;
6948 	}
6949 
6950 	if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
6951 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6952 		parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
6953 		return;
6954 	}
6955 
6956 	bdev_write_zero_buffer_next(parent_io);
6957 }
6958 
6959 static void
6960 bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
6961 {
6962 	pthread_mutex_lock(&ctx->bdev->internal.mutex);
6963 	ctx->bdev->internal.qos_mod_in_progress = false;
6964 	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
6965 
6966 	if (ctx->cb_fn) {
6967 		ctx->cb_fn(ctx->cb_arg, status);
6968 	}
6969 	free(ctx);
6970 }
6971 
6972 static void
6973 bdev_disable_qos_done(void *cb_arg)
6974 {
6975 	struct set_qos_limit_ctx *ctx = cb_arg;
6976 	struct spdk_bdev *bdev = ctx->bdev;
6977 	struct spdk_bdev_io *bdev_io;
6978 	struct spdk_bdev_qos *qos;
6979 
6980 	pthread_mutex_lock(&bdev->internal.mutex);
6981 	qos = bdev->internal.qos;
6982 	bdev->internal.qos = NULL;
6983 	pthread_mutex_unlock(&bdev->internal.mutex);
6984 
6985 	while (!TAILQ_EMPTY(&qos->queued)) {
6986 		/* Send queued I/O back to their original thread for resubmission. */
6987 		bdev_io = TAILQ_FIRST(&qos->queued);
6988 		TAILQ_REMOVE(&qos->queued, bdev_io, internal.link);
6989 
6990 		if (bdev_io->internal.io_submit_ch) {
6991 			/*
6992 			 * Channel was changed when sending it to the QoS thread - change it back
6993 			 *  before sending it back to the original thread.
6994 			 */
6995 			bdev_io->internal.ch = bdev_io->internal.io_submit_ch;
6996 			bdev_io->internal.io_submit_ch = NULL;
6997 		}
6998 
6999 		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7000 				     _bdev_io_submit, bdev_io);
7001 	}
7002 
7003 	if (qos->thread != NULL) {
7004 		spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
7005 		spdk_poller_unregister(&qos->poller);
7006 	}
7007 
7008 	free(qos);
7009 
7010 	bdev_set_qos_limit_done(ctx, 0);
7011 }
7012 
7013 static void
7014 bdev_disable_qos_msg_done(struct spdk_io_channel_iter *i, int status)
7015 {
7016 	void *io_device = spdk_io_channel_iter_get_io_device(i);
7017 	struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
7018 	struct set_qos_limit_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7019 	struct spdk_thread *thread;
7020 
7021 	pthread_mutex_lock(&bdev->internal.mutex);
7022 	thread = bdev->internal.qos->thread;
7023 	pthread_mutex_unlock(&bdev->internal.mutex);
7024 
7025 	if (thread != NULL) {
7026 		spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
7027 	} else {
7028 		bdev_disable_qos_done(ctx);
7029 	}
7030 }
7031 
7032 static void
7033 bdev_disable_qos_msg(struct spdk_io_channel_iter *i)
7034 {
7035 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
7036 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(ch);
7037 
7038 	bdev_ch->flags &= ~BDEV_CH_QOS_ENABLED;
7039 
7040 	spdk_for_each_channel_continue(i, 0);
7041 }
7042 
7043 static void
7044 bdev_update_qos_rate_limit_msg(void *cb_arg)
7045 {
7046 	struct set_qos_limit_ctx *ctx = cb_arg;
7047 	struct spdk_bdev *bdev = ctx->bdev;
7048 
7049 	pthread_mutex_lock(&bdev->internal.mutex);
7050 	bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
7051 	pthread_mutex_unlock(&bdev->internal.mutex);
7052 
7053 	bdev_set_qos_limit_done(ctx, 0);
7054 }
7055 
7056 static void
7057 bdev_enable_qos_msg(struct spdk_io_channel_iter *i)
7058 {
7059 	void *io_device = spdk_io_channel_iter_get_io_device(i);
7060 	struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
7061 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
7062 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(ch);
7063 
7064 	pthread_mutex_lock(&bdev->internal.mutex);
7065 	bdev_enable_qos(bdev, bdev_ch);
7066 	pthread_mutex_unlock(&bdev->internal.mutex);
7067 	spdk_for_each_channel_continue(i, 0);
7068 }
7069 
7070 static void
7071 bdev_enable_qos_done(struct spdk_io_channel_iter *i, int status)
7072 {
7073 	struct set_qos_limit_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7074 
7075 	bdev_set_qos_limit_done(ctx, status);
7076 }
7077 
7078 static void
7079 bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
7080 {
7081 	int i;
7082 
7083 	assert(bdev->internal.qos != NULL);
7084 
7085 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
7086 		if (limits[i] != SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
7087 			bdev->internal.qos->rate_limits[i].limit = limits[i];
7088 
7089 			if (limits[i] == 0) {
7090 				bdev->internal.qos->rate_limits[i].limit =
7091 					SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
7092 			}
7093 		}
7094 	}
7095 }
7096 
7097 void
7098 spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
7099 			      void (*cb_fn)(void *cb_arg, int status), void *cb_arg)
7100 {
7101 	struct set_qos_limit_ctx	*ctx;
7102 	uint32_t			limit_set_complement;
7103 	uint64_t			min_limit_per_sec;
7104 	int				i;
7105 	bool				disable_rate_limit = true;
7106 
7107 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
7108 		if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
7109 			continue;
7110 		}
7111 
7112 		if (limits[i] > 0) {
7113 			disable_rate_limit = false;
7114 		}
7115 
7116 		if (bdev_qos_is_iops_rate_limit(i) == true) {
7117 			min_limit_per_sec = SPDK_BDEV_QOS_MIN_IOS_PER_SEC;
7118 		} else {
7119 			/* Change from megabyte to byte rate limit */
7120 			limits[i] = limits[i] * 1024 * 1024;
7121 			min_limit_per_sec = SPDK_BDEV_QOS_MIN_BYTES_PER_SEC;
7122 		}
7123 
7124 		limit_set_complement = limits[i] % min_limit_per_sec;
7125 		if (limit_set_complement) {
7126 			SPDK_ERRLOG("Requested rate limit %" PRIu64 " is not a multiple of %" PRIu64 "\n",
7127 				    limits[i], min_limit_per_sec);
7128 			limits[i] += min_limit_per_sec - limit_set_complement;
7129 			SPDK_ERRLOG("Round up the rate limit to %" PRIu64 "\n", limits[i]);
7130 		}
7131 	}
7132 
7133 	ctx = calloc(1, sizeof(*ctx));
7134 	if (ctx == NULL) {
7135 		cb_fn(cb_arg, -ENOMEM);
7136 		return;
7137 	}
7138 
7139 	ctx->cb_fn = cb_fn;
7140 	ctx->cb_arg = cb_arg;
7141 	ctx->bdev = bdev;
7142 
7143 	pthread_mutex_lock(&bdev->internal.mutex);
7144 	if (bdev->internal.qos_mod_in_progress) {
7145 		pthread_mutex_unlock(&bdev->internal.mutex);
7146 		free(ctx);
7147 		cb_fn(cb_arg, -EAGAIN);
7148 		return;
7149 	}
7150 	bdev->internal.qos_mod_in_progress = true;
7151 
7152 	if (disable_rate_limit == true && bdev->internal.qos) {
7153 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
7154 			if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED &&
7155 			    (bdev->internal.qos->rate_limits[i].limit > 0 &&
7156 			     bdev->internal.qos->rate_limits[i].limit !=
7157 			     SPDK_BDEV_QOS_LIMIT_NOT_DEFINED)) {
7158 				disable_rate_limit = false;
7159 				break;
7160 			}
7161 		}
7162 	}
7163 
7164 	if (disable_rate_limit == false) {
7165 		if (bdev->internal.qos == NULL) {
7166 			bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
7167 			if (!bdev->internal.qos) {
7168 				pthread_mutex_unlock(&bdev->internal.mutex);
7169 				SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
7170 				bdev_set_qos_limit_done(ctx, -ENOMEM);
7171 				return;
7172 			}
7173 		}
7174 
7175 		if (bdev->internal.qos->thread == NULL) {
7176 			/* Enabling */
7177 			bdev_set_qos_rate_limits(bdev, limits);
7178 
7179 			spdk_for_each_channel(__bdev_to_io_dev(bdev),
7180 					      bdev_enable_qos_msg, ctx,
7181 					      bdev_enable_qos_done);
7182 		} else {
7183 			/* Updating */
7184 			bdev_set_qos_rate_limits(bdev, limits);
7185 
7186 			spdk_thread_send_msg(bdev->internal.qos->thread,
7187 					     bdev_update_qos_rate_limit_msg, ctx);
7188 		}
7189 	} else {
7190 		if (bdev->internal.qos != NULL) {
7191 			bdev_set_qos_rate_limits(bdev, limits);
7192 
7193 			/* Disabling */
7194 			spdk_for_each_channel(__bdev_to_io_dev(bdev),
7195 					      bdev_disable_qos_msg, ctx,
7196 					      bdev_disable_qos_msg_done);
7197 		} else {
7198 			pthread_mutex_unlock(&bdev->internal.mutex);
7199 			bdev_set_qos_limit_done(ctx, 0);
7200 			return;
7201 		}
7202 	}
7203 
7204 	pthread_mutex_unlock(&bdev->internal.mutex);
7205 }
7206 
7207 struct spdk_bdev_histogram_ctx {
7208 	spdk_bdev_histogram_status_cb cb_fn;
7209 	void *cb_arg;
7210 	struct spdk_bdev *bdev;
7211 	int status;
7212 };
7213 
7214 static void
7215 bdev_histogram_disable_channel_cb(struct spdk_io_channel_iter *i, int status)
7216 {
7217 	struct spdk_bdev_histogram_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7218 
7219 	pthread_mutex_lock(&ctx->bdev->internal.mutex);
7220 	ctx->bdev->internal.histogram_in_progress = false;
7221 	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
7222 	ctx->cb_fn(ctx->cb_arg, ctx->status);
7223 	free(ctx);
7224 }
7225 
7226 static void
7227 bdev_histogram_disable_channel(struct spdk_io_channel_iter *i)
7228 {
7229 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
7230 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7231 
7232 	if (ch->histogram != NULL) {
7233 		spdk_histogram_data_free(ch->histogram);
7234 		ch->histogram = NULL;
7235 	}
7236 	spdk_for_each_channel_continue(i, 0);
7237 }
7238 
7239 static void
7240 bdev_histogram_enable_channel_cb(struct spdk_io_channel_iter *i, int status)
7241 {
7242 	struct spdk_bdev_histogram_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7243 
7244 	if (status != 0) {
7245 		ctx->status = status;
7246 		ctx->bdev->internal.histogram_enabled = false;
7247 		spdk_for_each_channel(__bdev_to_io_dev(ctx->bdev), bdev_histogram_disable_channel, ctx,
7248 				      bdev_histogram_disable_channel_cb);
7249 	} else {
7250 		pthread_mutex_lock(&ctx->bdev->internal.mutex);
7251 		ctx->bdev->internal.histogram_in_progress = false;
7252 		pthread_mutex_unlock(&ctx->bdev->internal.mutex);
7253 		ctx->cb_fn(ctx->cb_arg, ctx->status);
7254 		free(ctx);
7255 	}
7256 }
7257 
7258 static void
7259 bdev_histogram_enable_channel(struct spdk_io_channel_iter *i)
7260 {
7261 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
7262 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7263 	int status = 0;
7264 
7265 	if (ch->histogram == NULL) {
7266 		ch->histogram = spdk_histogram_data_alloc();
7267 		if (ch->histogram == NULL) {
7268 			status = -ENOMEM;
7269 		}
7270 	}
7271 
7272 	spdk_for_each_channel_continue(i, status);
7273 }
7274 
7275 void
7276 spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
7277 			   void *cb_arg, bool enable)
7278 {
7279 	struct spdk_bdev_histogram_ctx *ctx;
7280 
7281 	ctx = calloc(1, sizeof(struct spdk_bdev_histogram_ctx));
7282 	if (ctx == NULL) {
7283 		cb_fn(cb_arg, -ENOMEM);
7284 		return;
7285 	}
7286 
7287 	ctx->bdev = bdev;
7288 	ctx->status = 0;
7289 	ctx->cb_fn = cb_fn;
7290 	ctx->cb_arg = cb_arg;
7291 
7292 	pthread_mutex_lock(&bdev->internal.mutex);
7293 	if (bdev->internal.histogram_in_progress) {
7294 		pthread_mutex_unlock(&bdev->internal.mutex);
7295 		free(ctx);
7296 		cb_fn(cb_arg, -EAGAIN);
7297 		return;
7298 	}
7299 
7300 	bdev->internal.histogram_in_progress = true;
7301 	pthread_mutex_unlock(&bdev->internal.mutex);
7302 
7303 	bdev->internal.histogram_enabled = enable;
7304 
7305 	if (enable) {
7306 		/* Allocate histogram for each channel */
7307 		spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_histogram_enable_channel, ctx,
7308 				      bdev_histogram_enable_channel_cb);
7309 	} else {
7310 		spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_histogram_disable_channel, ctx,
7311 				      bdev_histogram_disable_channel_cb);
7312 	}
7313 }
7314 
7315 struct spdk_bdev_histogram_data_ctx {
7316 	spdk_bdev_histogram_data_cb cb_fn;
7317 	void *cb_arg;
7318 	struct spdk_bdev *bdev;
7319 	/** merged histogram data from all channels */
7320 	struct spdk_histogram_data	*histogram;
7321 };
7322 
7323 static void
7324 bdev_histogram_get_channel_cb(struct spdk_io_channel_iter *i, int status)
7325 {
7326 	struct spdk_bdev_histogram_data_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7327 
7328 	ctx->cb_fn(ctx->cb_arg, status, ctx->histogram);
7329 	free(ctx);
7330 }
7331 
7332 static void
7333 bdev_histogram_get_channel(struct spdk_io_channel_iter *i)
7334 {
7335 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
7336 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7337 	struct spdk_bdev_histogram_data_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7338 	int status = 0;
7339 
7340 	if (ch->histogram == NULL) {
7341 		status = -EFAULT;
7342 	} else {
7343 		spdk_histogram_data_merge(ctx->histogram, ch->histogram);
7344 	}
7345 
7346 	spdk_for_each_channel_continue(i, status);
7347 }
7348 
7349 void
7350 spdk_bdev_histogram_get(struct spdk_bdev *bdev, struct spdk_histogram_data *histogram,
7351 			spdk_bdev_histogram_data_cb cb_fn,
7352 			void *cb_arg)
7353 {
7354 	struct spdk_bdev_histogram_data_ctx *ctx;
7355 
7356 	ctx = calloc(1, sizeof(struct spdk_bdev_histogram_data_ctx));
7357 	if (ctx == NULL) {
7358 		cb_fn(cb_arg, -ENOMEM, NULL);
7359 		return;
7360 	}
7361 
7362 	ctx->bdev = bdev;
7363 	ctx->cb_fn = cb_fn;
7364 	ctx->cb_arg = cb_arg;
7365 
7366 	ctx->histogram = histogram;
7367 
7368 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_histogram_get_channel, ctx,
7369 			      bdev_histogram_get_channel_cb);
7370 }
7371 
7372 size_t
7373 spdk_bdev_get_media_events(struct spdk_bdev_desc *desc, struct spdk_bdev_media_event *events,
7374 			   size_t max_events)
7375 {
7376 	struct media_event_entry *entry;
7377 	size_t num_events = 0;
7378 
7379 	for (; num_events < max_events; ++num_events) {
7380 		entry = TAILQ_FIRST(&desc->pending_media_events);
7381 		if (entry == NULL) {
7382 			break;
7383 		}
7384 
7385 		events[num_events] = entry->event;
7386 		TAILQ_REMOVE(&desc->pending_media_events, entry, tailq);
7387 		TAILQ_INSERT_TAIL(&desc->free_media_events, entry, tailq);
7388 	}
7389 
7390 	return num_events;
7391 }
7392 
7393 int
7394 spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media_event *events,
7395 			    size_t num_events)
7396 {
7397 	struct spdk_bdev_desc *desc;
7398 	struct media_event_entry *entry;
7399 	size_t event_id;
7400 	int rc = 0;
7401 
7402 	assert(bdev->media_events);
7403 
7404 	pthread_mutex_lock(&bdev->internal.mutex);
7405 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
7406 		if (desc->write) {
7407 			break;
7408 		}
7409 	}
7410 
7411 	if (desc == NULL || desc->media_events_buffer == NULL) {
7412 		rc = -ENODEV;
7413 		goto out;
7414 	}
7415 
7416 	for (event_id = 0; event_id < num_events; ++event_id) {
7417 		entry = TAILQ_FIRST(&desc->free_media_events);
7418 		if (entry == NULL) {
7419 			break;
7420 		}
7421 
7422 		TAILQ_REMOVE(&desc->free_media_events, entry, tailq);
7423 		TAILQ_INSERT_TAIL(&desc->pending_media_events, entry, tailq);
7424 		entry->event = events[event_id];
7425 	}
7426 
7427 	rc = event_id;
7428 out:
7429 	pthread_mutex_unlock(&bdev->internal.mutex);
7430 	return rc;
7431 }
7432 
7433 void
7434 spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
7435 {
7436 	struct spdk_bdev_desc *desc;
7437 
7438 	pthread_mutex_lock(&bdev->internal.mutex);
7439 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
7440 		if (!TAILQ_EMPTY(&desc->pending_media_events)) {
7441 			desc->callback.event_fn(SPDK_BDEV_EVENT_MEDIA_MANAGEMENT, bdev,
7442 						desc->callback.ctx);
7443 		}
7444 	}
7445 	pthread_mutex_unlock(&bdev->internal.mutex);
7446 }
7447 
7448 struct locked_lba_range_ctx {
7449 	struct lba_range		range;
7450 	struct spdk_bdev		*bdev;
7451 	struct lba_range		*current_range;
7452 	struct lba_range		*owner_range;
7453 	struct spdk_poller		*poller;
7454 	lock_range_cb			cb_fn;
7455 	void				*cb_arg;
7456 };
7457 
7458 static void
7459 bdev_lock_error_cleanup_cb(struct spdk_io_channel_iter *i, int status)
7460 {
7461 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7462 
7463 	ctx->cb_fn(ctx->cb_arg, -ENOMEM);
7464 	free(ctx);
7465 }
7466 
7467 static void bdev_unlock_lba_range_get_channel(struct spdk_io_channel_iter *i);
7468 
7469 static void
7470 bdev_lock_lba_range_cb(struct spdk_io_channel_iter *i, int status)
7471 {
7472 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7473 	struct spdk_bdev *bdev = ctx->bdev;
7474 
7475 	if (status == -ENOMEM) {
7476 		/* One of the channels could not allocate a range object.
7477 		 * So we have to go back and clean up any ranges that were
7478 		 * allocated successfully before we return error status to
7479 		 * the caller.  We can reuse the unlock function to do that
7480 		 * clean up.
7481 		 */
7482 		spdk_for_each_channel(__bdev_to_io_dev(bdev),
7483 				      bdev_unlock_lba_range_get_channel, ctx,
7484 				      bdev_lock_error_cleanup_cb);
7485 		return;
7486 	}
7487 
7488 	/* All channels have locked this range and no I/O overlapping the range
7489 	 * are outstanding!  Set the owner_ch for the range object for the
7490 	 * locking channel, so that this channel will know that it is allowed
7491 	 * to write to this range.
7492 	 */
7493 	ctx->owner_range->owner_ch = ctx->range.owner_ch;
7494 	ctx->cb_fn(ctx->cb_arg, status);
7495 
7496 	/* Don't free the ctx here.  Its range is in the bdev's global list of
7497 	 * locked ranges still, and will be removed and freed when this range
7498 	 * is later unlocked.
7499 	 */
7500 }
7501 
7502 static int
7503 bdev_lock_lba_range_check_io(void *_i)
7504 {
7505 	struct spdk_io_channel_iter *i = _i;
7506 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
7507 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7508 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7509 	struct lba_range *range = ctx->current_range;
7510 	struct spdk_bdev_io *bdev_io;
7511 
7512 	spdk_poller_unregister(&ctx->poller);
7513 
7514 	/* The range is now in the locked_ranges, so no new IO can be submitted to this
7515 	 * range.  But we need to wait until any outstanding IO overlapping with this range
7516 	 * are completed.
7517 	 */
7518 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
7519 		if (bdev_io_range_is_locked(bdev_io, range)) {
7520 			ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
7521 			return SPDK_POLLER_BUSY;
7522 		}
7523 	}
7524 
7525 	spdk_for_each_channel_continue(i, 0);
7526 	return SPDK_POLLER_BUSY;
7527 }
7528 
7529 static void
7530 bdev_lock_lba_range_get_channel(struct spdk_io_channel_iter *i)
7531 {
7532 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
7533 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7534 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7535 	struct lba_range *range;
7536 
7537 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
7538 		if (range->length == ctx->range.length &&
7539 		    range->offset == ctx->range.offset &&
7540 		    range->locked_ctx == ctx->range.locked_ctx) {
7541 			/* This range already exists on this channel, so don't add
7542 			 * it again.  This can happen when a new channel is created
7543 			 * while the for_each_channel operation is in progress.
7544 			 * Do not check for outstanding I/O in that case, since the
7545 			 * range was locked before any I/O could be submitted to the
7546 			 * new channel.
7547 			 */
7548 			spdk_for_each_channel_continue(i, 0);
7549 			return;
7550 		}
7551 	}
7552 
7553 	range = calloc(1, sizeof(*range));
7554 	if (range == NULL) {
7555 		spdk_for_each_channel_continue(i, -ENOMEM);
7556 		return;
7557 	}
7558 
7559 	range->length = ctx->range.length;
7560 	range->offset = ctx->range.offset;
7561 	range->locked_ctx = ctx->range.locked_ctx;
7562 	ctx->current_range = range;
7563 	if (ctx->range.owner_ch == ch) {
7564 		/* This is the range object for the channel that will hold
7565 		 * the lock.  Store it in the ctx object so that we can easily
7566 		 * set its owner_ch after the lock is finally acquired.
7567 		 */
7568 		ctx->owner_range = range;
7569 	}
7570 	TAILQ_INSERT_TAIL(&ch->locked_ranges, range, tailq);
7571 	bdev_lock_lba_range_check_io(i);
7572 }
7573 
7574 static void
7575 bdev_lock_lba_range_ctx(struct spdk_bdev *bdev, struct locked_lba_range_ctx *ctx)
7576 {
7577 	assert(spdk_get_thread() == spdk_io_channel_get_thread(ctx->range.owner_ch->channel));
7578 
7579 	/* We will add a copy of this range to each channel now. */
7580 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_lock_lba_range_get_channel, ctx,
7581 			      bdev_lock_lba_range_cb);
7582 }
7583 
7584 static bool
7585 bdev_lba_range_overlaps_tailq(struct lba_range *range, lba_range_tailq_t *tailq)
7586 {
7587 	struct lba_range *r;
7588 
7589 	TAILQ_FOREACH(r, tailq, tailq) {
7590 		if (bdev_lba_range_overlapped(range, r)) {
7591 			return true;
7592 		}
7593 	}
7594 	return false;
7595 }
7596 
7597 static int
7598 bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
7599 		    uint64_t offset, uint64_t length,
7600 		    lock_range_cb cb_fn, void *cb_arg)
7601 {
7602 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7603 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7604 	struct locked_lba_range_ctx *ctx;
7605 
7606 	if (cb_arg == NULL) {
7607 		SPDK_ERRLOG("cb_arg must not be NULL\n");
7608 		return -EINVAL;
7609 	}
7610 
7611 	ctx = calloc(1, sizeof(*ctx));
7612 	if (ctx == NULL) {
7613 		return -ENOMEM;
7614 	}
7615 
7616 	ctx->range.offset = offset;
7617 	ctx->range.length = length;
7618 	ctx->range.owner_ch = ch;
7619 	ctx->range.locked_ctx = cb_arg;
7620 	ctx->bdev = bdev;
7621 	ctx->cb_fn = cb_fn;
7622 	ctx->cb_arg = cb_arg;
7623 
7624 	pthread_mutex_lock(&bdev->internal.mutex);
7625 	if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
7626 		/* There is an active lock overlapping with this range.
7627 		 * Put it on the pending list until this range no
7628 		 * longer overlaps with another.
7629 		 */
7630 		TAILQ_INSERT_TAIL(&bdev->internal.pending_locked_ranges, &ctx->range, tailq);
7631 	} else {
7632 		TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
7633 		bdev_lock_lba_range_ctx(bdev, ctx);
7634 	}
7635 	pthread_mutex_unlock(&bdev->internal.mutex);
7636 	return 0;
7637 }
7638 
7639 static void
7640 bdev_lock_lba_range_ctx_msg(void *_ctx)
7641 {
7642 	struct locked_lba_range_ctx *ctx = _ctx;
7643 
7644 	bdev_lock_lba_range_ctx(ctx->bdev, ctx);
7645 }
7646 
7647 static void
7648 bdev_unlock_lba_range_cb(struct spdk_io_channel_iter *i, int status)
7649 {
7650 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7651 	struct locked_lba_range_ctx *pending_ctx;
7652 	struct spdk_bdev_channel *ch = ctx->range.owner_ch;
7653 	struct spdk_bdev *bdev = ch->bdev;
7654 	struct lba_range *range, *tmp;
7655 
7656 	pthread_mutex_lock(&bdev->internal.mutex);
7657 	/* Check if there are any pending locked ranges that overlap with this range
7658 	 * that was just unlocked.  If there are, check that it doesn't overlap with any
7659 	 * other locked ranges before calling bdev_lock_lba_range_ctx which will start
7660 	 * the lock process.
7661 	 */
7662 	TAILQ_FOREACH_SAFE(range, &bdev->internal.pending_locked_ranges, tailq, tmp) {
7663 		if (bdev_lba_range_overlapped(range, &ctx->range) &&
7664 		    !bdev_lba_range_overlaps_tailq(range, &bdev->internal.locked_ranges)) {
7665 			TAILQ_REMOVE(&bdev->internal.pending_locked_ranges, range, tailq);
7666 			pending_ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
7667 			TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, range, tailq);
7668 			spdk_thread_send_msg(spdk_io_channel_get_thread(pending_ctx->range.owner_ch->channel),
7669 					     bdev_lock_lba_range_ctx_msg, pending_ctx);
7670 		}
7671 	}
7672 	pthread_mutex_unlock(&bdev->internal.mutex);
7673 
7674 	ctx->cb_fn(ctx->cb_arg, status);
7675 	free(ctx);
7676 }
7677 
7678 static void
7679 bdev_unlock_lba_range_get_channel(struct spdk_io_channel_iter *i)
7680 {
7681 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
7682 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7683 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7684 	TAILQ_HEAD(, spdk_bdev_io) io_locked;
7685 	struct spdk_bdev_io *bdev_io;
7686 	struct lba_range *range;
7687 
7688 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
7689 		if (ctx->range.offset == range->offset &&
7690 		    ctx->range.length == range->length &&
7691 		    ctx->range.locked_ctx == range->locked_ctx) {
7692 			TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
7693 			free(range);
7694 			break;
7695 		}
7696 	}
7697 
7698 	/* Note: we should almost always be able to assert that the range specified
7699 	 * was found.  But there are some very rare corner cases where a new channel
7700 	 * gets created simultaneously with a range unlock, where this function
7701 	 * would execute on that new channel and wouldn't have the range.
7702 	 * We also use this to clean up range allocations when a later allocation
7703 	 * fails in the locking path.
7704 	 * So we can't actually assert() here.
7705 	 */
7706 
7707 	/* Swap the locked IO into a temporary list, and then try to submit them again.
7708 	 * We could hyper-optimize this to only resubmit locked I/O that overlap
7709 	 * with the range that was just unlocked, but this isn't a performance path so
7710 	 * we go for simplicity here.
7711 	 */
7712 	TAILQ_INIT(&io_locked);
7713 	TAILQ_SWAP(&ch->io_locked, &io_locked, spdk_bdev_io, internal.ch_link);
7714 	while (!TAILQ_EMPTY(&io_locked)) {
7715 		bdev_io = TAILQ_FIRST(&io_locked);
7716 		TAILQ_REMOVE(&io_locked, bdev_io, internal.ch_link);
7717 		bdev_io_submit(bdev_io);
7718 	}
7719 
7720 	spdk_for_each_channel_continue(i, 0);
7721 }
7722 
7723 static int
7724 bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
7725 		      uint64_t offset, uint64_t length,
7726 		      lock_range_cb cb_fn, void *cb_arg)
7727 {
7728 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7729 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
7730 	struct locked_lba_range_ctx *ctx;
7731 	struct lba_range *range;
7732 	bool range_found = false;
7733 
7734 	/* Let's make sure the specified channel actually has a lock on
7735 	 * the specified range.  Note that the range must match exactly.
7736 	 */
7737 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
7738 		if (range->offset == offset && range->length == length &&
7739 		    range->owner_ch == ch && range->locked_ctx == cb_arg) {
7740 			range_found = true;
7741 			break;
7742 		}
7743 	}
7744 
7745 	if (!range_found) {
7746 		return -EINVAL;
7747 	}
7748 
7749 	pthread_mutex_lock(&bdev->internal.mutex);
7750 	/* We confirmed that this channel has locked the specified range.  To
7751 	 * start the unlock the process, we find the range in the bdev's locked_ranges
7752 	 * and remove it.  This ensures new channels don't inherit the locked range.
7753 	 * Then we will send a message to each channel (including the one specified
7754 	 * here) to remove the range from its per-channel list.
7755 	 */
7756 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
7757 		if (range->offset == offset && range->length == length &&
7758 		    range->locked_ctx == cb_arg) {
7759 			break;
7760 		}
7761 	}
7762 	if (range == NULL) {
7763 		assert(false);
7764 		pthread_mutex_unlock(&bdev->internal.mutex);
7765 		return -EINVAL;
7766 	}
7767 	TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
7768 	ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
7769 	pthread_mutex_unlock(&bdev->internal.mutex);
7770 
7771 	ctx->cb_fn = cb_fn;
7772 	ctx->cb_arg = cb_arg;
7773 
7774 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_unlock_lba_range_get_channel, ctx,
7775 			      bdev_unlock_lba_range_cb);
7776 	return 0;
7777 }
7778 
7779 int
7780 spdk_bdev_get_memory_domains(struct spdk_bdev *bdev, struct spdk_memory_domain **domains,
7781 			     int array_size)
7782 {
7783 	if (!bdev) {
7784 		return -EINVAL;
7785 	}
7786 
7787 	if (bdev->fn_table->get_memory_domains) {
7788 		return bdev->fn_table->get_memory_domains(bdev->ctxt, domains, array_size);
7789 	}
7790 
7791 	return 0;
7792 }
7793 
7794 struct spdk_bdev_for_each_io_ctx {
7795 	void *ctx;
7796 	spdk_bdev_io_fn fn;
7797 	spdk_bdev_for_each_io_cb cb;
7798 };
7799 
7800 static void
7801 bdev_channel_for_each_io(struct spdk_io_channel_iter *i)
7802 {
7803 	struct spdk_bdev_for_each_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7804 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
7805 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(io_ch);
7806 	struct spdk_bdev_io *bdev_io;
7807 	int rc = 0;
7808 
7809 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
7810 		rc = ctx->fn(ctx->ctx, bdev_io);
7811 		if (rc != 0) {
7812 			break;
7813 		}
7814 	}
7815 
7816 	spdk_for_each_channel_continue(i, rc);
7817 }
7818 
7819 static void
7820 bdev_for_each_io_done(struct spdk_io_channel_iter *i, int status)
7821 {
7822 	struct spdk_bdev_for_each_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
7823 
7824 	ctx->cb(ctx->ctx, status);
7825 
7826 	free(ctx);
7827 }
7828 
7829 void
7830 spdk_bdev_for_each_bdev_io(struct spdk_bdev *bdev, void *_ctx, spdk_bdev_io_fn fn,
7831 			   spdk_bdev_for_each_io_cb cb)
7832 {
7833 	struct spdk_bdev_for_each_io_ctx *ctx;
7834 
7835 	assert(fn != NULL && cb != NULL);
7836 
7837 	ctx = calloc(1, sizeof(*ctx));
7838 	if (ctx == NULL) {
7839 		SPDK_ERRLOG("Failed to allocate context.\n");
7840 		cb(_ctx, -ENOMEM);
7841 		return;
7842 	}
7843 
7844 	ctx->ctx = _ctx;
7845 	ctx->fn = fn;
7846 	ctx->cb = cb;
7847 
7848 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
7849 			      bdev_channel_for_each_io,
7850 			      ctx,
7851 			      bdev_for_each_io_done);
7852 }
7853 
7854 SPDK_LOG_REGISTER_COMPONENT(bdev)
7855 
7856 SPDK_TRACE_REGISTER_FN(bdev_trace, "bdev", TRACE_GROUP_BDEV)
7857 {
7858 	struct spdk_trace_tpoint_opts opts[] = {
7859 		{
7860 			"BDEV_IO_START", TRACE_BDEV_IO_START,
7861 			OWNER_BDEV, OBJECT_BDEV_IO, 1,
7862 			{
7863 				{ "type", SPDK_TRACE_ARG_TYPE_INT, 8 },
7864 				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
7865 				{ "offset", SPDK_TRACE_ARG_TYPE_INT, 8 },
7866 				{ "len", SPDK_TRACE_ARG_TYPE_INT, 8 }
7867 			}
7868 		},
7869 		{
7870 			"BDEV_IO_DONE", TRACE_BDEV_IO_DONE,
7871 			OWNER_BDEV, OBJECT_BDEV_IO, 0,
7872 			{{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }}
7873 		},
7874 		{
7875 			"BDEV_IOCH_CREATE", TRACE_BDEV_IOCH_CREATE,
7876 			OWNER_BDEV, OBJECT_NONE, 1,
7877 			{
7878 				{ "name", SPDK_TRACE_ARG_TYPE_STR, 40 },
7879 				{ "thread_id", SPDK_TRACE_ARG_TYPE_INT, 8}
7880 			}
7881 		},
7882 		{
7883 			"BDEV_IOCH_DESTROY", TRACE_BDEV_IOCH_DESTROY,
7884 			OWNER_BDEV, OBJECT_NONE, 0,
7885 			{
7886 				{ "name", SPDK_TRACE_ARG_TYPE_STR, 40 },
7887 				{ "thread_id", SPDK_TRACE_ARG_TYPE_INT, 8}
7888 			}
7889 		},
7890 	};
7891 
7892 
7893 	spdk_trace_register_owner(OWNER_BDEV, 'b');
7894 	spdk_trace_register_object(OBJECT_BDEV_IO, 'i');
7895 	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
7896 	spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_START, OBJECT_BDEV_IO, 0);
7897 	spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_DONE, OBJECT_BDEV_IO, 0);
7898 }
7899