xref: /spdk/lib/bdev/bdev.c (revision b6875e1ce57743f3b1416016b9c624d79a862af9)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/bdev.h"
10 
11 #include "spdk/accel.h"
12 #include "spdk/config.h"
13 #include "spdk/env.h"
14 #include "spdk/thread.h"
15 #include "spdk/likely.h"
16 #include "spdk/queue.h"
17 #include "spdk/nvme_spec.h"
18 #include "spdk/scsi_spec.h"
19 #include "spdk/notify.h"
20 #include "spdk/util.h"
21 #include "spdk/trace.h"
22 #include "spdk/dma.h"
23 
24 #include "spdk/bdev_module.h"
25 #include "spdk/log.h"
26 #include "spdk/string.h"
27 
28 #include "bdev_internal.h"
29 #include "spdk_internal/trace_defs.h"
30 #include "spdk_internal/assert.h"
31 
32 #ifdef SPDK_CONFIG_VTUNE
33 #include "ittnotify.h"
34 #include "ittnotify_types.h"
35 int __itt_init_ittlib(const char *, __itt_group_id);
36 #endif
37 
38 #define SPDK_BDEV_IO_POOL_SIZE			(64 * 1024 - 1)
39 #define SPDK_BDEV_IO_CACHE_SIZE			256
40 #define SPDK_BDEV_AUTO_EXAMINE			true
41 #define BUF_SMALL_CACHE_SIZE			128
42 #define BUF_LARGE_CACHE_SIZE			16
43 #define NOMEM_THRESHOLD_COUNT			8
44 
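/* Sanity check on the QoS constants below: with a 1000 usec timeslice, the
 * minimum rate of 1000 IO/s works out to one I/O per timeslice, and the
 * minimum of 1 MiB/s works out to roughly 1049 bytes per timeslice, which is
 * consistent with the per-timeslice minimums of 1 I/O and 512 bytes.
 */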
45 #define SPDK_BDEV_QOS_TIMESLICE_IN_USEC		1000
46 #define SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE	1
47 #define SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE	512
48 #define SPDK_BDEV_QOS_MIN_IOS_PER_SEC		1000
49 #define SPDK_BDEV_QOS_MIN_BYTES_PER_SEC		(1024 * 1024)
50 #define SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC	(UINT64_MAX / (1024 * 1024))
51 #define SPDK_BDEV_QOS_LIMIT_NOT_DEFINED		UINT64_MAX
52 #define SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC	1000
53 
54 /* The maximum number of child requests that a single UNMAP or WRITE ZEROES
55  * command is split into at a time.
56  */
57 #define SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS (8)
58 #define BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD 1000000
59 
60 /* The maximum number of child requests that a single COPY command is split
61  * into at a time.
62  */
63 #define SPDK_BDEV_MAX_CHILDREN_COPY_REQS (8)
64 
65 #define LOG_ALREADY_CLAIMED_ERROR(detail, bdev) \
66 	log_already_claimed(SPDK_LOG_ERROR, __LINE__, __func__, detail, bdev)
67 #ifdef DEBUG
68 #define LOG_ALREADY_CLAIMED_DEBUG(detail, bdev) \
69 	log_already_claimed(SPDK_LOG_DEBUG, __LINE__, __func__, detail, bdev)
70 #else
71 #define LOG_ALREADY_CLAIMED_DEBUG(detail, bdev) do {} while(0)
72 #endif
73 
74 static void log_already_claimed(enum spdk_log_level level, const int line, const char *func,
75 				const char *detail, struct spdk_bdev *bdev);
76 
77 static const char *qos_rpc_type[] = {"rw_ios_per_sec",
78 				     "rw_mbytes_per_sec", "r_mbytes_per_sec", "w_mbytes_per_sec"
79 				    };
80 
81 TAILQ_HEAD(spdk_bdev_list, spdk_bdev);
82 
83 RB_HEAD(bdev_name_tree, spdk_bdev_name);
84 
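/* Registered bdev names are kept in a red-black tree keyed by name, so lookups
 * in bdev_get_by_name() stay logarithmic in the number of registered names.
 */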
85 static int
86 bdev_name_cmp(struct spdk_bdev_name *name1, struct spdk_bdev_name *name2)
87 {
88 	return strcmp(name1->name, name2->name);
89 }
90 
91 RB_GENERATE_STATIC(bdev_name_tree, spdk_bdev_name, node, bdev_name_cmp);
92 
93 struct spdk_bdev_mgr {
94 	struct spdk_mempool *bdev_io_pool;
95 
96 	void *zero_buffer;
97 
98 	TAILQ_HEAD(bdev_module_list, spdk_bdev_module) bdev_modules;
99 
100 	struct spdk_bdev_list bdevs;
101 	struct bdev_name_tree bdev_names;
102 
103 	bool init_complete;
104 	bool module_init_complete;
105 
106 	struct spdk_spinlock spinlock;
107 
108 	TAILQ_HEAD(, spdk_bdev_open_async_ctx) async_bdev_opens;
109 
110 #ifdef SPDK_CONFIG_VTUNE
111 	__itt_domain	*domain;
112 #endif
113 };
114 
115 static struct spdk_bdev_mgr g_bdev_mgr = {
116 	.bdev_modules = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdev_modules),
117 	.bdevs = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdevs),
118 	.bdev_names = RB_INITIALIZER(g_bdev_mgr.bdev_names),
119 	.init_complete = false,
120 	.module_init_complete = false,
121 	.async_bdev_opens = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.async_bdev_opens),
122 };
123 
124 static void
125 __attribute__((constructor))
126 _bdev_init(void)
127 {
128 	spdk_spin_init(&g_bdev_mgr.spinlock);
129 }
130 
131 typedef void (*lock_range_cb)(struct lba_range *range, void *ctx, int status);
132 
133 typedef void (*bdev_copy_bounce_buffer_cpl)(void *ctx, int rc);
134 
135 struct lba_range {
136 	struct spdk_bdev		*bdev;
137 	uint64_t			offset;
138 	uint64_t			length;
139 	bool				quiesce;
140 	void				*locked_ctx;
141 	struct spdk_thread		*owner_thread;
142 	struct spdk_bdev_channel	*owner_ch;
143 	TAILQ_ENTRY(lba_range)		tailq;
144 	TAILQ_ENTRY(lba_range)		tailq_module;
145 };
146 
147 static struct spdk_bdev_opts	g_bdev_opts = {
148 	.bdev_io_pool_size = SPDK_BDEV_IO_POOL_SIZE,
149 	.bdev_io_cache_size = SPDK_BDEV_IO_CACHE_SIZE,
150 	.bdev_auto_examine = SPDK_BDEV_AUTO_EXAMINE,
151 	.iobuf_small_cache_size = BUF_SMALL_CACHE_SIZE,
152 	.iobuf_large_cache_size = BUF_LARGE_CACHE_SIZE,
153 };
154 
155 static spdk_bdev_init_cb	g_init_cb_fn = NULL;
156 static void			*g_init_cb_arg = NULL;
157 
158 static spdk_bdev_fini_cb	g_fini_cb_fn = NULL;
159 static void			*g_fini_cb_arg = NULL;
160 static struct spdk_thread	*g_fini_thread = NULL;
161 
162 struct spdk_bdev_qos_limit {
163 	/** IOs or bytes allowed per second (i.e., 1s). */
164 	uint64_t limit;
165 
166 	/** Remaining IOs or bytes allowed in the current timeslice (e.g., 1ms).
167 	 *  The byte count is allowed to go negative if an I/O is submitted while some
168 	 *  bytes are remaining but the I/O is bigger than that amount. The excess
169 	 *  will be deducted from the next timeslice.
170 	 */
171 	int64_t remaining_this_timeslice;
172 
173 	/** Minimum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
174 	uint32_t min_per_timeslice;
175 
176 	/** Maximum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
177 	uint32_t max_per_timeslice;
178 
179 	/** Function to check whether to queue the IO.
180 	 * If the IO is allowed to pass, the quota will be reduced correspondingly.
181 	 */
182 	bool (*queue_io)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
183 
184 	/** Function to rewind the quota once the IO was allowed to be sent by this
185 	 * limit but queued due to one of the further limits.
186 	 */
187 	void (*rewind_quota)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
188 };
189 
190 struct spdk_bdev_qos {
191 	/** Rate limits, one entry per rate limit type. */
192 	struct spdk_bdev_qos_limit rate_limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
193 
194 	/** The channel that all I/O are funneled through. */
195 	struct spdk_bdev_channel *ch;
196 
197 	/** The thread on which the poller is running. */
198 	struct spdk_thread *thread;
199 
200 	/** Size of a timeslice in tsc ticks. */
201 	uint64_t timeslice_size;
202 
203 	/** Timestamp of start of last timeslice. */
204 	uint64_t last_timeslice;
205 
206 	/** Poller that processes queued I/O commands each time slice. */
207 	struct spdk_poller *poller;
208 };
209 
210 struct spdk_bdev_mgmt_channel {
211 	/*
212 	 * Each thread keeps a cache of bdev_io - this allows
213 	 *  bdev threads which are *not* DPDK threads to still
214 	 *  benefit from a per-thread bdev_io cache.  Without
215 	 *  this, non-DPDK threads fetching from the mempool
216 	 *  incur a cmpxchg on get and put.
217 	 */
218 	bdev_io_stailq_t per_thread_cache;
219 	uint32_t	per_thread_cache_count;
220 	uint32_t	bdev_io_cache_size;
221 
222 	struct spdk_iobuf_channel iobuf;
223 
224 	TAILQ_HEAD(, spdk_bdev_shared_resource)	shared_resources;
225 	TAILQ_HEAD(, spdk_bdev_io_wait_entry)	io_wait_queue;
226 };
227 
228 /*
229  * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
230  * queue their IO awaiting retry here, which makes it possible to retry sending
231  * IO to one bdev after IO from another bdev completes.
232  */
233 struct spdk_bdev_shared_resource {
234 	/* The bdev management channel */
235 	struct spdk_bdev_mgmt_channel *mgmt_ch;
236 
237 	/*
238 	 * Count of I/O submitted to bdev module and waiting for completion.
239 	 * Incremented before submit_request() is called on an spdk_bdev_io.
240 	 */
241 	uint64_t		io_outstanding;
242 
243 	/*
244 	 * Queue of IO awaiting retry because of a previous NOMEM status returned
245 	 *  on this channel.
246 	 */
247 	bdev_io_tailq_t		nomem_io;
248 
249 	/*
250 	 * Threshold which io_outstanding must drop to before retrying nomem_io.
251 	 */
252 	uint64_t		nomem_threshold;
253 
254 	/* I/O channel allocated by a bdev module */
255 	struct spdk_io_channel	*shared_ch;
256 
257 	struct spdk_poller	*nomem_poller;
258 
259 	/* Refcount of bdev channels using this resource */
260 	uint32_t		ref;
261 
262 	TAILQ_ENTRY(spdk_bdev_shared_resource) link;
263 };
264 
265 #define BDEV_CH_RESET_IN_PROGRESS	(1 << 0)
266 #define BDEV_CH_QOS_ENABLED		(1 << 1)
267 
268 struct spdk_bdev_channel {
269 	struct spdk_bdev	*bdev;
270 
271 	/* The channel for the underlying device */
272 	struct spdk_io_channel	*channel;
273 
274 	/* Accel channel */
275 	struct spdk_io_channel	*accel_channel;
276 
277 	/* Per io_device per thread data */
278 	struct spdk_bdev_shared_resource *shared_resource;
279 
280 	struct spdk_bdev_io_stat *stat;
281 
282 	/*
283 	 * Count of I/O submitted to the underlying dev module through this channel
284 	 * and waiting for completion.
285 	 */
286 	uint64_t		io_outstanding;
287 
288 	/*
289 	 * List of all submitted I/Os including I/O that are generated via splitting.
290 	 */
291 	bdev_io_tailq_t		io_submitted;
292 
293 	/*
294 	 * List of spdk_bdev_io that are currently queued because they write to a locked
295 	 * LBA range.
296 	 */
297 	bdev_io_tailq_t		io_locked;
298 
299 	/* List of I/Os with accel sequence being currently executed */
300 	bdev_io_tailq_t		io_accel_exec;
301 
302 	/* List of I/Os doing memory domain pull/push */
303 	bdev_io_tailq_t		io_memory_domain;
304 
305 	uint32_t		flags;
306 
307 	/* Counts number of bdev_io in the io_submitted TAILQ */
308 	uint16_t		queue_depth;
309 
310 	uint16_t		trace_id;
311 
312 	struct spdk_histogram_data *histogram;
313 
314 #ifdef SPDK_CONFIG_VTUNE
315 	uint64_t		start_tsc;
316 	uint64_t		interval_tsc;
317 	__itt_string_handle	*handle;
318 	struct spdk_bdev_io_stat *prev_stat;
319 #endif
320 
321 	bdev_io_tailq_t		queued_resets;
322 
323 	lba_range_tailq_t	locked_ranges;
324 
325 	/** List of I/Os queued by QoS. */
326 	bdev_io_tailq_t		qos_queued_io;
327 };
328 
329 struct media_event_entry {
330 	struct spdk_bdev_media_event	event;
331 	TAILQ_ENTRY(media_event_entry)	tailq;
332 };
333 
334 #define MEDIA_EVENT_POOL_SIZE 64
335 
336 struct spdk_bdev_desc {
337 	struct spdk_bdev		*bdev;
338 	struct spdk_thread		*thread;
339 	struct {
340 		spdk_bdev_event_cb_t event_fn;
341 		void *ctx;
342 	}				callback;
343 	bool				closed;
344 	bool				write;
345 	bool				memory_domains_supported;
346 	bool				accel_sequence_supported[SPDK_BDEV_NUM_IO_TYPES];
347 	struct spdk_spinlock		spinlock;
348 	uint32_t			refs;
349 	TAILQ_HEAD(, media_event_entry)	pending_media_events;
350 	TAILQ_HEAD(, media_event_entry)	free_media_events;
351 	struct media_event_entry	*media_events_buffer;
352 	TAILQ_ENTRY(spdk_bdev_desc)	link;
353 
354 	uint64_t		timeout_in_sec;
355 	spdk_bdev_io_timeout_cb	cb_fn;
356 	void			*cb_arg;
357 	struct spdk_poller	*io_timeout_poller;
358 	struct spdk_bdev_module_claim	*claim;
359 };
360 
361 struct spdk_bdev_iostat_ctx {
362 	struct spdk_bdev_io_stat *stat;
363 	spdk_bdev_get_device_stat_cb cb;
364 	void *cb_arg;
365 };
366 
367 struct set_qos_limit_ctx {
368 	void (*cb_fn)(void *cb_arg, int status);
369 	void *cb_arg;
370 	struct spdk_bdev *bdev;
371 };
372 
373 struct spdk_bdev_channel_iter {
374 	spdk_bdev_for_each_channel_msg fn;
375 	spdk_bdev_for_each_channel_done cpl;
376 	struct spdk_io_channel_iter *i;
377 	void *ctx;
378 };
379 
380 struct spdk_bdev_io_error_stat {
381 	uint32_t error_status[-SPDK_MIN_BDEV_IO_STATUS];
382 };
383 
384 enum bdev_io_retry_state {
385 	BDEV_IO_RETRY_STATE_INVALID,
386 	BDEV_IO_RETRY_STATE_PULL,
387 	BDEV_IO_RETRY_STATE_PULL_MD,
388 	BDEV_IO_RETRY_STATE_SUBMIT,
389 	BDEV_IO_RETRY_STATE_PUSH,
390 	BDEV_IO_RETRY_STATE_PUSH_MD,
391 };
392 
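/* A bdev is registered as an io_device using an address one byte past the start
 * of its spdk_bdev structure.  This keeps the io_device handle distinct from the
 * bdev pointer itself (which a bdev module may register as its own io_device),
 * and the bdev can still be recovered by subtracting one.
 */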
393 #define __bdev_to_io_dev(bdev)		(((char *)bdev) + 1)
394 #define __bdev_from_io_dev(io_dev)	((struct spdk_bdev *)(((char *)io_dev) - 1))
395 #define __io_ch_to_bdev_ch(io_ch)	((struct spdk_bdev_channel *)spdk_io_channel_get_ctx(io_ch))
396 #define __io_ch_to_bdev_mgmt_ch(io_ch)	((struct spdk_bdev_mgmt_channel *)spdk_io_channel_get_ctx(io_ch))
397 
398 static inline void bdev_io_complete(void *ctx);
399 static inline void bdev_io_complete_unsubmitted(struct spdk_bdev_io *bdev_io);
400 static void bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io);
401 static void bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io);
402 
403 static void bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
404 static int bdev_write_zero_buffer(struct spdk_bdev_io *bdev_io);
405 
406 static void bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
407 				struct spdk_io_channel *ch, void *_ctx);
408 static void bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status);
409 
410 static int bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
411 				     struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
412 				     uint64_t num_blocks,
413 				     struct spdk_memory_domain *domain, void *domain_ctx,
414 				     struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
415 				     spdk_bdev_io_completion_cb cb, void *cb_arg);
416 static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
417 				      struct iovec *iov, int iovcnt, void *md_buf,
418 				      uint64_t offset_blocks, uint64_t num_blocks,
419 				      struct spdk_memory_domain *domain, void *domain_ctx,
420 				      struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
421 				      uint32_t nvme_cdw12_raw, uint32_t nvme_cdw13_raw,
422 				      spdk_bdev_io_completion_cb cb, void *cb_arg);
423 
424 static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
425 			       uint64_t offset, uint64_t length,
426 			       lock_range_cb cb_fn, void *cb_arg);
427 
428 static int bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
429 				 uint64_t offset, uint64_t length,
430 				 lock_range_cb cb_fn, void *cb_arg);
431 
432 static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
433 static bool bdev_abort_buf_io(struct spdk_bdev_mgmt_channel *ch, struct spdk_bdev_io *bio_to_abort);
434 
435 static bool claim_type_is_v2(enum spdk_bdev_claim_type type);
436 static void bdev_desc_release_claims(struct spdk_bdev_desc *desc);
437 static void claim_reset(struct spdk_bdev *bdev);
438 
439 static void bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch);
440 
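/* Read a field from caller-provided extended I/O options, falling back to defval
 * when no options were passed or the options struct is too small to contain the
 * field.
 */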
441 #define bdev_get_ext_io_opt(opts, field, defval) \
442 	((opts) != NULL ? SPDK_GET_FIELD(opts, field, defval) : (defval))
443 
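/* Every submitted bdev_io is tracked on its channel's io_submitted list; these
 * two helpers keep the channel's queue_depth counter in sync with that list and
 * must always be used as a pair.
 */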
444 static inline void
445 bdev_ch_add_to_io_submitted(struct spdk_bdev_io *bdev_io)
446 {
447 	TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
448 	bdev_io->internal.ch->queue_depth++;
449 }
450 
451 static inline void
452 bdev_ch_remove_from_io_submitted(struct spdk_bdev_io *bdev_io)
453 {
454 	TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
455 	bdev_io->internal.ch->queue_depth--;
456 }
457 
458 void
459 spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size)
460 {
461 	if (!opts) {
462 		SPDK_ERRLOG("opts should not be NULL\n");
463 		return;
464 	}
465 
466 	if (!opts_size) {
467 		SPDK_ERRLOG("opts_size should not be zero\n");
468 		return;
469 	}
470 
471 	opts->opts_size = opts_size;
472 
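/* Copy a field only if it fits within the opts_size the caller passed in, so
 * that callers compiled against an older, smaller spdk_bdev_opts still get
 * every field they know about and nothing is written past the end of their
 * structure.
 */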
473 #define SET_FIELD(field) \
474 	if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts_size) { \
475 		opts->field = g_bdev_opts.field; \
476 	} \
477 
478 	SET_FIELD(bdev_io_pool_size);
479 	SET_FIELD(bdev_io_cache_size);
480 	SET_FIELD(bdev_auto_examine);
481 	SET_FIELD(iobuf_small_cache_size);
482 	SET_FIELD(iobuf_large_cache_size);
483 
484 	/* Do not remove this statement. You should always update this statement when adding a new field,
485 	 * and do not forget to add the SET_FIELD statement for your added field. */
486 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_opts) == 32, "Incorrect size");
487 
488 #undef SET_FIELD
489 }
490 
491 int
492 spdk_bdev_set_opts(struct spdk_bdev_opts *opts)
493 {
494 	uint32_t min_pool_size;
495 
496 	if (!opts) {
497 		SPDK_ERRLOG("opts cannot be NULL\n");
498 		return -1;
499 	}
500 
501 	if (!opts->opts_size) {
502 		SPDK_ERRLOG("opts_size inside opts cannot be zero\n");
503 		return -1;
504 	}
505 
506 	/*
507 	 * Add 1 to the thread count to account for the extra mgmt_ch that gets created during subsystem
508 	 *  initialization.  A second mgmt_ch will be created on the same thread when the application starts
509 	 *  but before the deferred put_io_channel event is executed for the first mgmt_ch.
510 	 */
511 	min_pool_size = opts->bdev_io_cache_size * (spdk_thread_get_count() + 1);
512 	if (opts->bdev_io_pool_size < min_pool_size) {
513 		SPDK_ERRLOG("bdev_io_pool_size %" PRIu32 " is not compatible with bdev_io_cache_size %" PRIu32
514 			    " and %" PRIu32 " threads\n", opts->bdev_io_pool_size, opts->bdev_io_cache_size,
515 			    spdk_thread_get_count());
516 		SPDK_ERRLOG("bdev_io_pool_size must be at least %" PRIu32 "\n", min_pool_size);
517 		return -1;
518 	}
519 
520 #define SET_FIELD(field) \
521 	if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
522 		g_bdev_opts.field = opts->field; \
523 	} \
524 
525 	SET_FIELD(bdev_io_pool_size);
526 	SET_FIELD(bdev_io_cache_size);
527 	SET_FIELD(bdev_auto_examine);
528 	SET_FIELD(iobuf_small_cache_size);
529 	SET_FIELD(iobuf_large_cache_size);
530 
531 	g_bdev_opts.opts_size = opts->opts_size;
532 
533 #undef SET_FIELD
534 
535 	return 0;
536 }
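
/* Illustrative sketch (not part of this file): a typical caller uses the two
 * functions above together, fetching the current defaults, tweaking a field and
 * applying the result.  The opts_size handshake makes this safe across ABI
 * revisions of spdk_bdev_opts.
 *
 *	struct spdk_bdev_opts opts = {};
 *
 *	spdk_bdev_get_opts(&opts, sizeof(opts));
 *	opts.bdev_auto_examine = false;
 *	spdk_bdev_set_opts(&opts);
 */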
537 
538 static struct spdk_bdev *
539 bdev_get_by_name(const char *bdev_name)
540 {
541 	struct spdk_bdev_name find;
542 	struct spdk_bdev_name *res;
543 
544 	find.name = (char *)bdev_name;
545 	res = RB_FIND(bdev_name_tree, &g_bdev_mgr.bdev_names, &find);
546 	if (res != NULL) {
547 		return res->bdev;
548 	}
549 
550 	return NULL;
551 }
552 
553 struct spdk_bdev *
554 spdk_bdev_get_by_name(const char *bdev_name)
555 {
556 	struct spdk_bdev *bdev;
557 
558 	spdk_spin_lock(&g_bdev_mgr.spinlock);
559 	bdev = bdev_get_by_name(bdev_name);
560 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
561 
562 	return bdev;
563 }
564 
565 struct bdev_io_status_string {
566 	enum spdk_bdev_io_status status;
567 	const char *str;
568 };
569 
570 static const struct bdev_io_status_string bdev_io_status_strings[] = {
571 	{ SPDK_BDEV_IO_STATUS_AIO_ERROR, "aio_error" },
572 	{ SPDK_BDEV_IO_STATUS_ABORTED, "aborted" },
573 	{ SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED, "first_fused_failed" },
574 	{ SPDK_BDEV_IO_STATUS_MISCOMPARE, "miscompare" },
575 	{ SPDK_BDEV_IO_STATUS_NOMEM, "nomem" },
576 	{ SPDK_BDEV_IO_STATUS_SCSI_ERROR, "scsi_error" },
577 	{ SPDK_BDEV_IO_STATUS_NVME_ERROR, "nvme_error" },
578 	{ SPDK_BDEV_IO_STATUS_FAILED, "failed" },
579 	{ SPDK_BDEV_IO_STATUS_PENDING, "pending" },
580 	{ SPDK_BDEV_IO_STATUS_SUCCESS, "success" },
581 };
582 
583 static const char *
584 bdev_io_status_get_string(enum spdk_bdev_io_status status)
585 {
586 	uint32_t i;
587 
588 	for (i = 0; i < SPDK_COUNTOF(bdev_io_status_strings); i++) {
589 		if (bdev_io_status_strings[i].status == status) {
590 			return bdev_io_status_strings[i].str;
591 		}
592 	}
593 
594 	return "reserved";
595 }
596 
597 struct spdk_bdev_wait_for_examine_ctx {
598 	struct spdk_poller              *poller;
599 	spdk_bdev_wait_for_examine_cb	cb_fn;
600 	void				*cb_arg;
601 };
602 
603 static bool bdev_module_all_actions_completed(void);
604 
605 static int
606 bdev_wait_for_examine_cb(void *arg)
607 {
608 	struct spdk_bdev_wait_for_examine_ctx *ctx = arg;
609 
610 	if (!bdev_module_all_actions_completed()) {
611 		return SPDK_POLLER_IDLE;
612 	}
613 
614 	spdk_poller_unregister(&ctx->poller);
615 	ctx->cb_fn(ctx->cb_arg);
616 	free(ctx);
617 
618 	return SPDK_POLLER_BUSY;
619 }
620 
621 int
622 spdk_bdev_wait_for_examine(spdk_bdev_wait_for_examine_cb cb_fn, void *cb_arg)
623 {
624 	struct spdk_bdev_wait_for_examine_ctx *ctx;
625 
626 	ctx = calloc(1, sizeof(*ctx));
627 	if (ctx == NULL) {
628 		return -ENOMEM;
629 	}
630 	ctx->cb_fn = cb_fn;
631 	ctx->cb_arg = cb_arg;
632 	ctx->poller = SPDK_POLLER_REGISTER(bdev_wait_for_examine_cb, ctx, 0);
633 
634 	return 0;
635 }
636 
637 struct spdk_bdev_examine_item {
638 	char *name;
639 	TAILQ_ENTRY(spdk_bdev_examine_item) link;
640 };
641 
642 TAILQ_HEAD(spdk_bdev_examine_allowlist, spdk_bdev_examine_item);
643 
644 struct spdk_bdev_examine_allowlist g_bdev_examine_allowlist = TAILQ_HEAD_INITIALIZER(
645 			g_bdev_examine_allowlist);
646 
647 static inline bool
648 bdev_examine_allowlist_check(const char *name)
649 {
650 	struct spdk_bdev_examine_item *item;
651 	TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
652 		if (strcmp(name, item->name) == 0) {
653 			return true;
654 		}
655 	}
656 	return false;
657 }
658 
659 static inline void
660 bdev_examine_allowlist_free(void)
661 {
662 	struct spdk_bdev_examine_item *item;
663 	while (!TAILQ_EMPTY(&g_bdev_examine_allowlist)) {
664 		item = TAILQ_FIRST(&g_bdev_examine_allowlist);
665 		TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
666 		free(item->name);
667 		free(item);
668 	}
669 }
670 
671 static inline bool
672 bdev_in_examine_allowlist(struct spdk_bdev *bdev)
673 {
674 	struct spdk_bdev_alias *tmp;
675 	if (bdev_examine_allowlist_check(bdev->name)) {
676 		return true;
677 	}
678 	TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
679 		if (bdev_examine_allowlist_check(tmp->alias.name)) {
680 			return true;
681 		}
682 	}
683 	return false;
684 }
685 
686 static inline bool
687 bdev_ok_to_examine(struct spdk_bdev *bdev)
688 {
689 	if (g_bdev_opts.bdev_auto_examine) {
690 		return true;
691 	} else {
692 		return bdev_in_examine_allowlist(bdev);
693 	}
694 }
695 
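/* Offer a bdev to the registered bdev modules for examination.  Every module's
 * examine_config() callback is invoked first.  examine_disk() is then invoked
 * depending on how the bdev is claimed: by all modules when the bdev is
 * unclaimed, by the single module holding a v1 (EXCL_WRITE) claim, or by every
 * module holding a v2 claim.
 */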
696 static void
697 bdev_examine(struct spdk_bdev *bdev)
698 {
699 	struct spdk_bdev_module *module;
700 	struct spdk_bdev_module_claim *claim, *tmpclaim;
701 	uint32_t action;
702 
703 	if (!bdev_ok_to_examine(bdev)) {
704 		return;
705 	}
706 
707 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
708 		if (module->examine_config) {
709 			spdk_spin_lock(&module->internal.spinlock);
710 			action = module->internal.action_in_progress;
711 			module->internal.action_in_progress++;
712 			spdk_spin_unlock(&module->internal.spinlock);
713 			module->examine_config(bdev);
714 			if (action != module->internal.action_in_progress) {
715 				SPDK_ERRLOG("examine_config for module %s did not call "
716 					    "spdk_bdev_module_examine_done()\n", module->name);
717 			}
718 		}
719 	}
720 
721 	spdk_spin_lock(&bdev->internal.spinlock);
722 
723 	switch (bdev->internal.claim_type) {
724 	case SPDK_BDEV_CLAIM_NONE:
725 		/* Examine by all bdev modules */
726 		TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
727 			if (module->examine_disk) {
728 				spdk_spin_lock(&module->internal.spinlock);
729 				module->internal.action_in_progress++;
730 				spdk_spin_unlock(&module->internal.spinlock);
731 				spdk_spin_unlock(&bdev->internal.spinlock);
732 				module->examine_disk(bdev);
733 				spdk_spin_lock(&bdev->internal.spinlock);
734 			}
735 		}
736 		break;
737 	case SPDK_BDEV_CLAIM_EXCL_WRITE:
738 		/* Examine by the one bdev module with a v1 claim */
739 		module = bdev->internal.claim.v1.module;
740 		if (module->examine_disk) {
741 			spdk_spin_lock(&module->internal.spinlock);
742 			module->internal.action_in_progress++;
743 			spdk_spin_unlock(&module->internal.spinlock);
744 			spdk_spin_unlock(&bdev->internal.spinlock);
745 			module->examine_disk(bdev);
746 			return;
747 		}
748 		break;
749 	default:
750 		/* Examine by all bdev modules with a v2 claim */
751 		assert(claim_type_is_v2(bdev->internal.claim_type));
752 		/*
753 		 * Removal of tailq nodes while iterating can cause the iteration to jump out of the
754 		 * list, perhaps accessing freed memory. Without protection, this could happen
755 		 * while the lock is dropped during the examine callback.
756 		 */
757 		bdev->internal.examine_in_progress++;
758 
759 		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) {
760 			module = claim->module;
761 
762 			if (module == NULL) {
763 				/* This is a vestigial claim, held by examine_count */
764 				continue;
765 			}
766 
767 			if (module->examine_disk == NULL) {
768 				continue;
769 			}
770 
771 			spdk_spin_lock(&module->internal.spinlock);
772 			module->internal.action_in_progress++;
773 			spdk_spin_unlock(&module->internal.spinlock);
774 
775 			/* Call examine_disk without holding internal.spinlock. */
776 			spdk_spin_unlock(&bdev->internal.spinlock);
777 			module->examine_disk(bdev);
778 			spdk_spin_lock(&bdev->internal.spinlock);
779 		}
780 
781 		assert(bdev->internal.examine_in_progress > 0);
782 		bdev->internal.examine_in_progress--;
783 		if (bdev->internal.examine_in_progress == 0) {
784 			/* Remove any claims that were released during examine_disk */
785 			TAILQ_FOREACH_SAFE(claim, &bdev->internal.claim.v2.claims, link, tmpclaim) {
786 				if (claim->desc != NULL) {
787 					continue;
788 				}
789 
790 				TAILQ_REMOVE(&bdev->internal.claim.v2.claims, claim, link);
791 				free(claim);
792 			}
793 			if (TAILQ_EMPTY(&bdev->internal.claim.v2.claims)) {
794 				claim_reset(bdev);
795 			}
796 		}
797 	}
798 
799 	spdk_spin_unlock(&bdev->internal.spinlock);
800 }
801 
802 int
803 spdk_bdev_examine(const char *name)
804 {
805 	struct spdk_bdev *bdev;
806 	struct spdk_bdev_examine_item *item;
807 	struct spdk_thread *thread = spdk_get_thread();
808 
809 	if (spdk_unlikely(!spdk_thread_is_app_thread(thread))) {
810 		SPDK_ERRLOG("Cannot examine bdev %s on thread %p (%s)\n", name, thread,
811 			    thread ? spdk_thread_get_name(thread) : "null");
812 		return -EINVAL;
813 	}
814 
815 	if (g_bdev_opts.bdev_auto_examine) {
816 		SPDK_ERRLOG("Manual examine is not allowed if auto examine is enabled\n");
817 		return -EINVAL;
818 	}
819 
820 	if (bdev_examine_allowlist_check(name)) {
821 		SPDK_ERRLOG("Duplicate bdev name for manual examine: %s\n", name);
822 		return -EEXIST;
823 	}
824 
825 	item = calloc(1, sizeof(*item));
826 	if (!item) {
827 		return -ENOMEM;
828 	}
829 	item->name = strdup(name);
830 	if (!item->name) {
831 		free(item);
832 		return -ENOMEM;
833 	}
834 	TAILQ_INSERT_TAIL(&g_bdev_examine_allowlist, item, link);
835 
836 	bdev = spdk_bdev_get_by_name(name);
837 	if (bdev) {
838 		bdev_examine(bdev);
839 	}
840 	return 0;
841 }
842 
843 static inline void
844 bdev_examine_allowlist_config_json(struct spdk_json_write_ctx *w)
845 {
846 	struct spdk_bdev_examine_item *item;
847 	TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
848 		spdk_json_write_object_begin(w);
849 		spdk_json_write_named_string(w, "method", "bdev_examine");
850 		spdk_json_write_named_object_begin(w, "params");
851 		spdk_json_write_named_string(w, "name", item->name);
852 		spdk_json_write_object_end(w);
853 		spdk_json_write_object_end(w);
854 	}
855 }
856 
857 struct spdk_bdev *
858 spdk_bdev_first(void)
859 {
860 	struct spdk_bdev *bdev;
861 
862 	bdev = TAILQ_FIRST(&g_bdev_mgr.bdevs);
863 	if (bdev) {
864 		SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
865 	}
866 
867 	return bdev;
868 }
869 
870 struct spdk_bdev *
871 spdk_bdev_next(struct spdk_bdev *prev)
872 {
873 	struct spdk_bdev *bdev;
874 
875 	bdev = TAILQ_NEXT(prev, internal.link);
876 	if (bdev) {
877 		SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
878 	}
879 
880 	return bdev;
881 }
882 
883 static struct spdk_bdev *
884 _bdev_next_leaf(struct spdk_bdev *bdev)
885 {
886 	while (bdev != NULL) {
887 		if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
888 			return bdev;
889 		} else {
890 			bdev = TAILQ_NEXT(bdev, internal.link);
891 		}
892 	}
893 
894 	return bdev;
895 }
896 
897 struct spdk_bdev *
898 spdk_bdev_first_leaf(void)
899 {
900 	struct spdk_bdev *bdev;
901 
902 	bdev = _bdev_next_leaf(TAILQ_FIRST(&g_bdev_mgr.bdevs));
903 
904 	if (bdev) {
905 		SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
906 	}
907 
908 	return bdev;
909 }
910 
911 struct spdk_bdev *
912 spdk_bdev_next_leaf(struct spdk_bdev *prev)
913 {
914 	struct spdk_bdev *bdev;
915 
916 	bdev = _bdev_next_leaf(TAILQ_NEXT(prev, internal.link));
917 
918 	if (bdev) {
919 		SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
920 	}
921 
922 	return bdev;
923 }
924 
925 static inline bool
926 bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
927 {
928 	return bdev_io->internal.memory_domain;
929 }
930 
931 static inline bool
932 bdev_io_use_accel_sequence(struct spdk_bdev_io *bdev_io)
933 {
934 	return bdev_io->internal.has_accel_sequence;
935 }
936 
937 static inline void
938 bdev_queue_nomem_io_head(struct spdk_bdev_shared_resource *shared_resource,
939 			 struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
940 {
941 	/* Wait for some of the outstanding I/O to complete before we retry any of the nomem_io.
942 	 * Normally we will wait for NOMEM_THRESHOLD_COUNT I/O to complete but for low queue depth
943 	 * channels we will instead wait for half to complete.
944 	 */
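	/* For example, with 64 outstanding I/O the threshold becomes 56, while with
	 * only 8 outstanding it becomes 4 (half of the queue depth).
	 */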
945 	shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
946 					   (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
947 
948 	assert(state != BDEV_IO_RETRY_STATE_INVALID);
949 	bdev_io->internal.retry_state = state;
950 	TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
951 }
952 
953 static inline void
954 bdev_queue_nomem_io_tail(struct spdk_bdev_shared_resource *shared_resource,
955 			 struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
956 {
957 	/* We only queue IOs at the end of the nomem_io queue if they're submitted by the user while
958 	 * the queue isn't empty, so we don't need to update the nomem_threshold here */
959 	assert(!TAILQ_EMPTY(&shared_resource->nomem_io));
960 
961 	assert(state != BDEV_IO_RETRY_STATE_INVALID);
962 	bdev_io->internal.retry_state = state;
963 	TAILQ_INSERT_TAIL(&shared_resource->nomem_io, bdev_io, internal.link);
964 }
965 
966 void
967 spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
968 {
969 	struct iovec *iovs;
970 
971 	if (bdev_io->u.bdev.iovs == NULL) {
972 		bdev_io->u.bdev.iovs = &bdev_io->iov;
973 		bdev_io->u.bdev.iovcnt = 1;
974 	}
975 
976 	iovs = bdev_io->u.bdev.iovs;
977 
978 	assert(iovs != NULL);
979 	assert(bdev_io->u.bdev.iovcnt >= 1);
980 
981 	iovs[0].iov_base = buf;
982 	iovs[0].iov_len = len;
983 }
984 
985 void
986 spdk_bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
987 {
988 	assert((len / spdk_bdev_get_md_size(bdev_io->bdev)) >= bdev_io->u.bdev.num_blocks);
989 	bdev_io->u.bdev.md_buf = md_buf;
990 }
991 
992 static bool
993 _is_buf_allocated(const struct iovec *iovs)
994 {
995 	if (iovs == NULL) {
996 		return false;
997 	}
998 
999 	return iovs[0].iov_base != NULL;
1000 }
1001 
1002 static bool
1003 _are_iovs_aligned(struct iovec *iovs, int iovcnt, uint32_t alignment)
1004 {
1005 	int i;
1006 	uintptr_t iov_base;
1007 
1008 	if (spdk_likely(alignment == 1)) {
1009 		return true;
1010 	}
1011 
1012 	for (i = 0; i < iovcnt; i++) {
1013 		iov_base = (uintptr_t)iovs[i].iov_base;
1014 		if ((iov_base & (alignment - 1)) != 0) {
1015 			return false;
1016 		}
1017 	}
1018 
1019 	return true;
1020 }
1021 
1022 static inline bool
1023 bdev_io_needs_sequence_exec(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
1024 {
1025 	if (!bdev_io->internal.accel_sequence) {
1026 		return false;
1027 	}
1028 
1029 	/* For now, we don't allow splitting IOs with an accel sequence and will treat them as if
1030 	 * the bdev module didn't support accel sequences */
1031 	return !desc->accel_sequence_supported[bdev_io->type] || bdev_io->internal.split;
1032 }
1033 
1034 static inline void
1035 bdev_io_increment_outstanding(struct spdk_bdev_channel *bdev_ch,
1036 			      struct spdk_bdev_shared_resource *shared_resource)
1037 {
1038 	bdev_ch->io_outstanding++;
1039 	shared_resource->io_outstanding++;
1040 }
1041 
1042 static inline void
1043 bdev_io_decrement_outstanding(struct spdk_bdev_channel *bdev_ch,
1044 			      struct spdk_bdev_shared_resource *shared_resource)
1045 {
1046 	assert(bdev_ch->io_outstanding > 0);
1047 	assert(shared_resource->io_outstanding > 0);
1048 	bdev_ch->io_outstanding--;
1049 	shared_resource->io_outstanding--;
1050 }
1051 
1052 static void
1053 bdev_io_submit_sequence_cb(void *ctx, int status)
1054 {
1055 	struct spdk_bdev_io *bdev_io = ctx;
1056 
1057 	bdev_io->u.bdev.accel_sequence = NULL;
1058 	bdev_io->internal.accel_sequence = NULL;
1059 
1060 	if (spdk_unlikely(status != 0)) {
1061 		SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
1062 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1063 		bdev_io_complete_unsubmitted(bdev_io);
1064 		return;
1065 	}
1066 
1067 	bdev_io_submit(bdev_io);
1068 }
1069 
1070 static void
1071 bdev_io_exec_sequence_cb(void *ctx, int status)
1072 {
1073 	struct spdk_bdev_io *bdev_io = ctx;
1074 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1075 
1076 	TAILQ_REMOVE(&bdev_io->internal.ch->io_accel_exec, bdev_io, internal.link);
1077 	bdev_io_decrement_outstanding(ch, ch->shared_resource);
1078 
1079 	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1080 		bdev_ch_retry_io(ch);
1081 	}
1082 
1083 	bdev_io->internal.data_transfer_cpl(bdev_io, status);
1084 }
1085 
1086 static void
1087 bdev_io_exec_sequence(struct spdk_bdev_io *bdev_io, void (*cb_fn)(void *ctx, int status))
1088 {
1089 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1090 
1091 	assert(bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
1092 	assert(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE || bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1093 
1094 	/* Since the operations are appended during submission, they're in the opposite order from
1095 	 * how we want to execute them for reads (i.e. we need to execute the most recently added
1096 	 * operation first), so reverse the sequence before executing it.
1097 	 */
1098 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1099 		spdk_accel_sequence_reverse(bdev_io->internal.accel_sequence);
1100 	}
1101 
1102 	TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_accel_exec, bdev_io, internal.link);
1103 	bdev_io_increment_outstanding(ch, ch->shared_resource);
1104 	bdev_io->internal.data_transfer_cpl = cb_fn;
1105 
1106 	spdk_accel_sequence_finish(bdev_io->internal.accel_sequence,
1107 				   bdev_io_exec_sequence_cb, bdev_io);
1108 }
1109 
1110 static void
1111 bdev_io_get_buf_complete(struct spdk_bdev_io *bdev_io, bool status)
1112 {
1113 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1114 	void *buf;
1115 
1116 	if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
1117 		buf = bdev_io->internal.buf;
1118 		bdev_io->internal.buf = NULL;
1119 		bdev_io->internal.get_aux_buf_cb(ch, bdev_io, buf);
1120 		bdev_io->internal.get_aux_buf_cb = NULL;
1121 	} else {
1122 		assert(bdev_io->internal.get_buf_cb != NULL);
1123 		bdev_io->internal.get_buf_cb(ch, bdev_io, status);
1124 		bdev_io->internal.get_buf_cb = NULL;
1125 	}
1126 }
1127 
1128 static void
1129 _bdev_io_pull_buffer_cpl(void *ctx, int rc)
1130 {
1131 	struct spdk_bdev_io *bdev_io = ctx;
1132 
1133 	if (rc) {
1134 		SPDK_ERRLOG("Set bounce buffer failed with rc %d\n", rc);
1135 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1136 	}
1137 	bdev_io_get_buf_complete(bdev_io, !rc);
1138 }
1139 
1140 static void
1141 bdev_io_pull_md_buf_done(void *ctx, int status)
1142 {
1143 	struct spdk_bdev_io *bdev_io = ctx;
1144 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1145 
1146 	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1147 	bdev_io_decrement_outstanding(ch, ch->shared_resource);
1148 
1149 	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1150 		bdev_ch_retry_io(ch);
1151 	}
1152 
1153 	assert(bdev_io->internal.data_transfer_cpl);
1154 	bdev_io->internal.data_transfer_cpl(bdev_io, status);
1155 }
1156 
1157 static void
1158 bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
1159 {
1160 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1161 	int rc = 0;
1162 
1163 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1164 		if (bdev_io_use_memory_domain(bdev_io)) {
1165 			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1166 			bdev_io_increment_outstanding(ch, ch->shared_resource);
1167 			rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
1168 							  bdev_io->internal.memory_domain_ctx,
1169 							  &bdev_io->internal.orig_md_iov, 1,
1170 							  &bdev_io->internal.bounce_md_iov, 1,
1171 							  bdev_io_pull_md_buf_done, bdev_io);
1172 			if (rc == 0) {
1173 				/* Continue to submit IO in completion callback */
1174 				return;
1175 			}
1176 			bdev_io_decrement_outstanding(ch, ch->shared_resource);
1177 			TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1178 			if (rc != -ENOMEM) {
1179 				SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
1180 					    spdk_memory_domain_get_dma_device_id(
1181 						    bdev_io->internal.memory_domain), rc);
1182 			}
1183 		} else {
1184 			memcpy(bdev_io->internal.bounce_md_iov.iov_base,
1185 			       bdev_io->internal.orig_md_iov.iov_base,
1186 			       bdev_io->internal.orig_md_iov.iov_len);
1187 		}
1188 	}
1189 
1190 	if (spdk_unlikely(rc == -ENOMEM)) {
1191 		bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL_MD);
1192 	} else {
1193 		assert(bdev_io->internal.data_transfer_cpl);
1194 		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1195 	}
1196 }
1197 
1198 static void
1199 _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
1200 {
1201 	/* save original md_buf */
1202 	bdev_io->internal.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
1203 	bdev_io->internal.orig_md_iov.iov_len = len;
1204 	bdev_io->internal.bounce_md_iov.iov_base = md_buf;
1205 	bdev_io->internal.bounce_md_iov.iov_len = len;
1206 	/* set bounce md_buf */
1207 	bdev_io->u.bdev.md_buf = md_buf;
1208 
1209 	bdev_io_pull_md_buf(bdev_io);
1210 }
1211 
1212 static void
1213 _bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io)
1214 {
1215 	struct spdk_bdev *bdev = bdev_io->bdev;
1216 	uint64_t md_len;
1217 	void *buf;
1218 
1219 	if (spdk_bdev_is_md_separate(bdev)) {
1220 		assert(!bdev_io_use_accel_sequence(bdev_io));
1221 
1222 		buf = (char *)bdev_io->u.bdev.iovs[0].iov_base + bdev_io->u.bdev.iovs[0].iov_len;
1223 		md_len = bdev_io->u.bdev.num_blocks * bdev->md_len;
1224 
1225 		assert(((uintptr_t)buf & (spdk_bdev_get_buf_align(bdev) - 1)) == 0);
1226 
1227 		if (bdev_io->u.bdev.md_buf != NULL) {
1228 			_bdev_io_pull_bounce_md_buf(bdev_io, buf, md_len);
1229 			return;
1230 		} else {
1231 			spdk_bdev_io_set_md_buf(bdev_io, buf, md_len);
1232 		}
1233 	}
1234 
1235 	bdev_io_get_buf_complete(bdev_io, true);
1236 }
1237 
1238 static inline void
1239 bdev_io_pull_data_done(struct spdk_bdev_io *bdev_io, int rc)
1240 {
1241 	if (rc) {
1242 		SPDK_ERRLOG("Failed to get data buffer\n");
1243 		assert(bdev_io->internal.data_transfer_cpl);
1244 		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1245 		return;
1246 	}
1247 
1248 	_bdev_io_set_md_buf(bdev_io);
1249 }
1250 
1251 static void
1252 bdev_io_pull_data_done_and_track(void *ctx, int status)
1253 {
1254 	struct spdk_bdev_io *bdev_io = ctx;
1255 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1256 
1257 	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1258 	bdev_io_decrement_outstanding(ch, ch->shared_resource);
1259 
1260 	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1261 		bdev_ch_retry_io(ch);
1262 	}
1263 
1264 	bdev_io_pull_data_done(bdev_io, status);
1265 }
1266 
1267 static void
1268 bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
1269 {
1270 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1271 	int rc = 0;
1272 
1273 	/* If we need to exec an accel sequence, or the IO uses a memory domain buffer and has a
1274 	 * sequence, append a copy operation so that accel changes the src/dst buffers of the previous
1275 	 * operation. */
1276 	if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io) ||
1277 	    (bdev_io_use_accel_sequence(bdev_io) && bdev_io_use_memory_domain(bdev_io))) {
1278 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1279 			rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1280 						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1281 						    NULL, NULL,
1282 						    bdev_io->internal.orig_iovs,
1283 						    bdev_io->internal.orig_iovcnt,
1284 						    bdev_io->internal.memory_domain,
1285 						    bdev_io->internal.memory_domain_ctx,
1286 						    NULL, NULL);
1287 		} else {
1288 			/* We need to reverse the src/dst for reads */
1289 			assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1290 			rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1291 						    bdev_io->internal.orig_iovs,
1292 						    bdev_io->internal.orig_iovcnt,
1293 						    bdev_io->internal.memory_domain,
1294 						    bdev_io->internal.memory_domain_ctx,
1295 						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1296 						    NULL, NULL, NULL, NULL);
1297 		}
1298 
1299 		if (spdk_unlikely(rc != 0 && rc != -ENOMEM)) {
1300 			SPDK_ERRLOG("Failed to append copy to accel sequence: %p\n",
1301 				    bdev_io->internal.accel_sequence);
1302 		}
1303 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1304 		/* if this is write path, copy data from original buffer to bounce buffer */
1305 		if (bdev_io_use_memory_domain(bdev_io)) {
1306 			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1307 			bdev_io_increment_outstanding(ch, ch->shared_resource);
1308 			rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
1309 							  bdev_io->internal.memory_domain_ctx,
1310 							  bdev_io->internal.orig_iovs,
1311 							  (uint32_t) bdev_io->internal.orig_iovcnt,
1312 							  bdev_io->u.bdev.iovs, 1,
1313 							  bdev_io_pull_data_done_and_track,
1314 							  bdev_io);
1315 			if (rc == 0) {
1316 				/* Continue to submit IO in completion callback */
1317 				return;
1318 			}
1319 			TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1320 			bdev_io_decrement_outstanding(ch, ch->shared_resource);
1321 			if (rc != -ENOMEM) {
1322 				SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
1323 					    spdk_memory_domain_get_dma_device_id(
1324 						    bdev_io->internal.memory_domain));
1325 			}
1326 		} else {
1327 			assert(bdev_io->u.bdev.iovcnt == 1);
1328 			spdk_copy_iovs_to_buf(bdev_io->u.bdev.iovs[0].iov_base,
1329 					      bdev_io->u.bdev.iovs[0].iov_len,
1330 					      bdev_io->internal.orig_iovs,
1331 					      bdev_io->internal.orig_iovcnt);
1332 		}
1333 	}
1334 
1335 	if (spdk_unlikely(rc == -ENOMEM)) {
1336 		bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL);
1337 	} else {
1338 		bdev_io_pull_data_done(bdev_io, rc);
1339 	}
1340 }
1341 
1342 static void
1343 _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
1344 			      bdev_copy_bounce_buffer_cpl cpl_cb)
1345 {
1346 	struct spdk_bdev_shared_resource *shared_resource = bdev_io->internal.ch->shared_resource;
1347 
1348 	bdev_io->internal.data_transfer_cpl = cpl_cb;
1349 	/* save original iovec */
1350 	bdev_io->internal.orig_iovs = bdev_io->u.bdev.iovs;
1351 	bdev_io->internal.orig_iovcnt = bdev_io->u.bdev.iovcnt;
1352 	/* set bounce iov */
1353 	bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_iov;
1354 	bdev_io->u.bdev.iovcnt = 1;
1355 	/* set bounce buffer for this operation */
1356 	bdev_io->u.bdev.iovs[0].iov_base = buf;
1357 	bdev_io->u.bdev.iovs[0].iov_len = len;
1358 
1359 	if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1360 		bdev_queue_nomem_io_tail(shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL);
1361 	} else {
1362 		bdev_io_pull_data(bdev_io);
1363 	}
1364 }
1365 
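/* Called once an internal buffer has been obtained for a bdev_io.  For aux
 * buffer requests the buffer is handed straight back to the caller; otherwise it
 * is aligned and either installed as the I/O's data buffer or, if the caller
 * already supplied buffers, used as a bounce buffer via the pull path.
 */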
1366 static void
1367 _bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t len)
1368 {
1369 	struct spdk_bdev *bdev = bdev_io->bdev;
1370 	bool buf_allocated;
1371 	uint64_t alignment;
1372 	void *aligned_buf;
1373 
1374 	bdev_io->internal.buf = buf;
1375 
1376 	if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
1377 		bdev_io_get_buf_complete(bdev_io, true);
1378 		return;
1379 	}
1380 
1381 	alignment = spdk_bdev_get_buf_align(bdev);
1382 	buf_allocated = _is_buf_allocated(bdev_io->u.bdev.iovs);
1383 	aligned_buf = (void *)(((uintptr_t)buf + (alignment - 1)) & ~(alignment - 1));
1384 
1385 	if (buf_allocated) {
1386 		_bdev_io_pull_bounce_data_buf(bdev_io, aligned_buf, len, _bdev_io_pull_buffer_cpl);
1387 		/* Continue in completion callback */
1388 		return;
1389 	} else {
1390 		spdk_bdev_io_set_buf(bdev_io, aligned_buf, len);
1391 	}
1392 
1393 	_bdev_io_set_md_buf(bdev_io);
1394 }
1395 
1396 static inline uint64_t
1397 bdev_io_get_max_buf_len(struct spdk_bdev_io *bdev_io, uint64_t len)
1398 {
1399 	struct spdk_bdev *bdev = bdev_io->bdev;
1400 	uint64_t md_len, alignment;
1401 
1402 	md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
1403 
1404 	/* 1-byte alignment needs 0 bytes of extra space, 64-byte alignment needs 63 bytes of extra space, etc. */
1405 	alignment = spdk_bdev_get_buf_align(bdev) - 1;
1406 
1407 	return len + alignment + md_len;
1408 }
1409 
1410 static void
1411 _bdev_io_put_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t buf_len)
1412 {
1413 	struct spdk_bdev_mgmt_channel *ch;
1414 
1415 	ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1416 	spdk_iobuf_put(&ch->iobuf, buf, bdev_io_get_max_buf_len(bdev_io, buf_len));
1417 }
1418 
1419 static void
1420 bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
1421 {
1422 	assert(bdev_io->internal.buf != NULL);
1423 	_bdev_io_put_buf(bdev_io, bdev_io->internal.buf, bdev_io->internal.buf_len);
1424 	bdev_io->internal.buf = NULL;
1425 }
1426 
1427 void
1428 spdk_bdev_io_put_aux_buf(struct spdk_bdev_io *bdev_io, void *buf)
1429 {
1430 	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1431 
1432 	assert(buf != NULL);
1433 	_bdev_io_put_buf(bdev_io, buf, len);
1434 }
1435 
1436 static inline void
1437 bdev_submit_request(struct spdk_bdev *bdev, struct spdk_io_channel *ioch,
1438 		    struct spdk_bdev_io *bdev_io)
1439 {
1440 	/* After a request is submitted to a bdev module, the ownership of an accel sequence
1441 	 * associated with that bdev_io is transferred to the bdev module. So, clear the internal
1442 	 * sequence pointer to make sure we won't touch it anymore. */
1443 	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE ||
1444 	     bdev_io->type == SPDK_BDEV_IO_TYPE_READ) && bdev_io->u.bdev.accel_sequence != NULL) {
1445 		assert(!bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
1446 		bdev_io->internal.accel_sequence = NULL;
1447 	}
1448 
1449 	bdev->fn_table->submit_request(ioch, bdev_io);
1450 }
1451 
1452 static inline void
1453 bdev_ch_resubmit_io(struct spdk_bdev_shared_resource *shared_resource, struct spdk_bdev_io *bdev_io)
1454 {
1455 	struct spdk_bdev *bdev = bdev_io->bdev;
1456 
1457 	bdev_io_increment_outstanding(bdev_io->internal.ch, shared_resource);
1458 	bdev_io->internal.error.nvme.cdw0 = 0;
1459 	bdev_io->num_retries++;
1460 	bdev_submit_request(bdev, spdk_bdev_io_get_io_channel(bdev_io), bdev_io);
1461 }
1462 
1463 static void
1464 bdev_shared_ch_retry_io(struct spdk_bdev_shared_resource *shared_resource)
1465 {
1466 	struct spdk_bdev_io *bdev_io;
1467 
1468 	if (shared_resource->io_outstanding > shared_resource->nomem_threshold) {
1469 		/*
1470 		 * Allow some more I/O to complete before retrying the nomem_io queue.
1471 		 *  Some drivers (such as nvme) cannot immediately take a new I/O in
1472 		 *  the context of a completion, because the resources for the I/O are
1473 		 *  not released until control returns to the bdev poller.  Also, we
1474 		 *  may require several small I/O to complete before a larger I/O
1475 		 *  (that requires splitting) can be submitted.
1476 		 */
1477 		return;
1478 	}
1479 
1480 	while (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1481 		bdev_io = TAILQ_FIRST(&shared_resource->nomem_io);
1482 		TAILQ_REMOVE(&shared_resource->nomem_io, bdev_io, internal.link);
1483 
1484 		switch (bdev_io->internal.retry_state) {
1485 		case BDEV_IO_RETRY_STATE_SUBMIT:
1486 			bdev_ch_resubmit_io(shared_resource, bdev_io);
1487 			break;
1488 		case BDEV_IO_RETRY_STATE_PULL:
1489 			bdev_io_pull_data(bdev_io);
1490 			break;
1491 		case BDEV_IO_RETRY_STATE_PULL_MD:
1492 			bdev_io_pull_md_buf(bdev_io);
1493 			break;
1494 		case BDEV_IO_RETRY_STATE_PUSH:
1495 			bdev_io_push_bounce_data(bdev_io);
1496 			break;
1497 		case BDEV_IO_RETRY_STATE_PUSH_MD:
1498 			bdev_io_push_bounce_md_buf(bdev_io);
1499 			break;
1500 		default:
1501 			assert(0 && "invalid retry state");
1502 			break;
1503 		}
1504 
1505 		if (bdev_io == TAILQ_FIRST(&shared_resource->nomem_io)) {
1506 			/* This IO completed again with NOMEM status, so break the loop and
1507 			 * don't try anymore.  Note that a bdev_io that fails with NOMEM
1508 			 * always gets requeued at the front of the list, to maintain
1509 			 * ordering.
1510 			 */
1511 			break;
1512 		}
1513 	}
1514 }
1515 
1516 static void
1517 bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
1518 {
1519 	bdev_shared_ch_retry_io(bdev_ch->shared_resource);
1520 }
1521 
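/* Safety-net poller armed when NOMEM I/O is queued but there is no outstanding
 * I/O whose completion could trigger a retry.  It retries the queued I/O and
 * re-arms itself if the queue is still stuck with nothing outstanding.
 */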
1522 static int
1523 bdev_no_mem_poller(void *ctx)
1524 {
1525 	struct spdk_bdev_shared_resource *shared_resource = ctx;
1526 
1527 	spdk_poller_unregister(&shared_resource->nomem_poller);
1528 
1529 	if (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1530 		bdev_shared_ch_retry_io(shared_resource);
1531 	}
1532 	/* the retry cb may re-register the poller so double check */
1533 	if (!TAILQ_EMPTY(&shared_resource->nomem_io) &&
1534 	    shared_resource->io_outstanding == 0 && shared_resource->nomem_poller == NULL) {
1535 		/* No IOs were submitted, try again */
1536 		shared_resource->nomem_poller = SPDK_POLLER_REGISTER(bdev_no_mem_poller, shared_resource,
1537 						SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * 10);
1538 	}
1539 
1540 	return SPDK_POLLER_BUSY;
1541 }
1542 
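/* Handle a NOMEM completion from the bdev module.  Returns true when the I/O
 * was completed with SPDK_BDEV_IO_STATUS_NOMEM, in which case it is re-queued at
 * the head of the nomem_io list for a later retry instead of being completed
 * back to the caller.
 */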
1543 static inline bool
1544 _bdev_io_handle_no_mem(struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
1545 {
1546 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
1547 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1548 
1549 	if (spdk_unlikely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM)) {
1550 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
1551 		bdev_queue_nomem_io_head(shared_resource, bdev_io, state);
1552 
1553 		if (shared_resource->io_outstanding == 0 && !shared_resource->nomem_poller) {
1554 			/* Special case when we have nomem IOs and no outstanding IOs whose completions
1555 			 * could trigger a retry of the queued IOs.
1556 			 * Any IO that is submitted may trigger a retry of queued IOs. This poller handles the
1557 			 * case where no new IOs are submitted, e.g. qd==1 */
1558 			shared_resource->nomem_poller = SPDK_POLLER_REGISTER(bdev_no_mem_poller, shared_resource,
1559 							SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * 10);
1560 		}
1561 		/* If bdev module completed an I/O that has an accel sequence with NOMEM status, the
1562 		 * ownership of that sequence is transferred back to the bdev layer, so we need to
1563 		 * restore internal.accel_sequence to make sure that the sequence is handled
1564 		 * correctly in case the I/O is later aborted. */
1565 		if ((bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
1566 		     bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) && bdev_io->u.bdev.accel_sequence) {
1567 			assert(bdev_io->internal.accel_sequence == NULL);
1568 			bdev_io->internal.accel_sequence = bdev_io->u.bdev.accel_sequence;
1569 		}
1570 
1571 		return true;
1572 	}
1573 
1574 	if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1575 		bdev_ch_retry_io(bdev_ch);
1576 	}
1577 
1578 	return false;
1579 }
1580 
1581 static void
1582 _bdev_io_complete_push_bounce_done(void *ctx, int rc)
1583 {
1584 	struct spdk_bdev_io *bdev_io = ctx;
1585 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1586 
1587 	if (rc) {
1588 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1589 	}
1590 	/* We want to free the bounce buffer here since we know we're done with it (as opposed
1591 	 * to waiting for the conditional free of internal.buf in spdk_bdev_free_io()).
1592 	 */
1593 	bdev_io_put_buf(bdev_io);
1594 
1595 	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1596 		bdev_ch_retry_io(ch);
1597 	}
1598 
1599 	/* Continue with IO completion flow */
1600 	bdev_io_complete(bdev_io);
1601 }
1602 
1603 static void
1604 bdev_io_push_bounce_md_buf_done(void *ctx, int rc)
1605 {
1606 	struct spdk_bdev_io *bdev_io = ctx;
1607 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1608 
1609 	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1610 	bdev_io_decrement_outstanding(ch, ch->shared_resource);
1611 
1612 	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1613 		bdev_ch_retry_io(ch);
1614 	}
1615 
1616 	bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1617 }
1618 
1619 static inline void
1620 bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
1621 {
1622 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1623 	int rc = 0;
1624 
1625 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1626 	/* do the same for metadata buffer */
1627 	if (spdk_unlikely(bdev_io->internal.orig_md_iov.iov_base != NULL)) {
1628 		assert(spdk_bdev_is_md_separate(bdev_io->bdev));
1629 
1630 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1631 			if (bdev_io_use_memory_domain(bdev_io)) {
1632 				TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1633 				bdev_io_increment_outstanding(ch, ch->shared_resource);
1634 				/* If memory domain is used then we need to call async push function */
1635 				rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
1636 								  bdev_io->internal.memory_domain_ctx,
1637 								  &bdev_io->internal.orig_md_iov,
1638 								  (uint32_t)bdev_io->internal.orig_iovcnt,
1639 								  &bdev_io->internal.bounce_md_iov, 1,
1640 								  bdev_io_push_bounce_md_buf_done,
1641 								  bdev_io);
1642 				if (rc == 0) {
1643 					/* Continue IO completion in async callback */
1644 					return;
1645 				}
1646 				TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1647 				bdev_io_decrement_outstanding(ch, ch->shared_resource);
1648 				if (rc != -ENOMEM) {
1649 					SPDK_ERRLOG("Failed to push md to memory domain %s\n",
1650 						    spdk_memory_domain_get_dma_device_id(
1651 							    bdev_io->internal.memory_domain));
1652 				}
1653 			} else {
1654 				memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
1655 				       bdev_io->internal.orig_md_iov.iov_len);
1656 			}
1657 		}
1658 	}
1659 
1660 	if (spdk_unlikely(rc == -ENOMEM)) {
1661 		bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH_MD);
1662 	} else {
1663 		assert(bdev_io->internal.data_transfer_cpl);
1664 		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1665 	}
1666 }
1667 
1668 static inline void
1669 bdev_io_push_bounce_data_done(struct spdk_bdev_io *bdev_io, int rc)
1670 {
1671 	assert(bdev_io->internal.data_transfer_cpl);
1672 	if (rc) {
1673 		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1674 		return;
1675 	}
1676 
1677 	/* set original buffer for this io */
1678 	bdev_io->u.bdev.iovcnt = bdev_io->internal.orig_iovcnt;
1679 	bdev_io->u.bdev.iovs = bdev_io->internal.orig_iovs;
1680 	/* disable bouncing buffer for this io */
1681 	bdev_io->internal.orig_iovcnt = 0;
1682 	bdev_io->internal.orig_iovs = NULL;
1683 
1684 	bdev_io_push_bounce_md_buf(bdev_io);
1685 }
1686 
1687 static void
1688 bdev_io_push_bounce_data_done_and_track(void *ctx, int status)
1689 {
1690 	struct spdk_bdev_io *bdev_io = ctx;
1691 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1692 
1693 	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1694 	bdev_io_decrement_outstanding(ch, ch->shared_resource);
1695 
1696 	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1697 		bdev_ch_retry_io(ch);
1698 	}
1699 
1700 	bdev_io_push_bounce_data_done(bdev_io, status);
1701 }
1702 
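/* For reads, copy the data bounce buffer back to the caller's original iovs, either
 * asynchronously through the memory domain or with spdk_copy_buf_to_iovs().  -ENOMEM
 * queues the IO for a later retry in the PUSH state.
 */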
1703 static inline void
1704 bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
1705 {
1706 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1707 	int rc = 0;
1708 
1709 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1710 	assert(!bdev_io_use_accel_sequence(bdev_io));
1711 
1712 	/* if this is read path, copy data from bounce buffer to original buffer */
1713 	/* if this is the read path, copy data from the bounce buffer to the original buffer */
1714 		if (bdev_io_use_memory_domain(bdev_io)) {
1715 			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1716 			bdev_io_increment_outstanding(ch, ch->shared_resource);
1717 			/* If a memory domain is used then we need to call the async push function */
1718 			rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
1719 							  bdev_io->internal.memory_domain_ctx,
1720 							  bdev_io->internal.orig_iovs,
1721 							  (uint32_t)bdev_io->internal.orig_iovcnt,
1722 							  &bdev_io->internal.bounce_iov, 1,
1723 							  bdev_io_push_bounce_data_done_and_track,
1724 							  bdev_io);
1725 			if (rc == 0) {
1726 				/* Continue IO completion in async callback */
1727 				return;
1728 			}
1729 
1730 			TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1731 			bdev_io_decrement_outstanding(ch, ch->shared_resource);
1732 			if (rc != -ENOMEM) {
1733 				SPDK_ERRLOG("Failed to push data to memory domain %s\n",
1734 					    spdk_memory_domain_get_dma_device_id(
1735 						    bdev_io->internal.memory_domain));
1736 			}
1737 		} else {
1738 			spdk_copy_buf_to_iovs(bdev_io->internal.orig_iovs,
1739 					      bdev_io->internal.orig_iovcnt,
1740 					      bdev_io->internal.bounce_iov.iov_base,
1741 					      bdev_io->internal.bounce_iov.iov_len);
1742 		}
1743 	}
1744 
1745 	if (spdk_unlikely(rc == -ENOMEM)) {
1746 		bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH);
1747 	} else {
1748 		bdev_io_push_bounce_data_done(bdev_io, rc);
1749 	}
1750 }
1751 
1752 static inline void
1753 _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_buffer_cpl cpl_cb)
1754 {
1755 	bdev_io->internal.data_transfer_cpl = cpl_cb;
1756 	bdev_io_push_bounce_data(bdev_io);
1757 }
1758 
1759 static void
1760 bdev_io_get_iobuf_cb(struct spdk_iobuf_entry *iobuf, void *buf)
1761 {
1762 	struct spdk_bdev_io *bdev_io;
1763 
1764 	bdev_io = SPDK_CONTAINEROF(iobuf, struct spdk_bdev_io, internal.iobuf);
1765 	_bdev_io_set_buf(bdev_io, buf, bdev_io->internal.buf_len);
1766 }
1767 
1768 static void
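/* Allocate an internal data buffer for this IO from the per-thread iobuf channel.  The
 * request is failed immediately if the length (after bdev_io_get_max_buf_len() rounding)
 * exceeds the large iobuf buffer size; otherwise the buffer is set synchronously or via
 * bdev_io_get_iobuf_cb() once one becomes available.
 */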
1769 bdev_io_get_buf(struct spdk_bdev_io *bdev_io, uint64_t len)
1770 {
1771 	struct spdk_bdev_mgmt_channel *mgmt_ch;
1772 	uint64_t max_len;
1773 	void *buf;
1774 
1775 	assert(spdk_bdev_io_get_thread(bdev_io) == spdk_get_thread());
1776 	mgmt_ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1777 	max_len = bdev_io_get_max_buf_len(bdev_io, len);
1778 
1779 	if (spdk_unlikely(max_len > mgmt_ch->iobuf.large.bufsize)) {
1780 		SPDK_ERRLOG("Length %" PRIu64 " is larger than allowed\n", max_len);
1781 		bdev_io_get_buf_complete(bdev_io, false);
1782 		return;
1783 	}
1784 
1785 	bdev_io->internal.buf_len = len;
1786 	buf = spdk_iobuf_get(&mgmt_ch->iobuf, max_len, &bdev_io->internal.iobuf,
1787 			     bdev_io_get_iobuf_cb);
1788 	if (buf != NULL) {
1789 		_bdev_io_set_buf(bdev_io, buf, len);
1790 	}
1791 }
1792 
1793 void
1794 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1795 {
1796 	struct spdk_bdev *bdev = bdev_io->bdev;
1797 	uint64_t alignment;
1798 
1799 	assert(cb != NULL);
1800 	bdev_io->internal.get_buf_cb = cb;
1801 
1802 	alignment = spdk_bdev_get_buf_align(bdev);
1803 
1804 	if (_is_buf_allocated(bdev_io->u.bdev.iovs) &&
1805 	    _are_iovs_aligned(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, alignment)) {
1806 		/* Buffer already present and aligned */
1807 		cb(spdk_bdev_io_get_io_channel(bdev_io), bdev_io, true);
1808 		return;
1809 	}
1810 
1811 	bdev_io_get_buf(bdev_io, len);
1812 }
1813 
1814 static void
1815 _bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1816 			      bool success)
1817 {
1818 	if (!success) {
1819 		SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
1820 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1821 		bdev_io_complete_unsubmitted(bdev_io);
1822 		return;
1823 	}
1824 
1825 	if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
1826 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1827 			bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
1828 			return;
1829 		}
1830 		/* For reads we'll execute the sequence after the data is read, so, for now, only
1831 		 * clear out accel_sequence pointer and submit the IO */
1832 		 * clear out the accel_sequence pointer and submit the IO */
1833 		bdev_io->u.bdev.accel_sequence = NULL;
1834 	}
1835 
1836 	bdev_io_submit(bdev_io);
1837 }
1838 
1839 static void
1840 _bdev_memory_domain_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
1841 			       uint64_t len)
1842 {
1843 	assert(cb != NULL);
1844 	bdev_io->internal.get_buf_cb = cb;
1845 
1846 	bdev_io_get_buf(bdev_io, len);
1847 }
1848 
1849 void
1850 spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
1851 {
1852 	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1853 
1854 	assert(cb != NULL);
1855 	assert(bdev_io->internal.get_aux_buf_cb == NULL);
1856 	bdev_io->internal.get_aux_buf_cb = cb;
1857 	bdev_io_get_buf(bdev_io, len);
1858 }
1859 
1860 static int
1861 bdev_module_get_max_ctx_size(void)
1862 {
1863 	struct spdk_bdev_module *bdev_module;
1864 	int max_bdev_module_size = 0;
1865 
1866 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1867 		if (bdev_module->get_ctx_size && bdev_module->get_ctx_size() > max_bdev_module_size) {
1868 			max_bdev_module_size = bdev_module->get_ctx_size();
1869 		}
1870 	}
1871 
1872 	return max_bdev_module_size;
1873 }
1874 
1875 static void
1876 bdev_enable_histogram_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
1877 {
1878 	if (!bdev->internal.histogram_enabled) {
1879 		return;
1880 	}
1881 
1882 	spdk_json_write_object_begin(w);
1883 	spdk_json_write_named_string(w, "method", "bdev_enable_histogram");
1884 
1885 	spdk_json_write_named_object_begin(w, "params");
1886 	spdk_json_write_named_string(w, "name", bdev->name);
1887 
1888 	spdk_json_write_named_bool(w, "enable", bdev->internal.histogram_enabled);
1889 
1890 	if (bdev->internal.histogram_io_type) {
1891 		spdk_json_write_named_string(w, "opc",
1892 					     spdk_bdev_get_io_type_name(bdev->internal.histogram_io_type));
1893 	}
1894 
1895 	spdk_json_write_object_end(w);
1896 
1897 	spdk_json_write_object_end(w);
1898 }
1899 
1900 static void
1901 bdev_qos_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
1902 {
1903 	int i;
1904 	struct spdk_bdev_qos *qos = bdev->internal.qos;
1905 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
1906 
1907 	if (!qos) {
1908 		return;
1909 	}
1910 
1911 	spdk_bdev_get_qos_rate_limits(bdev, limits);
1912 
1913 	spdk_json_write_object_begin(w);
1914 	spdk_json_write_named_string(w, "method", "bdev_set_qos_limit");
1915 
1916 	spdk_json_write_named_object_begin(w, "params");
1917 	spdk_json_write_named_string(w, "name", bdev->name);
1918 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1919 		if (limits[i] > 0) {
1920 			spdk_json_write_named_uint64(w, qos_rpc_type[i], limits[i]);
1921 		}
1922 	}
1923 	spdk_json_write_object_end(w);
1924 
1925 	spdk_json_write_object_end(w);
1926 }
1927 
1928 void
1929 spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
1930 {
1931 	struct spdk_bdev_module *bdev_module;
1932 	struct spdk_bdev *bdev;
1933 
1934 	assert(w != NULL);
1935 
1936 	spdk_json_write_array_begin(w);
1937 
1938 	spdk_json_write_object_begin(w);
1939 	spdk_json_write_named_string(w, "method", "bdev_set_options");
1940 	spdk_json_write_named_object_begin(w, "params");
1941 	spdk_json_write_named_uint32(w, "bdev_io_pool_size", g_bdev_opts.bdev_io_pool_size);
1942 	spdk_json_write_named_uint32(w, "bdev_io_cache_size", g_bdev_opts.bdev_io_cache_size);
1943 	spdk_json_write_named_bool(w, "bdev_auto_examine", g_bdev_opts.bdev_auto_examine);
1944 	spdk_json_write_named_uint32(w, "iobuf_small_cache_size", g_bdev_opts.iobuf_small_cache_size);
1945 	spdk_json_write_named_uint32(w, "iobuf_large_cache_size", g_bdev_opts.iobuf_large_cache_size);
1946 	spdk_json_write_object_end(w);
1947 	spdk_json_write_object_end(w);
1948 
1949 	bdev_examine_allowlist_config_json(w);
1950 
1951 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1952 		if (bdev_module->config_json) {
1953 			bdev_module->config_json(w);
1954 		}
1955 	}
1956 
1957 	spdk_spin_lock(&g_bdev_mgr.spinlock);
1958 
1959 	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
1960 		if (bdev->fn_table->write_config_json) {
1961 			bdev->fn_table->write_config_json(bdev, w);
1962 		}
1963 
1964 		bdev_qos_config_json(bdev, w);
1965 		bdev_enable_histogram_config_json(bdev, w);
1966 	}
1967 
1968 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
1969 
1970 	/* This has to be the last RPC in the array to make sure all bdevs have finished examination */
1971 	spdk_json_write_object_begin(w);
1972 	spdk_json_write_named_string(w, "method", "bdev_wait_for_examine");
1973 	spdk_json_write_object_end(w);
1974 
1975 	spdk_json_write_array_end(w);
1976 }
1977 
1978 static void
1979 bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
1980 {
1981 	struct spdk_bdev_mgmt_channel *ch = ctx_buf;
1982 	struct spdk_bdev_io *bdev_io;
1983 
1984 	spdk_iobuf_channel_fini(&ch->iobuf);
1985 
1986 	while (!STAILQ_EMPTY(&ch->per_thread_cache)) {
1987 		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
1988 		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
1989 		ch->per_thread_cache_count--;
1990 		spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
1991 	}
1992 
1993 	assert(ch->per_thread_cache_count == 0);
1994 }
1995 
1996 static int
1997 bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
1998 {
1999 	struct spdk_bdev_mgmt_channel *ch = ctx_buf;
2000 	struct spdk_bdev_io *bdev_io;
2001 	uint32_t i;
2002 	int rc;
2003 
2004 	rc = spdk_iobuf_channel_init(&ch->iobuf, "bdev",
2005 				     g_bdev_opts.iobuf_small_cache_size,
2006 				     g_bdev_opts.iobuf_large_cache_size);
2007 	if (rc != 0) {
2008 		SPDK_ERRLOG("Failed to create iobuf channel: %s\n", spdk_strerror(-rc));
2009 		return -1;
2010 	}
2011 
2012 	STAILQ_INIT(&ch->per_thread_cache);
2013 	ch->bdev_io_cache_size = g_bdev_opts.bdev_io_cache_size;
2014 
2015 	/* Pre-populate bdev_io cache to ensure this thread cannot be starved. */
2016 	ch->per_thread_cache_count = 0;
2017 	for (i = 0; i < ch->bdev_io_cache_size; i++) {
2018 		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
2019 		if (bdev_io == NULL) {
2020 			SPDK_ERRLOG("You need to increase bdev_io_pool_size using bdev_set_options RPC.\n");
2021 			assert(false);
2022 			bdev_mgmt_channel_destroy(io_device, ctx_buf);
2023 			return -1;
2024 		}
2025 		ch->per_thread_cache_count++;
2026 		STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
2027 	}
2028 
2029 	TAILQ_INIT(&ch->shared_resources);
2030 	TAILQ_INIT(&ch->io_wait_queue);
2031 
2032 	return 0;
2033 }
2034 
2035 static void
2036 bdev_init_complete(int rc)
2037 {
2038 	spdk_bdev_init_cb cb_fn = g_init_cb_fn;
2039 	void *cb_arg = g_init_cb_arg;
2040 	struct spdk_bdev_module *m;
2041 
2042 	g_bdev_mgr.init_complete = true;
2043 	g_init_cb_fn = NULL;
2044 	g_init_cb_arg = NULL;
2045 
2046 	/*
2047 	 * For modules that need to know when subsystem init is complete,
2048 	 * inform them now.
2049 	 */
2050 	if (rc == 0) {
2051 		TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
2052 			if (m->init_complete) {
2053 				m->init_complete();
2054 			}
2055 		}
2056 	}
2057 
2058 	cb_fn(cb_arg, rc);
2059 }
2060 
2061 static bool
2062 bdev_module_all_actions_completed(void)
2063 {
2064 	struct spdk_bdev_module *m;
2065 
2066 	TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
2067 		if (m->internal.action_in_progress > 0) {
2068 			return false;
2069 		}
2070 	}
2071 	return true;
2072 }
2073 
2074 static void
2075 bdev_module_action_complete(void)
2076 {
2077 	/*
2078 	 * Don't finish bdev subsystem initialization if
2079 	 * module pre-initialization is still in progress, or
2080 	 * the subsystem has already been initialized.
2081 	 */
2082 	if (!g_bdev_mgr.module_init_complete || g_bdev_mgr.init_complete) {
2083 		return;
2084 	}
2085 
2086 	/*
2087 	 * Check all bdev modules for inits/examinations in progress. If any
2088 	 * exist, return immediately since we cannot finish bdev subsystem
2089 	 * initialization until all are completed.
2090 	 */
2091 	if (!bdev_module_all_actions_completed()) {
2092 		return;
2093 	}
2094 
2095 	/*
2096 	 * Modules already finished initialization - now that all
2097 	 * the bdev modules have finished their asynchronous I/O
2098 	 * processing, the entire bdev layer can be marked as complete.
2099 	 */
2100 	bdev_init_complete(0);
2101 }
2102 
2103 static void
2104 bdev_module_action_done(struct spdk_bdev_module *module)
2105 {
2106 	spdk_spin_lock(&module->internal.spinlock);
2107 	assert(module->internal.action_in_progress > 0);
2108 	module->internal.action_in_progress--;
2109 	spdk_spin_unlock(&module->internal.spinlock);
2110 	bdev_module_action_complete();
2111 }
2112 
2113 void
2114 spdk_bdev_module_init_done(struct spdk_bdev_module *module)
2115 {
2116 	assert(module->async_init);
2117 	bdev_module_action_done(module);
2118 }
2119 
2120 void
2121 spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
2122 {
2123 	bdev_module_action_done(module);
2124 }
2125 
2126 /** The last initialized bdev module */
2127 static struct spdk_bdev_module *g_resume_bdev_module = NULL;
2128 
2129 static void
2130 bdev_init_failed(void *cb_arg)
2131 {
2132 	struct spdk_bdev_module *module = cb_arg;
2133 
2134 	spdk_spin_lock(&module->internal.spinlock);
2135 	assert(module->internal.action_in_progress > 0);
2136 	module->internal.action_in_progress--;
2137 	spdk_spin_unlock(&module->internal.spinlock);
2138 	bdev_init_complete(-1);
2139 }
2140 
2141 static int
2142 bdev_modules_init(void)
2143 {
2144 	struct spdk_bdev_module *module;
2145 	int rc = 0;
2146 
2147 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
2148 		g_resume_bdev_module = module;
2149 		if (module->async_init) {
2150 			spdk_spin_lock(&module->internal.spinlock);
2151 			module->internal.action_in_progress = 1;
2152 			spdk_spin_unlock(&module->internal.spinlock);
2153 		}
2154 		rc = module->module_init();
2155 		if (rc != 0) {
2156 			/* Bump action_in_progress to prevent other modules from completing modules_init.
2157 			 * Send a message to defer application shutdown until resources are cleaned up */
2158 			spdk_spin_lock(&module->internal.spinlock);
2159 			module->internal.action_in_progress = 1;
2160 			spdk_spin_unlock(&module->internal.spinlock);
2161 			spdk_thread_send_msg(spdk_get_thread(), bdev_init_failed, module);
2162 			return rc;
2163 		}
2164 	}
2165 
2166 	g_resume_bdev_module = NULL;
2167 	return 0;
2168 }
2169 
2170 void
2171 spdk_bdev_initialize(spdk_bdev_init_cb cb_fn, void *cb_arg)
2172 {
2173 	int rc = 0;
2174 	char mempool_name[32];
2175 
2176 	assert(cb_fn != NULL);
2177 
2178 	g_init_cb_fn = cb_fn;
2179 	g_init_cb_arg = cb_arg;
2180 
2181 	spdk_notify_type_register("bdev_register");
2182 	spdk_notify_type_register("bdev_unregister");
2183 
2184 	snprintf(mempool_name, sizeof(mempool_name), "bdev_io_%d", getpid());
2185 
2186 	rc = spdk_iobuf_register_module("bdev");
2187 	if (rc != 0) {
2188 		SPDK_ERRLOG("could not register bdev iobuf module: %s\n", spdk_strerror(-rc));
2189 		bdev_init_complete(-1);
2190 		return;
2191 	}
2192 
2193 	g_bdev_mgr.bdev_io_pool = spdk_mempool_create(mempool_name,
2194 				  g_bdev_opts.bdev_io_pool_size,
2195 				  sizeof(struct spdk_bdev_io) +
2196 				  bdev_module_get_max_ctx_size(),
2197 				  0,
2198 				  SPDK_ENV_SOCKET_ID_ANY);
2199 
2200 	if (g_bdev_mgr.bdev_io_pool == NULL) {
2201 		SPDK_ERRLOG("could not allocate spdk_bdev_io pool\n");
2202 		bdev_init_complete(-1);
2203 		return;
2204 	}
2205 
2206 	g_bdev_mgr.zero_buffer = spdk_zmalloc(ZERO_BUFFER_SIZE, ZERO_BUFFER_SIZE,
2207 					      NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
2208 	if (!g_bdev_mgr.zero_buffer) {
2209 		SPDK_ERRLOG("create bdev zero buffer failed\n");
2210 		bdev_init_complete(-1);
2211 		return;
2212 	}
2213 
2214 #ifdef SPDK_CONFIG_VTUNE
2215 	g_bdev_mgr.domain = __itt_domain_create("spdk_bdev");
2216 #endif
2217 
2218 	spdk_io_device_register(&g_bdev_mgr, bdev_mgmt_channel_create,
2219 				bdev_mgmt_channel_destroy,
2220 				sizeof(struct spdk_bdev_mgmt_channel),
2221 				"bdev_mgr");
2222 
2223 	rc = bdev_modules_init();
2224 	g_bdev_mgr.module_init_complete = true;
2225 	if (rc != 0) {
2226 		SPDK_ERRLOG("bdev modules init failed\n");
2227 		return;
2228 	}
2229 
2230 	bdev_module_action_complete();
2231 }
2232 
2233 static void
2234 bdev_mgr_unregister_cb(void *io_device)
2235 {
2236 	spdk_bdev_fini_cb cb_fn = g_fini_cb_fn;
2237 
2238 	if (g_bdev_mgr.bdev_io_pool) {
2239 		if (spdk_mempool_count(g_bdev_mgr.bdev_io_pool) != g_bdev_opts.bdev_io_pool_size) {
2240 			SPDK_ERRLOG("bdev IO pool count is %zu but should be %u\n",
2241 				    spdk_mempool_count(g_bdev_mgr.bdev_io_pool),
2242 				    g_bdev_opts.bdev_io_pool_size);
2243 		}
2244 
2245 		spdk_mempool_free(g_bdev_mgr.bdev_io_pool);
2246 	}
2247 
2248 	spdk_free(g_bdev_mgr.zero_buffer);
2249 
2250 	bdev_examine_allowlist_free();
2251 
2252 	cb_fn(g_fini_cb_arg);
2253 	g_fini_cb_fn = NULL;
2254 	g_fini_cb_arg = NULL;
2255 	g_bdev_mgr.init_complete = false;
2256 	g_bdev_mgr.module_init_complete = false;
2257 }
2258 
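/* Tear down bdev modules in the reverse order of their registration, pausing at any
 * module with async_fini until it calls spdk_bdev_module_fini_done().  Once all modules
 * are finished, the bdev_mgr io_device is unregistered.
 */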
2259 static void
2260 bdev_module_fini_iter(void *arg)
2261 {
2262 	struct spdk_bdev_module *bdev_module;
2263 
2264 	/* FIXME: Handling initialization failures is broken now,
2265 	 * so we won't even try cleaning up after successfully
2266 	 * initialized modules. If module_init_complete is false,
2267 	 * just call bdev_mgr_unregister_cb().
2268 	 */
2269 	if (!g_bdev_mgr.module_init_complete) {
2270 		bdev_mgr_unregister_cb(NULL);
2271 		return;
2272 	}
2273 
2274 	/* Start iterating from the last touched module */
2275 	if (!g_resume_bdev_module) {
2276 		bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
2277 	} else {
2278 		bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list,
2279 					 internal.tailq);
2280 	}
2281 
2282 	while (bdev_module) {
2283 		if (bdev_module->async_fini) {
2284 			/* Save our place so we can resume later. We must
2285 			 * save the variable here, before calling module_fini()
2286 			 * below, because in some cases the module may immediately
2287 			 * call spdk_bdev_module_fini_done() and re-enter
2288 			 * this function to continue iterating. */
2289 			g_resume_bdev_module = bdev_module;
2290 		}
2291 
2292 		if (bdev_module->module_fini) {
2293 			bdev_module->module_fini();
2294 		}
2295 
2296 		if (bdev_module->async_fini) {
2297 			return;
2298 		}
2299 
2300 		bdev_module = TAILQ_PREV(bdev_module, bdev_module_list,
2301 					 internal.tailq);
2302 	}
2303 
2304 	g_resume_bdev_module = NULL;
2305 	spdk_io_device_unregister(&g_bdev_mgr, bdev_mgr_unregister_cb);
2306 }
2307 
2308 void
2309 spdk_bdev_module_fini_done(void)
2310 {
2311 	if (spdk_get_thread() != g_fini_thread) {
2312 		spdk_thread_send_msg(g_fini_thread, bdev_module_fini_iter, NULL);
2313 	} else {
2314 		bdev_module_fini_iter(NULL);
2315 	}
2316 }
2317 
2318 static void
2319 bdev_finish_unregister_bdevs_iter(void *cb_arg, int bdeverrno)
2320 {
2321 	struct spdk_bdev *bdev = cb_arg;
2322 
2323 	if (bdeverrno && bdev) {
2324 		SPDK_WARNLOG("Unable to unregister bdev '%s' during spdk_bdev_finish()\n",
2325 			     bdev->name);
2326 
2327 		/*
2328 		 * Since the call to spdk_bdev_unregister() failed, we have no way to free this
2329 		 *  bdev; try to continue by manually removing this bdev from the list and moving on
2330 		 *  to the next bdev in the list.
2331 		 */
2332 		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
2333 	}
2334 
2335 	if (TAILQ_EMPTY(&g_bdev_mgr.bdevs)) {
2336 		SPDK_DEBUGLOG(bdev, "Done unregistering bdevs\n");
2337 		/*
2338 		 * Bdev module finish needs to be deferred as we might be in the middle of some context
2339 		 * (like bdev part free) that will use this bdev (or private bdev driver ctx data)
2340 		 * after returning.
2341 		 */
2342 		spdk_thread_send_msg(spdk_get_thread(), bdev_module_fini_iter, NULL);
2343 		return;
2344 	}
2345 
2346 	/*
2347 	 * Unregister the last unclaimed bdev in the list, to ensure that bdev subsystem
2348 	 * shutdown proceeds top-down. The goal is to give virtual bdevs an opportunity
2349 	 * to detect clean shutdown as opposed to run-time hot removal of the underlying
2350 	 * base bdevs.
2351 	 *
2352 	 * Also, walk the list in the reverse order.
2353 	 */
2354 	for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
2355 	     bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
2356 		spdk_spin_lock(&bdev->internal.spinlock);
2357 		if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
2358 			LOG_ALREADY_CLAIMED_DEBUG("claimed, skipping", bdev);
2359 			spdk_spin_unlock(&bdev->internal.spinlock);
2360 			continue;
2361 		}
2362 		spdk_spin_unlock(&bdev->internal.spinlock);
2363 
2364 		SPDK_DEBUGLOG(bdev, "Unregistering bdev '%s'\n", bdev->name);
2365 		spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
2366 		return;
2367 	}
2368 
2369 	/*
2370 	 * If any bdev fails to unclaim its underlying bdev properly, we may end up with
2371 	 * a bdev list consisting only of claimed bdevs (if claims are managed
2372 	 * correctly, this would mean there is a loop in the claims graph, which is
2373 	 * clearly impossible). In that case, warn and unregister the last bdev on the list.
2374 	 */
2375 	for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
2376 	     bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
2377 		SPDK_WARNLOG("Unregistering claimed bdev '%s'!\n", bdev->name);
2378 		spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
2379 		return;
2380 	}
2381 }
2382 
2383 static void
2384 bdev_module_fini_start_iter(void *arg)
2385 {
2386 	struct spdk_bdev_module *bdev_module;
2387 
2388 	if (!g_resume_bdev_module) {
2389 		bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
2390 	} else {
2391 		bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list, internal.tailq);
2392 	}
2393 
2394 	while (bdev_module) {
2395 		if (bdev_module->async_fini_start) {
2396 			/* Save our place so we can resume later. We must
2397 			 * save the variable here, before calling fini_start()
2398 			 * below, because in some cases the module may immediately
2399 			 * call spdk_bdev_module_fini_start_done() and re-enter
2400 			 * this function to continue iterating. */
2401 			g_resume_bdev_module = bdev_module;
2402 		}
2403 
2404 		if (bdev_module->fini_start) {
2405 			bdev_module->fini_start();
2406 		}
2407 
2408 		if (bdev_module->async_fini_start) {
2409 			return;
2410 		}
2411 
2412 		bdev_module = TAILQ_PREV(bdev_module, bdev_module_list, internal.tailq);
2413 	}
2414 
2415 	g_resume_bdev_module = NULL;
2416 
2417 	bdev_finish_unregister_bdevs_iter(NULL, 0);
2418 }
2419 
2420 void
2421 spdk_bdev_module_fini_start_done(void)
2422 {
2423 	if (spdk_get_thread() != g_fini_thread) {
2424 		spdk_thread_send_msg(g_fini_thread, bdev_module_fini_start_iter, NULL);
2425 	} else {
2426 		bdev_module_fini_start_iter(NULL);
2427 	}
2428 }
2429 
2430 static void
2431 bdev_finish_wait_for_examine_done(void *cb_arg)
2432 {
2433 	bdev_module_fini_start_iter(NULL);
2434 }
2435 
2436 static void bdev_open_async_fini(void);
2437 
2438 void
2439 spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
2440 {
2441 	int rc;
2442 
2443 	assert(cb_fn != NULL);
2444 
2445 	g_fini_thread = spdk_get_thread();
2446 
2447 	g_fini_cb_fn = cb_fn;
2448 	g_fini_cb_arg = cb_arg;
2449 
2450 	bdev_open_async_fini();
2451 
2452 	rc = spdk_bdev_wait_for_examine(bdev_finish_wait_for_examine_done, NULL);
2453 	if (rc != 0) {
2454 		SPDK_ERRLOG("wait_for_examine failed: %s\n", spdk_strerror(-rc));
2455 		bdev_finish_wait_for_examine_done(NULL);
2456 	}
2457 }
2458 
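/* Get a bdev_io, preferring the per-thread cache over the global bdev_io pool. */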
2459 struct spdk_bdev_io *
2460 bdev_channel_get_io(struct spdk_bdev_channel *channel)
2461 {
2462 	struct spdk_bdev_mgmt_channel *ch = channel->shared_resource->mgmt_ch;
2463 	struct spdk_bdev_io *bdev_io;
2464 
2465 	if (ch->per_thread_cache_count > 0) {
2466 		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
2467 		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
2468 		ch->per_thread_cache_count--;
2469 	} else if (spdk_unlikely(!TAILQ_EMPTY(&ch->io_wait_queue))) {
2470 		/*
2471 		 * Don't try to look for bdev_ios in the global pool if there are
2472 		 * waiters on bdev_ios - we don't want this caller to jump the line.
2473 		 */
2474 		bdev_io = NULL;
2475 	} else {
2476 		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
2477 	}
2478 
2479 	return bdev_io;
2480 }
2481 
2482 void
2483 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
2484 {
2485 	struct spdk_bdev_mgmt_channel *ch;
2486 
2487 	assert(bdev_io != NULL);
2488 	assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING);
2489 
2490 	ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
2491 
2492 	if (bdev_io->internal.buf != NULL) {
2493 		bdev_io_put_buf(bdev_io);
2494 	}
2495 
2496 	if (ch->per_thread_cache_count < ch->bdev_io_cache_size) {
2497 		ch->per_thread_cache_count++;
2498 		STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
2499 		while (ch->per_thread_cache_count > 0 && !TAILQ_EMPTY(&ch->io_wait_queue)) {
2500 			struct spdk_bdev_io_wait_entry *entry;
2501 
2502 			entry = TAILQ_FIRST(&ch->io_wait_queue);
2503 			TAILQ_REMOVE(&ch->io_wait_queue, entry, link);
2504 			entry->cb_fn(entry->cb_arg);
2505 		}
2506 	} else {
2507 		/* We should never have a full cache with entries on the io wait queue. */
2508 		assert(TAILQ_EMPTY(&ch->io_wait_queue));
2509 		spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
2510 	}
2511 }
2512 
2513 static bool
2514 bdev_qos_is_iops_rate_limit(enum spdk_bdev_qos_rate_limit_type limit)
2515 {
2516 	assert(limit != SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
2517 
2518 	switch (limit) {
2519 	case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2520 		return true;
2521 	case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2522 	case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2523 	case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2524 		return false;
2525 	case SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES:
2526 	default:
2527 		return false;
2528 	}
2529 }
2530 
2531 static bool
2532 bdev_qos_io_to_limit(struct spdk_bdev_io *bdev_io)
2533 {
2534 	switch (bdev_io->type) {
2535 	case SPDK_BDEV_IO_TYPE_NVME_IO:
2536 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2537 	case SPDK_BDEV_IO_TYPE_READ:
2538 	case SPDK_BDEV_IO_TYPE_WRITE:
2539 		return true;
2540 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2541 		if (bdev_io->u.bdev.zcopy.start) {
2542 			return true;
2543 		} else {
2544 			return false;
2545 		}
2546 	default:
2547 		return false;
2548 	}
2549 }
2550 
2551 static bool
2552 bdev_is_read_io(struct spdk_bdev_io *bdev_io)
2553 {
2554 	switch (bdev_io->type) {
2555 	case SPDK_BDEV_IO_TYPE_NVME_IO:
2556 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2557 		/* Bit 1 (0x2) set for read operation */
2558 		if (bdev_io->u.nvme_passthru.cmd.opc & SPDK_NVME_OPC_READ) {
2559 			return true;
2560 		} else {
2561 			return false;
2562 		}
2563 	case SPDK_BDEV_IO_TYPE_READ:
2564 		return true;
2565 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2566 		/* Populate to read from disk */
2567 		if (bdev_io->u.bdev.zcopy.populate) {
2568 			return true;
2569 		} else {
2570 			return false;
2571 		}
2572 	default:
2573 		return false;
2574 	}
2575 }
2576 
2577 static uint64_t
2578 bdev_get_io_size_in_byte(struct spdk_bdev_io *bdev_io)
2579 {
2580 	struct spdk_bdev	*bdev = bdev_io->bdev;
2581 
2582 	switch (bdev_io->type) {
2583 	case SPDK_BDEV_IO_TYPE_NVME_IO:
2584 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2585 		return bdev_io->u.nvme_passthru.nbytes;
2586 	case SPDK_BDEV_IO_TYPE_READ:
2587 	case SPDK_BDEV_IO_TYPE_WRITE:
2588 		return bdev_io->u.bdev.num_blocks * bdev->blocklen;
2589 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2590 		/* Track the data in the start phase only */
2591 		if (bdev_io->u.bdev.zcopy.start) {
2592 			return bdev_io->u.bdev.num_blocks * bdev->blocklen;
2593 		} else {
2594 			return 0;
2595 		}
2596 	default:
2597 		return 0;
2598 	}
2599 }
2600 
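/* Atomically charge 'delta' (IOs or bytes) against this limit's remaining per-timeslice
 * quota.  Returns false if the IO may be submitted now (slight overruns are tolerated),
 * or true if it must be queued, in which case the charge is rolled back.
 */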
2601 static inline bool
2602 bdev_qos_rw_queue_io(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io, uint64_t delta)
2603 {
2604 	int64_t remaining_this_timeslice;
2605 
2606 	if (!limit->max_per_timeslice) {
2607 		/* The QoS is disabled */
2608 		return false;
2609 	}
2610 
2611 	remaining_this_timeslice = __atomic_sub_fetch(&limit->remaining_this_timeslice, delta,
2612 				   __ATOMIC_RELAXED);
2613 	if (remaining_this_timeslice + (int64_t)delta > 0) {
2614 		/* There was still a quota for this delta -> the IO shouldn't be queued
2615 		 *
2616 		 * We allow a slight quota overrun here so an IO bigger than the per-timeslice
2617 		 * quota can be allowed once in a while. Such an overrun is then taken into account by
2618 		 * the QoS poller, where the next timeslice quota is calculated.
2619 		 */
2620 		return false;
2621 	}
2622 
2623 	/* There was no quota for this delta -> the IO should be queued.
2624 	 * The remaining_this_timeslice must be rewound so it reflects the real
2625 	 * amount of IOs or bytes allowed.
2626 	 */
2627 	__atomic_add_fetch(
2628 		&limit->remaining_this_timeslice, delta, __ATOMIC_RELAXED);
2629 	return true;
2630 }
2631 
2632 static inline void
2633 bdev_qos_rw_rewind_io(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io, uint64_t delta)
2634 {
2635 	__atomic_add_fetch(&limit->remaining_this_timeslice, delta, __ATOMIC_RELAXED);
2636 }
2637 
2638 static bool
2639 bdev_qos_rw_iops_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2640 {
2641 	return bdev_qos_rw_queue_io(limit, io, 1);
2642 }
2643 
2644 static void
2645 bdev_qos_rw_iops_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2646 {
2647 	bdev_qos_rw_rewind_io(limit, io, 1);
2648 }
2649 
2650 static bool
2651 bdev_qos_rw_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2652 {
2653 	return bdev_qos_rw_queue_io(limit, io, bdev_get_io_size_in_byte(io));
2654 }
2655 
2656 static void
2657 bdev_qos_rw_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2658 {
2659 	bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2660 }
2661 
2662 static bool
2663 bdev_qos_r_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2664 {
2665 	if (bdev_is_read_io(io) == false) {
2666 		return false;
2667 	}
2668 
2669 	return bdev_qos_rw_bps_queue(limit, io);
2670 }
2671 
2672 static void
2673 bdev_qos_r_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2674 {
2675 	if (bdev_is_read_io(io) != false) {
2676 		bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2677 	}
2678 }
2679 
2680 static bool
2681 bdev_qos_w_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2682 {
2683 	if (bdev_is_read_io(io) == true) {
2684 		return false;
2685 	}
2686 
2687 	return bdev_qos_rw_bps_queue(limit, io);
2688 }
2689 
2690 static void
2691 bdev_qos_w_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2692 {
2693 	if (bdev_is_read_io(io) != true) {
2694 		bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2695 	}
2696 }
2697 
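/* Install the queue_io/rewind_quota callbacks matching each configured rate limit type;
 * limits left at SPDK_BDEV_QOS_LIMIT_NOT_DEFINED get no callback and are ignored.
 */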
2698 static void
2699 bdev_qos_set_ops(struct spdk_bdev_qos *qos)
2700 {
2701 	int i;
2702 
2703 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2704 		if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
2705 			qos->rate_limits[i].queue_io = NULL;
2706 			continue;
2707 		}
2708 
2709 		switch (i) {
2710 		case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2711 			qos->rate_limits[i].queue_io = bdev_qos_rw_iops_queue;
2712 			qos->rate_limits[i].rewind_quota = bdev_qos_rw_iops_rewind_quota;
2713 			break;
2714 		case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2715 			qos->rate_limits[i].queue_io = bdev_qos_rw_bps_queue;
2716 			qos->rate_limits[i].rewind_quota = bdev_qos_rw_bps_rewind_quota;
2717 			break;
2718 		case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2719 			qos->rate_limits[i].queue_io = bdev_qos_r_bps_queue;
2720 			qos->rate_limits[i].rewind_quota = bdev_qos_r_bps_rewind_quota;
2721 			break;
2722 		case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2723 			qos->rate_limits[i].queue_io = bdev_qos_w_bps_queue;
2724 			qos->rate_limits[i].rewind_quota = bdev_qos_w_bps_rewind_quota;
2725 			break;
2726 		default:
2727 			break;
2728 		}
2729 	}
2730 }
2731 
2732 static void
2733 _bdev_io_complete_in_submit(struct spdk_bdev_channel *bdev_ch,
2734 			    struct spdk_bdev_io *bdev_io,
2735 			    enum spdk_bdev_io_status status)
2736 {
2737 	bdev_io->internal.in_submit_request = true;
2738 	bdev_io_increment_outstanding(bdev_ch, bdev_ch->shared_resource);
2739 	spdk_bdev_io_complete(bdev_io, status);
2740 	bdev_io->internal.in_submit_request = false;
2741 }
2742 
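/* Hand an IO to the bdev module for submission.  Aborts that can be satisfied from the
 * nomem or buf-wait queues are completed here, undersized writes are rejected when
 * split_on_write_unit is set, and new IOs are appended to the nomem queue whenever that
 * queue is non-empty so that ordering is preserved.
 */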
2743 static inline void
2744 bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_io)
2745 {
2746 	struct spdk_bdev *bdev = bdev_io->bdev;
2747 	struct spdk_io_channel *ch = bdev_ch->channel;
2748 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
2749 
2750 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
2751 		struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
2752 		struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;
2753 
2754 		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
2755 		    bdev_abort_buf_io(mgmt_channel, bio_to_abort)) {
2756 			_bdev_io_complete_in_submit(bdev_ch, bdev_io,
2757 						    SPDK_BDEV_IO_STATUS_SUCCESS);
2758 			return;
2759 		}
2760 	}
2761 
2762 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE &&
2763 			  bdev_io->bdev->split_on_write_unit &&
2764 			  bdev_io->u.bdev.num_blocks < bdev_io->bdev->write_unit_size)) {
2765 		SPDK_ERRLOG("IO num_blocks %" PRIu64 " does not match the write_unit_size %u\n",
2766 			    bdev_io->u.bdev.num_blocks, bdev_io->bdev->write_unit_size);
2767 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2768 		return;
2769 	}
2770 
2771 	if (spdk_likely(TAILQ_EMPTY(&shared_resource->nomem_io))) {
2772 		bdev_io_increment_outstanding(bdev_ch, shared_resource);
2773 		bdev_io->internal.in_submit_request = true;
2774 		bdev_submit_request(bdev, ch, bdev_io);
2775 		bdev_io->internal.in_submit_request = false;
2776 	} else {
2777 		bdev_queue_nomem_io_tail(shared_resource, bdev_io, BDEV_IO_RETRY_STATE_SUBMIT);
2778 		if (shared_resource->nomem_threshold == 0 && shared_resource->io_outstanding == 0) {
2779 			/* Special case when we have nomem IOs and no outstanding IOs whose completions
2780 			 * could trigger a retry of the queued IOs */
2781 			bdev_shared_ch_retry_io(shared_resource);
2782 		}
2783 	}
2784 }
2785 
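/* Returns true if any active rate limit requires this IO to be queued; quota already
 * charged against lower-indexed limits is rewound so nothing is double-counted.
 */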
2786 static bool
2787 bdev_qos_queue_io(struct spdk_bdev_qos *qos, struct spdk_bdev_io *bdev_io)
2788 {
2789 	int i;
2790 
2791 	if (bdev_qos_io_to_limit(bdev_io) == true) {
2792 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2793 			if (!qos->rate_limits[i].queue_io) {
2794 				continue;
2795 			}
2796 
2797 			if (qos->rate_limits[i].queue_io(&qos->rate_limits[i],
2798 							 bdev_io) == true) {
2799 				for (i -= 1; i >= 0 ; i--) {
2800 					if (!qos->rate_limits[i].queue_io) {
2801 						continue;
2802 					}
2803 
2804 					qos->rate_limits[i].rewind_quota(&qos->rate_limits[i], bdev_io);
2805 				}
2806 				return true;
2807 			}
2808 		}
2809 	}
2810 
2811 	return false;
2812 }
2813 
2814 static int
2815 bdev_qos_io_submit(struct spdk_bdev_channel *ch, struct spdk_bdev_qos *qos)
2816 {
2817 	struct spdk_bdev_io		*bdev_io = NULL, *tmp = NULL;
2818 	int				submitted_ios = 0;
2819 
2820 	TAILQ_FOREACH_SAFE(bdev_io, &ch->qos_queued_io, internal.link, tmp) {
2821 		if (!bdev_qos_queue_io(qos, bdev_io)) {
2822 			TAILQ_REMOVE(&ch->qos_queued_io, bdev_io, internal.link);
2823 			bdev_io_do_submit(ch, bdev_io);
2824 
2825 			submitted_ios++;
2826 		}
2827 	}
2828 
2829 	return submitted_ios;
2830 }
2831 
2832 static void
2833 bdev_queue_io_wait_with_cb(struct spdk_bdev_io *bdev_io, spdk_bdev_io_wait_cb cb_fn)
2834 {
2835 	int rc;
2836 
2837 	bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
2838 	bdev_io->internal.waitq_entry.cb_fn = cb_fn;
2839 	bdev_io->internal.waitq_entry.cb_arg = bdev_io;
2840 	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
2841 				     &bdev_io->internal.waitq_entry);
2842 	if (rc != 0) {
2843 		SPDK_ERRLOG("Queue IO failed, rc=%d\n", rc);
2844 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2845 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2846 	}
2847 }
2848 
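/* A read/write must be split if it crosses an optimal/write-unit IO boundary, has more
 * iovs than max_num_segments, contains a segment longer than max_segment_size, or spans
 * more than max_rw_size blocks.
 */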
2849 static bool
2850 bdev_rw_should_split(struct spdk_bdev_io *bdev_io)
2851 {
2852 	uint32_t io_boundary;
2853 	struct spdk_bdev *bdev = bdev_io->bdev;
2854 	uint32_t max_segment_size = bdev->max_segment_size;
2855 	uint32_t max_size = bdev->max_rw_size;
2856 	int max_segs = bdev->max_num_segments;
2857 
2858 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
2859 		io_boundary = bdev->write_unit_size;
2860 	} else if (bdev->split_on_optimal_io_boundary) {
2861 		io_boundary = bdev->optimal_io_boundary;
2862 	} else {
2863 		io_boundary = 0;
2864 	}
2865 
2866 	if (spdk_likely(!io_boundary && !max_segs && !max_segment_size && !max_size)) {
2867 		return false;
2868 	}
2869 
2870 	if (io_boundary) {
2871 		uint64_t start_stripe, end_stripe;
2872 
2873 		start_stripe = bdev_io->u.bdev.offset_blocks;
2874 		end_stripe = start_stripe + bdev_io->u.bdev.num_blocks - 1;
2875 		/* Avoid expensive div operations if possible.  These spdk_u32 functions are very cheap. */
2876 		if (spdk_likely(spdk_u32_is_pow2(io_boundary))) {
2877 			start_stripe >>= spdk_u32log2(io_boundary);
2878 			end_stripe >>= spdk_u32log2(io_boundary);
2879 		} else {
2880 			start_stripe /= io_boundary;
2881 			end_stripe /= io_boundary;
2882 		}
2883 
2884 		if (start_stripe != end_stripe) {
2885 			return true;
2886 		}
2887 	}
2888 
2889 	if (max_segs) {
2890 		if (bdev_io->u.bdev.iovcnt > max_segs) {
2891 			return true;
2892 		}
2893 	}
2894 
2895 	if (max_segment_size) {
2896 		for (int i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
2897 			if (bdev_io->u.bdev.iovs[i].iov_len > max_segment_size) {
2898 				return true;
2899 			}
2900 		}
2901 	}
2902 
2903 	if (max_size) {
2904 		if (bdev_io->u.bdev.num_blocks > max_size) {
2905 			return true;
2906 		}
2907 	}
2908 
2909 	return false;
2910 }
2911 
2912 static bool
2913 bdev_unmap_should_split(struct spdk_bdev_io *bdev_io)
2914 {
2915 	uint32_t num_unmap_segments;
2916 
2917 	if (!bdev_io->bdev->max_unmap || !bdev_io->bdev->max_unmap_segments) {
2918 		return false;
2919 	}
2920 	num_unmap_segments = spdk_divide_round_up(bdev_io->u.bdev.num_blocks, bdev_io->bdev->max_unmap);
2921 	if (num_unmap_segments > bdev_io->bdev->max_unmap_segments) {
2922 		return true;
2923 	}
2924 
2925 	return false;
2926 }
2927 
2928 static bool
2929 bdev_write_zeroes_should_split(struct spdk_bdev_io *bdev_io)
2930 {
2931 	if (!bdev_io->bdev->max_write_zeroes) {
2932 		return false;
2933 	}
2934 
2935 	if (bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_write_zeroes) {
2936 		return true;
2937 	}
2938 
2939 	return false;
2940 }
2941 
2942 static bool
2943 bdev_copy_should_split(struct spdk_bdev_io *bdev_io)
2944 {
2945 	if (bdev_io->bdev->max_copy != 0 &&
2946 	    bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_copy) {
2947 		return true;
2948 	}
2949 
2950 	return false;
2951 }
2952 
2953 static bool
2954 bdev_io_should_split(struct spdk_bdev_io *bdev_io)
2955 {
2956 	switch (bdev_io->type) {
2957 	case SPDK_BDEV_IO_TYPE_READ:
2958 	case SPDK_BDEV_IO_TYPE_WRITE:
2959 		return bdev_rw_should_split(bdev_io);
2960 	case SPDK_BDEV_IO_TYPE_UNMAP:
2961 		return bdev_unmap_should_split(bdev_io);
2962 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2963 		return bdev_write_zeroes_should_split(bdev_io);
2964 	case SPDK_BDEV_IO_TYPE_COPY:
2965 		return bdev_copy_should_split(bdev_io);
2966 	default:
2967 		return false;
2968 	}
2969 }
2970 
2971 static uint32_t
2972 _to_next_boundary(uint64_t offset, uint32_t boundary)
2973 {
2974 	return (boundary - (offset % boundary));
2975 }
2976 
2977 static void bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
2978 
2979 static void _bdev_rw_split(void *_bdev_io);
2980 
2981 static void bdev_unmap_split(struct spdk_bdev_io *bdev_io);
2982 
2983 static void
2984 _bdev_unmap_split(void *_bdev_io)
2985 {
2986 	return bdev_unmap_split((struct spdk_bdev_io *)_bdev_io);
2987 }
2988 
2989 static void bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io);
2990 
2991 static void
2992 _bdev_write_zeroes_split(void *_bdev_io)
2993 {
2994 	return bdev_write_zeroes_split((struct spdk_bdev_io *)_bdev_io);
2995 }
2996 
2997 static void bdev_copy_split(struct spdk_bdev_io *bdev_io);
2998 
2999 static void
3000 _bdev_copy_split(void *_bdev_io)
3001 {
3002 	return bdev_copy_split((struct spdk_bdev_io *)_bdev_io);
3003 }
3004 
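/* Submit a single child IO covering 'num_blocks' starting at '*offset'.  On success the
 * split offset/remaining counters are advanced.  On -ENOMEM with no children outstanding,
 * an io_wait callback is registered to resume the split later; on any other error the
 * parent is marked failed and completed once no children remain outstanding.
 */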
3005 static int
3006 bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt, void *md_buf,
3007 		     uint64_t num_blocks, uint64_t *offset, uint64_t *remaining)
3008 {
3009 	int rc;
3010 	uint64_t current_offset, current_remaining, current_src_offset;
3011 	spdk_bdev_io_wait_cb io_wait_fn;
3012 
3013 	current_offset = *offset;
3014 	current_remaining = *remaining;
3015 
3016 	bdev_io->u.bdev.split_outstanding++;
3017 
3018 	io_wait_fn = _bdev_rw_split;
3019 	switch (bdev_io->type) {
3020 	case SPDK_BDEV_IO_TYPE_READ:
3021 		assert(bdev_io->u.bdev.accel_sequence == NULL);
3022 		rc = bdev_readv_blocks_with_md(bdev_io->internal.desc,
3023 					       spdk_io_channel_from_ctx(bdev_io->internal.ch),
3024 					       iov, iovcnt, md_buf, current_offset,
3025 					       num_blocks, bdev_io->internal.memory_domain,
3026 					       bdev_io->internal.memory_domain_ctx, NULL,
3027 					       bdev_io->u.bdev.dif_check_flags,
3028 					       bdev_io_split_done, bdev_io);
3029 		break;
3030 	case SPDK_BDEV_IO_TYPE_WRITE:
3031 		assert(bdev_io->u.bdev.accel_sequence == NULL);
3032 		rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
3033 						spdk_io_channel_from_ctx(bdev_io->internal.ch),
3034 						iov, iovcnt, md_buf, current_offset,
3035 						num_blocks, bdev_io->internal.memory_domain,
3036 						bdev_io->internal.memory_domain_ctx, NULL,
3037 						bdev_io->u.bdev.dif_check_flags,
3038 						bdev_io->u.bdev.nvme_cdw12.raw,
3039 						bdev_io->u.bdev.nvme_cdw13.raw,
3040 						bdev_io_split_done, bdev_io);
3041 		break;
3042 	case SPDK_BDEV_IO_TYPE_UNMAP:
3043 		io_wait_fn = _bdev_unmap_split;
3044 		rc = spdk_bdev_unmap_blocks(bdev_io->internal.desc,
3045 					    spdk_io_channel_from_ctx(bdev_io->internal.ch),
3046 					    current_offset, num_blocks,
3047 					    bdev_io_split_done, bdev_io);
3048 		break;
3049 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3050 		io_wait_fn = _bdev_write_zeroes_split;
3051 		rc = spdk_bdev_write_zeroes_blocks(bdev_io->internal.desc,
3052 						   spdk_io_channel_from_ctx(bdev_io->internal.ch),
3053 						   current_offset, num_blocks,
3054 						   bdev_io_split_done, bdev_io);
3055 		break;
3056 	case SPDK_BDEV_IO_TYPE_COPY:
3057 		io_wait_fn = _bdev_copy_split;
3058 		current_src_offset = bdev_io->u.bdev.copy.src_offset_blocks +
3059 				     (current_offset - bdev_io->u.bdev.offset_blocks);
3060 		rc = spdk_bdev_copy_blocks(bdev_io->internal.desc,
3061 					   spdk_io_channel_from_ctx(bdev_io->internal.ch),
3062 					   current_offset, current_src_offset, num_blocks,
3063 					   bdev_io_split_done, bdev_io);
3064 		break;
3065 	default:
3066 		assert(false);
3067 		rc = -EINVAL;
3068 		break;
3069 	}
3070 
3071 	if (rc == 0) {
3072 		current_offset += num_blocks;
3073 		current_remaining -= num_blocks;
3074 		bdev_io->u.bdev.split_current_offset_blocks = current_offset;
3075 		bdev_io->u.bdev.split_remaining_num_blocks = current_remaining;
3076 		*offset = current_offset;
3077 		*remaining = current_remaining;
3078 	} else {
3079 		bdev_io->u.bdev.split_outstanding--;
3080 		if (rc == -ENOMEM) {
3081 			if (bdev_io->u.bdev.split_outstanding == 0) {
3082 				/* No I/O is outstanding. Hence we should wait here. */
3083 				bdev_queue_io_wait_with_cb(bdev_io, io_wait_fn);
3084 			}
3085 		} else {
3086 			bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3087 			if (bdev_io->u.bdev.split_outstanding == 0) {
3088 				bdev_ch_remove_from_io_submitted(bdev_io);
3089 				spdk_trace_record(TRACE_BDEV_IO_DONE, bdev_io->internal.ch->trace_id,
3090 						  0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx,
3091 						  bdev_io->internal.ch->queue_depth);
3092 				bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3093 			}
3094 		}
3095 	}
3096 
3097 	return rc;
3098 }
3099 
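/* Carve the next batch of child read/write IOs out of the parent's iovs, honoring the IO
 * boundary, max segment size/count, and max IO size limits while keeping every child IO
 * block aligned.
 */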
3100 static void
3101 _bdev_rw_split(void *_bdev_io)
3102 {
3103 	struct iovec *parent_iov, *iov;
3104 	struct spdk_bdev_io *bdev_io = _bdev_io;
3105 	struct spdk_bdev *bdev = bdev_io->bdev;
3106 	uint64_t parent_offset, current_offset, remaining;
3107 	uint32_t parent_iov_offset, parent_iovcnt, parent_iovpos, child_iovcnt;
3108 	uint32_t to_next_boundary, to_next_boundary_bytes, to_last_block_bytes;
3109 	uint32_t iovcnt, iov_len, child_iovsize;
3110 	uint32_t blocklen = bdev->blocklen;
3111 	uint32_t io_boundary;
3112 	uint32_t max_segment_size = bdev->max_segment_size;
3113 	uint32_t max_child_iovcnt = bdev->max_num_segments;
3114 	uint32_t max_size = bdev->max_rw_size;
3115 	void *md_buf = NULL;
3116 	int rc;
3117 
3118 	max_size = max_size ? max_size : UINT32_MAX;
3119 	max_segment_size = max_segment_size ? max_segment_size : UINT32_MAX;
3120 	max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, SPDK_BDEV_IO_NUM_CHILD_IOV) :
3121 			   SPDK_BDEV_IO_NUM_CHILD_IOV;
3122 
3123 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
3124 		io_boundary = bdev->write_unit_size;
3125 	} else if (bdev->split_on_optimal_io_boundary) {
3126 		io_boundary = bdev->optimal_io_boundary;
3127 	} else {
3128 		io_boundary = UINT32_MAX;
3129 	}
3130 
3131 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
3132 	current_offset = bdev_io->u.bdev.split_current_offset_blocks;
3133 	parent_offset = bdev_io->u.bdev.offset_blocks;
3134 	parent_iov_offset = (current_offset - parent_offset) * blocklen;
3135 	parent_iovcnt = bdev_io->u.bdev.iovcnt;
3136 
3137 	for (parent_iovpos = 0; parent_iovpos < parent_iovcnt; parent_iovpos++) {
3138 		parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
3139 		if (parent_iov_offset < parent_iov->iov_len) {
3140 			break;
3141 		}
3142 		parent_iov_offset -= parent_iov->iov_len;
3143 	}
3144 
3145 	child_iovcnt = 0;
3146 	while (remaining > 0 && parent_iovpos < parent_iovcnt &&
3147 	       child_iovcnt < SPDK_BDEV_IO_NUM_CHILD_IOV) {
3148 		to_next_boundary = _to_next_boundary(current_offset, io_boundary);
3149 		to_next_boundary = spdk_min(remaining, to_next_boundary);
3150 		to_next_boundary = spdk_min(max_size, to_next_boundary);
3151 		to_next_boundary_bytes = to_next_boundary * blocklen;
3152 
3153 		iov = &bdev_io->child_iov[child_iovcnt];
3154 		iovcnt = 0;
3155 
3156 		if (bdev_io->u.bdev.md_buf) {
3157 			md_buf = (char *)bdev_io->u.bdev.md_buf +
3158 				 (current_offset - parent_offset) * spdk_bdev_get_md_size(bdev);
3159 		}
3160 
3161 		child_iovsize = spdk_min(SPDK_BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
3162 		while (to_next_boundary_bytes > 0 && parent_iovpos < parent_iovcnt &&
3163 		       iovcnt < child_iovsize) {
3164 			parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
3165 			iov_len = parent_iov->iov_len - parent_iov_offset;
3166 
3167 			iov_len = spdk_min(iov_len, max_segment_size);
3168 			iov_len = spdk_min(iov_len, to_next_boundary_bytes);
3169 			to_next_boundary_bytes -= iov_len;
3170 
3171 			bdev_io->child_iov[child_iovcnt].iov_base = parent_iov->iov_base + parent_iov_offset;
3172 			bdev_io->child_iov[child_iovcnt].iov_len = iov_len;
3173 
3174 			if (iov_len < parent_iov->iov_len - parent_iov_offset) {
3175 				parent_iov_offset += iov_len;
3176 			} else {
3177 				parent_iovpos++;
3178 				parent_iov_offset = 0;
3179 			}
3180 			child_iovcnt++;
3181 			iovcnt++;
3182 		}
3183 
3184 		if (to_next_boundary_bytes > 0) {
3185 			/* We had to stop this child I/O early because we ran out of
3186 			 * child_iov space or were limited by max_num_segments.
3187 			 * Ensure the iovs are aligned with the block size and
3188 			 * then adjust to_next_boundary before starting the
3189 			 * child I/O.
3190 			 */
3191 			assert(child_iovcnt == SPDK_BDEV_IO_NUM_CHILD_IOV ||
3192 			       iovcnt == child_iovsize);
3193 			to_last_block_bytes = to_next_boundary_bytes % blocklen;
3194 			if (to_last_block_bytes != 0) {
3195 				uint32_t child_iovpos = child_iovcnt - 1;
3196 				/* don't decrease child_iovcnt when it equals SPDK_BDEV_IO_NUM_CHILD_IOV,
3197 				 * so the loop will naturally end
3198 				 */
3199 
3200 				to_last_block_bytes = blocklen - to_last_block_bytes;
3201 				to_next_boundary_bytes += to_last_block_bytes;
3202 				while (to_last_block_bytes > 0 && iovcnt > 0) {
3203 					iov_len = spdk_min(to_last_block_bytes,
3204 							   bdev_io->child_iov[child_iovpos].iov_len);
3205 					bdev_io->child_iov[child_iovpos].iov_len -= iov_len;
3206 					if (bdev_io->child_iov[child_iovpos].iov_len == 0) {
3207 						child_iovpos--;
3208 						if (--iovcnt == 0) {
3209 							/* If the child IO is less than a block size, just return.
3210 							 * If the first child IO of any split round is less than
3211 							 * a block size, exit with an error.
3212 							 */
3213 							if (bdev_io->u.bdev.split_outstanding == 0) {
3214 								SPDK_ERRLOG("The first child io was less than a block size\n");
3215 								bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3216 								bdev_ch_remove_from_io_submitted(bdev_io);
3217 								spdk_trace_record(TRACE_BDEV_IO_DONE, bdev_io->internal.ch->trace_id,
3218 										  0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx,
3219 										  bdev_io->internal.ch->queue_depth);
3220 								bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3221 							}
3222 
3223 							return;
3224 						}
3225 					}
3226 
3227 					to_last_block_bytes -= iov_len;
3228 
3229 					if (parent_iov_offset == 0) {
3230 						parent_iovpos--;
3231 						parent_iov_offset = bdev_io->u.bdev.iovs[parent_iovpos].iov_len;
3232 					}
3233 					parent_iov_offset -= iov_len;
3234 				}
3235 
3236 				assert(to_last_block_bytes == 0);
3237 			}
3238 			to_next_boundary -= to_next_boundary_bytes / blocklen;
3239 		}
3240 
3241 		rc = bdev_io_split_submit(bdev_io, iov, iovcnt, md_buf, to_next_boundary,
3242 					  &current_offset, &remaining);
3243 		if (spdk_unlikely(rc)) {
3244 			return;
3245 		}
3246 	}
3247 }
3248 
3249 static void
3250 bdev_unmap_split(struct spdk_bdev_io *bdev_io)
3251 {
3252 	uint64_t offset, unmap_blocks, remaining, max_unmap_blocks;
3253 	uint32_t num_children_reqs = 0;
3254 	int rc;
3255 
3256 	offset = bdev_io->u.bdev.split_current_offset_blocks;
3257 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
3258 	max_unmap_blocks = bdev_io->bdev->max_unmap * bdev_io->bdev->max_unmap_segments;
3259 
3260 	while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
3261 		unmap_blocks = spdk_min(remaining, max_unmap_blocks);
3262 
3263 		rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, unmap_blocks,
3264 					  &offset, &remaining);
3265 		if (spdk_likely(rc == 0)) {
3266 			num_children_reqs++;
3267 		} else {
3268 			return;
3269 		}
3270 	}
3271 }
3272 
3273 static void
3274 bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io)
3275 {
3276 	uint64_t offset, write_zeroes_blocks, remaining;
3277 	uint32_t num_children_reqs = 0;
3278 	int rc;
3279 
3280 	offset = bdev_io->u.bdev.split_current_offset_blocks;
3281 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
3282 
3283 	while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
3284 		write_zeroes_blocks = spdk_min(remaining, bdev_io->bdev->max_write_zeroes);
3285 
3286 		rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, write_zeroes_blocks,
3287 					  &offset, &remaining);
3288 		if (spdk_likely(rc == 0)) {
3289 			num_children_reqs++;
3290 		} else {
3291 			return;
3292 		}
3293 	}
3294 }
3295 
3296 static void
3297 bdev_copy_split(struct spdk_bdev_io *bdev_io)
3298 {
3299 	uint64_t offset, copy_blocks, remaining;
3300 	uint32_t num_children_reqs = 0;
3301 	int rc;
3302 
3303 	offset = bdev_io->u.bdev.split_current_offset_blocks;
3304 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
3305 
3306 	assert(bdev_io->bdev->max_copy != 0);
3307 	while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_COPY_REQS)) {
3308 		copy_blocks = spdk_min(remaining, bdev_io->bdev->max_copy);
3309 
3310 		rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, copy_blocks,
3311 					  &offset, &remaining);
3312 		if (spdk_likely(rc == 0)) {
3313 			num_children_reqs++;
3314 		} else {
3315 			return;
3316 		}
3317 	}
3318 }
3319 
3320 static void
3321 parent_bdev_io_complete(void *ctx, int rc)
3322 {
3323 	struct spdk_bdev_io *parent_io = ctx;
3324 
3325 	if (rc) {
3326 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3327 	}
3328 
3329 	parent_io->internal.cb(parent_io, parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
3330 			       parent_io->internal.caller_ctx);
3331 }
3332 
3333 static void
3334 bdev_io_complete_parent_sequence_cb(void *ctx, int status)
3335 {
3336 	struct spdk_bdev_io *bdev_io = ctx;
3337 
3338 	/* u.bdev.accel_sequence should have already been cleared at this point */
3339 	assert(bdev_io->u.bdev.accel_sequence == NULL);
3340 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3341 	bdev_io->internal.accel_sequence = NULL;
3342 
3343 	if (spdk_unlikely(status != 0)) {
3344 		SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
3345 	}
3346 
3347 	parent_bdev_io_complete(bdev_io, status);
3348 }
3349 
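/* Completion callback for each child IO of a split.  Once no children remain outstanding,
 * either finish the parent (running any pending accel sequence or bounce-buffer push
 * first) or continue splitting the remaining blocks.
 */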
3350 static void
3351 bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
3352 {
3353 	struct spdk_bdev_io *parent_io = cb_arg;
3354 
3355 	spdk_bdev_free_io(bdev_io);
3356 
3357 	if (!success) {
3358 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3359 		/* If any child I/O failed, stop the splitting process. */
3360 		parent_io->u.bdev.split_current_offset_blocks += parent_io->u.bdev.split_remaining_num_blocks;
3361 		parent_io->u.bdev.split_remaining_num_blocks = 0;
3362 	}
3363 	parent_io->u.bdev.split_outstanding--;
3364 	if (parent_io->u.bdev.split_outstanding != 0) {
3365 		return;
3366 	}
3367 
3368 	/*
3369 	 * Parent I/O finishes when all blocks are consumed.
3370 	 */
3371 	if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
3372 		assert(parent_io->internal.cb != bdev_io_split_done);
3373 		bdev_ch_remove_from_io_submitted(parent_io);
3374 		spdk_trace_record(TRACE_BDEV_IO_DONE, parent_io->internal.ch->trace_id,
3375 				  0, (uintptr_t)parent_io, bdev_io->internal.caller_ctx,
3376 				  parent_io->internal.ch->queue_depth);
3377 
3378 		if (spdk_likely(parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
3379 			if (bdev_io_needs_sequence_exec(parent_io->internal.desc, parent_io)) {
3380 				bdev_io_exec_sequence(parent_io, bdev_io_complete_parent_sequence_cb);
3381 				return;
3382 			} else if (parent_io->internal.orig_iovcnt != 0 &&
3383 				   !bdev_io_use_accel_sequence(bdev_io)) {
3384 				/* bdev IO will be completed in the callback */
3385 				_bdev_io_push_bounce_data_buffer(parent_io, parent_bdev_io_complete);
3386 				return;
3387 			}
3388 		}
3389 
3390 		parent_bdev_io_complete(parent_io, 0);
3391 		return;
3392 	}
3393 
3394 	/*
3395 	 * Continue with the splitting process.  This function will complete the parent I/O if the
3396 	 * splitting is done.
3397 	 */
3398 	switch (parent_io->type) {
3399 	case SPDK_BDEV_IO_TYPE_READ:
3400 	case SPDK_BDEV_IO_TYPE_WRITE:
3401 		_bdev_rw_split(parent_io);
3402 		break;
3403 	case SPDK_BDEV_IO_TYPE_UNMAP:
3404 		bdev_unmap_split(parent_io);
3405 		break;
3406 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3407 		bdev_write_zeroes_split(parent_io);
3408 		break;
3409 	case SPDK_BDEV_IO_TYPE_COPY:
3410 		bdev_copy_split(parent_io);
3411 		break;
3412 	default:
3413 		assert(false);
3414 		break;
3415 	}
3416 }
3417 
3418 static void bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
3419 				     bool success);
3420 
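/* Entry point for splitting an I/O that exceeds the bdev's limits.  Initializes the split
 * bookkeeping and dispatches to the per-type split routine; reads without a data buffer
 * first get one allocated via spdk_bdev_io_get_buf().
 */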
3421 static void
3422 bdev_io_split(struct spdk_bdev_io *bdev_io)
3423 {
3424 	assert(bdev_io_should_split(bdev_io));
3425 
3426 	bdev_io->u.bdev.split_current_offset_blocks = bdev_io->u.bdev.offset_blocks;
3427 	bdev_io->u.bdev.split_remaining_num_blocks = bdev_io->u.bdev.num_blocks;
3428 	bdev_io->u.bdev.split_outstanding = 0;
3429 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
3430 
3431 	switch (bdev_io->type) {
3432 	case SPDK_BDEV_IO_TYPE_READ:
3433 	case SPDK_BDEV_IO_TYPE_WRITE:
3434 		if (_is_buf_allocated(bdev_io->u.bdev.iovs)) {
3435 			_bdev_rw_split(bdev_io);
3436 		} else {
3437 			assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3438 			spdk_bdev_io_get_buf(bdev_io, bdev_rw_split_get_buf_cb,
3439 					     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
3440 		}
3441 		break;
3442 	case SPDK_BDEV_IO_TYPE_UNMAP:
3443 		bdev_unmap_split(bdev_io);
3444 		break;
3445 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3446 		bdev_write_zeroes_split(bdev_io);
3447 		break;
3448 	case SPDK_BDEV_IO_TYPE_COPY:
3449 		bdev_copy_split(bdev_io);
3450 		break;
3451 	default:
3452 		assert(false);
3453 		break;
3454 	}
3455 }
3456 
3457 static void
3458 bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
3459 {
3460 	if (!success) {
3461 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
3462 		return;
3463 	}
3464 
3465 	_bdev_rw_split(bdev_io);
3466 }
3467 
3468 /* Explicitly mark this inline, since it's used as a function pointer and otherwise won't
3469  *  be inlined, at least on some compilers.
3470  */
3471 static inline void
3472 _bdev_io_submit(void *ctx)
3473 {
3474 	struct spdk_bdev_io *bdev_io = ctx;
3475 	struct spdk_bdev *bdev = bdev_io->bdev;
3476 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
3477 
3478 	if (spdk_likely(bdev_ch->flags == 0)) {
3479 		bdev_io_do_submit(bdev_ch, bdev_io);
3480 		return;
3481 	}
3482 
3483 	if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
3484 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
3485 	} else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
3486 		if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) &&
3487 		    bdev_abort_queued_io(&bdev_ch->qos_queued_io, bdev_io->u.abort.bio_to_abort)) {
3488 			_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
3489 		} else {
3490 			TAILQ_INSERT_TAIL(&bdev_ch->qos_queued_io, bdev_io, internal.link);
3491 			bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
3492 		}
3493 	} else {
3494 		SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
3495 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
3496 	}
3497 }
3498 
3499 bool bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2);
3500 
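/* Return true if the two LBA ranges overlap; zero-length ranges never overlap anything. */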
3501 bool
3502 bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2)
3503 {
3504 	if (range1->length == 0 || range2->length == 0) {
3505 		return false;
3506 	}
3507 
3508 	if (range1->offset + range1->length <= range2->offset) {
3509 		return false;
3510 	}
3511 
3512 	if (range2->offset + range2->length <= range1->offset) {
3513 		return false;
3514 	}
3515 
3516 	return true;
3517 }
3518 
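/* Return true if the I/O must be deferred because it touches a locked LBA range that it
 * does not own.  NVMe passthru commands are not decoded and are conservatively treated
 * as overlapping.
 */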
3519 static bool
3520 bdev_io_range_is_locked(struct spdk_bdev_io *bdev_io, struct lba_range *range)
3521 {
3522 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3523 	struct lba_range r;
3524 
3525 	switch (bdev_io->type) {
3526 	case SPDK_BDEV_IO_TYPE_NVME_IO:
3527 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
3528 		/* Don't try to decode the NVMe command - just assume worst-case and that
3529 		 * it overlaps a locked range.
3530 		 */
3531 		return true;
3532 	case SPDK_BDEV_IO_TYPE_READ:
3533 		if (!range->quiesce) {
3534 			return false;
3535 		}
3536 	/* fallthrough */
3537 	case SPDK_BDEV_IO_TYPE_WRITE:
3538 	case SPDK_BDEV_IO_TYPE_UNMAP:
3539 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3540 	case SPDK_BDEV_IO_TYPE_ZCOPY:
3541 	case SPDK_BDEV_IO_TYPE_COPY:
3542 		r.offset = bdev_io->u.bdev.offset_blocks;
3543 		r.length = bdev_io->u.bdev.num_blocks;
3544 		if (!bdev_lba_range_overlapped(range, &r)) {
3545 			/* This I/O doesn't overlap the specified LBA range. */
3546 			return false;
3547 		} else if (range->owner_ch == ch && range->locked_ctx == bdev_io->internal.caller_ctx) {
3548 			/* This I/O overlaps, but the I/O is on the same channel that locked this
3549 			 * range, and the caller_ctx is the same as the locked_ctx.  This means
3550 			 * that this I/O is associated with the lock, and is allowed to execute.
3551 			 */
3552 			return false;
3553 		} else {
3554 			return true;
3555 		}
3556 	default:
3557 		return false;
3558 	}
3559 }
3560 
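/* Submit an I/O on its channel.  I/Os that overlap a locked LBA range are parked on the
 * io_locked queue; otherwise the I/O is traced, split if necessary, and submitted.
 */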
3561 void
3562 bdev_io_submit(struct spdk_bdev_io *bdev_io)
3563 {
3564 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3565 
3566 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
3567 
3568 	if (!TAILQ_EMPTY(&ch->locked_ranges)) {
3569 		struct lba_range *range;
3570 
3571 		TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
3572 			if (bdev_io_range_is_locked(bdev_io, range)) {
3573 				TAILQ_INSERT_TAIL(&ch->io_locked, bdev_io, internal.ch_link);
3574 				return;
3575 			}
3576 		}
3577 	}
3578 
3579 	bdev_ch_add_to_io_submitted(bdev_io);
3580 
3581 	bdev_io->internal.submit_tsc = spdk_get_ticks();
3582 	spdk_trace_record_tsc(bdev_io->internal.submit_tsc, TRACE_BDEV_IO_START,
3583 			      ch->trace_id, bdev_io->u.bdev.num_blocks,
3584 			      (uintptr_t)bdev_io, (uint64_t)bdev_io->type, bdev_io->internal.caller_ctx,
3585 			      bdev_io->u.bdev.offset_blocks, ch->queue_depth);
3586 
3587 	if (bdev_io->internal.split) {
3588 		bdev_io_split(bdev_io);
3589 		return;
3590 	}
3591 
3592 	_bdev_io_submit(bdev_io);
3593 }
3594 
3595 static inline void
3596 _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
3597 {
3598 	/* The bdev doesn't support memory domains, so the buffers in this IO request can't
3599 	 * be accessed directly and bounce buffers must be allocated before issuing the IO.
3600 	 * For a write operation we need to pull data from the memory domain before submitting the IO.
3601 	 * Once a read operation completes, we need to use the memory domain push functionality to
3602 	 * update the data in the original memory domain IO buffer.
3603 	 * This IO request will then go through the regular IO flow, so clear the memory domain pointers. */
3604 	bdev_io->u.bdev.memory_domain = NULL;
3605 	bdev_io->u.bdev.memory_domain_ctx = NULL;
3606 	_bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb,
3607 				       bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
3608 }
3609 
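/* Submit an I/O issued through the ext API, handling memory domains and accel sequences
 * before it enters the regular submission path.
 */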
3610 static inline void
3611 _bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
3612 {
3613 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3614 	bool needs_exec = bdev_io_needs_sequence_exec(desc, bdev_io);
3615 
3616 	if (spdk_unlikely(ch->flags & BDEV_CH_RESET_IN_PROGRESS)) {
3617 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
3618 		bdev_io_complete_unsubmitted(bdev_io);
3619 		return;
3620 	}
3621 
3622 	/* We need to allocate bounce buffer if bdev doesn't support memory domains, or if it does
3623 	 * support them, but we need to execute an accel sequence and the data buffer is from accel
3624 	 * memory domain (to avoid doing a push/pull from that domain).
3625 	 */
3626 	if ((bdev_io->internal.memory_domain && !desc->memory_domains_supported) ||
3627 	    (needs_exec && bdev_io->internal.memory_domain == spdk_accel_get_memory_domain())) {
3628 		_bdev_io_ext_use_bounce_buffer(bdev_io);
3629 		return;
3630 	}
3631 
3632 	if (needs_exec) {
3633 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
3634 			bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
3635 			return;
3636 		}
3637 		/* For reads we'll execute the sequence after the data is read, so, for now, just
3638 		 * clear out the accel_sequence pointer and submit the IO */
3639 		assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3640 		bdev_io->u.bdev.accel_sequence = NULL;
3641 	}
3642 
3643 	bdev_io_submit(bdev_io);
3644 }
3645 
3646 static void
3647 bdev_io_submit_reset(struct spdk_bdev_io *bdev_io)
3648 {
3649 	struct spdk_bdev *bdev = bdev_io->bdev;
3650 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
3651 	struct spdk_io_channel *ch = bdev_ch->channel;
3652 
3653 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
3654 
3655 	bdev_io->internal.in_submit_request = true;
3656 	bdev_submit_request(bdev, ch, bdev_io);
3657 	bdev_io->internal.in_submit_request = false;
3658 }
3659 
3660 void
3661 bdev_io_init(struct spdk_bdev_io *bdev_io,
3662 	     struct spdk_bdev *bdev, void *cb_arg,
3663 	     spdk_bdev_io_completion_cb cb)
3664 {
3665 	bdev_io->bdev = bdev;
3666 	bdev_io->internal.caller_ctx = cb_arg;
3667 	bdev_io->internal.cb = cb;
3668 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
3669 	bdev_io->internal.in_submit_request = false;
3670 	bdev_io->internal.buf = NULL;
3671 	bdev_io->internal.orig_iovs = NULL;
3672 	bdev_io->internal.orig_iovcnt = 0;
3673 	bdev_io->internal.orig_md_iov.iov_base = NULL;
3674 	bdev_io->internal.error.nvme.cdw0 = 0;
3675 	bdev_io->num_retries = 0;
3676 	bdev_io->internal.get_buf_cb = NULL;
3677 	bdev_io->internal.get_aux_buf_cb = NULL;
3678 	bdev_io->internal.memory_domain = NULL;
3679 	bdev_io->internal.memory_domain_ctx = NULL;
3680 	bdev_io->internal.data_transfer_cpl = NULL;
3681 	bdev_io->internal.split = bdev_io_should_split(bdev_io);
3682 	bdev_io->internal.accel_sequence = NULL;
3683 	bdev_io->internal.has_accel_sequence = false;
3684 }
3685 
3686 static bool
3687 bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
3688 {
3689 	return bdev->fn_table->io_type_supported(bdev->ctxt, io_type);
3690 }
3691 
3692 bool
3693 spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
3694 {
3695 	bool supported;
3696 
3697 	supported = bdev_io_type_supported(bdev, io_type);
3698 
3699 	if (!supported) {
3700 		switch (io_type) {
3701 		case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3702 			/* The bdev layer will emulate write zeroes as long as write is supported. */
3703 			supported = bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE);
3704 			break;
3705 		default:
3706 			break;
3707 		}
3708 	}
3709 
3710 	return supported;
3711 }
3712 
3713 static const char *g_io_type_strings[] = {
3714 	[SPDK_BDEV_IO_TYPE_READ] = "read",
3715 	[SPDK_BDEV_IO_TYPE_WRITE] = "write",
3716 	[SPDK_BDEV_IO_TYPE_UNMAP] = "unmap",
3717 	[SPDK_BDEV_IO_TYPE_FLUSH] = "flush",
3718 	[SPDK_BDEV_IO_TYPE_RESET] = "reset",
3719 	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = "nvme_admin",
3720 	[SPDK_BDEV_IO_TYPE_NVME_IO] = "nvme_io",
3721 	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = "nvme_io_md",
3722 	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = "write_zeroes",
3723 	[SPDK_BDEV_IO_TYPE_ZCOPY] = "zcopy",
3724 	[SPDK_BDEV_IO_TYPE_GET_ZONE_INFO] = "get_zone_info",
3725 	[SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT] = "zone_management",
3726 	[SPDK_BDEV_IO_TYPE_ZONE_APPEND] = "zone_append",
3727 	[SPDK_BDEV_IO_TYPE_COMPARE] = "compare",
3728 	[SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE] = "compare_and_write",
3729 	[SPDK_BDEV_IO_TYPE_ABORT] = "abort",
3730 	[SPDK_BDEV_IO_TYPE_SEEK_HOLE] = "seek_hole",
3731 	[SPDK_BDEV_IO_TYPE_SEEK_DATA] = "seek_data",
3732 	[SPDK_BDEV_IO_TYPE_COPY] = "copy",
3733 	[SPDK_BDEV_IO_TYPE_NVME_IOV_MD] = "nvme_iov_md",
3734 };
3735 
3736 const char *
3737 spdk_bdev_get_io_type_name(enum spdk_bdev_io_type io_type)
3738 {
3739 	if (io_type <= SPDK_BDEV_IO_TYPE_INVALID || io_type >= SPDK_BDEV_NUM_IO_TYPES) {
3740 		return NULL;
3741 	}
3742 
3743 	return g_io_type_strings[io_type];
3744 }
3745 
3746 int
3747 spdk_bdev_get_io_type(const char *io_type_string)
3748 {
3749 	int i;
3750 
3751 	for (i = SPDK_BDEV_IO_TYPE_READ; i < SPDK_BDEV_NUM_IO_TYPES; ++i) {
3752 		if (!strcmp(io_type_string, g_io_type_strings[i])) {
3753 			return i;
3754 		}
3755 	}
3756 
3757 	return -1;
3758 }
3759 
3760 uint64_t
3761 spdk_bdev_io_get_submit_tsc(struct spdk_bdev_io *bdev_io)
3762 {
3763 	return bdev_io->internal.submit_tsc;
3764 }
3765 
3766 int
3767 spdk_bdev_dump_info_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
3768 {
3769 	if (bdev->fn_table->dump_info_json) {
3770 		return bdev->fn_table->dump_info_json(bdev->ctxt, w);
3771 	}
3772 
3773 	return 0;
3774 }
3775 
3776 static void
3777 bdev_qos_update_max_quota_per_timeslice(struct spdk_bdev_qos *qos)
3778 {
3779 	uint32_t max_per_timeslice = 0;
3780 	int i;
3781 
3782 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3783 		if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
3784 			qos->rate_limits[i].max_per_timeslice = 0;
3785 			continue;
3786 		}
3787 
3788 		max_per_timeslice = qos->rate_limits[i].limit *
3789 				    SPDK_BDEV_QOS_TIMESLICE_IN_USEC / SPDK_SEC_TO_USEC;
3790 
3791 		qos->rate_limits[i].max_per_timeslice = spdk_max(max_per_timeslice,
3792 							qos->rate_limits[i].min_per_timeslice);
3793 
3794 		__atomic_store_n(&qos->rate_limits[i].remaining_this_timeslice,
3795 				 qos->rate_limits[i].max_per_timeslice, __ATOMIC_RELEASE);
3796 	}
3797 
3798 	bdev_qos_set_ops(qos);
3799 }
3800 
3801 static void
3802 bdev_channel_submit_qos_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
3803 			   struct spdk_io_channel *io_ch, void *ctx)
3804 {
3805 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
3806 	int status;
3807 
3808 	bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
3809 
3810 	/* If all I/Os were sent, continue the iteration; otherwise stop it. */
3811 	/* TODO: round robin across channels */
3812 	status = TAILQ_EMPTY(&bdev_ch->qos_queued_io) ? 0 : 1;
3813 
3814 	spdk_bdev_for_each_channel_continue(i, status);
3815 }
3816 
3818 static void
3819 bdev_channel_submit_qos_io_done(struct spdk_bdev *bdev, void *ctx, int status)
3820 {
3821 
3822 }
3823 
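/* QoS poller, running on the QoS thread.  For each expired timeslice it replenishes the
 * per-type rate-limit budgets (accounting for any overrun from the previous timeslice)
 * and then resubmits queued I/O on every channel of the bdev.
 */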
3824 static int
3825 bdev_channel_poll_qos(void *arg)
3826 {
3827 	struct spdk_bdev *bdev = arg;
3828 	struct spdk_bdev_qos *qos = bdev->internal.qos;
3829 	uint64_t now = spdk_get_ticks();
3830 	int i;
3831 	int64_t remaining_last_timeslice;
3832 
3833 	if (spdk_unlikely(qos->thread == NULL)) {
3834 		/* The old QoS was unbound so it could be removed, and the new QoS is not enabled yet. */
3835 		return SPDK_POLLER_IDLE;
3836 	}
3837 
3838 	if (now < (qos->last_timeslice + qos->timeslice_size)) {
3839 		/* We received our callback earlier than expected - return
3840 		 *  immediately and wait to do accounting until at least one
3841 		 *  timeslice has actually expired.  This should never happen
3842 		 *  with a well-behaved timer implementation.
3843 		 */
3844 		return SPDK_POLLER_IDLE;
3845 	}
3846 
3847 	/* Reset for next round of rate limiting */
3848 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3849 		/* We may have allowed the IOs or bytes to slightly overrun in the last
3850 		 * timeslice. remaining_this_timeslice is signed, so if it's negative
3851 		 * here, we'll account for the overrun so that the next timeslice will
3852 		 * be appropriately reduced.
3853 		 */
3854 		remaining_last_timeslice = __atomic_exchange_n(&qos->rate_limits[i].remaining_this_timeslice,
3855 					   0, __ATOMIC_RELAXED);
3856 		if (remaining_last_timeslice < 0) {
3857 			/* There could be a race condition here as both bdev_qos_rw_queue_io() and bdev_channel_poll_qos()
3858 			 * potentially use 2 atomic ops each, so they can intertwine.
3859 			 * This race can potentialy cause the limits to be a little fuzzy but won't cause any real damage.
3860 			 * This race can potentially cause the limits to be a little fuzzy, but it won't cause any real damage.
3861 			__atomic_store_n(&qos->rate_limits[i].remaining_this_timeslice,
3862 					 remaining_last_timeslice, __ATOMIC_RELAXED);
3863 		}
3864 	}
3865 
3866 	while (now >= (qos->last_timeslice + qos->timeslice_size)) {
3867 		qos->last_timeslice += qos->timeslice_size;
3868 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3869 			__atomic_add_fetch(&qos->rate_limits[i].remaining_this_timeslice,
3870 					   qos->rate_limits[i].max_per_timeslice, __ATOMIC_RELAXED);
3871 		}
3872 	}
3873 
3874 	spdk_bdev_for_each_channel(bdev, bdev_channel_submit_qos_io, qos,
3875 				   bdev_channel_submit_qos_io_done);
3876 
3877 	return SPDK_POLLER_BUSY;
3878 }
3879 
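/* Release the per-channel resources: stats, locked ranges, the module and accel channels,
 * and the shared resource (freed once its reference count drops to zero).
 */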
3880 static void
3881 bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
3882 {
3883 	struct spdk_bdev_shared_resource *shared_resource;
3884 	struct lba_range *range;
3885 
3886 	bdev_free_io_stat(ch->stat);
3887 #ifdef SPDK_CONFIG_VTUNE
3888 	bdev_free_io_stat(ch->prev_stat);
3889 #endif
3890 
3891 	while (!TAILQ_EMPTY(&ch->locked_ranges)) {
3892 		range = TAILQ_FIRST(&ch->locked_ranges);
3893 		TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
3894 		free(range);
3895 	}
3896 
3897 	spdk_put_io_channel(ch->channel);
3898 	spdk_put_io_channel(ch->accel_channel);
3899 
3900 	shared_resource = ch->shared_resource;
3901 
3902 	assert(TAILQ_EMPTY(&ch->io_locked));
3903 	assert(TAILQ_EMPTY(&ch->io_submitted));
3904 	assert(TAILQ_EMPTY(&ch->io_accel_exec));
3905 	assert(TAILQ_EMPTY(&ch->io_memory_domain));
3906 	assert(ch->io_outstanding == 0);
3907 	assert(shared_resource->ref > 0);
3908 	shared_resource->ref--;
3909 	if (shared_resource->ref == 0) {
3910 		assert(shared_resource->io_outstanding == 0);
3911 		TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
3912 		spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
3913 		spdk_poller_unregister(&shared_resource->nomem_poller);
3914 		free(shared_resource);
3915 	}
3916 }
3917 
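/* Called with bdev->internal.spinlock held.  If QoS is configured on the bdev and no QoS
 * channel has been selected yet, make this channel the QoS channel and start the QoS poller.
 */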
3918 static void
3919 bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
3920 {
3921 	struct spdk_bdev_qos	*qos = bdev->internal.qos;
3922 	int			i;
3923 
3924 	assert(spdk_spin_held(&bdev->internal.spinlock));
3925 
3926 	/* Rate limiting is enabled on this bdev */
3927 	if (qos) {
3928 		if (qos->ch == NULL) {
3929 			struct spdk_io_channel *io_ch;
3930 
3931 			SPDK_DEBUGLOG(bdev, "Selecting channel %p as QoS channel for bdev %s on thread %p\n", ch,
3932 				      bdev->name, spdk_get_thread());
3933 
3934 			/* No qos channel has been selected, so set one up */
3935 
3936 			/* Take another reference to ch */
3937 			io_ch = spdk_get_io_channel(__bdev_to_io_dev(bdev));
3938 			assert(io_ch != NULL);
3939 			qos->ch = ch;
3940 
3941 			qos->thread = spdk_io_channel_get_thread(io_ch);
3942 
3943 			for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3944 				if (bdev_qos_is_iops_rate_limit(i) == true) {
3945 					qos->rate_limits[i].min_per_timeslice =
3946 						SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE;
3947 				} else {
3948 					qos->rate_limits[i].min_per_timeslice =
3949 						SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE;
3950 				}
3951 
3952 				if (qos->rate_limits[i].limit == 0) {
3953 					qos->rate_limits[i].limit = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
3954 				}
3955 			}
3956 			bdev_qos_update_max_quota_per_timeslice(qos);
3957 			qos->timeslice_size =
3958 				SPDK_BDEV_QOS_TIMESLICE_IN_USEC * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
3959 			qos->last_timeslice = spdk_get_ticks();
3960 			qos->poller = SPDK_POLLER_REGISTER(bdev_channel_poll_qos,
3961 							   bdev,
3962 							   SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
3963 		}
3964 
3965 		ch->flags |= BDEV_CH_QOS_ENABLED;
3966 	}
3967 }
3968 
3969 struct poll_timeout_ctx {
3970 	struct spdk_bdev_desc	*desc;
3971 	uint64_t		timeout_in_sec;
3972 	spdk_bdev_io_timeout_cb	cb_fn;
3973 	void			*cb_arg;
3974 };
3975 
3976 static void
3977 bdev_desc_free(struct spdk_bdev_desc *desc)
3978 {
3979 	spdk_spin_destroy(&desc->spinlock);
3980 	free(desc->media_events_buffer);
3981 	free(desc);
3982 }
3983 
3984 static void
3985 bdev_channel_poll_timeout_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
3986 {
3987 	struct poll_timeout_ctx *ctx  = _ctx;
3988 	struct spdk_bdev_desc *desc = ctx->desc;
3989 
3990 	free(ctx);
3991 
3992 	spdk_spin_lock(&desc->spinlock);
3993 	desc->refs--;
3994 	if (desc->closed == true && desc->refs == 0) {
3995 		spdk_spin_unlock(&desc->spinlock);
3996 		bdev_desc_free(desc);
3997 		return;
3998 	}
3999 	spdk_spin_unlock(&desc->spinlock);
4000 }
4001 
4002 static void
4003 bdev_channel_poll_timeout_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
4004 			     struct spdk_io_channel *io_ch, void *_ctx)
4005 {
4006 	struct poll_timeout_ctx *ctx  = _ctx;
4007 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
4008 	struct spdk_bdev_desc *desc = ctx->desc;
4009 	struct spdk_bdev_io *bdev_io;
4010 	uint64_t now;
4011 
4012 	spdk_spin_lock(&desc->spinlock);
4013 	if (desc->closed == true) {
4014 		spdk_spin_unlock(&desc->spinlock);
4015 		spdk_bdev_for_each_channel_continue(i, -1);
4016 		return;
4017 	}
4018 	spdk_spin_unlock(&desc->spinlock);
4019 
4020 	now = spdk_get_ticks();
4021 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
4022 		/* Exclude any I/O that was generated via splitting. */
4023 		if (bdev_io->internal.cb == bdev_io_split_done) {
4024 			continue;
4025 		}
4026 
4027 		/* Once we find an I/O that has not timed out, we can immediately
4028 		 * exit the loop.
4029 		 */
4030 		if (now < (bdev_io->internal.submit_tsc +
4031 			   ctx->timeout_in_sec * spdk_get_ticks_hz())) {
4032 			goto end;
4033 		}
4034 
4035 		if (bdev_io->internal.desc == desc) {
4036 			ctx->cb_fn(ctx->cb_arg, bdev_io);
4037 		}
4038 	}
4039 
4040 end:
4041 	spdk_bdev_for_each_channel_continue(i, 0);
4042 }
4043 
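/* Poller registered by spdk_bdev_set_timeout().  Walks every channel looking for I/O
 * submitted through this descriptor that has exceeded the configured timeout and invokes
 * the user callback for each one.
 */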
4044 static int
4045 bdev_poll_timeout_io(void *arg)
4046 {
4047 	struct spdk_bdev_desc *desc = arg;
4048 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4049 	struct poll_timeout_ctx *ctx;
4050 
4051 	ctx = calloc(1, sizeof(struct poll_timeout_ctx));
4052 	if (!ctx) {
4053 		SPDK_ERRLOG("failed to allocate memory\n");
4054 		return SPDK_POLLER_BUSY;
4055 	}
4056 	ctx->desc = desc;
4057 	ctx->cb_arg = desc->cb_arg;
4058 	ctx->cb_fn = desc->cb_fn;
4059 	ctx->timeout_in_sec = desc->timeout_in_sec;
4060 
4061 	/* Take a ref on the descriptor in case it gets closed while we are checking
4062 	 * all of the channels.
4063 	 */
4064 	spdk_spin_lock(&desc->spinlock);
4065 	desc->refs++;
4066 	spdk_spin_unlock(&desc->spinlock);
4067 
4068 	spdk_bdev_for_each_channel(bdev, bdev_channel_poll_timeout_io, ctx,
4069 				   bdev_channel_poll_timeout_io_done);
4070 
4071 	return SPDK_POLLER_BUSY;
4072 }
4073 
4074 int
4075 spdk_bdev_set_timeout(struct spdk_bdev_desc *desc, uint64_t timeout_in_sec,
4076 		      spdk_bdev_io_timeout_cb cb_fn, void *cb_arg)
4077 {
4078 	assert(desc->thread == spdk_get_thread());
4079 
4080 	spdk_poller_unregister(&desc->io_timeout_poller);
4081 
4082 	if (timeout_in_sec) {
4083 		assert(cb_fn != NULL);
4084 		desc->io_timeout_poller = SPDK_POLLER_REGISTER(bdev_poll_timeout_io,
4085 					  desc,
4086 					  SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * SPDK_SEC_TO_USEC /
4087 					  1000);
4088 		if (desc->io_timeout_poller == NULL) {
4089 			SPDK_ERRLOG("cannot register the desc timeout IO poller\n");
4090 			return -1;
4091 		}
4092 	}
4093 
4094 	desc->cb_fn = cb_fn;
4095 	desc->cb_arg = cb_arg;
4096 	desc->timeout_in_sec = timeout_in_sec;
4097 
4098 	return 0;
4099 }
4100 
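/* I/O channel create callback for the bdev io_device.  Sets up the module and accel
 * channels, attaches to (or creates) the shared resource for the underlying channel, and
 * copies the bdev's QoS configuration and locked LBA ranges into the new channel.
 */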
4101 static int
4102 bdev_channel_create(void *io_device, void *ctx_buf)
4103 {
4104 	struct spdk_bdev		*bdev = __bdev_from_io_dev(io_device);
4105 	struct spdk_bdev_channel	*ch = ctx_buf;
4106 	struct spdk_io_channel		*mgmt_io_ch;
4107 	struct spdk_bdev_mgmt_channel	*mgmt_ch;
4108 	struct spdk_bdev_shared_resource *shared_resource;
4109 	struct lba_range		*range;
4110 
4111 	ch->bdev = bdev;
4112 	ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
4113 	if (!ch->channel) {
4114 		return -1;
4115 	}
4116 
4117 	ch->accel_channel = spdk_accel_get_io_channel();
4118 	if (!ch->accel_channel) {
4119 		spdk_put_io_channel(ch->channel);
4120 		return -1;
4121 	}
4122 
4123 	spdk_trace_record(TRACE_BDEV_IOCH_CREATE, bdev->internal.trace_id, 0, 0,
4124 			  spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
4125 
4126 	assert(ch->histogram == NULL);
4127 	if (bdev->internal.histogram_enabled) {
4128 		ch->histogram = spdk_histogram_data_alloc();
4129 		if (ch->histogram == NULL) {
4130 			SPDK_ERRLOG("Could not allocate histogram\n");
4131 		}
4132 	}
4133 
4134 	mgmt_io_ch = spdk_get_io_channel(&g_bdev_mgr);
4135 	if (!mgmt_io_ch) {
4136 		spdk_put_io_channel(ch->channel);
4137 		spdk_put_io_channel(ch->accel_channel);
4138 		return -1;
4139 	}
4140 
4141 	mgmt_ch = __io_ch_to_bdev_mgmt_ch(mgmt_io_ch);
4142 	TAILQ_FOREACH(shared_resource, &mgmt_ch->shared_resources, link) {
4143 		if (shared_resource->shared_ch == ch->channel) {
4144 			spdk_put_io_channel(mgmt_io_ch);
4145 			shared_resource->ref++;
4146 			break;
4147 		}
4148 	}
4149 
4150 	if (shared_resource == NULL) {
4151 		shared_resource = calloc(1, sizeof(*shared_resource));
4152 		if (shared_resource == NULL) {
4153 			spdk_put_io_channel(ch->channel);
4154 			spdk_put_io_channel(ch->accel_channel);
4155 			spdk_put_io_channel(mgmt_io_ch);
4156 			return -1;
4157 		}
4158 
4159 		shared_resource->mgmt_ch = mgmt_ch;
4160 		shared_resource->io_outstanding = 0;
4161 		TAILQ_INIT(&shared_resource->nomem_io);
4162 		shared_resource->nomem_threshold = 0;
4163 		shared_resource->shared_ch = ch->channel;
4164 		shared_resource->ref = 1;
4165 		TAILQ_INSERT_TAIL(&mgmt_ch->shared_resources, shared_resource, link);
4166 	}
4167 
4168 	ch->io_outstanding = 0;
4169 	TAILQ_INIT(&ch->queued_resets);
4170 	TAILQ_INIT(&ch->locked_ranges);
4171 	TAILQ_INIT(&ch->qos_queued_io);
4172 	ch->flags = 0;
4173 	ch->trace_id = bdev->internal.trace_id;
4174 	ch->shared_resource = shared_resource;
4175 
4176 	TAILQ_INIT(&ch->io_submitted);
4177 	TAILQ_INIT(&ch->io_locked);
4178 	TAILQ_INIT(&ch->io_accel_exec);
4179 	TAILQ_INIT(&ch->io_memory_domain);
4180 
4181 	ch->stat = bdev_alloc_io_stat(false);
4182 	if (ch->stat == NULL) {
4183 		bdev_channel_destroy_resource(ch);
4184 		return -1;
4185 	}
4186 
4187 	ch->stat->ticks_rate = spdk_get_ticks_hz();
4188 
4189 #ifdef SPDK_CONFIG_VTUNE
4190 	{
4191 		char *name;
4192 		__itt_init_ittlib(NULL, 0);
4193 		name = spdk_sprintf_alloc("spdk_bdev_%s_%p", ch->bdev->name, ch);
4194 		if (!name) {
4195 			bdev_channel_destroy_resource(ch);
4196 			return -1;
4197 		}
4198 		ch->handle = __itt_string_handle_create(name);
4199 		free(name);
4200 		ch->start_tsc = spdk_get_ticks();
4201 		ch->interval_tsc = spdk_get_ticks_hz() / 100;
4202 		ch->prev_stat = bdev_alloc_io_stat(false);
4203 		if (ch->prev_stat == NULL) {
4204 			bdev_channel_destroy_resource(ch);
4205 			return -1;
4206 		}
4207 	}
4208 #endif
4209 
4210 	spdk_spin_lock(&bdev->internal.spinlock);
4211 	bdev_enable_qos(bdev, ch);
4212 
4213 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
4214 		struct lba_range *new_range;
4215 
4216 		new_range = calloc(1, sizeof(*new_range));
4217 		if (new_range == NULL) {
4218 			spdk_spin_unlock(&bdev->internal.spinlock);
4219 			bdev_channel_destroy_resource(ch);
4220 			return -1;
4221 		}
4222 		new_range->length = range->length;
4223 		new_range->offset = range->offset;
4224 		new_range->locked_ctx = range->locked_ctx;
4225 		TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
4226 	}
4227 
4228 	spdk_spin_unlock(&bdev->internal.spinlock);
4229 
4230 	return 0;
4231 }
4232 
4233 static int
4234 bdev_abort_all_buf_io_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
4235 			 void *cb_ctx)
4236 {
4237 	struct spdk_bdev_channel *bdev_ch = cb_ctx;
4238 	struct spdk_bdev_io *bdev_io;
4239 	uint64_t buf_len;
4240 
4241 	bdev_io = SPDK_CONTAINEROF(entry, struct spdk_bdev_io, internal.iobuf);
4242 	if (bdev_io->internal.ch == bdev_ch) {
4243 		buf_len = bdev_io_get_max_buf_len(bdev_io, bdev_io->internal.buf_len);
4244 		spdk_iobuf_entry_abort(ch, entry, buf_len);
4245 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
4246 	}
4247 
4248 	return 0;
4249 }
4250 
4251 /*
4252  * Abort I/O that are waiting on a data buffer.
4253  */
4254 static void
4255 bdev_abort_all_buf_io(struct spdk_bdev_mgmt_channel *mgmt_ch, struct spdk_bdev_channel *ch)
4256 {
4257 	spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, &mgmt_ch->iobuf.small,
4258 				  bdev_abort_all_buf_io_cb, ch);
4259 	spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, &mgmt_ch->iobuf.large,
4260 				  bdev_abort_all_buf_io_cb, ch);
4261 }
4262 
4263 /*
4264  * Abort I/O that are queued waiting for submission.  These types of I/O are
4265  *  linked using the spdk_bdev_io link TAILQ_ENTRY.
4266  */
4267 static void
4268 bdev_abort_all_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
4269 {
4270 	struct spdk_bdev_io *bdev_io, *tmp;
4271 
4272 	TAILQ_FOREACH_SAFE(bdev_io, queue, internal.link, tmp) {
4273 		if (bdev_io->internal.ch == ch) {
4274 			TAILQ_REMOVE(queue, bdev_io, internal.link);
4275 			/*
4276 			 * spdk_bdev_io_complete() assumes that the completed I/O had
4277 			 *  been submitted to the bdev module.  Since in this case it
4278 			 *  hadn't, bump io_outstanding to account for the decrement
4279 			 *  that spdk_bdev_io_complete() will do.
4280 			 */
4281 			if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
4282 				bdev_io_increment_outstanding(ch, ch->shared_resource);
4283 			}
4284 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
4285 		}
4286 	}
4287 }
4288 
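/* Abort a specific I/O if it is still sitting on the given queue.  Returns true if the
 * I/O was found and completed as aborted.
 */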
4289 static bool
4290 bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
4291 {
4292 	struct spdk_bdev_io *bdev_io;
4293 
4294 	TAILQ_FOREACH(bdev_io, queue, internal.link) {
4295 		if (bdev_io == bio_to_abort) {
4296 			TAILQ_REMOVE(queue, bio_to_abort, internal.link);
4297 			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
4298 			return true;
4299 		}
4300 	}
4301 
4302 	return false;
4303 }
4304 
4305 static int
4306 bdev_abort_buf_io_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry, void *cb_ctx)
4307 {
4308 	struct spdk_bdev_io *bdev_io, *bio_to_abort = cb_ctx;
4309 	uint64_t buf_len;
4310 
4311 	bdev_io = SPDK_CONTAINEROF(entry, struct spdk_bdev_io, internal.iobuf);
4312 	if (bdev_io == bio_to_abort) {
4313 		buf_len = bdev_io_get_max_buf_len(bdev_io, bdev_io->internal.buf_len);
4314 		spdk_iobuf_entry_abort(ch, entry, buf_len);
4315 		spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
4316 		return 1;
4317 	}
4318 
4319 	return 0;
4320 }
4321 
4322 static bool
4323 bdev_abort_buf_io(struct spdk_bdev_mgmt_channel *mgmt_ch, struct spdk_bdev_io *bio_to_abort)
4324 {
4325 	int rc;
4326 
4327 	rc = spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, &mgmt_ch->iobuf.small,
4328 				       bdev_abort_buf_io_cb, bio_to_abort);
4329 	if (rc == 1) {
4330 		return true;
4331 	}
4332 
4333 	rc = spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, &mgmt_ch->iobuf.large,
4334 				       bdev_abort_buf_io_cb, bio_to_abort);
4335 	return rc == 1;
4336 }
4337 
4338 static void
4339 bdev_qos_channel_destroy(void *cb_arg)
4340 {
4341 	struct spdk_bdev_qos *qos = cb_arg;
4342 
4343 	spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
4344 	spdk_poller_unregister(&qos->poller);
4345 
4346 	SPDK_DEBUGLOG(bdev, "Free QoS %p.\n", qos);
4347 
4348 	free(qos);
4349 }
4350 
4351 static int
4352 bdev_qos_destroy(struct spdk_bdev *bdev)
4353 {
4354 	int i;
4355 
4356 	/*
4357 	 * Cleanly shutting down the QoS poller is tricky, because
4358 	 * during the asynchronous operation the user could open
4359 	 * a new descriptor and create a new channel, spawning
4360 	 * a new QoS poller.
4361 	 *
4362 	 * The strategy is to create a new QoS structure here and swap it
4363 	 * in. The shutdown path then continues to refer to the old one
4364 	 * until it completes and then releases it.
4365 	 */
4366 	struct spdk_bdev_qos *new_qos, *old_qos;
4367 
4368 	old_qos = bdev->internal.qos;
4369 
4370 	new_qos = calloc(1, sizeof(*new_qos));
4371 	if (!new_qos) {
4372 		SPDK_ERRLOG("Unable to allocate memory to shut down QoS.\n");
4373 		return -ENOMEM;
4374 	}
4375 
4376 	/* Copy the old QoS data into the newly allocated structure */
4377 	memcpy(new_qos, old_qos, sizeof(*new_qos));
4378 
4379 	/* Zero out the key parts of the QoS structure */
4380 	new_qos->ch = NULL;
4381 	new_qos->thread = NULL;
4382 	new_qos->poller = NULL;
4383 	/*
4384 	 * The limit member of the spdk_bdev_qos_limit structure is not zeroed.
4385 	 * It will be used later for the new QoS structure.
4386 	 */
4387 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4388 		new_qos->rate_limits[i].remaining_this_timeslice = 0;
4389 		new_qos->rate_limits[i].min_per_timeslice = 0;
4390 		new_qos->rate_limits[i].max_per_timeslice = 0;
4391 	}
4392 
4393 	bdev->internal.qos = new_qos;
4394 
4395 	if (old_qos->thread == NULL) {
4396 		free(old_qos);
4397 	} else {
4398 		spdk_thread_send_msg(old_qos->thread, bdev_qos_channel_destroy, old_qos);
4399 	}
4400 
4401 	/* It is safe to continue with destroying the bdev even though the QoS channel hasn't
4402 	 * been destroyed yet. The destruction path will end up waiting for the final
4403 	 * channel to be put before it releases resources. */
4404 
4405 	return 0;
4406 }
4407 
4408 void
4409 spdk_bdev_add_io_stat(struct spdk_bdev_io_stat *total, struct spdk_bdev_io_stat *add)
4410 {
4411 	total->bytes_read += add->bytes_read;
4412 	total->num_read_ops += add->num_read_ops;
4413 	total->bytes_written += add->bytes_written;
4414 	total->num_write_ops += add->num_write_ops;
4415 	total->bytes_unmapped += add->bytes_unmapped;
4416 	total->num_unmap_ops += add->num_unmap_ops;
4417 	total->bytes_copied += add->bytes_copied;
4418 	total->num_copy_ops += add->num_copy_ops;
4419 	total->read_latency_ticks += add->read_latency_ticks;
4420 	total->write_latency_ticks += add->write_latency_ticks;
4421 	total->unmap_latency_ticks += add->unmap_latency_ticks;
4422 	total->copy_latency_ticks += add->copy_latency_ticks;
4423 	if (total->max_read_latency_ticks < add->max_read_latency_ticks) {
4424 		total->max_read_latency_ticks = add->max_read_latency_ticks;
4425 	}
4426 	if (total->min_read_latency_ticks > add->min_read_latency_ticks) {
4427 		total->min_read_latency_ticks = add->min_read_latency_ticks;
4428 	}
4429 	if (total->max_write_latency_ticks < add->max_write_latency_ticks) {
4430 		total->max_write_latency_ticks = add->max_write_latency_ticks;
4431 	}
4432 	if (total->min_write_latency_ticks > add->min_write_latency_ticks) {
4433 		total->min_write_latency_ticks = add->min_write_latency_ticks;
4434 	}
4435 	if (total->max_unmap_latency_ticks < add->max_unmap_latency_ticks) {
4436 		total->max_unmap_latency_ticks = add->max_unmap_latency_ticks;
4437 	}
4438 	if (total->min_unmap_latency_ticks > add->min_unmap_latency_ticks) {
4439 		total->min_unmap_latency_ticks = add->min_unmap_latency_ticks;
4440 	}
4441 	if (total->max_copy_latency_ticks < add->max_copy_latency_ticks) {
4442 		total->max_copy_latency_ticks = add->max_copy_latency_ticks;
4443 	}
4444 	if (total->min_copy_latency_ticks > add->min_copy_latency_ticks) {
4445 		total->min_copy_latency_ticks = add->min_copy_latency_ticks;
4446 	}
4447 }
4448 
4449 static void
4450 bdev_get_io_stat(struct spdk_bdev_io_stat *to_stat, struct spdk_bdev_io_stat *from_stat)
4451 {
4452 	memcpy(to_stat, from_stat, offsetof(struct spdk_bdev_io_stat, io_error));
4453 
4454 	if (to_stat->io_error != NULL && from_stat->io_error != NULL) {
4455 		memcpy(to_stat->io_error, from_stat->io_error,
4456 		       sizeof(struct spdk_bdev_io_error_stat));
4457 	}
4458 }
4459 
4460 void
4461 spdk_bdev_reset_io_stat(struct spdk_bdev_io_stat *stat, enum spdk_bdev_reset_stat_mode mode)
4462 {
4463 	stat->max_read_latency_ticks = 0;
4464 	stat->min_read_latency_ticks = UINT64_MAX;
4465 	stat->max_write_latency_ticks = 0;
4466 	stat->min_write_latency_ticks = UINT64_MAX;
4467 	stat->max_unmap_latency_ticks = 0;
4468 	stat->min_unmap_latency_ticks = UINT64_MAX;
4469 	stat->max_copy_latency_ticks = 0;
4470 	stat->min_copy_latency_ticks = UINT64_MAX;
4471 
4472 	if (mode != SPDK_BDEV_RESET_STAT_ALL) {
4473 		return;
4474 	}
4475 
4476 	stat->bytes_read = 0;
4477 	stat->num_read_ops = 0;
4478 	stat->bytes_written = 0;
4479 	stat->num_write_ops = 0;
4480 	stat->bytes_unmapped = 0;
4481 	stat->num_unmap_ops = 0;
4482 	stat->bytes_copied = 0;
4483 	stat->num_copy_ops = 0;
4484 	stat->read_latency_ticks = 0;
4485 	stat->write_latency_ticks = 0;
4486 	stat->unmap_latency_ticks = 0;
4487 	stat->copy_latency_ticks = 0;
4488 
4489 	if (stat->io_error != NULL) {
4490 		memset(stat->io_error, 0, sizeof(struct spdk_bdev_io_error_stat));
4491 	}
4492 }
4493 
4494 struct spdk_bdev_io_stat *
4495 bdev_alloc_io_stat(bool io_error_stat)
4496 {
4497 	struct spdk_bdev_io_stat *stat;
4498 
4499 	stat = malloc(sizeof(struct spdk_bdev_io_stat));
4500 	if (stat == NULL) {
4501 		return NULL;
4502 	}
4503 
4504 	if (io_error_stat) {
4505 		stat->io_error = malloc(sizeof(struct spdk_bdev_io_error_stat));
4506 		if (stat->io_error == NULL) {
4507 			free(stat);
4508 			return NULL;
4509 		}
4510 	} else {
4511 		stat->io_error = NULL;
4512 	}
4513 
4514 	spdk_bdev_reset_io_stat(stat, SPDK_BDEV_RESET_STAT_ALL);
4515 
4516 	return stat;
4517 }
4518 
4519 void
4520 bdev_free_io_stat(struct spdk_bdev_io_stat *stat)
4521 {
4522 	if (stat != NULL) {
4523 		free(stat->io_error);
4524 		free(stat);
4525 	}
4526 }
4527 
4528 void
4529 spdk_bdev_dump_io_stat_json(struct spdk_bdev_io_stat *stat, struct spdk_json_write_ctx *w)
4530 {
4531 	int i;
4532 
4533 	spdk_json_write_named_uint64(w, "bytes_read", stat->bytes_read);
4534 	spdk_json_write_named_uint64(w, "num_read_ops", stat->num_read_ops);
4535 	spdk_json_write_named_uint64(w, "bytes_written", stat->bytes_written);
4536 	spdk_json_write_named_uint64(w, "num_write_ops", stat->num_write_ops);
4537 	spdk_json_write_named_uint64(w, "bytes_unmapped", stat->bytes_unmapped);
4538 	spdk_json_write_named_uint64(w, "num_unmap_ops", stat->num_unmap_ops);
4539 	spdk_json_write_named_uint64(w, "bytes_copied", stat->bytes_copied);
4540 	spdk_json_write_named_uint64(w, "num_copy_ops", stat->num_copy_ops);
4541 	spdk_json_write_named_uint64(w, "read_latency_ticks", stat->read_latency_ticks);
4542 	spdk_json_write_named_uint64(w, "max_read_latency_ticks", stat->max_read_latency_ticks);
4543 	spdk_json_write_named_uint64(w, "min_read_latency_ticks",
4544 				     stat->min_read_latency_ticks != UINT64_MAX ?
4545 				     stat->min_read_latency_ticks : 0);
4546 	spdk_json_write_named_uint64(w, "write_latency_ticks", stat->write_latency_ticks);
4547 	spdk_json_write_named_uint64(w, "max_write_latency_ticks", stat->max_write_latency_ticks);
4548 	spdk_json_write_named_uint64(w, "min_write_latency_ticks",
4549 				     stat->min_write_latency_ticks != UINT64_MAX ?
4550 				     stat->min_write_latency_ticks : 0);
4551 	spdk_json_write_named_uint64(w, "unmap_latency_ticks", stat->unmap_latency_ticks);
4552 	spdk_json_write_named_uint64(w, "max_unmap_latency_ticks", stat->max_unmap_latency_ticks);
4553 	spdk_json_write_named_uint64(w, "min_unmap_latency_ticks",
4554 				     stat->min_unmap_latency_ticks != UINT64_MAX ?
4555 				     stat->min_unmap_latency_ticks : 0);
4556 	spdk_json_write_named_uint64(w, "copy_latency_ticks", stat->copy_latency_ticks);
4557 	spdk_json_write_named_uint64(w, "max_copy_latency_ticks", stat->max_copy_latency_ticks);
4558 	spdk_json_write_named_uint64(w, "min_copy_latency_ticks",
4559 				     stat->min_copy_latency_ticks != UINT64_MAX ?
4560 				     stat->min_copy_latency_ticks : 0);
4561 
4562 	if (stat->io_error != NULL) {
4563 		spdk_json_write_named_object_begin(w, "io_error");
4564 		for (i = 0; i < -SPDK_MIN_BDEV_IO_STATUS; i++) {
4565 			if (stat->io_error->error_status[i] != 0) {
4566 				spdk_json_write_named_uint32(w, bdev_io_status_get_string(-(i + 1)),
4567 							     stat->io_error->error_status[i]);
4568 			}
4569 		}
4570 		spdk_json_write_object_end(w);
4571 	}
4572 }
4573 
4574 static void
4575 bdev_channel_abort_queued_ios(struct spdk_bdev_channel *ch)
4576 {
4577 	struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
4578 	struct spdk_bdev_mgmt_channel *mgmt_ch = shared_resource->mgmt_ch;
4579 
4580 	bdev_abort_all_queued_io(&shared_resource->nomem_io, ch);
4581 	bdev_abort_all_buf_io(mgmt_ch, ch);
4582 }
4583 
4584 static void
4585 bdev_channel_destroy(void *io_device, void *ctx_buf)
4586 {
4587 	struct spdk_bdev_channel *ch = ctx_buf;
4588 
4589 	SPDK_DEBUGLOG(bdev, "Destroying channel %p for bdev %s on thread %p\n", ch, ch->bdev->name,
4590 		      spdk_get_thread());
4591 
4592 	spdk_trace_record(TRACE_BDEV_IOCH_DESTROY, ch->bdev->internal.trace_id, 0, 0,
4593 			  spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
4594 
4595 	/* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
4596 	spdk_spin_lock(&ch->bdev->internal.spinlock);
4597 	spdk_bdev_add_io_stat(ch->bdev->internal.stat, ch->stat);
4598 	spdk_spin_unlock(&ch->bdev->internal.spinlock);
4599 
4600 	bdev_abort_all_queued_io(&ch->queued_resets, ch);
4601 
4602 	bdev_channel_abort_queued_ios(ch);
4603 
4604 	if (ch->histogram) {
4605 		spdk_histogram_data_free(ch->histogram);
4606 	}
4607 
4608 	bdev_channel_destroy_resource(ch);
4609 }
4610 
4611 /*
4612  * If the name already exists in the global bdev name tree, RB_INSERT() returns a pointer
4613  * to it. Hence we do not have to call bdev_get_by_name() when using this function.
4614  */
4615 static int
4616 bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const char *name)
4617 {
4618 	struct spdk_bdev_name *tmp;
4619 
4620 	bdev_name->name = strdup(name);
4621 	if (bdev_name->name == NULL) {
4622 		SPDK_ERRLOG("Unable to allocate bdev name\n");
4623 		return -ENOMEM;
4624 	}
4625 
4626 	bdev_name->bdev = bdev;
4627 
4628 	spdk_spin_lock(&g_bdev_mgr.spinlock);
4629 	tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
4630 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
4631 
4632 	if (tmp != NULL) {
4633 		SPDK_ERRLOG("Bdev name %s already exists\n", name);
4634 		free(bdev_name->name);
4635 		return -EEXIST;
4636 	}
4637 
4638 	return 0;
4639 }
4640 
4641 static void
4642 bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name)
4643 {
4644 	RB_REMOVE(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
4645 	free(bdev_name->name);
4646 }
4647 
4648 static void
4649 bdev_name_del(struct spdk_bdev_name *bdev_name)
4650 {
4651 	spdk_spin_lock(&g_bdev_mgr.spinlock);
4652 	bdev_name_del_unsafe(bdev_name);
4653 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
4654 }
4655 
4656 int
4657 spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
4658 {
4659 	struct spdk_bdev_alias *tmp;
4660 	int ret;
4661 
4662 	if (alias == NULL) {
4663 		SPDK_ERRLOG("Empty alias passed\n");
4664 		return -EINVAL;
4665 	}
4666 
4667 	tmp = calloc(1, sizeof(*tmp));
4668 	if (tmp == NULL) {
4669 		SPDK_ERRLOG("Unable to allocate alias\n");
4670 		return -ENOMEM;
4671 	}
4672 
4673 	ret = bdev_name_add(&tmp->alias, bdev, alias);
4674 	if (ret != 0) {
4675 		free(tmp);
4676 		return ret;
4677 	}
4678 
4679 	TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
4680 
4681 	return 0;
4682 }
4683 
4684 static int
4685 bdev_alias_del(struct spdk_bdev *bdev, const char *alias,
4686 	       void (*alias_del_fn)(struct spdk_bdev_name *n))
4687 {
4688 	struct spdk_bdev_alias *tmp;
4689 
4690 	TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
4691 		if (strcmp(alias, tmp->alias.name) == 0) {
4692 			TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
4693 			alias_del_fn(&tmp->alias);
4694 			free(tmp);
4695 			return 0;
4696 		}
4697 	}
4698 
4699 	return -ENOENT;
4700 }
4701 
4702 int
4703 spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
4704 {
4705 	int rc;
4706 
4707 	rc = bdev_alias_del(bdev, alias, bdev_name_del);
4708 	if (rc == -ENOENT) {
4709 		SPDK_INFOLOG(bdev, "Alias %s does not exist\n", alias);
4710 	}
4711 
4712 	return rc;
4713 }
4714 
4715 void
4716 spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
4717 {
4718 	struct spdk_bdev_alias *p, *tmp;
4719 
4720 	TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
4721 		TAILQ_REMOVE(&bdev->aliases, p, tailq);
4722 		bdev_name_del(&p->alias);
4723 		free(p);
4724 	}
4725 }
4726 
4727 struct spdk_io_channel *
4728 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
4729 {
4730 	return spdk_get_io_channel(__bdev_to_io_dev(spdk_bdev_desc_get_bdev(desc)));
4731 }
4732 
4733 void *
4734 spdk_bdev_get_module_ctx(struct spdk_bdev_desc *desc)
4735 {
4736 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4737 	void *ctx = NULL;
4738 
4739 	if (bdev->fn_table->get_module_ctx) {
4740 		ctx = bdev->fn_table->get_module_ctx(bdev->ctxt);
4741 	}
4742 
4743 	return ctx;
4744 }
4745 
4746 const char *
4747 spdk_bdev_get_module_name(const struct spdk_bdev *bdev)
4748 {
4749 	return bdev->module->name;
4750 }
4751 
4752 const char *
4753 spdk_bdev_get_name(const struct spdk_bdev *bdev)
4754 {
4755 	return bdev->name;
4756 }
4757 
4758 const char *
4759 spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
4760 {
4761 	return bdev->product_name;
4762 }
4763 
4764 const struct spdk_bdev_aliases_list *
4765 spdk_bdev_get_aliases(const struct spdk_bdev *bdev)
4766 {
4767 	return &bdev->aliases;
4768 }
4769 
4770 uint32_t
4771 spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
4772 {
4773 	return bdev->blocklen;
4774 }
4775 
4776 uint32_t
4777 spdk_bdev_get_write_unit_size(const struct spdk_bdev *bdev)
4778 {
4779 	return bdev->write_unit_size;
4780 }
4781 
4782 uint64_t
4783 spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
4784 {
4785 	return bdev->blockcnt;
4786 }
4787 
4788 const char *
4789 spdk_bdev_get_qos_rpc_type(enum spdk_bdev_qos_rate_limit_type type)
4790 {
4791 	return qos_rpc_type[type];
4792 }
4793 
4794 void
4795 spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
4796 {
4797 	int i;
4798 
4799 	memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
4800 
4801 	spdk_spin_lock(&bdev->internal.spinlock);
4802 	if (bdev->internal.qos) {
4803 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4804 			if (bdev->internal.qos->rate_limits[i].limit !=
4805 			    SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
4806 				limits[i] = bdev->internal.qos->rate_limits[i].limit;
4807 				if (bdev_qos_is_iops_rate_limit(i) == false) {
4808 					/* Convert from bytes to megabytes, which is what is visible to the user. */
4809 					limits[i] = limits[i] / 1024 / 1024;
4810 				}
4811 			}
4812 		}
4813 	}
4814 	spdk_spin_unlock(&bdev->internal.spinlock);
4815 }
4816 
4817 size_t
4818 spdk_bdev_get_buf_align(const struct spdk_bdev *bdev)
4819 {
4820 	return 1 << bdev->required_alignment;
4821 }
4822 
4823 uint32_t
4824 spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
4825 {
4826 	return bdev->optimal_io_boundary;
4827 }
4828 
4829 bool
4830 spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
4831 {
4832 	return bdev->write_cache;
4833 }
4834 
4835 const struct spdk_uuid *
4836 spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
4837 {
4838 	return &bdev->uuid;
4839 }
4840 
4841 uint16_t
4842 spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
4843 {
4844 	return bdev->acwu;
4845 }
4846 
4847 uint32_t
4848 spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
4849 {
4850 	return bdev->md_len;
4851 }
4852 
4853 bool
4854 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
4855 {
4856 	return (bdev->md_len != 0) && bdev->md_interleave;
4857 }
4858 
4859 bool
4860 spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
4861 {
4862 	return (bdev->md_len != 0) && !bdev->md_interleave;
4863 }
4864 
4865 bool
4866 spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
4867 {
4868 	return bdev->zoned;
4869 }
4870 
4871 uint32_t
4872 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
4873 {
4874 	if (spdk_bdev_is_md_interleaved(bdev)) {
4875 		return bdev->blocklen - bdev->md_len;
4876 	} else {
4877 		return bdev->blocklen;
4878 	}
4879 }
4880 
4881 uint32_t
4882 spdk_bdev_get_physical_block_size(const struct spdk_bdev *bdev)
4883 {
4884 	return bdev->phys_blocklen;
4885 }
4886 
4887 static uint32_t
4888 _bdev_get_block_size_with_md(const struct spdk_bdev *bdev)
4889 {
4890 	if (!spdk_bdev_is_md_interleaved(bdev)) {
4891 		return bdev->blocklen + bdev->md_len;
4892 	} else {
4893 		return bdev->blocklen;
4894 	}
4895 }
4896 
4897 /* We have to use the typedef in the function declaration to appease astyle. */
4898 typedef enum spdk_dif_type spdk_dif_type_t;
4899 typedef enum spdk_dif_pi_format spdk_dif_pi_format_t;
4900 
4901 spdk_dif_type_t
4902 spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
4903 {
4904 	if (bdev->md_len != 0) {
4905 		return bdev->dif_type;
4906 	} else {
4907 		return SPDK_DIF_DISABLE;
4908 	}
4909 }
4910 
4911 spdk_dif_pi_format_t
4912 spdk_bdev_get_dif_pi_format(const struct spdk_bdev *bdev)
4913 {
4914 	return bdev->dif_pi_format;
4915 }
4916 
4917 bool
4918 spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
4919 {
4920 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
4921 		return bdev->dif_is_head_of_md;
4922 	} else {
4923 		return false;
4924 	}
4925 }
4926 
4927 bool
4928 spdk_bdev_is_dif_check_enabled(const struct spdk_bdev *bdev,
4929 			       enum spdk_dif_check_type check_type)
4930 {
4931 	if (spdk_bdev_get_dif_type(bdev) == SPDK_DIF_DISABLE) {
4932 		return false;
4933 	}
4934 
4935 	switch (check_type) {
4936 	case SPDK_DIF_CHECK_TYPE_REFTAG:
4937 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) != 0;
4938 	case SPDK_DIF_CHECK_TYPE_APPTAG:
4939 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) != 0;
4940 	case SPDK_DIF_CHECK_TYPE_GUARD:
4941 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_GUARD_CHECK) != 0;
4942 	default:
4943 		return false;
4944 	}
4945 }
4946 
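/* Given a buffer of num_bytes, compute how many blocks (including interleaved metadata,
 * rounded down to a multiple of the write unit size) can be written from it after
 * accounting for alignment padding.
 */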
4947 static uint32_t
4948 bdev_get_max_write(const struct spdk_bdev *bdev, uint64_t num_bytes)
4949 {
4950 	uint64_t aligned_length, max_write_blocks;
4951 
4952 	aligned_length = num_bytes - (spdk_bdev_get_buf_align(bdev) - 1);
4953 	max_write_blocks = aligned_length / _bdev_get_block_size_with_md(bdev);
4954 	max_write_blocks -= max_write_blocks % bdev->write_unit_size;
4955 
4956 	return max_write_blocks;
4957 }
4958 
4959 uint32_t
4960 spdk_bdev_get_max_copy(const struct spdk_bdev *bdev)
4961 {
4962 	return bdev->max_copy;
4963 }
4964 
4965 uint64_t
4966 spdk_bdev_get_qd(const struct spdk_bdev *bdev)
4967 {
4968 	return bdev->internal.measured_queue_depth;
4969 }
4970 
4971 uint64_t
4972 spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
4973 {
4974 	return bdev->internal.period;
4975 }
4976 
4977 uint64_t
4978 spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev)
4979 {
4980 	return bdev->internal.weighted_io_time;
4981 }
4982 
4983 uint64_t
4984 spdk_bdev_get_io_time(const struct spdk_bdev *bdev)
4985 {
4986 	return bdev->internal.io_time;
4987 }
4988 
4989 union spdk_bdev_nvme_ctratt spdk_bdev_get_nvme_ctratt(struct spdk_bdev *bdev)
4990 {
4991 	return bdev->ctratt;
4992 }
4993 
4994 static void bdev_update_qd_sampling_period(void *ctx);
4995 
4996 static void
4997 _calculate_measured_qd_cpl(struct spdk_bdev *bdev, void *_ctx, int status)
4998 {
4999 	bdev->internal.measured_queue_depth = bdev->internal.temporary_queue_depth;
5000 
5001 	if (bdev->internal.measured_queue_depth) {
5002 		bdev->internal.io_time += bdev->internal.period;
5003 		bdev->internal.weighted_io_time += bdev->internal.period * bdev->internal.measured_queue_depth;
5004 	}
5005 
5006 	bdev->internal.qd_poll_in_progress = false;
5007 
5008 	bdev_update_qd_sampling_period(bdev);
5009 }
5010 
5011 static void
5012 _calculate_measured_qd(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
5013 		       struct spdk_io_channel *io_ch, void *_ctx)
5014 {
5015 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(io_ch);
5016 
5017 	bdev->internal.temporary_queue_depth += ch->io_outstanding;
5018 	spdk_bdev_for_each_channel_continue(i, 0);
5019 }
5020 
5021 static int
5022 bdev_calculate_measured_queue_depth(void *ctx)
5023 {
5024 	struct spdk_bdev *bdev = ctx;
5025 
5026 	bdev->internal.qd_poll_in_progress = true;
5027 	bdev->internal.temporary_queue_depth = 0;
5028 	spdk_bdev_for_each_channel(bdev, _calculate_measured_qd, bdev, _calculate_measured_qd_cpl);
5029 	return SPDK_POLLER_BUSY;
5030 }
5031 
5032 static void
5033 bdev_update_qd_sampling_period(void *ctx)
5034 {
5035 	struct spdk_bdev *bdev = ctx;
5036 
5037 	if (bdev->internal.period == bdev->internal.new_period) {
5038 		return;
5039 	}
5040 
5041 	if (bdev->internal.qd_poll_in_progress) {
5042 		return;
5043 	}
5044 
5045 	bdev->internal.period = bdev->internal.new_period;
5046 
5047 	spdk_poller_unregister(&bdev->internal.qd_poller);
5048 	if (bdev->internal.period != 0) {
5049 		bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
5050 					   bdev, bdev->internal.period);
5051 	} else {
5052 		spdk_bdev_close(bdev->internal.qd_desc);
5053 		bdev->internal.qd_desc = NULL;
5054 	}
5055 }
5056 
5057 static void
5058 _tmp_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
5059 {
5060 	SPDK_NOTICELOG("Unexpected event type: %d\n", type);
5061 }
5062 
5063 void
5064 spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
5065 {
5066 	int rc;
5067 
5068 	if (bdev->internal.new_period == period) {
5069 		return;
5070 	}
5071 
5072 	bdev->internal.new_period = period;
5073 
5074 	if (bdev->internal.qd_desc != NULL) {
5075 		assert(bdev->internal.period != 0);
5076 
5077 		spdk_thread_send_msg(bdev->internal.qd_desc->thread,
5078 				     bdev_update_qd_sampling_period, bdev);
5079 		return;
5080 	}
5081 
5082 	assert(bdev->internal.period == 0);
5083 
5084 	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), false, _tmp_bdev_event_cb,
5085 				NULL, &bdev->internal.qd_desc);
5086 	if (rc != 0) {
5087 		return;
5088 	}
5089 
5090 	bdev->internal.period = period;
5091 	bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
5092 				   bdev, period);
5093 }
5094 
5095 struct bdev_get_current_qd_ctx {
5096 	uint64_t current_qd;
5097 	spdk_bdev_get_current_qd_cb cb_fn;
5098 	void *cb_arg;
5099 };
5100 
5101 static void
5102 bdev_get_current_qd_done(struct spdk_bdev *bdev, void *_ctx, int status)
5103 {
5104 	struct bdev_get_current_qd_ctx *ctx = _ctx;
5105 
5106 	ctx->cb_fn(bdev, ctx->current_qd, ctx->cb_arg, 0);
5107 
5108 	free(ctx);
5109 }
5110 
5111 static void
5112 bdev_get_current_qd(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
5113 		    struct spdk_io_channel *io_ch, void *_ctx)
5114 {
5115 	struct bdev_get_current_qd_ctx *ctx = _ctx;
5116 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
5117 
5118 	ctx->current_qd += bdev_ch->io_outstanding;
5119 
5120 	spdk_bdev_for_each_channel_continue(i, 0);
5121 }
5122 
5123 void
5124 spdk_bdev_get_current_qd(struct spdk_bdev *bdev, spdk_bdev_get_current_qd_cb cb_fn,
5125 			 void *cb_arg)
5126 {
5127 	struct bdev_get_current_qd_ctx *ctx;
5128 
5129 	assert(cb_fn != NULL);
5130 
5131 	ctx = calloc(1, sizeof(*ctx));
5132 	if (ctx == NULL) {
5133 		cb_fn(bdev, 0, cb_arg, -ENOMEM);
5134 		return;
5135 	}
5136 
5137 	ctx->cb_fn = cb_fn;
5138 	ctx->cb_arg = cb_arg;
5139 
5140 	spdk_bdev_for_each_channel(bdev, bdev_get_current_qd, ctx, bdev_get_current_qd_done);
5141 }
5142 
5143 static void
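/* Deliver a deferred event notification on the descriptor's thread.  If the descriptor was
 * closed while this message was in flight and this was the last reference, free it here.
 */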
5144 _event_notify(struct spdk_bdev_desc *desc, enum spdk_bdev_event_type type)
5145 {
5146 	assert(desc->thread == spdk_get_thread());
5147 
5148 	spdk_spin_lock(&desc->spinlock);
5149 	desc->refs--;
5150 	if (!desc->closed) {
5151 		spdk_spin_unlock(&desc->spinlock);
5152 		desc->callback.event_fn(type,
5153 					desc->bdev,
5154 					desc->callback.ctx);
5155 		return;
5156 	} else if (desc->refs == 0) {
5157 		/* This descriptor was closed after this event_notify message was sent.
5158 		 * spdk_bdev_close() could not free the descriptor since this message was
5159 		 * in flight, so we free it now using bdev_desc_free().
5160 		 */
5161 		spdk_spin_unlock(&desc->spinlock);
5162 		bdev_desc_free(desc);
5163 		return;
5164 	}
5165 	spdk_spin_unlock(&desc->spinlock);
5166 }
5167 
5168 static void
5169 event_notify(struct spdk_bdev_desc *desc, spdk_msg_fn event_notify_fn)
5170 {
5171 	spdk_spin_lock(&desc->spinlock);
5172 	desc->refs++;
5173 	spdk_thread_send_msg(desc->thread, event_notify_fn, desc);
5174 	spdk_spin_unlock(&desc->spinlock);
5175 }
5176 
5177 static void
5178 _resize_notify(void *ctx)
5179 {
5180 	struct spdk_bdev_desc *desc = ctx;
5181 
5182 	_event_notify(desc, SPDK_BDEV_EVENT_RESIZE);
5183 }
5184 
5185 int
5186 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
5187 {
5188 	struct spdk_bdev_desc *desc;
5189 	int ret;
5190 
5191 	if (size == bdev->blockcnt) {
5192 		return 0;
5193 	}
5194 
5195 	spdk_spin_lock(&bdev->internal.spinlock);
5196 
5197 	/* bdev has open descriptors */
5198 	if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
5199 	    bdev->blockcnt > size) {
5200 		ret = -EBUSY;
5201 	} else {
5202 		bdev->blockcnt = size;
5203 		TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
5204 			event_notify(desc, _resize_notify);
5205 		}
5206 		ret = 0;
5207 	}
5208 
5209 	spdk_spin_unlock(&bdev->internal.spinlock);
5210 
5211 	return ret;
5212 }
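
/*
 * A minimal usage sketch (illustrative): a bdev module that has grown its backing
 * storage notifies the bdev layer so that every open descriptor receives an
 * SPDK_BDEV_EVENT_RESIZE event.  Shrinking below the current size fails with -EBUSY
 * while descriptors are open.
 *
 *	int rc = spdk_bdev_notify_blockcnt_change(bdev, new_num_blocks);
 *	if (rc == -EBUSY) {
 *		SPDK_ERRLOG("Cannot shrink %s while it has open descriptors\n",
 *			    spdk_bdev_get_name(bdev));
 *	}
 */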
5213 
5214 /*
5215  * Convert I/O offset and length from bytes to blocks.
5216  *
5217  * Returns zero on success or non-zero if the byte parameters aren't divisible by the block size.
5218  */
5219 static uint64_t
5220 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t offset_bytes, uint64_t *offset_blocks,
5221 		     uint64_t num_bytes, uint64_t *num_blocks)
5222 {
5223 	uint32_t block_size = bdev->blocklen;
5224 	uint8_t shift_cnt;
5225 
5226 	/* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
5227 	if (spdk_likely(spdk_u32_is_pow2(block_size))) {
5228 		shift_cnt = spdk_u32log2(block_size);
5229 		*offset_blocks = offset_bytes >> shift_cnt;
5230 		*num_blocks = num_bytes >> shift_cnt;
5231 		return (offset_bytes - (*offset_blocks << shift_cnt)) |
5232 		       (num_bytes - (*num_blocks << shift_cnt));
5233 	} else {
5234 		*offset_blocks = offset_bytes / block_size;
5235 		*num_blocks = num_bytes / block_size;
5236 		return (offset_bytes % block_size) | (num_bytes % block_size);
5237 	}
5238 }
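
/*
 * Worked example (illustrative): with a 512-byte block size the power-of-two fast path
 * applies, so shift_cnt = 9.  offset_bytes = 4096 and num_bytes = 8192 give
 * offset_blocks = 8 and num_blocks = 16, and the returned value is 0.  If either byte
 * value is not a multiple of 512, the OR of the two remainders is non-zero and callers
 * turn that into -EINVAL.
 */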
5239 
5240 static bool
5241 bdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t offset_blocks, uint64_t num_blocks)
5242 {
5243 	/* Return failure if offset_blocks + num_blocks is less than offset_blocks; this indicates
5244 	 * that the sum has overflowed and the offset has wrapped around. */
5245 	if (offset_blocks + num_blocks < offset_blocks) {
5246 		return false;
5247 	}
5248 
5249 	/* Return failure if offset_blocks + num_blocks exceeds the size of the bdev */
5250 	if (offset_blocks + num_blocks > bdev->blockcnt) {
5251 		return false;
5252 	}
5253 
5254 	return true;
5255 }
5256 
5257 static void
5258 bdev_seek_complete_cb(void *ctx)
5259 {
5260 	struct spdk_bdev_io *bdev_io = ctx;
5261 
5262 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5263 	bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
5264 }
5265 
5266 static int
5267 bdev_seek(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5268 	  uint64_t offset_blocks, enum spdk_bdev_io_type io_type,
5269 	  spdk_bdev_io_completion_cb cb, void *cb_arg)
5270 {
5271 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5272 	struct spdk_bdev_io *bdev_io;
5273 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5274 
5275 	assert(io_type == SPDK_BDEV_IO_TYPE_SEEK_DATA || io_type == SPDK_BDEV_IO_TYPE_SEEK_HOLE);
5276 
5277 	/* Check that offset_blocks is valid by validating a single block at that offset */
5278 	if (!bdev_io_valid_blocks(bdev, offset_blocks, 1)) {
5279 		return -EINVAL;
5280 	}
5281 
5282 	bdev_io = bdev_channel_get_io(channel);
5283 	if (!bdev_io) {
5284 		return -ENOMEM;
5285 	}
5286 
5287 	bdev_io->internal.ch = channel;
5288 	bdev_io->internal.desc = desc;
5289 	bdev_io->type = io_type;
5290 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5291 	bdev_io->u.bdev.memory_domain = NULL;
5292 	bdev_io->u.bdev.memory_domain_ctx = NULL;
5293 	bdev_io->u.bdev.accel_sequence = NULL;
5294 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5295 
5296 	if (!spdk_bdev_io_type_supported(bdev, io_type)) {
5297 		/* If the bdev doesn't support seeking to the next data/hole offset,
5298 		 * assume that only data and no holes are present. */
5299 		if (io_type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
5300 			bdev_io->u.bdev.seek.offset = offset_blocks;
5301 		} else {
5302 			bdev_io->u.bdev.seek.offset = UINT64_MAX;
5303 		}
5304 
5305 		spdk_thread_send_msg(spdk_get_thread(), bdev_seek_complete_cb, bdev_io);
5306 		return 0;
5307 	}
5308 
5309 	bdev_io_submit(bdev_io);
5310 	return 0;
5311 }
5312 
5313 int
5314 spdk_bdev_seek_data(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5315 		    uint64_t offset_blocks,
5316 		    spdk_bdev_io_completion_cb cb, void *cb_arg)
5317 {
5318 	return bdev_seek(desc, ch, offset_blocks, SPDK_BDEV_IO_TYPE_SEEK_DATA, cb, cb_arg);
5319 }
5320 
5321 int
5322 spdk_bdev_seek_hole(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5323 		    uint64_t offset_blocks,
5324 		    spdk_bdev_io_completion_cb cb, void *cb_arg)
5325 {
5326 	return bdev_seek(desc, ch, offset_blocks, SPDK_BDEV_IO_TYPE_SEEK_HOLE, cb, cb_arg);
5327 }
5328 
5329 uint64_t
5330 spdk_bdev_io_get_seek_offset(const struct spdk_bdev_io *bdev_io)
5331 {
5332 	return bdev_io->u.bdev.seek.offset;
5333 }
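
/*
 * A minimal usage sketch (illustrative, names are assumptions): find the next data
 * region at or after block 0 and read the result in the completion callback.
 *
 *	static void
 *	seek_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 *	{
 *		if (success) {
 *			uint64_t next_data = spdk_bdev_io_get_seek_offset(bdev_io);
 *			// by convention, UINT64_MAX means no matching region was found
 *		}
 *		spdk_bdev_free_io(bdev_io);
 *	}
 *
 *	rc = spdk_bdev_seek_data(desc, io_ch, 0, seek_done, NULL);
 */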
5334 
5335 static int
5336 bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
5337 			 void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5338 			 spdk_bdev_io_completion_cb cb, void *cb_arg)
5339 {
5340 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5341 	struct spdk_bdev_io *bdev_io;
5342 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5343 
5344 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5345 		return -EINVAL;
5346 	}
5347 
5348 	bdev_io = bdev_channel_get_io(channel);
5349 	if (!bdev_io) {
5350 		return -ENOMEM;
5351 	}
5352 
5353 	bdev_io->internal.ch = channel;
5354 	bdev_io->internal.desc = desc;
5355 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
5356 	bdev_io->u.bdev.iovs = &bdev_io->iov;
5357 	bdev_io->u.bdev.iovs[0].iov_base = buf;
5358 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
5359 	bdev_io->u.bdev.iovcnt = 1;
5360 	bdev_io->u.bdev.md_buf = md_buf;
5361 	bdev_io->u.bdev.num_blocks = num_blocks;
5362 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5363 	bdev_io->u.bdev.memory_domain = NULL;
5364 	bdev_io->u.bdev.memory_domain_ctx = NULL;
5365 	bdev_io->u.bdev.accel_sequence = NULL;
5366 	bdev_io->u.bdev.dif_check_flags = bdev->dif_check_flags;
5367 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5368 
5369 	bdev_io_submit(bdev_io);
5370 	return 0;
5371 }
5372 
5373 int
5374 spdk_bdev_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5375 	       void *buf, uint64_t offset, uint64_t nbytes,
5376 	       spdk_bdev_io_completion_cb cb, void *cb_arg)
5377 {
5378 	uint64_t offset_blocks, num_blocks;
5379 
5380 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5381 				 nbytes, &num_blocks) != 0) {
5382 		return -EINVAL;
5383 	}
5384 
5385 	return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
5386 }
5387 
5388 int
5389 spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5390 		      void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5391 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
5392 {
5393 	return bdev_read_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks, cb, cb_arg);
5394 }
5395 
5396 int
5397 spdk_bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5398 			      void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5399 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
5400 {
5401 	struct iovec iov = {
5402 		.iov_base = buf,
5403 	};
5404 
5405 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5406 		return -EINVAL;
5407 	}
5408 
5409 	if (md_buf && !_is_buf_allocated(&iov)) {
5410 		return -EINVAL;
5411 	}
5412 
5413 	return bdev_read_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
5414 					cb, cb_arg);
5415 }
5416 
5417 int
5418 spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5419 		struct iovec *iov, int iovcnt,
5420 		uint64_t offset, uint64_t nbytes,
5421 		spdk_bdev_io_completion_cb cb, void *cb_arg)
5422 {
5423 	uint64_t offset_blocks, num_blocks;
5424 
5425 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5426 				 nbytes, &num_blocks) != 0) {
5427 		return -EINVAL;
5428 	}
5429 
5430 	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
5431 }
5432 
5433 static int
5434 bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5435 			  struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
5436 			  uint64_t num_blocks, struct spdk_memory_domain *domain, void *domain_ctx,
5437 			  struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
5438 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
5439 {
5440 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5441 	struct spdk_bdev_io *bdev_io;
5442 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5443 
5444 	if (spdk_unlikely(!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks))) {
5445 		return -EINVAL;
5446 	}
5447 
5448 	bdev_io = bdev_channel_get_io(channel);
5449 	if (spdk_unlikely(!bdev_io)) {
5450 		return -ENOMEM;
5451 	}
5452 
5453 	bdev_io->internal.ch = channel;
5454 	bdev_io->internal.desc = desc;
5455 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
5456 	bdev_io->u.bdev.iovs = iov;
5457 	bdev_io->u.bdev.iovcnt = iovcnt;
5458 	bdev_io->u.bdev.md_buf = md_buf;
5459 	bdev_io->u.bdev.num_blocks = num_blocks;
5460 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5461 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5462 	bdev_io->internal.memory_domain = domain;
5463 	bdev_io->internal.memory_domain_ctx = domain_ctx;
5464 	bdev_io->internal.accel_sequence = seq;
5465 	bdev_io->internal.has_accel_sequence = seq != NULL;
5466 	bdev_io->u.bdev.memory_domain = domain;
5467 	bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
5468 	bdev_io->u.bdev.accel_sequence = seq;
5469 	bdev_io->u.bdev.dif_check_flags = dif_check_flags;
5470 
5471 	_bdev_io_submit_ext(desc, bdev_io);
5472 
5473 	return 0;
5474 }
5475 
5476 int
5477 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5478 		       struct iovec *iov, int iovcnt,
5479 		       uint64_t offset_blocks, uint64_t num_blocks,
5480 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
5481 {
5482 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5483 
5484 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5485 					 num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, cb, cb_arg);
5486 }
5487 
5488 int
5489 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5490 			       struct iovec *iov, int iovcnt, void *md_buf,
5491 			       uint64_t offset_blocks, uint64_t num_blocks,
5492 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
5493 {
5494 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5495 
5496 	if (md_buf && !spdk_bdev_is_md_separate(bdev)) {
5497 		return -EINVAL;
5498 	}
5499 
5500 	if (md_buf && !_is_buf_allocated(iov)) {
5501 		return -EINVAL;
5502 	}
5503 
5504 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
5505 					 num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, cb, cb_arg);
5506 }
5507 
5508 static inline bool
5509 _bdev_io_check_opts(struct spdk_bdev_ext_io_opts *opts, struct iovec *iov)
5510 {
5511 	/*
5512 	 * We check that the opts size is at least as large as it was when
5513 	 * spdk_bdev_ext_io_opts was first introduced (ac6f2bdd8d), since accesses
5514 	 * to those members are not checked anywhere else internally.
5515 	 */
5516 	return opts->size >= offsetof(struct spdk_bdev_ext_io_opts, metadata) +
5517 	       sizeof(opts->metadata) &&
5518 	       opts->size <= sizeof(*opts) &&
5519 	       /* When memory domain is used, the user must provide data buffers */
5520 	       (!opts->memory_domain || (iov && iov[0].iov_base));
5521 }
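
/*
 * A minimal caller-side sketch (illustrative): the caller must set opts.size so that
 * the check above can tell which members are valid for the caller's compiled ABI.
 *
 *	struct spdk_bdev_ext_io_opts opts = {};
 *
 *	opts.size = sizeof(opts);
 *	opts.memory_domain = NULL;	// plain host memory
 *	opts.metadata = NULL;		// no separate metadata buffer
 *	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, iov, iovcnt, offset_blocks,
 *					num_blocks, read_done, NULL, &opts);
 */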
5522 
5523 int
5524 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5525 			   struct iovec *iov, int iovcnt,
5526 			   uint64_t offset_blocks, uint64_t num_blocks,
5527 			   spdk_bdev_io_completion_cb cb, void *cb_arg,
5528 			   struct spdk_bdev_ext_io_opts *opts)
5529 {
5530 	struct spdk_memory_domain *domain = NULL;
5531 	struct spdk_accel_sequence *seq = NULL;
5532 	void *domain_ctx = NULL, *md = NULL;
5533 	uint32_t dif_check_flags = 0;
5534 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5535 
5536 	if (opts) {
5537 		if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
5538 			return -EINVAL;
5539 		}
5540 
5541 		md = opts->metadata;
5542 		domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
5543 		domain_ctx = bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL);
5544 		seq = bdev_get_ext_io_opt(opts, accel_sequence, NULL);
5545 		if (md) {
5546 			if (spdk_unlikely(!spdk_bdev_is_md_separate(bdev))) {
5547 				return -EINVAL;
5548 			}
5549 
5550 			if (spdk_unlikely(!_is_buf_allocated(iov))) {
5551 				return -EINVAL;
5552 			}
5553 
5554 			if (spdk_unlikely(seq != NULL)) {
5555 				return -EINVAL;
5556 			}
5557 		}
5558 	}
5559 
5560 	dif_check_flags = bdev->dif_check_flags &
5561 			  ~(bdev_get_ext_io_opt(opts, dif_check_flags_exclude_mask, 0));
5562 
5563 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
5564 					 num_blocks, domain, domain_ctx, seq, dif_check_flags, cb, cb_arg);
5565 }
5566 
5567 static int
5568 bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5569 			  void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5570 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
5571 {
5572 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5573 	struct spdk_bdev_io *bdev_io;
5574 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5575 
5576 	if (!desc->write) {
5577 		return -EBADF;
5578 	}
5579 
5580 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5581 		return -EINVAL;
5582 	}
5583 
5584 	bdev_io = bdev_channel_get_io(channel);
5585 	if (!bdev_io) {
5586 		return -ENOMEM;
5587 	}
5588 
5589 	bdev_io->internal.ch = channel;
5590 	bdev_io->internal.desc = desc;
5591 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
5592 	bdev_io->u.bdev.iovs = &bdev_io->iov;
5593 	bdev_io->u.bdev.iovs[0].iov_base = buf;
5594 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
5595 	bdev_io->u.bdev.iovcnt = 1;
5596 	bdev_io->u.bdev.md_buf = md_buf;
5597 	bdev_io->u.bdev.num_blocks = num_blocks;
5598 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5599 	bdev_io->u.bdev.memory_domain = NULL;
5600 	bdev_io->u.bdev.memory_domain_ctx = NULL;
5601 	bdev_io->u.bdev.accel_sequence = NULL;
5602 	bdev_io->u.bdev.dif_check_flags = bdev->dif_check_flags;
5603 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5604 
5605 	bdev_io_submit(bdev_io);
5606 	return 0;
5607 }
5608 
5609 int
5610 spdk_bdev_write(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5611 		void *buf, uint64_t offset, uint64_t nbytes,
5612 		spdk_bdev_io_completion_cb cb, void *cb_arg)
5613 {
5614 	uint64_t offset_blocks, num_blocks;
5615 
5616 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5617 				 nbytes, &num_blocks) != 0) {
5618 		return -EINVAL;
5619 	}
5620 
5621 	return spdk_bdev_write_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
5622 }
5623 
5624 int
5625 spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5626 		       void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5627 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
5628 {
5629 	return bdev_write_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
5630 					 cb, cb_arg);
5631 }
5632 
5633 int
5634 spdk_bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5635 			       void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5636 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
5637 {
5638 	struct iovec iov = {
5639 		.iov_base = buf,
5640 	};
5641 
5642 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5643 		return -EINVAL;
5644 	}
5645 
5646 	if (md_buf && !_is_buf_allocated(&iov)) {
5647 		return -EINVAL;
5648 	}
5649 
5650 	return bdev_write_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
5651 					 cb, cb_arg);
5652 }
5653 
5654 static int
5655 bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5656 			   struct iovec *iov, int iovcnt, void *md_buf,
5657 			   uint64_t offset_blocks, uint64_t num_blocks,
5658 			   struct spdk_memory_domain *domain, void *domain_ctx,
5659 			   struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
5660 			   uint32_t nvme_cdw12_raw, uint32_t nvme_cdw13_raw,
5661 			   spdk_bdev_io_completion_cb cb, void *cb_arg)
5662 {
5663 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5664 	struct spdk_bdev_io *bdev_io;
5665 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5666 
5667 	if (spdk_unlikely(!desc->write)) {
5668 		return -EBADF;
5669 	}
5670 
5671 	if (spdk_unlikely(!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks))) {
5672 		return -EINVAL;
5673 	}
5674 
5675 	bdev_io = bdev_channel_get_io(channel);
5676 	if (spdk_unlikely(!bdev_io)) {
5677 		return -ENOMEM;
5678 	}
5679 
5680 	bdev_io->internal.ch = channel;
5681 	bdev_io->internal.desc = desc;
5682 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
5683 	bdev_io->u.bdev.iovs = iov;
5684 	bdev_io->u.bdev.iovcnt = iovcnt;
5685 	bdev_io->u.bdev.md_buf = md_buf;
5686 	bdev_io->u.bdev.num_blocks = num_blocks;
5687 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5688 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5689 	bdev_io->internal.memory_domain = domain;
5690 	bdev_io->internal.memory_domain_ctx = domain_ctx;
5691 	bdev_io->internal.accel_sequence = seq;
5692 	bdev_io->internal.has_accel_sequence = seq != NULL;
5693 	bdev_io->u.bdev.memory_domain = domain;
5694 	bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
5695 	bdev_io->u.bdev.accel_sequence = seq;
5696 	bdev_io->u.bdev.dif_check_flags = dif_check_flags;
5697 	bdev_io->u.bdev.nvme_cdw12.raw = nvme_cdw12_raw;
5698 	bdev_io->u.bdev.nvme_cdw13.raw = nvme_cdw13_raw;
5699 
5700 	_bdev_io_submit_ext(desc, bdev_io);
5701 
5702 	return 0;
5703 }
5704 
5705 int
5706 spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5707 		 struct iovec *iov, int iovcnt,
5708 		 uint64_t offset, uint64_t len,
5709 		 spdk_bdev_io_completion_cb cb, void *cb_arg)
5710 {
5711 	uint64_t offset_blocks, num_blocks;
5712 
5713 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
5714 				 len, &num_blocks) != 0) {
5715 		return -EINVAL;
5716 	}
5717 
5718 	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
5719 }
5720 
5721 int
5722 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5723 			struct iovec *iov, int iovcnt,
5724 			uint64_t offset_blocks, uint64_t num_blocks,
5725 			spdk_bdev_io_completion_cb cb, void *cb_arg)
5726 {
5727 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5728 
5729 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5730 					  num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, 0, 0,
5731 					  cb, cb_arg);
5732 }
5733 
5734 int
5735 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5736 				struct iovec *iov, int iovcnt, void *md_buf,
5737 				uint64_t offset_blocks, uint64_t num_blocks,
5738 				spdk_bdev_io_completion_cb cb, void *cb_arg)
5739 {
5740 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5741 
5742 	if (md_buf && !spdk_bdev_is_md_separate(bdev)) {
5743 		return -EINVAL;
5744 	}
5745 
5746 	if (md_buf && !_is_buf_allocated(iov)) {
5747 		return -EINVAL;
5748 	}
5749 
5750 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
5751 					  num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, 0, 0,
5752 					  cb, cb_arg);
5753 }
5754 
5755 int
5756 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5757 			    struct iovec *iov, int iovcnt,
5758 			    uint64_t offset_blocks, uint64_t num_blocks,
5759 			    spdk_bdev_io_completion_cb cb, void *cb_arg,
5760 			    struct spdk_bdev_ext_io_opts *opts)
5761 {
5762 	struct spdk_memory_domain *domain = NULL;
5763 	struct spdk_accel_sequence *seq = NULL;
5764 	void *domain_ctx = NULL, *md = NULL;
5765 	uint32_t dif_check_flags = 0;
5766 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5767 	uint32_t nvme_cdw12_raw = 0;
5768 	uint32_t nvme_cdw13_raw = 0;
5769 
5770 	if (opts) {
5771 		if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
5772 			return -EINVAL;
5773 		}
5774 		md = opts->metadata;
5775 		domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
5776 		domain_ctx = bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL);
5777 		seq = bdev_get_ext_io_opt(opts, accel_sequence, NULL);
5778 		nvme_cdw12_raw = bdev_get_ext_io_opt(opts, nvme_cdw12.raw, 0);
5779 		nvme_cdw13_raw = bdev_get_ext_io_opt(opts, nvme_cdw13.raw, 0);
5780 		if (md) {
5781 			if (spdk_unlikely(!spdk_bdev_is_md_separate(bdev))) {
5782 				return -EINVAL;
5783 			}
5784 
5785 			if (spdk_unlikely(!_is_buf_allocated(iov))) {
5786 				return -EINVAL;
5787 			}
5788 
5789 			if (spdk_unlikely(seq != NULL)) {
5790 				return -EINVAL;
5791 			}
5792 		}
5793 	}
5794 
5795 	dif_check_flags = bdev->dif_check_flags &
5796 			  ~(bdev_get_ext_io_opt(opts, dif_check_flags_exclude_mask, 0));
5797 
5798 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, num_blocks,
5799 					  domain, domain_ctx, seq, dif_check_flags,
5800 					  nvme_cdw12_raw, nvme_cdw13_raw, cb, cb_arg);
5801 }
5802 
5803 static void
5804 bdev_compare_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5805 {
5806 	struct spdk_bdev_io *parent_io = cb_arg;
5807 	struct spdk_bdev *bdev = parent_io->bdev;
5808 	uint8_t *read_buf = bdev_io->u.bdev.iovs[0].iov_base;
5809 	int i, rc = 0;
5810 
5811 	if (!success) {
5812 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5813 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
5814 		spdk_bdev_free_io(bdev_io);
5815 		return;
5816 	}
5817 
5818 	for (i = 0; i < parent_io->u.bdev.iovcnt; i++) {
5819 		rc = memcmp(read_buf,
5820 			    parent_io->u.bdev.iovs[i].iov_base,
5821 			    parent_io->u.bdev.iovs[i].iov_len);
5822 		if (rc) {
5823 			break;
5824 		}
5825 		read_buf += parent_io->u.bdev.iovs[i].iov_len;
5826 	}
5827 
5828 	if (rc == 0 && parent_io->u.bdev.md_buf && spdk_bdev_is_md_separate(bdev)) {
5829 		rc = memcmp(bdev_io->u.bdev.md_buf,
5830 			    parent_io->u.bdev.md_buf,
5831 			    spdk_bdev_get_md_size(bdev));
5832 	}
5833 
5834 	spdk_bdev_free_io(bdev_io);
5835 
5836 	if (rc == 0) {
5837 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5838 		parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
5839 	} else {
5840 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
5841 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
5842 	}
5843 }
5844 
5845 static void
5846 bdev_compare_do_read(void *_bdev_io)
5847 {
5848 	struct spdk_bdev_io *bdev_io = _bdev_io;
5849 	int rc;
5850 
5851 	rc = spdk_bdev_read_blocks(bdev_io->internal.desc,
5852 				   spdk_io_channel_from_ctx(bdev_io->internal.ch), NULL,
5853 				   bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
5854 				   bdev_compare_do_read_done, bdev_io);
5855 
5856 	if (rc == -ENOMEM) {
5857 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_do_read);
5858 	} else if (rc != 0) {
5859 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5860 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
5861 	}
5862 }
5863 
5864 static int
5865 bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5866 			     struct iovec *iov, int iovcnt, void *md_buf,
5867 			     uint64_t offset_blocks, uint64_t num_blocks,
5868 			     spdk_bdev_io_completion_cb cb, void *cb_arg)
5869 {
5870 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5871 	struct spdk_bdev_io *bdev_io;
5872 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5873 
5874 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5875 		return -EINVAL;
5876 	}
5877 
5878 	bdev_io = bdev_channel_get_io(channel);
5879 	if (!bdev_io) {
5880 		return -ENOMEM;
5881 	}
5882 
5883 	bdev_io->internal.ch = channel;
5884 	bdev_io->internal.desc = desc;
5885 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
5886 	bdev_io->u.bdev.iovs = iov;
5887 	bdev_io->u.bdev.iovcnt = iovcnt;
5888 	bdev_io->u.bdev.md_buf = md_buf;
5889 	bdev_io->u.bdev.num_blocks = num_blocks;
5890 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5891 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5892 	bdev_io->u.bdev.memory_domain = NULL;
5893 	bdev_io->u.bdev.memory_domain_ctx = NULL;
5894 	bdev_io->u.bdev.accel_sequence = NULL;
5895 
5896 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
5897 		bdev_io_submit(bdev_io);
5898 		return 0;
5899 	}
5900 
5901 	bdev_compare_do_read(bdev_io);
5902 
5903 	return 0;
5904 }
5905 
5906 int
5907 spdk_bdev_comparev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5908 			  struct iovec *iov, int iovcnt,
5909 			  uint64_t offset_blocks, uint64_t num_blocks,
5910 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
5911 {
5912 	return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5913 					    num_blocks, cb, cb_arg);
5914 }
5915 
5916 int
5917 spdk_bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5918 				  struct iovec *iov, int iovcnt, void *md_buf,
5919 				  uint64_t offset_blocks, uint64_t num_blocks,
5920 				  spdk_bdev_io_completion_cb cb, void *cb_arg)
5921 {
5922 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5923 		return -EINVAL;
5924 	}
5925 
5926 	if (md_buf && !_is_buf_allocated(iov)) {
5927 		return -EINVAL;
5928 	}
5929 
5930 	return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
5931 					    num_blocks, cb, cb_arg);
5932 }
5933 
5934 static int
5935 bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5936 			    void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5937 			    spdk_bdev_io_completion_cb cb, void *cb_arg)
5938 {
5939 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5940 	struct spdk_bdev_io *bdev_io;
5941 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5942 
5943 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5944 		return -EINVAL;
5945 	}
5946 
5947 	bdev_io = bdev_channel_get_io(channel);
5948 	if (!bdev_io) {
5949 		return -ENOMEM;
5950 	}
5951 
5952 	bdev_io->internal.ch = channel;
5953 	bdev_io->internal.desc = desc;
5954 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
5955 	bdev_io->u.bdev.iovs = &bdev_io->iov;
5956 	bdev_io->u.bdev.iovs[0].iov_base = buf;
5957 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
5958 	bdev_io->u.bdev.iovcnt = 1;
5959 	bdev_io->u.bdev.md_buf = md_buf;
5960 	bdev_io->u.bdev.num_blocks = num_blocks;
5961 	bdev_io->u.bdev.offset_blocks = offset_blocks;
5962 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
5963 	bdev_io->u.bdev.memory_domain = NULL;
5964 	bdev_io->u.bdev.memory_domain_ctx = NULL;
5965 	bdev_io->u.bdev.accel_sequence = NULL;
5966 
5967 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
5968 		bdev_io_submit(bdev_io);
5969 		return 0;
5970 	}
5971 
5972 	bdev_compare_do_read(bdev_io);
5973 
5974 	return 0;
5975 }
5976 
5977 int
5978 spdk_bdev_compare_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5979 			 void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5980 			 spdk_bdev_io_completion_cb cb, void *cb_arg)
5981 {
5982 	return bdev_compare_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
5983 					   cb, cb_arg);
5984 }
5985 
5986 int
5987 spdk_bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5988 				 void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5989 				 spdk_bdev_io_completion_cb cb, void *cb_arg)
5990 {
5991 	struct iovec iov = {
5992 		.iov_base = buf,
5993 	};
5994 
5995 	if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5996 		return -EINVAL;
5997 	}
5998 
5999 	if (md_buf && !_is_buf_allocated(&iov)) {
6000 		return -EINVAL;
6001 	}
6002 
6003 	return bdev_compare_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
6004 					   cb, cb_arg);
6005 }
6006 
6007 static void
6008 bdev_comparev_and_writev_blocks_unlocked(struct lba_range *range, void *ctx, int unlock_status)
6009 {
6010 	struct spdk_bdev_io *bdev_io = ctx;
6011 
6012 	if (unlock_status) {
6013 		SPDK_ERRLOG("LBA range unlock failed\n");
6014 	}
6015 
6016 	bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS ? true :
6017 			     false, bdev_io->internal.caller_ctx);
6018 }
6019 
6020 static void
6021 bdev_comparev_and_writev_blocks_unlock(struct spdk_bdev_io *bdev_io, int status)
6022 {
6023 	bdev_io->internal.status = status;
6024 
6025 	bdev_unlock_lba_range(bdev_io->internal.desc, spdk_io_channel_from_ctx(bdev_io->internal.ch),
6026 			      bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6027 			      bdev_comparev_and_writev_blocks_unlocked, bdev_io);
6028 }
6029 
6030 static void
6031 bdev_compare_and_write_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6032 {
6033 	struct spdk_bdev_io *parent_io = cb_arg;
6034 
6035 	if (!success) {
6036 		SPDK_ERRLOG("Compare and write operation failed\n");
6037 	}
6038 
6039 	spdk_bdev_free_io(bdev_io);
6040 
6041 	bdev_comparev_and_writev_blocks_unlock(parent_io,
6042 					       success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
6043 }
6044 
6045 static void
6046 bdev_compare_and_write_do_write(void *_bdev_io)
6047 {
6048 	struct spdk_bdev_io *bdev_io = _bdev_io;
6049 	int rc;
6050 
6051 	rc = spdk_bdev_writev_blocks(bdev_io->internal.desc,
6052 				     spdk_io_channel_from_ctx(bdev_io->internal.ch),
6053 				     bdev_io->u.bdev.fused_iovs, bdev_io->u.bdev.fused_iovcnt,
6054 				     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6055 				     bdev_compare_and_write_do_write_done, bdev_io);
6056 
6057 
6058 	if (rc == -ENOMEM) {
6059 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_write);
6060 	} else if (rc != 0) {
6061 		bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
6062 	}
6063 }
6064 
6065 static void
6066 bdev_compare_and_write_do_compare_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6067 {
6068 	struct spdk_bdev_io *parent_io = cb_arg;
6069 
6070 	spdk_bdev_free_io(bdev_io);
6071 
6072 	if (!success) {
6073 		bdev_comparev_and_writev_blocks_unlock(parent_io, SPDK_BDEV_IO_STATUS_MISCOMPARE);
6074 		return;
6075 	}
6076 
6077 	bdev_compare_and_write_do_write(parent_io);
6078 }
6079 
6080 static void
6081 bdev_compare_and_write_do_compare(void *_bdev_io)
6082 {
6083 	struct spdk_bdev_io *bdev_io = _bdev_io;
6084 	int rc;
6085 
6086 	rc = spdk_bdev_comparev_blocks(bdev_io->internal.desc,
6087 				       spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs,
6088 				       bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6089 				       bdev_compare_and_write_do_compare_done, bdev_io);
6090 
6091 	if (rc == -ENOMEM) {
6092 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_compare);
6093 	} else if (rc != 0) {
6094 		bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED);
6095 	}
6096 }
6097 
6098 static void
6099 bdev_comparev_and_writev_blocks_locked(struct lba_range *range, void *ctx, int status)
6100 {
6101 	struct spdk_bdev_io *bdev_io = ctx;
6102 
6103 	if (status) {
6104 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED;
6105 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
6106 		return;
6107 	}
6108 
6109 	bdev_compare_and_write_do_compare(bdev_io);
6110 }
6111 
6112 int
6113 spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6114 				     struct iovec *compare_iov, int compare_iovcnt,
6115 				     struct iovec *write_iov, int write_iovcnt,
6116 				     uint64_t offset_blocks, uint64_t num_blocks,
6117 				     spdk_bdev_io_completion_cb cb, void *cb_arg)
6118 {
6119 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6120 	struct spdk_bdev_io *bdev_io;
6121 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6122 
6123 	if (!desc->write) {
6124 		return -EBADF;
6125 	}
6126 
6127 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6128 		return -EINVAL;
6129 	}
6130 
6131 	if (num_blocks > bdev->acwu) {
6132 		return -EINVAL;
6133 	}
6134 
6135 	bdev_io = bdev_channel_get_io(channel);
6136 	if (!bdev_io) {
6137 		return -ENOMEM;
6138 	}
6139 
6140 	bdev_io->internal.ch = channel;
6141 	bdev_io->internal.desc = desc;
6142 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
6143 	bdev_io->u.bdev.iovs = compare_iov;
6144 	bdev_io->u.bdev.iovcnt = compare_iovcnt;
6145 	bdev_io->u.bdev.fused_iovs = write_iov;
6146 	bdev_io->u.bdev.fused_iovcnt = write_iovcnt;
6147 	bdev_io->u.bdev.md_buf = NULL;
6148 	bdev_io->u.bdev.num_blocks = num_blocks;
6149 	bdev_io->u.bdev.offset_blocks = offset_blocks;
6150 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6151 	bdev_io->u.bdev.memory_domain = NULL;
6152 	bdev_io->u.bdev.memory_domain_ctx = NULL;
6153 	bdev_io->u.bdev.accel_sequence = NULL;
6154 
6155 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
6156 		bdev_io_submit(bdev_io);
6157 		return 0;
6158 	}
6159 
6160 	return bdev_lock_lba_range(desc, ch, offset_blocks, num_blocks,
6161 				   bdev_comparev_and_writev_blocks_locked, bdev_io);
6162 }
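
/*
 * A minimal usage sketch (illustrative): atomically overwrite one block only if its
 * current contents match compare_iov.  On a mismatch the I/O completes with
 * SPDK_BDEV_IO_STATUS_MISCOMPARE.
 *
 *	rc = spdk_bdev_comparev_and_writev_blocks(desc, io_ch, &compare_iov, 1,
 *						  &write_iov, 1, offset_blocks, 1,
 *						  caw_done, NULL);
 *	if (rc == -EINVAL) {
 *		// num_blocks may not exceed the bdev's atomic compare-and-write unit (acwu)
 *	}
 */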
6163 
6164 int
6165 spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6166 		      struct iovec *iov, int iovcnt,
6167 		      uint64_t offset_blocks, uint64_t num_blocks,
6168 		      bool populate,
6169 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
6170 {
6171 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6172 	struct spdk_bdev_io *bdev_io;
6173 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6174 
6175 	if (!desc->write) {
6176 		return -EBADF;
6177 	}
6178 
6179 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6180 		return -EINVAL;
6181 	}
6182 
6183 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
6184 		return -ENOTSUP;
6185 	}
6186 
6187 	bdev_io = bdev_channel_get_io(channel);
6188 	if (!bdev_io) {
6189 		return -ENOMEM;
6190 	}
6191 
6192 	bdev_io->internal.ch = channel;
6193 	bdev_io->internal.desc = desc;
6194 	bdev_io->type = SPDK_BDEV_IO_TYPE_ZCOPY;
6195 	bdev_io->u.bdev.num_blocks = num_blocks;
6196 	bdev_io->u.bdev.offset_blocks = offset_blocks;
6197 	bdev_io->u.bdev.iovs = iov;
6198 	bdev_io->u.bdev.iovcnt = iovcnt;
6199 	bdev_io->u.bdev.md_buf = NULL;
6200 	bdev_io->u.bdev.zcopy.populate = populate ? 1 : 0;
6201 	bdev_io->u.bdev.zcopy.commit = 0;
6202 	bdev_io->u.bdev.zcopy.start = 1;
6203 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6204 	bdev_io->u.bdev.memory_domain = NULL;
6205 	bdev_io->u.bdev.memory_domain_ctx = NULL;
6206 	bdev_io->u.bdev.accel_sequence = NULL;
6207 
6208 	bdev_io_submit(bdev_io);
6209 
6210 	return 0;
6211 }
6212 
6213 int
6214 spdk_bdev_zcopy_end(struct spdk_bdev_io *bdev_io, bool commit,
6215 		    spdk_bdev_io_completion_cb cb, void *cb_arg)
6216 {
6217 	if (bdev_io->type != SPDK_BDEV_IO_TYPE_ZCOPY) {
6218 		return -EINVAL;
6219 	}
6220 
6221 	bdev_io->u.bdev.zcopy.commit = commit ? 1 : 0;
6222 	bdev_io->u.bdev.zcopy.start = 0;
6223 	bdev_io->internal.caller_ctx = cb_arg;
6224 	bdev_io->internal.cb = cb;
6225 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
6226 
6227 	bdev_io_submit(bdev_io);
6228 
6229 	return 0;
6230 }
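
/*
 * A minimal zcopy flow sketch (illustrative, names are assumptions): start a populated
 * zcopy to borrow the bdev's internal buffers, consume the data, then end the zcopy
 * with commit=false since nothing was modified.  zcopy_end_done (not shown) must free
 * the bdev_io with spdk_bdev_free_io().
 *
 *	static void
 *	zcopy_start_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 *	{
 *		struct iovec *iovs;
 *		int iovcnt;
 *
 *		if (success) {
 *			spdk_bdev_io_get_iovec(bdev_io, &iovs, &iovcnt);
 *			// ... read the data directly from iovs ...
 *			spdk_bdev_zcopy_end(bdev_io, false, zcopy_end_done, NULL);
 *		}
 *	}
 *
 *	rc = spdk_bdev_zcopy_start(desc, io_ch, NULL, 0, offset_blocks, num_blocks,
 *				   true, zcopy_start_done, NULL);
 */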
6231 
6232 int
6233 spdk_bdev_write_zeroes(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6234 		       uint64_t offset, uint64_t len,
6235 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
6236 {
6237 	uint64_t offset_blocks, num_blocks;
6238 
6239 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
6240 				 len, &num_blocks) != 0) {
6241 		return -EINVAL;
6242 	}
6243 
6244 	return spdk_bdev_write_zeroes_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6245 }
6246 
6247 int
6248 spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6249 			      uint64_t offset_blocks, uint64_t num_blocks,
6250 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
6251 {
6252 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6253 	struct spdk_bdev_io *bdev_io;
6254 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6255 
6256 	if (!desc->write) {
6257 		return -EBADF;
6258 	}
6259 
6260 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6261 		return -EINVAL;
6262 	}
6263 
6264 	if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) &&
6265 	    !bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
6266 		return -ENOTSUP;
6267 	}
6268 
6269 	bdev_io = bdev_channel_get_io(channel);
6270 
6271 	if (!bdev_io) {
6272 		return -ENOMEM;
6273 	}
6274 
6275 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
6276 	bdev_io->internal.ch = channel;
6277 	bdev_io->internal.desc = desc;
6278 	bdev_io->u.bdev.offset_blocks = offset_blocks;
6279 	bdev_io->u.bdev.num_blocks = num_blocks;
6280 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6281 	bdev_io->u.bdev.memory_domain = NULL;
6282 	bdev_io->u.bdev.memory_domain_ctx = NULL;
6283 	bdev_io->u.bdev.accel_sequence = NULL;
6284 
6285 	/* If the write_zeroes size is large and should be split, use the generic split
6286 	 * logic regardless of whether SPDK_BDEV_IO_TYPE_WRITE_ZEROES is supported or not.
6287 	 *
6288 	 * Then, send the write_zeroes request if SPDK_BDEV_IO_TYPE_WRITE_ZEROES is supported,
6289 	 * or emulate it using regular write requests otherwise.
6290 	 */
6291 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) ||
6292 	    bdev_io->internal.split) {
6293 		bdev_io_submit(bdev_io);
6294 		return 0;
6295 	}
6296 
6297 	assert(_bdev_get_block_size_with_md(bdev) <= ZERO_BUFFER_SIZE);
6298 
6299 	return bdev_write_zero_buffer(bdev_io);
6300 }
6301 
6302 int
6303 spdk_bdev_unmap(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6304 		uint64_t offset, uint64_t nbytes,
6305 		spdk_bdev_io_completion_cb cb, void *cb_arg)
6306 {
6307 	uint64_t offset_blocks, num_blocks;
6308 
6309 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
6310 				 nbytes, &num_blocks) != 0) {
6311 		return -EINVAL;
6312 	}
6313 
6314 	return spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6315 }
6316 
6317 static void
6318 bdev_io_complete_cb(void *ctx)
6319 {
6320 	struct spdk_bdev_io *bdev_io = ctx;
6321 
6322 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6323 	bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
6324 }
6325 
6326 int
6327 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6328 		       uint64_t offset_blocks, uint64_t num_blocks,
6329 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
6330 {
6331 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6332 	struct spdk_bdev_io *bdev_io;
6333 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6334 
6335 	if (!desc->write) {
6336 		return -EBADF;
6337 	}
6338 
6339 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6340 		return -EINVAL;
6341 	}
6342 
6343 	bdev_io = bdev_channel_get_io(channel);
6344 	if (!bdev_io) {
6345 		return -ENOMEM;
6346 	}
6347 
6348 	bdev_io->internal.ch = channel;
6349 	bdev_io->internal.desc = desc;
6350 	bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
6351 
6352 	bdev_io->u.bdev.iovs = &bdev_io->iov;
6353 	bdev_io->u.bdev.iovs[0].iov_base = NULL;
6354 	bdev_io->u.bdev.iovs[0].iov_len = 0;
6355 	bdev_io->u.bdev.iovcnt = 1;
6356 
6357 	bdev_io->u.bdev.offset_blocks = offset_blocks;
6358 	bdev_io->u.bdev.num_blocks = num_blocks;
6359 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6360 	bdev_io->u.bdev.memory_domain = NULL;
6361 	bdev_io->u.bdev.memory_domain_ctx = NULL;
6362 	bdev_io->u.bdev.accel_sequence = NULL;
6363 
6364 	if (num_blocks == 0) {
6365 		spdk_thread_send_msg(spdk_get_thread(), bdev_io_complete_cb, bdev_io);
6366 		return 0;
6367 	}
6368 
6369 	bdev_io_submit(bdev_io);
6370 	return 0;
6371 }
6372 
6373 int
6374 spdk_bdev_flush(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6375 		uint64_t offset, uint64_t length,
6376 		spdk_bdev_io_completion_cb cb, void *cb_arg)
6377 {
6378 	uint64_t offset_blocks, num_blocks;
6379 
6380 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
6381 				 length, &num_blocks) != 0) {
6382 		return -EINVAL;
6383 	}
6384 
6385 	return spdk_bdev_flush_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6386 }
6387 
6388 int
6389 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6390 		       uint64_t offset_blocks, uint64_t num_blocks,
6391 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
6392 {
6393 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6394 	struct spdk_bdev_io *bdev_io;
6395 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6396 
6397 	if (!desc->write) {
6398 		return -EBADF;
6399 	}
6400 
6401 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6402 		return -EINVAL;
6403 	}
6404 
6405 	bdev_io = bdev_channel_get_io(channel);
6406 	if (!bdev_io) {
6407 		return -ENOMEM;
6408 	}
6409 
6410 	bdev_io->internal.ch = channel;
6411 	bdev_io->internal.desc = desc;
6412 	bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
6413 	bdev_io->u.bdev.iovs = NULL;
6414 	bdev_io->u.bdev.iovcnt = 0;
6415 	bdev_io->u.bdev.offset_blocks = offset_blocks;
6416 	bdev_io->u.bdev.num_blocks = num_blocks;
6417 	bdev_io->u.bdev.memory_domain = NULL;
6418 	bdev_io->u.bdev.memory_domain_ctx = NULL;
6419 	bdev_io->u.bdev.accel_sequence = NULL;
6420 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6421 
6422 	bdev_io_submit(bdev_io);
6423 	return 0;
6424 }
6425 
6426 static int bdev_reset_poll_for_outstanding_io(void *ctx);
6427 
6428 static void
6429 bdev_reset_check_outstanding_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
6430 {
6431 	struct spdk_bdev_channel *ch = _ctx;
6432 	struct spdk_bdev_io *bdev_io;
6433 
6434 	bdev_io = TAILQ_FIRST(&ch->queued_resets);
6435 
6436 	if (status == -EBUSY) {
6437 		if (spdk_get_ticks() < bdev_io->u.reset.wait_poller.stop_time_tsc) {
6438 			bdev_io->u.reset.wait_poller.poller = SPDK_POLLER_REGISTER(bdev_reset_poll_for_outstanding_io,
6439 							      ch, BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
6440 		} else {
6441 			TAILQ_REMOVE(&ch->queued_resets, bdev_io, internal.link);
6442 
6443 			if (TAILQ_EMPTY(&ch->io_memory_domain) && TAILQ_EMPTY(&ch->io_accel_exec)) {
6444 				/* If outstanding I/Os are still present and reset_io_drain_timeout
6445 				 * seconds have passed, start the reset. */
6446 				bdev_io_submit_reset(bdev_io);
6447 			} else {
6448 				/* We still have an in-progress memory domain pull/push or we're
6449 				 * executing an accel sequence.  Since we cannot abort either of those
6450 				 * operations, fail the reset request. */
6451 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
6452 			}
6453 		}
6454 	} else {
6455 		TAILQ_REMOVE(&ch->queued_resets, bdev_io, internal.link);
6456 		SPDK_DEBUGLOG(bdev,
6457 			      "Skipping reset for underlying device of bdev: %s - no outstanding I/O.\n",
6458 			      ch->bdev->name);
6459 		/* Mark the completion status as SUCCESS and complete the reset. */
6460 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
6461 	}
6462 }
6463 
6464 static void
6465 bdev_reset_check_outstanding_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6466 				struct spdk_io_channel *io_ch, void *_ctx)
6467 {
6468 	struct spdk_bdev_channel *cur_ch = __io_ch_to_bdev_ch(io_ch);
6469 	int status = 0;
6470 
6471 	if (cur_ch->io_outstanding > 0 ||
6472 	    !TAILQ_EMPTY(&cur_ch->io_memory_domain) ||
6473 	    !TAILQ_EMPTY(&cur_ch->io_accel_exec)) {
6474 		/* If a channel has outstanding I/O, set the status to -EBUSY. This will stop
6475 		 * further iteration over the rest of the channels and pass a non-zero status
6476 		 * to the callback function. */
6477 		status = -EBUSY;
6478 	}
6479 	spdk_bdev_for_each_channel_continue(i, status);
6480 }
6481 
6482 static int
6483 bdev_reset_poll_for_outstanding_io(void *ctx)
6484 {
6485 	struct spdk_bdev_channel *ch = ctx;
6486 	struct spdk_bdev_io *bdev_io;
6487 
6488 	bdev_io = TAILQ_FIRST(&ch->queued_resets);
6489 
6490 	spdk_poller_unregister(&bdev_io->u.reset.wait_poller.poller);
6491 	spdk_bdev_for_each_channel(ch->bdev, bdev_reset_check_outstanding_io, ch,
6492 				   bdev_reset_check_outstanding_io_done);
6493 
6494 	return SPDK_POLLER_BUSY;
6495 }
6496 
6497 static void
6498 bdev_reset_freeze_channel_done(struct spdk_bdev *bdev, void *_ctx, int status)
6499 {
6500 	struct spdk_bdev_channel *ch = _ctx;
6501 	struct spdk_bdev_io *bdev_io;
6502 
6503 	bdev_io = TAILQ_FIRST(&ch->queued_resets);
6504 
6505 	if (bdev->reset_io_drain_timeout == 0) {
6506 		TAILQ_REMOVE(&ch->queued_resets, bdev_io, internal.link);
6507 
6508 		bdev_io_submit_reset(bdev_io);
6509 		return;
6510 	}
6511 
6512 	bdev_io->u.reset.wait_poller.stop_time_tsc = spdk_get_ticks() +
6513 			(ch->bdev->reset_io_drain_timeout * spdk_get_ticks_hz());
6514 
6515 	/* If bdev->reset_io_drain_timeout is non-zero, submit the reset to the
6516 	 * underlying module only if outstanding I/O remain after
6517 	 * reset_io_drain_timeout seconds have passed. */
6518 	spdk_bdev_for_each_channel(ch->bdev, bdev_reset_check_outstanding_io, ch,
6519 				   bdev_reset_check_outstanding_io_done);
6520 }
6521 
6522 static void
6523 bdev_reset_freeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6524 			  struct spdk_io_channel *ch, void *_ctx)
6525 {
6526 	struct spdk_bdev_channel	*channel;
6527 	struct spdk_bdev_mgmt_channel	*mgmt_channel;
6528 	struct spdk_bdev_shared_resource *shared_resource;
6529 	bdev_io_tailq_t			tmp_queued;
6530 
6531 	TAILQ_INIT(&tmp_queued);
6532 
6533 	channel = __io_ch_to_bdev_ch(ch);
6534 	shared_resource = channel->shared_resource;
6535 	mgmt_channel = shared_resource->mgmt_ch;
6536 
6537 	channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
6538 
6539 	if ((channel->flags & BDEV_CH_QOS_ENABLED) != 0) {
6540 		TAILQ_SWAP(&channel->qos_queued_io, &tmp_queued, spdk_bdev_io, internal.link);
6541 	}
6542 
6543 	bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
6544 	bdev_abort_all_buf_io(mgmt_channel, channel);
6545 	bdev_abort_all_queued_io(&tmp_queued, channel);
6546 
6547 	spdk_bdev_for_each_channel_continue(i, 0);
6548 }
6549 
6550 static void
6551 bdev_start_reset(void *ctx)
6552 {
6553 	struct spdk_bdev_channel *ch = ctx;
6554 
6555 	spdk_bdev_for_each_channel(ch->bdev, bdev_reset_freeze_channel, ch,
6556 				   bdev_reset_freeze_channel_done);
6557 }
6558 
6559 static void
6560 bdev_channel_start_reset(struct spdk_bdev_channel *ch)
6561 {
6562 	struct spdk_bdev *bdev = ch->bdev;
6563 
6564 	assert(!TAILQ_EMPTY(&ch->queued_resets));
6565 
6566 	spdk_spin_lock(&bdev->internal.spinlock);
6567 	if (bdev->internal.reset_in_progress == NULL) {
6568 		bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
6569 		/*
6570 		 * Take a channel reference for the target bdev for the life of this
6571 		 *  reset.  This guards against the channel getting destroyed while
6572 		 *  spdk_bdev_for_each_channel() calls related to this reset IO are in
6573 		 *  progress.  We will release the reference when this reset is
6574 		 *  completed.
6575 		 */
6576 		bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
6577 		bdev_start_reset(ch);
6578 	}
6579 	spdk_spin_unlock(&bdev->internal.spinlock);
6580 }
6581 
6582 int
6583 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6584 		spdk_bdev_io_completion_cb cb, void *cb_arg)
6585 {
6586 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6587 	struct spdk_bdev_io *bdev_io;
6588 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6589 
6590 	bdev_io = bdev_channel_get_io(channel);
6591 	if (!bdev_io) {
6592 		return -ENOMEM;
6593 	}
6594 
6595 	bdev_io->internal.ch = channel;
6596 	bdev_io->internal.desc = desc;
6597 	bdev_io->internal.submit_tsc = spdk_get_ticks();
6598 	bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
6599 	bdev_io->u.reset.ch_ref = NULL;
6600 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6601 
6602 	spdk_spin_lock(&bdev->internal.spinlock);
6603 	TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link);
6604 	spdk_spin_unlock(&bdev->internal.spinlock);
6605 
6606 	bdev_ch_add_to_io_submitted(bdev_io);
6607 
6608 	bdev_channel_start_reset(channel);
6609 
6610 	return 0;
6611 }
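
/*
 * A minimal usage sketch (illustrative): issue a reset and handle the only error this
 * function can return.
 *
 *	rc = spdk_bdev_reset(desc, io_ch, reset_done, NULL);
 *	if (rc == -ENOMEM) {
 *		// no spdk_bdev_io available right now; retry later,
 *		// e.g. via spdk_bdev_queue_io_wait()
 *	}
 */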
6612 
6613 void
6614 spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
6615 		      struct spdk_bdev_io_stat *stat)
6616 {
6617 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6618 
6619 	bdev_get_io_stat(stat, channel->stat);
6620 }
6621 
6622 static void
6623 bdev_get_device_stat_done(struct spdk_bdev *bdev, void *_ctx, int status)
6624 {
6625 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = _ctx;
6626 
6627 	bdev_iostat_ctx->cb(bdev, bdev_iostat_ctx->stat,
6628 			    bdev_iostat_ctx->cb_arg, 0);
6629 	free(bdev_iostat_ctx);
6630 }
6631 
6632 static void
6633 bdev_get_each_channel_stat(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6634 			   struct spdk_io_channel *ch, void *_ctx)
6635 {
6636 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = _ctx;
6637 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6638 
6639 	spdk_bdev_add_io_stat(bdev_iostat_ctx->stat, channel->stat);
6640 	spdk_bdev_for_each_channel_continue(i, 0);
6641 }
6642 
6643 void
6644 spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
6645 			  spdk_bdev_get_device_stat_cb cb, void *cb_arg)
6646 {
6647 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx;
6648 
6649 	assert(bdev != NULL);
6650 	assert(stat != NULL);
6651 	assert(cb != NULL);
6652 
6653 	bdev_iostat_ctx = calloc(1, sizeof(struct spdk_bdev_iostat_ctx));
6654 	if (bdev_iostat_ctx == NULL) {
6655 		SPDK_ERRLOG("Unable to allocate memory for spdk_bdev_iostat_ctx\n");
6656 		cb(bdev, stat, cb_arg, -ENOMEM);
6657 		return;
6658 	}
6659 
6660 	bdev_iostat_ctx->stat = stat;
6661 	bdev_iostat_ctx->cb = cb;
6662 	bdev_iostat_ctx->cb_arg = cb_arg;
6663 
6664 	/* Start with the statistics from previously deleted channels. */
6665 	spdk_spin_lock(&bdev->internal.spinlock);
6666 	bdev_get_io_stat(bdev_iostat_ctx->stat, bdev->internal.stat);
6667 	spdk_spin_unlock(&bdev->internal.spinlock);
6668 
6669 	/* Then iterate and add the statistics from each existing channel. */
6670 	spdk_bdev_for_each_channel(bdev, bdev_get_each_channel_stat, bdev_iostat_ctx,
6671 				   bdev_get_device_stat_done);
6672 }
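
/*
 * A minimal usage sketch (illustrative, allocation and names are assumptions): collect
 * device-wide statistics.  The stat buffer must remain valid until the callback runs.
 *
 *	static void
 *	stat_done(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
 *	{
 *		if (rc == 0) {
 *			printf("%s: %" PRIu64 " bytes read\n",
 *			       spdk_bdev_get_name(bdev), stat->bytes_read);
 *		}
 *		free(stat);
 *	}
 *
 *	struct spdk_bdev_io_stat *stat = calloc(1, sizeof(*stat));
 *
 *	if (stat != NULL) {
 *		spdk_bdev_get_device_stat(bdev, stat, stat_done, NULL);
 *	}
 */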
6673 
6674 struct bdev_iostat_reset_ctx {
6675 	enum spdk_bdev_reset_stat_mode mode;
6676 	bdev_reset_device_stat_cb cb;
6677 	void *cb_arg;
6678 };
6679 
6680 static void
6681 bdev_reset_device_stat_done(struct spdk_bdev *bdev, void *_ctx, int status)
6682 {
6683 	struct bdev_iostat_reset_ctx *ctx = _ctx;
6684 
6685 	ctx->cb(bdev, ctx->cb_arg, 0);
6686 
6687 	free(ctx);
6688 }
6689 
6690 static void
6691 bdev_reset_each_channel_stat(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6692 			     struct spdk_io_channel *ch, void *_ctx)
6693 {
6694 	struct bdev_iostat_reset_ctx *ctx = _ctx;
6695 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6696 
6697 	spdk_bdev_reset_io_stat(channel->stat, ctx->mode);
6698 
6699 	spdk_bdev_for_each_channel_continue(i, 0);
6700 }
6701 
6702 void
6703 bdev_reset_device_stat(struct spdk_bdev *bdev, enum spdk_bdev_reset_stat_mode mode,
6704 		       bdev_reset_device_stat_cb cb, void *cb_arg)
6705 {
6706 	struct bdev_iostat_reset_ctx *ctx;
6707 
6708 	assert(bdev != NULL);
6709 	assert(cb != NULL);
6710 
6711 	ctx = calloc(1, sizeof(*ctx));
6712 	if (ctx == NULL) {
6713 		SPDK_ERRLOG("Unable to allocate bdev_iostat_reset_ctx.\n");
6714 		cb(bdev, cb_arg, -ENOMEM);
6715 		return;
6716 	}
6717 
6718 	ctx->mode = mode;
6719 	ctx->cb = cb;
6720 	ctx->cb_arg = cb_arg;
6721 
6722 	spdk_spin_lock(&bdev->internal.spinlock);
6723 	spdk_bdev_reset_io_stat(bdev->internal.stat, mode);
6724 	spdk_spin_unlock(&bdev->internal.spinlock);
6725 
6726 	spdk_bdev_for_each_channel(bdev,
6727 				   bdev_reset_each_channel_stat,
6728 				   ctx,
6729 				   bdev_reset_device_stat_done);
6730 }
6731 
6732 int
6733 spdk_bdev_nvme_admin_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6734 			      const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
6735 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
6736 {
6737 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6738 	struct spdk_bdev_io *bdev_io;
6739 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6740 
6741 	if (!desc->write) {
6742 		return -EBADF;
6743 	}
6744 
6745 	if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN))) {
6746 		return -ENOTSUP;
6747 	}
6748 
6749 	bdev_io = bdev_channel_get_io(channel);
6750 	if (!bdev_io) {
6751 		return -ENOMEM;
6752 	}
6753 
6754 	bdev_io->internal.ch = channel;
6755 	bdev_io->internal.desc = desc;
6756 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
6757 	bdev_io->u.nvme_passthru.cmd = *cmd;
6758 	bdev_io->u.nvme_passthru.buf = buf;
6759 	bdev_io->u.nvme_passthru.nbytes = nbytes;
6760 	bdev_io->u.nvme_passthru.md_buf = NULL;
6761 	bdev_io->u.nvme_passthru.md_len = 0;
6762 
6763 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6764 
6765 	bdev_io_submit(bdev_io);
6766 	return 0;
6767 }
6768 
6769 int
6770 spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6771 			   const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
6772 			   spdk_bdev_io_completion_cb cb, void *cb_arg)
6773 {
6774 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6775 	struct spdk_bdev_io *bdev_io;
6776 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6777 
6778 	if (!desc->write) {
6779 		/*
6780 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
6781 		 *  to easily determine if the command is a read or write, but for now just
6782 		 *  do not allow io_passthru with a read-only descriptor.
6783 		 */
6784 		return -EBADF;
6785 	}
6786 
6787 	if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
6788 		return -ENOTSUP;
6789 	}
6790 
6791 	bdev_io = bdev_channel_get_io(channel);
6792 	if (!bdev_io) {
6793 		return -ENOMEM;
6794 	}
6795 
6796 	bdev_io->internal.ch = channel;
6797 	bdev_io->internal.desc = desc;
6798 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO;
6799 	bdev_io->u.nvme_passthru.cmd = *cmd;
6800 	bdev_io->u.nvme_passthru.buf = buf;
6801 	bdev_io->u.nvme_passthru.nbytes = nbytes;
6802 	bdev_io->u.nvme_passthru.md_buf = NULL;
6803 	bdev_io->u.nvme_passthru.md_len = 0;
6804 
6805 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6806 
6807 	bdev_io_submit(bdev_io);
6808 	return 0;
6809 }
6810 
6811 int
6812 spdk_bdev_nvme_io_passthru_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6813 			      const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len,
6814 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
6815 {
6816 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6817 	struct spdk_bdev_io *bdev_io;
6818 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6819 
6820 	if (!desc->write) {
6821 		/*
6822 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
6823 		 *  to easily determine if the command is a read or write, but for now just
6824 		 *  do not allow io_passthru with a read-only descriptor.
6825 		 */
6826 		return -EBADF;
6827 	}
6828 
6829 	if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
6830 		return -ENOTSUP;
6831 	}
6832 
6833 	bdev_io = bdev_channel_get_io(channel);
6834 	if (!bdev_io) {
6835 		return -ENOMEM;
6836 	}
6837 
6838 	bdev_io->internal.ch = channel;
6839 	bdev_io->internal.desc = desc;
6840 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO_MD;
6841 	bdev_io->u.nvme_passthru.cmd = *cmd;
6842 	bdev_io->u.nvme_passthru.buf = buf;
6843 	bdev_io->u.nvme_passthru.nbytes = nbytes;
6844 	bdev_io->u.nvme_passthru.md_buf = md_buf;
6845 	bdev_io->u.nvme_passthru.md_len = md_len;
6846 
6847 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6848 
6849 	bdev_io_submit(bdev_io);
6850 	return 0;
6851 }
6852 
6853 int
6854 spdk_bdev_nvme_iov_passthru_md(struct spdk_bdev_desc *desc,
6855 			       struct spdk_io_channel *ch,
6856 			       const struct spdk_nvme_cmd *cmd,
6857 			       struct iovec *iov, int iovcnt, size_t nbytes,
6858 			       void *md_buf, size_t md_len,
6859 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
6860 {
6861 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6862 	struct spdk_bdev_io *bdev_io;
6863 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6864 
6865 	if (!desc->write) {
6866 		/*
6867 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
6868 		 * to easily determine if the command is a read or write, but for now just
6869 		 * do not allow io_passthru with a read-only descriptor.
6870 		 */
6871 		return -EBADF;
6872 	}
6873 
6874 	if (md_buf && spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
6875 		return -ENOTSUP;
6876 	} else if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
6877 		return -ENOTSUP;
6878 	}
6879 
6880 	bdev_io = bdev_channel_get_io(channel);
6881 	if (!bdev_io) {
6882 		return -ENOMEM;
6883 	}
6884 
6885 	bdev_io->internal.ch = channel;
6886 	bdev_io->internal.desc = desc;
6887 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IOV_MD;
6888 	bdev_io->u.nvme_passthru.cmd = *cmd;
6889 	bdev_io->u.nvme_passthru.iovs = iov;
6890 	bdev_io->u.nvme_passthru.iovcnt = iovcnt;
6891 	bdev_io->u.nvme_passthru.nbytes = nbytes;
6892 	bdev_io->u.nvme_passthru.md_buf = md_buf;
6893 	bdev_io->u.nvme_passthru.md_len = md_len;
6894 
6895 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6896 
6897 	bdev_io_submit(bdev_io);
6898 	return 0;
6899 }
6900 
6901 static void bdev_abort_retry(void *ctx);
6902 static void bdev_abort(struct spdk_bdev_io *parent_io);
6903 
6904 static void
6905 bdev_abort_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6906 {
6907 	struct spdk_bdev_channel *channel = bdev_io->internal.ch;
6908 	struct spdk_bdev_io *parent_io = cb_arg;
6909 	struct spdk_bdev_io *bio_to_abort, *tmp_io;
6910 
6911 	bio_to_abort = bdev_io->u.abort.bio_to_abort;
6912 
6913 	spdk_bdev_free_io(bdev_io);
6914 
6915 	if (!success) {
6916 		/* Check if the target I/O completed in the meantime. */
6917 		TAILQ_FOREACH(tmp_io, &channel->io_submitted, internal.ch_link) {
6918 			if (tmp_io == bio_to_abort) {
6919 				break;
6920 			}
6921 		}
6922 
6923 		/* If the target I/O still exists, set the parent to failed. */
6924 		if (tmp_io != NULL) {
6925 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6926 		}
6927 	}
6928 
6929 	parent_io->u.bdev.split_outstanding--;
6930 	if (parent_io->u.bdev.split_outstanding == 0) {
6931 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
6932 			bdev_abort_retry(parent_io);
6933 		} else {
6934 			bdev_io_complete(parent_io);
6935 		}
6936 	}
6937 }
6938 
6939 static int
6940 bdev_abort_io(struct spdk_bdev_desc *desc, struct spdk_bdev_channel *channel,
6941 	      struct spdk_bdev_io *bio_to_abort,
6942 	      spdk_bdev_io_completion_cb cb, void *cb_arg)
6943 {
6944 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6945 	struct spdk_bdev_io *bdev_io;
6946 
6947 	if (bio_to_abort->type == SPDK_BDEV_IO_TYPE_ABORT ||
6948 	    bio_to_abort->type == SPDK_BDEV_IO_TYPE_RESET) {
6949 		/* TODO: Abort reset or abort request. */
6950 		return -ENOTSUP;
6951 	}
6952 
6953 	bdev_io = bdev_channel_get_io(channel);
6954 	if (bdev_io == NULL) {
6955 		return -ENOMEM;
6956 	}
6957 
6958 	bdev_io->internal.ch = channel;
6959 	bdev_io->internal.desc = desc;
6960 	bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
6961 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
6962 
6963 	if (bdev->split_on_optimal_io_boundary && bio_to_abort->internal.split) {
6964 		assert(bdev_io_should_split(bio_to_abort));
6965 		bdev_io->u.bdev.abort.bio_cb_arg = bio_to_abort;
6966 
6967 		/* The parent abort request is not submitted directly, but to manage its
6968 		 * execution, add it to the submitted list here.
6969 		 */
6970 		bdev_io->internal.submit_tsc = spdk_get_ticks();
6971 		bdev_ch_add_to_io_submitted(bdev_io);
6972 
6973 		bdev_abort(bdev_io);
6974 
6975 		return 0;
6976 	}
6977 
6978 	bdev_io->u.abort.bio_to_abort = bio_to_abort;
6979 
6980 	/* Submit the abort request to the underlying bdev module. */
6981 	bdev_io_submit(bdev_io);
6982 
6983 	return 0;
6984 }
6985 
6986 static bool
6987 bdev_io_on_tailq(struct spdk_bdev_io *bdev_io, bdev_io_tailq_t *tailq)
6988 {
6989 	struct spdk_bdev_io *iter;
6990 
6991 	TAILQ_FOREACH(iter, tailq, internal.link) {
6992 		if (iter == bdev_io) {
6993 			return true;
6994 		}
6995 	}
6996 
6997 	return false;
6998 }
6999 
7000 static uint32_t
7001 _bdev_abort(struct spdk_bdev_io *parent_io)
7002 {
7003 	struct spdk_bdev_desc *desc = parent_io->internal.desc;
7004 	struct spdk_bdev_channel *channel = parent_io->internal.ch;
7005 	void *bio_cb_arg;
7006 	struct spdk_bdev_io *bio_to_abort;
7007 	uint32_t matched_ios;
7008 	int rc;
7009 
7010 	bio_cb_arg = parent_io->u.bdev.abort.bio_cb_arg;
7011 
7012 	/* matched_ios is returned to the caller, which stores it in split_outstanding.
7013 	 *
7014 	 * This function is used in two cases: 1) the same cb_arg is used for
7015 	 * multiple I/Os, and 2) a single large I/O is split into smaller ones.
7016 	 * Incrementing split_outstanding directly here could confuse readers,
7017 	 * especially in the 1st case.
7018 	 *
7019 	 * Completion of an I/O abort is processed only after the stack unwinds, so
7020 	 * deferring the assignment to the caller works as expected.
7021 	 */
7022 	matched_ios = 0;
7023 	parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
7024 
7025 	TAILQ_FOREACH(bio_to_abort, &channel->io_submitted, internal.ch_link) {
7026 		if (bio_to_abort->internal.caller_ctx != bio_cb_arg) {
7027 			continue;
7028 		}
7029 
7030 		if (bio_to_abort->internal.submit_tsc > parent_io->internal.submit_tsc) {
7031 			/* Any I/O which was submitted after this abort command should be excluded. */
7032 			continue;
7033 		}
7034 
7035 		/* We can't abort a request that's being pushed/pulled or executed by accel */
7036 		if (bdev_io_on_tailq(bio_to_abort, &channel->io_accel_exec) ||
7037 		    bdev_io_on_tailq(bio_to_abort, &channel->io_memory_domain)) {
7038 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7039 			break;
7040 		}
7041 
7042 		rc = bdev_abort_io(desc, channel, bio_to_abort, bdev_abort_io_done, parent_io);
7043 		if (rc != 0) {
7044 			if (rc == -ENOMEM) {
7045 				parent_io->internal.status = SPDK_BDEV_IO_STATUS_NOMEM;
7046 			} else {
7047 				parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7048 			}
7049 			break;
7050 		}
7051 		matched_ios++;
7052 	}
7053 
7054 	return matched_ios;
7055 }
7056 
7057 static void
7058 bdev_abort_retry(void *ctx)
7059 {
7060 	struct spdk_bdev_io *parent_io = ctx;
7061 	uint32_t matched_ios;
7062 
7063 	matched_ios = _bdev_abort(parent_io);
7064 
7065 	if (matched_ios == 0) {
7066 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7067 			bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
7068 		} else {
7069 			/* On retry, finding no target I/O is a success because it means
7070 			 * the target I/Os completed in the meantime.
7071 			 */
7072 			bdev_io_complete(parent_io);
7073 		}
7074 		return;
7075 	}
7076 
7077 	/* Use split_outstanding to manage the progress of aborting I/Os. */
7078 	parent_io->u.bdev.split_outstanding = matched_ios;
7079 }
7080 
7081 static void
7082 bdev_abort(struct spdk_bdev_io *parent_io)
7083 {
7084 	uint32_t matched_ios;
7085 
7086 	matched_ios = _bdev_abort(parent_io);
7087 
7088 	if (matched_ios == 0) {
7089 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7090 			bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
7091 		} else {
7092 			/* For the initial abort, finding no target I/O is a failure. */
7093 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7094 			bdev_io_complete(parent_io);
7095 		}
7096 		return;
7097 	}
7098 
7099 	/* Use split_outstanding to manage the progress of aborting I/Os. */
7100 	parent_io->u.bdev.split_outstanding = matched_ios;
7101 }
7102 
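/*
 * Illustrative sketch (assumed caller-side code): abort every outstanding I/O that was
 * submitted on this channel with io_ctx as its completion cb_arg. The names io_ctx,
 * abort_done_cb, and abort_ctx are hypothetical.
 *
 *	rc = spdk_bdev_abort(desc, ch, io_ctx, abort_done_cb, abort_ctx);
 *
 * A return of -ENOTSUP means the underlying bdev cannot abort I/Os; -ENOMEM means no
 * spdk_bdev_io was available and the caller may retry via spdk_bdev_queue_io_wait().
 */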
7103 int
7104 spdk_bdev_abort(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
7105 		void *bio_cb_arg,
7106 		spdk_bdev_io_completion_cb cb, void *cb_arg)
7107 {
7108 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7109 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7110 	struct spdk_bdev_io *bdev_io;
7111 
7112 	if (bio_cb_arg == NULL) {
7113 		return -EINVAL;
7114 	}
7115 
7116 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ABORT)) {
7117 		return -ENOTSUP;
7118 	}
7119 
7120 	bdev_io = bdev_channel_get_io(channel);
7121 	if (bdev_io == NULL) {
7122 		return -ENOMEM;
7123 	}
7124 
7125 	bdev_io->internal.ch = channel;
7126 	bdev_io->internal.desc = desc;
7127 	bdev_io->internal.submit_tsc = spdk_get_ticks();
7128 	bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
7129 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
7130 
7131 	bdev_io->u.bdev.abort.bio_cb_arg = bio_cb_arg;
7132 
7133 	/* Parent abort request is not submitted directly, but to manage its execution,
7134 	 * add it to the submitted list here.
7135 	 */
7136 	bdev_ch_add_to_io_submitted(bdev_io);
7137 
7138 	bdev_abort(bdev_io);
7139 
7140 	return 0;
7141 }
7142 
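/*
 * Illustrative sketch (assumed caller-side code): the usual retry pattern when a submit
 * routine such as spdk_bdev_read() returns -ENOMEM. The names resubmit_fn and io_ctx are
 * hypothetical; the entry must remain valid until the callback fires.
 *
 *	struct spdk_bdev_io_wait_entry entry;
 *
 *	entry.bdev = bdev;
 *	entry.cb_fn = resubmit_fn;
 *	entry.cb_arg = io_ctx;
 *	spdk_bdev_queue_io_wait(bdev, ch, &entry);
 */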
7143 int
7144 spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
7145 			struct spdk_bdev_io_wait_entry *entry)
7146 {
7147 	struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7148 	struct spdk_bdev_mgmt_channel *mgmt_ch = channel->shared_resource->mgmt_ch;
7149 
7150 	if (bdev != entry->bdev) {
7151 		SPDK_ERRLOG("bdevs do not match\n");
7152 		return -EINVAL;
7153 	}
7154 
7155 	if (mgmt_ch->per_thread_cache_count > 0) {
7156 		SPDK_ERRLOG("Cannot queue io_wait if spdk_bdev_io available in per-thread cache\n");
7157 		return -EINVAL;
7158 	}
7159 
7160 	TAILQ_INSERT_TAIL(&mgmt_ch->io_wait_queue, entry, link);
7161 	return 0;
7162 }
7163 
7164 static inline void
7165 bdev_io_update_io_stat(struct spdk_bdev_io *bdev_io, uint64_t tsc_diff)
7166 {
7167 	enum spdk_bdev_io_status io_status = bdev_io->internal.status;
7168 	struct spdk_bdev_io_stat *io_stat = bdev_io->internal.ch->stat;
7169 	uint64_t num_blocks = bdev_io->u.bdev.num_blocks;
7170 	uint32_t blocklen = bdev_io->bdev->blocklen;
7171 
7172 	if (spdk_likely(io_status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7173 		switch (bdev_io->type) {
7174 		case SPDK_BDEV_IO_TYPE_READ:
7175 			io_stat->bytes_read += num_blocks * blocklen;
7176 			io_stat->num_read_ops++;
7177 			io_stat->read_latency_ticks += tsc_diff;
7178 			if (io_stat->max_read_latency_ticks < tsc_diff) {
7179 				io_stat->max_read_latency_ticks = tsc_diff;
7180 			}
7181 			if (io_stat->min_read_latency_ticks > tsc_diff) {
7182 				io_stat->min_read_latency_ticks = tsc_diff;
7183 			}
7184 			break;
7185 		case SPDK_BDEV_IO_TYPE_WRITE:
7186 			io_stat->bytes_written += num_blocks * blocklen;
7187 			io_stat->num_write_ops++;
7188 			io_stat->write_latency_ticks += tsc_diff;
7189 			if (io_stat->max_write_latency_ticks < tsc_diff) {
7190 				io_stat->max_write_latency_ticks = tsc_diff;
7191 			}
7192 			if (io_stat->min_write_latency_ticks > tsc_diff) {
7193 				io_stat->min_write_latency_ticks = tsc_diff;
7194 			}
7195 			break;
7196 		case SPDK_BDEV_IO_TYPE_UNMAP:
7197 			io_stat->bytes_unmapped += num_blocks * blocklen;
7198 			io_stat->num_unmap_ops++;
7199 			io_stat->unmap_latency_ticks += tsc_diff;
7200 			if (io_stat->max_unmap_latency_ticks < tsc_diff) {
7201 				io_stat->max_unmap_latency_ticks = tsc_diff;
7202 			}
7203 			if (io_stat->min_unmap_latency_ticks > tsc_diff) {
7204 				io_stat->min_unmap_latency_ticks = tsc_diff;
7205 			}
7206 			break;
7207 		case SPDK_BDEV_IO_TYPE_ZCOPY:
7208 			/* Track the data in the start phase only */
7209 			if (bdev_io->u.bdev.zcopy.start) {
7210 				if (bdev_io->u.bdev.zcopy.populate) {
7211 					io_stat->bytes_read += num_blocks * blocklen;
7212 					io_stat->num_read_ops++;
7213 					io_stat->read_latency_ticks += tsc_diff;
7214 					if (io_stat->max_read_latency_ticks < tsc_diff) {
7215 						io_stat->max_read_latency_ticks = tsc_diff;
7216 					}
7217 					if (io_stat->min_read_latency_ticks > tsc_diff) {
7218 						io_stat->min_read_latency_ticks = tsc_diff;
7219 					}
7220 				} else {
7221 					io_stat->bytes_written += num_blocks * blocklen;
7222 					io_stat->num_write_ops++;
7223 					io_stat->write_latency_ticks += tsc_diff;
7224 					if (io_stat->max_write_latency_ticks < tsc_diff) {
7225 						io_stat->max_write_latency_ticks = tsc_diff;
7226 					}
7227 					if (io_stat->min_write_latency_ticks > tsc_diff) {
7228 						io_stat->min_write_latency_ticks = tsc_diff;
7229 					}
7230 				}
7231 			}
7232 			break;
7233 		case SPDK_BDEV_IO_TYPE_COPY:
7234 			io_stat->bytes_copied += num_blocks * blocklen;
7235 			io_stat->num_copy_ops++;
7236 			io_stat->copy_latency_ticks += tsc_diff;
7237 			if (io_stat->max_copy_latency_ticks < tsc_diff) {
7238 				io_stat->max_copy_latency_ticks = tsc_diff;
7239 			}
7240 			if (io_stat->min_copy_latency_ticks > tsc_diff) {
7241 				io_stat->min_copy_latency_ticks = tsc_diff;
7242 			}
7243 			break;
7244 		default:
7245 			break;
7246 		}
7247 	} else if (io_status <= SPDK_BDEV_IO_STATUS_FAILED && io_status >= SPDK_MIN_BDEV_IO_STATUS) {
7248 		io_stat = bdev_io->bdev->internal.stat;
7249 		assert(io_stat->io_error != NULL);
7250 
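		/* Error statuses are negative enum values (guaranteed by the range check above),
		 * so -io_status - 1 maps the error codes, starting with SPDK_BDEV_IO_STATUS_FAILED,
		 * onto consecutive error_status[] indices beginning at 0.
		 */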
7251 		spdk_spin_lock(&bdev_io->bdev->internal.spinlock);
7252 		io_stat->io_error->error_status[-io_status - 1]++;
7253 		spdk_spin_unlock(&bdev_io->bdev->internal.spinlock);
7254 	}
7255 
7256 #ifdef SPDK_CONFIG_VTUNE
7257 	uint64_t now_tsc = spdk_get_ticks();
7258 	if (now_tsc > (bdev_io->internal.ch->start_tsc + bdev_io->internal.ch->interval_tsc)) {
7259 		uint64_t data[5];
7260 		struct spdk_bdev_io_stat *prev_stat = bdev_io->internal.ch->prev_stat;
7261 
7262 		data[0] = io_stat->num_read_ops - prev_stat->num_read_ops;
7263 		data[1] = io_stat->bytes_read - prev_stat->bytes_read;
7264 		data[2] = io_stat->num_write_ops - prev_stat->num_write_ops;
7265 		data[3] = io_stat->bytes_written - prev_stat->bytes_written;
7266 		data[4] = bdev_io->bdev->fn_table->get_spin_time ?
7267 			  bdev_io->bdev->fn_table->get_spin_time(spdk_bdev_io_get_io_channel(bdev_io)) : 0;
7268 
7269 		__itt_metadata_add(g_bdev_mgr.domain, __itt_null, bdev_io->internal.ch->handle,
7270 				   __itt_metadata_u64, 5, data);
7271 
7272 		memcpy(prev_stat, io_stat, sizeof(struct spdk_bdev_io_stat));
7273 		bdev_io->internal.ch->start_tsc = now_tsc;
7274 	}
7275 #endif
7276 }
7277 
7278 static inline void
7279 _bdev_io_complete(void *ctx)
7280 {
7281 	struct spdk_bdev_io *bdev_io = ctx;
7282 
7283 	if (spdk_unlikely(bdev_io->internal.accel_sequence != NULL)) {
7284 		assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
7285 		spdk_accel_sequence_abort(bdev_io->internal.accel_sequence);
7286 	}
7287 
7288 	assert(bdev_io->internal.cb != NULL);
7289 	assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
7290 
7291 	bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
7292 			     bdev_io->internal.caller_ctx);
7293 }
7294 
7295 static inline void
7296 bdev_io_complete(void *ctx)
7297 {
7298 	struct spdk_bdev_io *bdev_io = ctx;
7299 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
7300 	uint64_t tsc, tsc_diff;
7301 
7302 	if (spdk_unlikely(bdev_io->internal.in_submit_request)) {
7303 		/*
7304 		 * Defer completion to avoid potential infinite recursion if the
7305 		 * user's completion callback issues a new I/O.
7306 		 */
7307 		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7308 				     bdev_io_complete, bdev_io);
7309 		return;
7310 	}
7311 
7312 	tsc = spdk_get_ticks();
7313 	tsc_diff = tsc - bdev_io->internal.submit_tsc;
7314 
7315 	bdev_ch_remove_from_io_submitted(bdev_io);
7316 	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, bdev_ch->trace_id, 0, (uintptr_t)bdev_io,
7317 			      bdev_io->internal.caller_ctx, bdev_ch->queue_depth);
7318 
7319 	if (bdev_ch->histogram) {
7320 		if (bdev_io->bdev->internal.histogram_io_type == 0 ||
7321 		    bdev_io->bdev->internal.histogram_io_type == bdev_io->type) {
7322 			/*
7323 			 * A histogram_io_type of 0 means tally all I/O types; otherwise, tally only that type.
7324 			 */
7325 			spdk_histogram_data_tally(bdev_ch->histogram, tsc_diff);
7326 		}
7327 	}
7328 
7329 	bdev_io_update_io_stat(bdev_io, tsc_diff);
7330 	_bdev_io_complete(bdev_io);
7331 }
7332 
7333 /* The difference between this function and bdev_io_complete() is that this should be called to
7334  * complete IOs that haven't been submitted via bdev_io_submit(), as they weren't added onto the
7335  * io_submitted list and don't have submit_tsc updated.
7336  */
7337 static inline void
7338 bdev_io_complete_unsubmitted(struct spdk_bdev_io *bdev_io)
7339 {
7340 	/* Since the I/O hasn't been submitted, it is bound to have failed. */
7341 	assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
7342 
7343 	/* At this point we don't know whether the I/O is being completed from the submission
7344 	 * context or not, but since this is an error path, we can always do an spdk_thread_send_msg(). */
7345 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7346 			     _bdev_io_complete, bdev_io);
7347 }
7348 
7349 static void bdev_destroy_cb(void *io_device);
7350 
7351 static void
7352 bdev_reset_complete(struct spdk_bdev *bdev, void *_ctx, int status)
7353 {
7354 	struct spdk_bdev_io *bdev_io = _ctx;
7355 
7356 	if (bdev_io->u.reset.ch_ref != NULL) {
7357 		spdk_put_io_channel(bdev_io->u.reset.ch_ref);
7358 		bdev_io->u.reset.ch_ref = NULL;
7359 	}
7360 
7361 	bdev_io_complete(bdev_io);
7362 
7363 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING &&
7364 	    TAILQ_EMPTY(&bdev->internal.open_descs)) {
7365 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
7366 	}
7367 }
7368 
7369 static void
7370 bdev_unfreeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
7371 		      struct spdk_io_channel *_ch, void *_ctx)
7372 {
7373 	struct spdk_bdev_io *bdev_io = _ctx;
7374 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
7375 	struct spdk_bdev_io *queued_reset;
7376 
7377 	ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
7378 	while (!TAILQ_EMPTY(&ch->queued_resets)) {
7379 		queued_reset = TAILQ_FIRST(&ch->queued_resets);
7380 		TAILQ_REMOVE(&ch->queued_resets, queued_reset, internal.link);
7381 		spdk_bdev_io_complete(queued_reset, bdev_io->internal.status);
7382 	}
7383 
7384 	spdk_bdev_for_each_channel_continue(i, 0);
7385 }
7386 
7387 static void
7388 bdev_io_complete_sequence_cb(void *ctx, int status)
7389 {
7390 	struct spdk_bdev_io *bdev_io = ctx;
7391 
7392 	/* u.bdev.accel_sequence should have already been cleared at this point */
7393 	assert(bdev_io->u.bdev.accel_sequence == NULL);
7394 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7395 	bdev_io->internal.accel_sequence = NULL;
7396 
7397 	if (spdk_unlikely(status != 0)) {
7398 		SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
7399 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7400 	}
7401 
7402 	bdev_io_complete(bdev_io);
7403 }
7404 
7405 void
7406 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
7407 {
7408 	struct spdk_bdev *bdev = bdev_io->bdev;
7409 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
7410 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
7411 
7412 	if (spdk_unlikely(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING)) {
7413 		SPDK_ERRLOG("Unexpected completion on IO from %s module, status was %s\n",
7414 			    spdk_bdev_get_module_name(bdev),
7415 			    bdev_io_status_get_string(bdev_io->internal.status));
7416 		assert(false);
7417 	}
7418 	bdev_io->internal.status = status;
7419 
7420 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) {
7421 		bool unlock_channels = false;
7422 
7423 		if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
7424 			SPDK_ERRLOG("NOMEM returned for reset\n");
7425 		}
7426 		spdk_spin_lock(&bdev->internal.spinlock);
7427 		if (bdev_io == bdev->internal.reset_in_progress) {
7428 			bdev->internal.reset_in_progress = NULL;
7429 			unlock_channels = true;
7430 		}
7431 		spdk_spin_unlock(&bdev->internal.spinlock);
7432 
7433 		if (unlock_channels) {
7434 			spdk_bdev_for_each_channel(bdev, bdev_unfreeze_channel, bdev_io,
7435 						   bdev_reset_complete);
7436 			return;
7437 		}
7438 	} else {
7439 		bdev_io_decrement_outstanding(bdev_ch, shared_resource);
7440 		if (spdk_likely(status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7441 			if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
7442 				bdev_io_exec_sequence(bdev_io, bdev_io_complete_sequence_cb);
7443 				return;
7444 			} else if (spdk_unlikely(bdev_io->internal.orig_iovcnt != 0 &&
7445 						 !bdev_io_use_accel_sequence(bdev_io))) {
7446 				_bdev_io_push_bounce_data_buffer(bdev_io,
7447 								 _bdev_io_complete_push_bounce_done);
7448 				/* bdev IO will be completed in the callback */
7449 				return;
7450 			}
7451 		}
7452 
7453 		if (spdk_unlikely(_bdev_io_handle_no_mem(bdev_io, BDEV_IO_RETRY_STATE_SUBMIT))) {
7454 			return;
7455 		}
7456 	}
7457 
7458 	bdev_io_complete(bdev_io);
7459 }
7460 
7461 void
7462 spdk_bdev_io_complete_scsi_status(struct spdk_bdev_io *bdev_io, enum spdk_scsi_status sc,
7463 				  enum spdk_scsi_sense sk, uint8_t asc, uint8_t ascq)
7464 {
7465 	enum spdk_bdev_io_status status;
7466 
7467 	if (sc == SPDK_SCSI_STATUS_GOOD) {
7468 		status = SPDK_BDEV_IO_STATUS_SUCCESS;
7469 	} else {
7470 		status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
7471 		bdev_io->internal.error.scsi.sc = sc;
7472 		bdev_io->internal.error.scsi.sk = sk;
7473 		bdev_io->internal.error.scsi.asc = asc;
7474 		bdev_io->internal.error.scsi.ascq = ascq;
7475 	}
7476 
7477 	spdk_bdev_io_complete(bdev_io, status);
7478 }
7479 
7480 void
7481 spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
7482 			     int *sc, int *sk, int *asc, int *ascq)
7483 {
7484 	assert(sc != NULL);
7485 	assert(sk != NULL);
7486 	assert(asc != NULL);
7487 	assert(ascq != NULL);
7488 
7489 	switch (bdev_io->internal.status) {
7490 	case SPDK_BDEV_IO_STATUS_SUCCESS:
7491 		*sc = SPDK_SCSI_STATUS_GOOD;
7492 		*sk = SPDK_SCSI_SENSE_NO_SENSE;
7493 		*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
7494 		*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
7495 		break;
7496 	case SPDK_BDEV_IO_STATUS_NVME_ERROR:
7497 		spdk_scsi_nvme_translate(bdev_io, sc, sk, asc, ascq);
7498 		break;
7499 	case SPDK_BDEV_IO_STATUS_MISCOMPARE:
7500 		*sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
7501 		*sk = SPDK_SCSI_SENSE_MISCOMPARE;
7502 		*asc = SPDK_SCSI_ASC_MISCOMPARE_DURING_VERIFY_OPERATION;
7503 		*ascq = bdev_io->internal.error.scsi.ascq;
7504 		break;
7505 	case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
7506 		*sc = bdev_io->internal.error.scsi.sc;
7507 		*sk = bdev_io->internal.error.scsi.sk;
7508 		*asc = bdev_io->internal.error.scsi.asc;
7509 		*ascq = bdev_io->internal.error.scsi.ascq;
7510 		break;
7511 	default:
7512 		*sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
7513 		*sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
7514 		*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
7515 		*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
7516 		break;
7517 	}
7518 }
7519 
7520 void
7521 spdk_bdev_io_complete_aio_status(struct spdk_bdev_io *bdev_io, int aio_result)
7522 {
7523 	enum spdk_bdev_io_status status;
7524 
7525 	if (aio_result == 0) {
7526 		status = SPDK_BDEV_IO_STATUS_SUCCESS;
7527 	} else {
7528 		status = SPDK_BDEV_IO_STATUS_AIO_ERROR;
7529 	}
7530 
7531 	bdev_io->internal.error.aio_result = aio_result;
7532 
7533 	spdk_bdev_io_complete(bdev_io, status);
7534 }
7535 
7536 void
7537 spdk_bdev_io_get_aio_status(const struct spdk_bdev_io *bdev_io, int *aio_result)
7538 {
7539 	assert(aio_result != NULL);
7540 
7541 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_AIO_ERROR) {
7542 		*aio_result = bdev_io->internal.error.aio_result;
7543 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7544 		*aio_result = 0;
7545 	} else {
7546 		*aio_result = -EIO;
7547 	}
7548 }
7549 
7550 void
7551 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
7552 {
7553 	enum spdk_bdev_io_status status;
7554 
7555 	if (spdk_likely(sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS)) {
7556 		status = SPDK_BDEV_IO_STATUS_SUCCESS;
7557 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
7558 		status = SPDK_BDEV_IO_STATUS_ABORTED;
7559 	} else {
7560 		status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
7561 	}
7562 
7563 	bdev_io->internal.error.nvme.cdw0 = cdw0;
7564 	bdev_io->internal.error.nvme.sct = sct;
7565 	bdev_io->internal.error.nvme.sc = sc;
7566 
7567 	spdk_bdev_io_complete(bdev_io, status);
7568 }
7569 
7570 void
7571 spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc)
7572 {
7573 	assert(sct != NULL);
7574 	assert(sc != NULL);
7575 	assert(cdw0 != NULL);
7576 
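	/* For an ABORT I/O, report the result the way the NVMe Abort command does:
	 * cdw0 bit 0 cleared means the target command was aborted, set means it was not.
	 */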
7577 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
7578 		*sct = SPDK_NVME_SCT_GENERIC;
7579 		*sc = SPDK_NVME_SC_SUCCESS;
7580 		if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7581 			*cdw0 = 0;
7582 		} else {
7583 			*cdw0 = 1U;
7584 		}
7585 		return;
7586 	}
7587 
7588 	if (spdk_likely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7589 		*sct = SPDK_NVME_SCT_GENERIC;
7590 		*sc = SPDK_NVME_SC_SUCCESS;
7591 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
7592 		*sct = bdev_io->internal.error.nvme.sct;
7593 		*sc = bdev_io->internal.error.nvme.sc;
7594 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
7595 		*sct = SPDK_NVME_SCT_GENERIC;
7596 		*sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7597 	} else {
7598 		*sct = SPDK_NVME_SCT_GENERIC;
7599 		*sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7600 	}
7601 
7602 	*cdw0 = bdev_io->internal.error.nvme.cdw0;
7603 }
7604 
7605 void
7606 spdk_bdev_io_get_nvme_fused_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
7607 				   int *first_sct, int *first_sc, int *second_sct, int *second_sc)
7608 {
7609 	assert(first_sct != NULL);
7610 	assert(first_sc != NULL);
7611 	assert(second_sct != NULL);
7612 	assert(second_sc != NULL);
7613 	assert(cdw0 != NULL);
7614 
7615 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
7616 		if (bdev_io->internal.error.nvme.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
7617 		    bdev_io->internal.error.nvme.sc == SPDK_NVME_SC_COMPARE_FAILURE) {
7618 			*first_sct = bdev_io->internal.error.nvme.sct;
7619 			*first_sc = bdev_io->internal.error.nvme.sc;
7620 			*second_sct = SPDK_NVME_SCT_GENERIC;
7621 			*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7622 		} else {
7623 			*first_sct = SPDK_NVME_SCT_GENERIC;
7624 			*first_sc = SPDK_NVME_SC_SUCCESS;
7625 			*second_sct = bdev_io->internal.error.nvme.sct;
7626 			*second_sc = bdev_io->internal.error.nvme.sc;
7627 		}
7628 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
7629 		*first_sct = SPDK_NVME_SCT_GENERIC;
7630 		*first_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7631 		*second_sct = SPDK_NVME_SCT_GENERIC;
7632 		*second_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7633 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7634 		*first_sct = SPDK_NVME_SCT_GENERIC;
7635 		*first_sc = SPDK_NVME_SC_SUCCESS;
7636 		*second_sct = SPDK_NVME_SCT_GENERIC;
7637 		*second_sc = SPDK_NVME_SC_SUCCESS;
7638 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED) {
7639 		*first_sct = SPDK_NVME_SCT_GENERIC;
7640 		*first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7641 		*second_sct = SPDK_NVME_SCT_GENERIC;
7642 		*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7643 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_MISCOMPARE) {
7644 		*first_sct = SPDK_NVME_SCT_MEDIA_ERROR;
7645 		*first_sc = SPDK_NVME_SC_COMPARE_FAILURE;
7646 		*second_sct = SPDK_NVME_SCT_GENERIC;
7647 		*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7648 	} else {
7649 		*first_sct = SPDK_NVME_SCT_GENERIC;
7650 		*first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7651 		*second_sct = SPDK_NVME_SCT_GENERIC;
7652 		*second_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7653 	}
7654 
7655 	*cdw0 = bdev_io->internal.error.nvme.cdw0;
7656 }
7657 
7658 void
7659 spdk_bdev_io_complete_base_io_status(struct spdk_bdev_io *bdev_io,
7660 				     const struct spdk_bdev_io *base_io)
7661 {
7662 	switch (base_io->internal.status) {
7663 	case SPDK_BDEV_IO_STATUS_NVME_ERROR:
7664 		spdk_bdev_io_complete_nvme_status(bdev_io,
7665 						  base_io->internal.error.nvme.cdw0,
7666 						  base_io->internal.error.nvme.sct,
7667 						  base_io->internal.error.nvme.sc);
7668 		break;
7669 	case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
7670 		spdk_bdev_io_complete_scsi_status(bdev_io,
7671 						  base_io->internal.error.scsi.sc,
7672 						  base_io->internal.error.scsi.sk,
7673 						  base_io->internal.error.scsi.asc,
7674 						  base_io->internal.error.scsi.ascq);
7675 		break;
7676 	case SPDK_BDEV_IO_STATUS_AIO_ERROR:
7677 		spdk_bdev_io_complete_aio_status(bdev_io, base_io->internal.error.aio_result);
7678 		break;
7679 	default:
7680 		spdk_bdev_io_complete(bdev_io, base_io->internal.status);
7681 		break;
7682 	}
7683 }
7684 
7685 struct spdk_thread *
7686 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
7687 {
7688 	return spdk_io_channel_get_thread(bdev_io->internal.ch->channel);
7689 }
7690 
7691 struct spdk_io_channel *
7692 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
7693 {
7694 	return bdev_io->internal.ch->channel;
7695 }
7696 
7697 static int
7698 bdev_register(struct spdk_bdev *bdev)
7699 {
7700 	char *bdev_name;
7701 	char uuid[SPDK_UUID_STRING_LEN];
7702 	struct spdk_iobuf_opts iobuf_opts;
7703 	int ret;
7704 
7705 	assert(bdev->module != NULL);
7706 
7707 	if (!bdev->name) {
7708 		SPDK_ERRLOG("Bdev name is NULL\n");
7709 		return -EINVAL;
7710 	}
7711 
7712 	if (!strlen(bdev->name)) {
7713 		SPDK_ERRLOG("Bdev name must not be an empty string\n");
7714 		return -EINVAL;
7715 	}
7716 
7717 	/* Users often register their own I/O devices using the bdev name. In
7718 	 * order to avoid conflicts, prepend bdev_. */
7719 	bdev_name = spdk_sprintf_alloc("bdev_%s", bdev->name);
7720 	if (!bdev_name) {
7721 		SPDK_ERRLOG("Unable to allocate memory for internal bdev name.\n");
7722 		return -ENOMEM;
7723 	}
7724 
7725 	bdev->internal.stat = bdev_alloc_io_stat(true);
7726 	if (!bdev->internal.stat) {
7727 		SPDK_ERRLOG("Unable to allocate I/O statistics structure.\n");
7728 		free(bdev_name);
7729 		return -ENOMEM;
7730 	}
7731 
7732 	bdev->internal.status = SPDK_BDEV_STATUS_READY;
7733 	bdev->internal.measured_queue_depth = UINT64_MAX;
7734 	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
7735 	memset(&bdev->internal.claim, 0, sizeof(bdev->internal.claim));
7736 	bdev->internal.qd_poller = NULL;
7737 	bdev->internal.qos = NULL;
7738 
7739 	TAILQ_INIT(&bdev->internal.open_descs);
7740 	TAILQ_INIT(&bdev->internal.locked_ranges);
7741 	TAILQ_INIT(&bdev->internal.pending_locked_ranges);
7742 	TAILQ_INIT(&bdev->aliases);
7743 
7744 	/* The UUID may be specified by the user or defined by the bdev itself.
7745 	 * Otherwise, it is generated here, so this field is never empty. */
7746 	if (spdk_uuid_is_null(&bdev->uuid)) {
7747 		spdk_uuid_generate(&bdev->uuid);
7748 	}
7749 
7750 	/* Add the UUID alias only if it's different from the name */
7751 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
7752 	if (strcmp(bdev->name, uuid) != 0) {
7753 		ret = spdk_bdev_alias_add(bdev, uuid);
7754 		if (ret != 0) {
7755 			SPDK_ERRLOG("Unable to add uuid:%s alias for bdev %s\n", uuid, bdev->name);
7756 			bdev_free_io_stat(bdev->internal.stat);
7757 			free(bdev_name);
7758 			return ret;
7759 		}
7760 	}
7761 
7762 	spdk_iobuf_get_opts(&iobuf_opts, sizeof(iobuf_opts));
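	/* Assumed rationale: when the bdev requires aligned buffers, a bounce buffer from the
	 * iobuf large pool may be needed, so cap max_rw_size to what one large buffer can hold.
	 */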
7763 	if (spdk_bdev_get_buf_align(bdev) > 1) {
7764 		bdev->max_rw_size = spdk_min(bdev->max_rw_size ? bdev->max_rw_size : UINT32_MAX,
7765 					     iobuf_opts.large_bufsize / bdev->blocklen);
7766 	}
7767 
7768 	/* If the user didn't specify a write unit size, set it to one. */
7769 	if (bdev->write_unit_size == 0) {
7770 		bdev->write_unit_size = 1;
7771 	}
7772 
7773 	/* Set the ACWU value to the write unit size if the bdev module did not set it (i.e. it does not support it natively) */
7774 	if (bdev->acwu == 0) {
7775 		bdev->acwu = bdev->write_unit_size;
7776 	}
7777 
7778 	if (bdev->phys_blocklen == 0) {
7779 		bdev->phys_blocklen = spdk_bdev_get_data_block_size(bdev);
7780 	}
7781 
7782 	if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
7783 		bdev->max_copy = bdev_get_max_write(bdev, iobuf_opts.large_bufsize);
7784 	}
7785 
7786 	if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
7787 		bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
7788 	}
7789 
7790 	bdev->internal.reset_in_progress = NULL;
7791 	bdev->internal.qd_poll_in_progress = false;
7792 	bdev->internal.period = 0;
7793 	bdev->internal.new_period = 0;
7794 	bdev->internal.trace_id = spdk_trace_register_owner(OWNER_TYPE_BDEV, bdev_name);
7795 
7796 	/*
7797 	 * Initialize the spinlock before registering the I/O device because the
7798 	 * spinlock is used in bdev_channel_create().
7799 	 */
7800 	spdk_spin_init(&bdev->internal.spinlock);
7801 
7802 	spdk_io_device_register(__bdev_to_io_dev(bdev),
7803 				bdev_channel_create, bdev_channel_destroy,
7804 				sizeof(struct spdk_bdev_channel),
7805 				bdev_name);
7806 
7807 	/*
7808 	 * Register the bdev name only after the bdev object is ready.
7809 	 * After bdev_name_add() returns, it is possible for other threads to start using
7810 	 * the bdev, e.g. to create I/O channels.
7811 	 */
7812 	ret = bdev_name_add(&bdev->internal.bdev_name, bdev, bdev->name);
7813 	if (ret != 0) {
7814 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), NULL);
7815 		bdev_free_io_stat(bdev->internal.stat);
7816 		spdk_spin_destroy(&bdev->internal.spinlock);
7817 		free(bdev_name);
7818 		return ret;
7819 	}
7820 
7821 	free(bdev_name);
7822 
7823 	SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
7824 	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
7825 
7826 	return 0;
7827 }
7828 
7829 static void
7830 bdev_destroy_cb(void *io_device)
7831 {
7832 	int			rc;
7833 	struct spdk_bdev	*bdev;
7834 	spdk_bdev_unregister_cb	cb_fn;
7835 	void			*cb_arg;
7836 
7837 	bdev = __bdev_from_io_dev(io_device);
7838 
7839 	if (bdev->internal.unregister_td != spdk_get_thread()) {
7840 		spdk_thread_send_msg(bdev->internal.unregister_td, bdev_destroy_cb, io_device);
7841 		return;
7842 	}
7843 
7844 	cb_fn = bdev->internal.unregister_cb;
7845 	cb_arg = bdev->internal.unregister_ctx;
7846 
7847 	spdk_spin_destroy(&bdev->internal.spinlock);
7848 	free(bdev->internal.qos);
7849 	bdev_free_io_stat(bdev->internal.stat);
7850 	spdk_trace_unregister_owner(bdev->internal.trace_id);
7851 
7852 	rc = bdev->fn_table->destruct(bdev->ctxt);
7853 	if (rc < 0) {
7854 		SPDK_ERRLOG("destruct failed\n");
7855 	}
7856 	if (rc <= 0 && cb_fn != NULL) {
7857 		cb_fn(cb_arg, rc);
7858 	}
7859 }
7860 
7861 void
7862 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
7863 {
7864 	if (bdev->internal.unregister_cb != NULL) {
7865 		bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
7866 	}
7867 }
7868 
7869 static void
7870 _remove_notify(void *arg)
7871 {
7872 	struct spdk_bdev_desc *desc = arg;
7873 
7874 	_event_notify(desc, SPDK_BDEV_EVENT_REMOVE);
7875 }
7876 
7877 /* returns: 0 - bdev removed and ready to be destructed.
7878  *          -EBUSY - bdev can't be destructed yet.  */
7879 static int
7880 bdev_unregister_unsafe(struct spdk_bdev *bdev)
7881 {
7882 	struct spdk_bdev_desc	*desc, *tmp;
7883 	int			rc = 0;
7884 	char			uuid[SPDK_UUID_STRING_LEN];
7885 
7886 	assert(spdk_spin_held(&g_bdev_mgr.spinlock));
7887 	assert(spdk_spin_held(&bdev->internal.spinlock));
7888 
7889 	/* Notify each descriptor about hotremoval */
7890 	TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
7891 		rc = -EBUSY;
7892 		/*
7893 		 * Defer invocation of the event_cb to a separate message that will
7894 		 *  run later on its thread.  This ensures this context unwinds and
7895 		 *  we don't recursively unregister this bdev again if the event_cb
7896 		 *  immediately closes its descriptor.
7897 		 */
7898 		event_notify(desc, _remove_notify);
7899 	}
7900 
7901 	/* If there are no descriptors, proceed with removing the bdev */
7902 	if (rc == 0) {
7903 		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
7904 		SPDK_DEBUGLOG(bdev, "Removing bdev %s from list done\n", bdev->name);
7905 
7906 		/* Delete the name and the UUID alias */
7907 		spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
7908 		bdev_name_del_unsafe(&bdev->internal.bdev_name);
7909 		bdev_alias_del(bdev, uuid, bdev_name_del_unsafe);
7910 
7911 		spdk_notify_send("bdev_unregister", spdk_bdev_get_name(bdev));
7912 
7913 		if (bdev->internal.reset_in_progress != NULL) {
7914 			/* If reset is in progress, let the completion callback for reset
7915 			 * unregister the bdev.
7916 			 */
7917 			rc = -EBUSY;
7918 		}
7919 	}
7920 
7921 	return rc;
7922 }
7923 
7924 static void
7925 bdev_unregister_abort_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
7926 			      struct spdk_io_channel *io_ch, void *_ctx)
7927 {
7928 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
7929 
7930 	bdev_channel_abort_queued_ios(bdev_ch);
7931 	spdk_bdev_for_each_channel_continue(i, 0);
7932 }
7933 
7934 static void
7935 bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
7936 {
7937 	int rc;
7938 
7939 	spdk_spin_lock(&g_bdev_mgr.spinlock);
7940 	spdk_spin_lock(&bdev->internal.spinlock);
7941 	/*
7942 	 * Set the status to REMOVING only after aborting the channels has completed.
7943 	 * Otherwise, the last spdk_bdev_close() may call spdk_io_device_unregister()
7944 	 * while spdk_bdev_for_each_channel() is still executing, and
7945 	 * spdk_io_device_unregister() may fail.
7946 	 */
7947 	bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
7948 	rc = bdev_unregister_unsafe(bdev);
7949 	spdk_spin_unlock(&bdev->internal.spinlock);
7950 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
7951 
7952 	if (rc == 0) {
7953 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
7954 	}
7955 }
7956 
7957 void
7958 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
7959 {
7960 	struct spdk_thread	*thread;
7961 
7962 	SPDK_DEBUGLOG(bdev, "Removing bdev %s from list\n", bdev->name);
7963 
7964 	thread = spdk_get_thread();
7965 	if (!thread) {
7966 		/* The user called this from a non-SPDK thread. */
7967 		if (cb_fn != NULL) {
7968 			cb_fn(cb_arg, -ENOTSUP);
7969 		}
7970 		return;
7971 	}
7972 
7973 	spdk_spin_lock(&g_bdev_mgr.spinlock);
7974 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
7975 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
7976 		spdk_spin_unlock(&g_bdev_mgr.spinlock);
7977 		if (cb_fn) {
7978 			cb_fn(cb_arg, -EBUSY);
7979 		}
7980 		return;
7981 	}
7982 
7983 	spdk_spin_lock(&bdev->internal.spinlock);
7984 	bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING;
7985 	bdev->internal.unregister_cb = cb_fn;
7986 	bdev->internal.unregister_ctx = cb_arg;
7987 	bdev->internal.unregister_td = thread;
7988 	spdk_spin_unlock(&bdev->internal.spinlock);
7989 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
7990 
7991 	spdk_bdev_set_qd_sampling_period(bdev, 0);
7992 
7993 	spdk_bdev_for_each_channel(bdev, bdev_unregister_abort_channel, bdev,
7994 				   bdev_unregister);
7995 }
7996 
7997 int
7998 spdk_bdev_unregister_by_name(const char *bdev_name, struct spdk_bdev_module *module,
7999 			     spdk_bdev_unregister_cb cb_fn, void *cb_arg)
8000 {
8001 	struct spdk_bdev_desc *desc;
8002 	struct spdk_bdev *bdev;
8003 	int rc;
8004 
8005 	rc = spdk_bdev_open_ext(bdev_name, false, _tmp_bdev_event_cb, NULL, &desc);
8006 	if (rc != 0) {
8007 		SPDK_ERRLOG("Failed to open bdev with name: %s\n", bdev_name);
8008 		return rc;
8009 	}
8010 
8011 	bdev = spdk_bdev_desc_get_bdev(desc);
8012 
8013 	if (bdev->module != module) {
8014 		spdk_bdev_close(desc);
8015 		SPDK_ERRLOG("Bdev %s was not registered by the specified module.\n",
8016 			    bdev_name);
8017 		return -ENODEV;
8018 	}
8019 
8020 	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
8021 
8022 	spdk_bdev_close(desc);
8023 
8024 	return 0;
8025 }
8026 
8027 static int
8028 bdev_start_qos(struct spdk_bdev *bdev)
8029 {
8030 	struct set_qos_limit_ctx *ctx;
8031 
8032 	/* Enable QoS */
8033 	if (bdev->internal.qos && bdev->internal.qos->thread == NULL) {
8034 		ctx = calloc(1, sizeof(*ctx));
8035 		if (ctx == NULL) {
8036 			SPDK_ERRLOG("Failed to allocate memory for QoS context\n");
8037 			return -ENOMEM;
8038 		}
8039 		ctx->bdev = bdev;
8040 		spdk_bdev_for_each_channel(bdev, bdev_enable_qos_msg, ctx, bdev_enable_qos_done);
8041 	}
8042 
8043 	return 0;
8044 }
8045 
8046 static void
8047 log_already_claimed(enum spdk_log_level level, const int line, const char *func, const char *detail,
8048 		    struct spdk_bdev *bdev)
8049 {
8050 	enum spdk_bdev_claim_type type;
8051 	const char *typename, *modname;
8052 	extern struct spdk_log_flag SPDK_LOG_bdev;
8053 
8054 	assert(spdk_spin_held(&bdev->internal.spinlock));
8055 
8056 	if (level >= SPDK_LOG_INFO && !SPDK_LOG_bdev.enabled) {
8057 		return;
8058 	}
8059 
8060 	type = bdev->internal.claim_type;
8061 	typename = spdk_bdev_claim_get_name(type);
8062 
8063 	if (type == SPDK_BDEV_CLAIM_EXCL_WRITE) {
8064 		modname = bdev->internal.claim.v1.module->name;
8065 		spdk_log(level, __FILE__, line, func, "bdev %s %s: type %s by module %s\n",
8066 			 bdev->name, detail, typename, modname);
8067 		return;
8068 	}
8069 
8070 	if (claim_type_is_v2(type)) {
8071 		struct spdk_bdev_module_claim *claim;
8072 
8073 		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) {
8074 			modname = claim->module->name;
8075 			spdk_log(level, __FILE__, line, func, "bdev %s %s: type %s by module %s\n",
8076 				 bdev->name, detail, typename, modname);
8077 		}
8078 		return;
8079 	}
8080 
8081 	assert(false);
8082 }
8083 
8084 static int
8085 bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
8086 {
8087 	struct spdk_thread *thread;
8088 	int rc = 0;
8089 
8090 	thread = spdk_get_thread();
8091 	if (!thread) {
8092 		SPDK_ERRLOG("Cannot open bdev from non-SPDK thread.\n");
8093 		return -ENOTSUP;
8094 	}
8095 
8096 	SPDK_DEBUGLOG(bdev, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
8097 		      spdk_get_thread());
8098 
8099 	desc->bdev = bdev;
8100 	desc->thread = thread;
8101 	desc->write = write;
8102 
8103 	spdk_spin_lock(&bdev->internal.spinlock);
8104 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
8105 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
8106 		spdk_spin_unlock(&bdev->internal.spinlock);
8107 		return -ENODEV;
8108 	}
8109 
8110 	if (write && bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8111 		LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8112 		spdk_spin_unlock(&bdev->internal.spinlock);
8113 		return -EPERM;
8114 	}
8115 
8116 	rc = bdev_start_qos(bdev);
8117 	if (rc != 0) {
8118 		SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
8119 		spdk_spin_unlock(&bdev->internal.spinlock);
8120 		return rc;
8121 	}
8122 
8123 	TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
8124 
8125 	spdk_spin_unlock(&bdev->internal.spinlock);
8126 
8127 	return 0;
8128 }
8129 
8130 static int
8131 bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *event_ctx,
8132 		struct spdk_bdev_desc **_desc)
8133 {
8134 	struct spdk_bdev_desc *desc;
8135 	unsigned int i;
8136 
8137 	desc = calloc(1, sizeof(*desc));
8138 	if (desc == NULL) {
8139 		SPDK_ERRLOG("Failed to allocate memory for bdev descriptor\n");
8140 		return -ENOMEM;
8141 	}
8142 
8143 	TAILQ_INIT(&desc->pending_media_events);
8144 	TAILQ_INIT(&desc->free_media_events);
8145 
8146 	desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
8147 	desc->callback.event_fn = event_cb;
8148 	desc->callback.ctx = event_ctx;
8149 	spdk_spin_init(&desc->spinlock);
8150 
8151 	if (bdev->media_events) {
8152 		desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
8153 						   sizeof(*desc->media_events_buffer));
8154 		if (desc->media_events_buffer == NULL) {
8155 			SPDK_ERRLOG("Failed to initialize media event pool\n");
8156 			bdev_desc_free(desc);
8157 			return -ENOMEM;
8158 		}
8159 
8160 		for (i = 0; i < MEDIA_EVENT_POOL_SIZE; ++i) {
8161 			TAILQ_INSERT_TAIL(&desc->free_media_events,
8162 					  &desc->media_events_buffer[i], tailq);
8163 		}
8164 	}
8165 
8166 	if (bdev->fn_table->accel_sequence_supported != NULL) {
8167 		for (i = 0; i < SPDK_BDEV_NUM_IO_TYPES; ++i) {
8168 			desc->accel_sequence_supported[i] =
8169 				bdev->fn_table->accel_sequence_supported(bdev->ctxt,
8170 						(enum spdk_bdev_io_type)i);
8171 		}
8172 	}
8173 
8174 	*_desc = desc;
8175 
8176 	return 0;
8177 }
8178 
8179 static int
8180 bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8181 	      void *event_ctx, struct spdk_bdev_desc **_desc)
8182 {
8183 	struct spdk_bdev_desc *desc;
8184 	struct spdk_bdev *bdev;
8185 	int rc;
8186 
8187 	bdev = bdev_get_by_name(bdev_name);
8188 
8189 	if (bdev == NULL) {
8190 		SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
8191 		return -ENODEV;
8192 	}
8193 
8194 	rc = bdev_desc_alloc(bdev, event_cb, event_ctx, &desc);
8195 	if (rc != 0) {
8196 		return rc;
8197 	}
8198 
8199 	rc = bdev_open(bdev, write, desc);
8200 	if (rc != 0) {
8201 		bdev_desc_free(desc);
8202 		desc = NULL;
8203 	}
8204 
8205 	*_desc = desc;
8206 
8207 	return rc;
8208 }
8209 
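/*
 * Illustrative sketch (assumed application-side code): open a bdev read/write and react
 * to hot-remove events. The names app_event_cb, app_ctx, and close_descriptor_from are
 * hypothetical; close_descriptor_from stands in for code that closes the descriptor kept
 * in event_ctx on the thread that opened it.
 *
 *	static void
 *	app_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
 *	{
 *		if (type == SPDK_BDEV_EVENT_REMOVE) {
 *			close_descriptor_from(event_ctx);
 *		}
 *	}
 *
 *	rc = spdk_bdev_open_ext("Malloc0", true, app_event_cb, app_ctx, &desc);
 *	ch = spdk_bdev_get_io_channel(desc);
 */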
8210 int
8211 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8212 		   void *event_ctx, struct spdk_bdev_desc **_desc)
8213 {
8214 	int rc;
8215 
8216 	if (event_cb == NULL) {
8217 		SPDK_ERRLOG("Missing event callback function\n");
8218 		return -EINVAL;
8219 	}
8220 
8221 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8222 	rc = bdev_open_ext(bdev_name, write, event_cb, event_ctx, _desc);
8223 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
8224 
8225 	return rc;
8226 }
8227 
8228 struct spdk_bdev_open_async_ctx {
8229 	char					*bdev_name;
8230 	spdk_bdev_event_cb_t			event_cb;
8231 	void					*event_ctx;
8232 	bool					write;
8233 	int					rc;
8234 	spdk_bdev_open_async_cb_t		cb_fn;
8235 	void					*cb_arg;
8236 	struct spdk_bdev_desc			*desc;
8237 	struct spdk_bdev_open_async_opts	opts;
8238 	uint64_t				start_ticks;
8239 	struct spdk_thread			*orig_thread;
8240 	struct spdk_poller			*poller;
8241 	TAILQ_ENTRY(spdk_bdev_open_async_ctx)	tailq;
8242 };
8243 
8244 static void
8245 bdev_open_async_done(void *arg)
8246 {
8247 	struct spdk_bdev_open_async_ctx *ctx = arg;
8248 
8249 	ctx->cb_fn(ctx->desc, ctx->rc, ctx->cb_arg);
8250 
8251 	free(ctx->bdev_name);
8252 	free(ctx);
8253 }
8254 
8255 static void
8256 bdev_open_async_cancel(void *arg)
8257 {
8258 	struct spdk_bdev_open_async_ctx *ctx = arg;
8259 
8260 	assert(ctx->rc == -ESHUTDOWN);
8261 
8262 	spdk_poller_unregister(&ctx->poller);
8263 
8264 	bdev_open_async_done(ctx);
8265 }
8266 
8267 /* This is called when the bdev library finishes at shutdown. */
8268 static void
8269 bdev_open_async_fini(void)
8270 {
8271 	struct spdk_bdev_open_async_ctx *ctx, *tmp_ctx;
8272 
8273 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8274 	TAILQ_FOREACH_SAFE(ctx, &g_bdev_mgr.async_bdev_opens, tailq, tmp_ctx) {
8275 		TAILQ_REMOVE(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8276 		/*
8277 		 * We have to move to ctx->orig_thread to unregister ctx->poller.
8278 		 * However, there is a chance that ctx->poller executes before the
8279 		 * message does, which could result in bdev_open_async_done() being
8280 		 * called twice. To avoid such a race condition, set ctx->rc to
8281 		 * -ESHUTDOWN.
8282 		 */
8283 		ctx->rc = -ESHUTDOWN;
8284 		spdk_thread_send_msg(ctx->orig_thread, bdev_open_async_cancel, ctx);
8285 	}
8286 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
8287 }
8288 
8289 static int bdev_open_async(void *arg);
8290 
8291 static void
8292 _bdev_open_async(struct spdk_bdev_open_async_ctx *ctx)
8293 {
8294 	uint64_t timeout_ticks;
8295 
8296 	if (ctx->rc == -ESHUTDOWN) {
8297 		/* This context is being canceled. Do nothing. */
8298 		return;
8299 	}
8300 
8301 	ctx->rc = bdev_open_ext(ctx->bdev_name, ctx->write, ctx->event_cb, ctx->event_ctx,
8302 				&ctx->desc);
8303 	if (ctx->rc == 0 || ctx->opts.timeout_ms == 0) {
8304 		goto exit;
8305 	}
8306 
8307 	timeout_ticks = ctx->start_ticks + ctx->opts.timeout_ms * spdk_get_ticks_hz() / 1000ull;
8308 	if (spdk_get_ticks() >= timeout_ticks) {
8309 		SPDK_ERRLOG("Timed out while waiting for bdev '%s' to appear\n", ctx->bdev_name);
8310 		ctx->rc = -ETIMEDOUT;
8311 		goto exit;
8312 	}
8313 
8314 	return;
8315 
8316 exit:
8317 	spdk_poller_unregister(&ctx->poller);
8318 	TAILQ_REMOVE(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8319 
8320 	/* Completion callback is processed after stack unwinding. */
8321 	spdk_thread_send_msg(ctx->orig_thread, bdev_open_async_done, ctx);
8322 }
8323 
8324 static int
8325 bdev_open_async(void *arg)
8326 {
8327 	struct spdk_bdev_open_async_ctx *ctx = arg;
8328 
8329 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8330 
8331 	_bdev_open_async(ctx);
8332 
8333 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
8334 
8335 	return SPDK_POLLER_BUSY;
8336 }
8337 
8338 static void
8339 bdev_open_async_opts_copy(struct spdk_bdev_open_async_opts *opts,
8340 			  struct spdk_bdev_open_async_opts *opts_src,
8341 			  size_t size)
8342 {
8343 	assert(opts);
8344 	assert(opts_src);
8345 
8346 	opts->size = size;
8347 
8348 #define SET_FIELD(field) \
8349 	if (offsetof(struct spdk_bdev_open_async_opts, field) + sizeof(opts->field) <= size) { \
8350 		opts->field = opts_src->field; \
8351 	} \
8352 
8353 	SET_FIELD(timeout_ms);
8354 
8355 	/* Do not remove this statement. Always update it when adding a new field,
8356 	 * and do not forget to add the SET_FIELD statement for the added field. */
8357 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_open_async_opts) == 16, "Incorrect size");
8358 
8359 #undef SET_FIELD
8360 }
8361 
8362 static void
8363 bdev_open_async_opts_get_default(struct spdk_bdev_open_async_opts *opts, size_t size)
8364 {
8365 	assert(opts);
8366 
8367 	opts->size = size;
8368 
8369 #define SET_FIELD(field, value) \
8370 	if (offsetof(struct spdk_bdev_open_async_opts, field) + sizeof(opts->field) <= size) { \
8371 		opts->field = value; \
8372 	} \
8373 
8374 	SET_FIELD(timeout_ms, 0);
8375 
8376 #undef SET_FIELD
8377 }
8378 
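/*
 * Illustrative sketch (assumed application-side code): wait up to one second for a bdev
 * that may not have been registered yet. The names app_event_cb, open_done_cb, and
 * app_ctx are hypothetical.
 *
 *	struct spdk_bdev_open_async_opts opts = {};
 *
 *	opts.size = sizeof(opts);
 *	opts.timeout_ms = 1000;
 *	rc = spdk_bdev_open_async("Nvme0n1", false, app_event_cb, app_ctx, &opts,
 *				  open_done_cb, app_ctx);
 */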
8379 int
8380 spdk_bdev_open_async(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8381 		     void *event_ctx, struct spdk_bdev_open_async_opts *opts,
8382 		     spdk_bdev_open_async_cb_t open_cb, void *open_cb_arg)
8383 {
8384 	struct spdk_bdev_open_async_ctx *ctx;
8385 
8386 	if (event_cb == NULL) {
8387 		SPDK_ERRLOG("Missing event callback function\n");
8388 		return -EINVAL;
8389 	}
8390 
8391 	if (open_cb == NULL) {
8392 		SPDK_ERRLOG("Missing open callback function\n");
8393 		return -EINVAL;
8394 	}
8395 
8396 	if (opts != NULL && opts->size == 0) {
8397 		SPDK_ERRLOG("size in the options structure should not be zero\n");
8398 		return -EINVAL;
8399 	}
8400 
8401 	ctx = calloc(1, sizeof(*ctx));
8402 	if (ctx == NULL) {
8403 		SPDK_ERRLOG("Failed to allocate open context\n");
8404 		return -ENOMEM;
8405 	}
8406 
8407 	ctx->bdev_name = strdup(bdev_name);
8408 	if (ctx->bdev_name == NULL) {
8409 		SPDK_ERRLOG("Failed to duplicate bdev_name\n");
8410 		free(ctx);
8411 		return -ENOMEM;
8412 	}
8413 
8414 	ctx->poller = SPDK_POLLER_REGISTER(bdev_open_async, ctx, 100 * 1000);
8415 	if (ctx->poller == NULL) {
8416 		SPDK_ERRLOG("Failed to register bdev_open_async poller\n");
8417 		free(ctx->bdev_name);
8418 		free(ctx);
8419 		return -ENOMEM;
8420 	}
8421 
8422 	ctx->cb_fn = open_cb;
8423 	ctx->cb_arg = open_cb_arg;
8424 	ctx->write = write;
8425 	ctx->event_cb = event_cb;
8426 	ctx->event_ctx = event_ctx;
8427 	ctx->orig_thread = spdk_get_thread();
8428 	ctx->start_ticks = spdk_get_ticks();
8429 
8430 	bdev_open_async_opts_get_default(&ctx->opts, sizeof(ctx->opts));
8431 	if (opts != NULL) {
8432 		bdev_open_async_opts_copy(&ctx->opts, opts, opts->size);
8433 	}
8434 
8435 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8436 
8437 	TAILQ_INSERT_TAIL(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8438 	_bdev_open_async(ctx);
8439 
8440 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
8441 
8442 	return 0;
8443 }
8444 
8445 static void
8446 bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
8447 {
8448 	int rc;
8449 
8450 	spdk_spin_lock(&bdev->internal.spinlock);
8451 	spdk_spin_lock(&desc->spinlock);
8452 
8453 	TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
8454 
8455 	desc->closed = true;
8456 
8457 	if (desc->claim != NULL) {
8458 		bdev_desc_release_claims(desc);
8459 	}
8460 
8461 	if (0 == desc->refs) {
8462 		spdk_spin_unlock(&desc->spinlock);
8463 		bdev_desc_free(desc);
8464 	} else {
8465 		spdk_spin_unlock(&desc->spinlock);
8466 	}
8467 
8468 	/* If no more descriptors, kill QoS channel */
8469 	if (bdev->internal.qos && TAILQ_EMPTY(&bdev->internal.open_descs)) {
8470 		SPDK_DEBUGLOG(bdev, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
8471 			      bdev->name, spdk_get_thread());
8472 
8473 		if (bdev_qos_destroy(bdev)) {
8474 			/* There isn't anything we can do to recover here. Just let the
8475 			 * old QoS poller keep running. The QoS handling won't change
8476 			 * cores when the user allocates a new channel, but it won't break. */
8477 			SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
8478 		}
8479 	}
8480 
8481 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
8482 		rc = bdev_unregister_unsafe(bdev);
8483 		spdk_spin_unlock(&bdev->internal.spinlock);
8484 
8485 		if (rc == 0) {
8486 			spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
8487 		}
8488 	} else {
8489 		spdk_spin_unlock(&bdev->internal.spinlock);
8490 	}
8491 }
8492 
8493 void
8494 spdk_bdev_close(struct spdk_bdev_desc *desc)
8495 {
8496 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
8497 
8498 	SPDK_DEBUGLOG(bdev, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
8499 		      spdk_get_thread());
8500 
8501 	assert(desc->thread == spdk_get_thread());
8502 
8503 	spdk_poller_unregister(&desc->io_timeout_poller);
8504 
8505 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8506 
8507 	bdev_close(bdev, desc);
8508 
8509 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
8510 }
8511 
8512 static void
8513 bdev_register_finished(void *arg)
8514 {
8515 	struct spdk_bdev_desc *desc = arg;
8516 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
8517 
8518 	spdk_notify_send("bdev_register", spdk_bdev_get_name(bdev));
8519 
8520 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8521 
8522 	bdev_close(bdev, desc);
8523 
8524 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
8525 }
8526 
8527 int
8528 spdk_bdev_register(struct spdk_bdev *bdev)
8529 {
8530 	struct spdk_bdev_desc *desc;
8531 	struct spdk_thread *thread = spdk_get_thread();
8532 	int rc;
8533 
8534 	if (spdk_unlikely(!spdk_thread_is_app_thread(NULL))) {
8535 		SPDK_ERRLOG("Cannot register bdev %s on thread %p (%s)\n", bdev->name, thread,
8536 			    thread ? spdk_thread_get_name(thread) : "null");
8537 		return -EINVAL;
8538 	}
8539 
8540 	rc = bdev_register(bdev);
8541 	if (rc != 0) {
8542 		return rc;
8543 	}
8544 
8545 	/* A descriptor is opened to prevent bdev deletion during examination */
8546 	rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
8547 	if (rc != 0) {
8548 		spdk_bdev_unregister(bdev, NULL, NULL);
8549 		return rc;
8550 	}
8551 
8552 	rc = bdev_open(bdev, false, desc);
8553 	if (rc != 0) {
8554 		bdev_desc_free(desc);
8555 		spdk_bdev_unregister(bdev, NULL, NULL);
8556 		return rc;
8557 	}
8558 
8559 	/* Examine configuration before initializing I/O */
8560 	bdev_examine(bdev);
8561 
8562 	rc = spdk_bdev_wait_for_examine(bdev_register_finished, desc);
8563 	if (rc != 0) {
8564 		bdev_close(bdev, desc);
8565 		spdk_bdev_unregister(bdev, NULL, NULL);
8566 	}
8567 
8568 	return rc;
8569 }
8570 
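/*
 * Illustrative sketch of how a backend module might register a bdev from the app thread.
 * The "xyz" names and function table are assumptions for the example, not part of this
 * library.
 *
 *	static struct spdk_bdev xyz_bdev;
 *
 *	xyz_bdev.name = "Xyz0";
 *	xyz_bdev.product_name = "Xyz Disk";
 *	xyz_bdev.blocklen = 512;
 *	xyz_bdev.blockcnt = 1024 * 1024;
 *	xyz_bdev.fn_table = &xyz_fn_table;	// hypothetical function table
 *	xyz_bdev.module = &xyz_if;		// hypothetical module
 *
 *	rc = spdk_bdev_register(&xyz_bdev);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("Failed to register bdev: %s\n", spdk_strerror(-rc));
 *	}
 */
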
8571 int
8572 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
8573 			    struct spdk_bdev_module *module)
8574 {
8575 	spdk_spin_lock(&bdev->internal.spinlock);
8576 
8577 	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8578 		LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8579 		spdk_spin_unlock(&bdev->internal.spinlock);
8580 		return -EPERM;
8581 	}
8582 
8583 	if (desc && !desc->write) {
8584 		desc->write = true;
8585 	}
8586 
8587 	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
8588 	bdev->internal.claim.v1.module = module;
8589 
8590 	spdk_spin_unlock(&bdev->internal.spinlock);
8591 	return 0;
8592 }
8593 
8594 void
8595 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
8596 {
8597 	spdk_spin_lock(&bdev->internal.spinlock);
8598 
8599 	assert(bdev->internal.claim.v1.module != NULL);
8600 	assert(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
8601 	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
8602 	bdev->internal.claim.v1.module = NULL;
8603 
8604 	spdk_spin_unlock(&bdev->internal.spinlock);
8605 }
8606 
8607 /*
8608  * Start claims v2
8609  */
8610 
8611 const char *
8612 spdk_bdev_claim_get_name(enum spdk_bdev_claim_type type)
8613 {
8614 	switch (type) {
8615 	case SPDK_BDEV_CLAIM_NONE:
8616 		return "not_claimed";
8617 	case SPDK_BDEV_CLAIM_EXCL_WRITE:
8618 		return "exclusive_write";
8619 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8620 		return "read_many_write_one";
8621 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
8622 		return "read_many_write_none";
8623 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8624 		return "read_many_write_many";
8625 	default:
8626 		break;
8627 	}
8628 	return "invalid_claim";
8629 }
8630 
8631 static bool
8632 claim_type_is_v2(enum spdk_bdev_claim_type type)
8633 {
8634 	switch (type) {
8635 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8636 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
8637 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8638 		return true;
8639 	default:
8640 		break;
8641 	}
8642 	return false;
8643 }
8644 
8645 /* Returns true if taking a claim with desc->write == false should make the descriptor writable. */
8646 static bool
8647 claim_type_promotes_to_write(enum spdk_bdev_claim_type type)
8648 {
8649 	switch (type) {
8650 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8651 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8652 		return true;
8653 	default:
8654 		break;
8655 	}
8656 	return false;
8657 }
8658 
8659 void
8660 spdk_bdev_claim_opts_init(struct spdk_bdev_claim_opts *opts, size_t size)
8661 {
8662 	if (opts == NULL) {
8663 		SPDK_ERRLOG("opts should not be NULL\n");
8664 		assert(opts != NULL);
8665 		return;
8666 	}
8667 	if (size == 0) {
8668 		SPDK_ERRLOG("size should not be zero\n");
8669 		assert(size != 0);
8670 		return;
8671 	}
8672 
8673 	memset(opts, 0, size);
8674 	opts->opts_size = size;
8675 
8676 #define FIELD_OK(field) \
8677         offsetof(struct spdk_bdev_claim_opts, field) + sizeof(opts->field) <= size
8678 
8679 #define SET_FIELD(field, value) \
8680         if (FIELD_OK(field)) { \
8681                 opts->field = value; \
8682         } \
8683 
8684 	SET_FIELD(shared_claim_key, 0);
8685 
8686 #undef FIELD_OK
8687 #undef SET_FIELD
8688 }
8689 
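/*
 * The FIELD_OK/SET_FIELD pattern above keeps the options struct forward and backward
 * compatible: a field is only written if it lies entirely within the size the caller
 * passed in. For example (offsets are hypothetical), if shared_claim_key started at
 * offset 40 with size 8, a caller compiled against an older header that passes
 * size == 40 would skip it, because 40 + 8 <= 40 is false.
 */
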
8690 static int
8691 claim_opts_copy(struct spdk_bdev_claim_opts *src, struct spdk_bdev_claim_opts *dst)
8692 {
8693 	if (src->opts_size == 0) {
8694 		SPDK_ERRLOG("size should not be zero\n");
8695 		return -1;
8696 	}
8697 
8698 	memset(dst, 0, sizeof(*dst));
8699 	dst->opts_size = src->opts_size;
8700 
8701 #define FIELD_OK(field) \
8702         offsetof(struct spdk_bdev_claim_opts, field) + sizeof(src->field) <= src->opts_size
8703 
8704 #define SET_FIELD(field) \
8705         if (FIELD_OK(field)) { \
8706                 dst->field = src->field; \
8707         } \
8708 
8709 	if (FIELD_OK(name)) {
8710 		snprintf(dst->name, sizeof(dst->name), "%s", src->name);
8711 	}
8712 
8713 	SET_FIELD(shared_claim_key);
8714 
8715 	/* Do not remove this statement. When a new field is added, update the assert
8716 	 * below and add a corresponding SET_FIELD statement. */
8717 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_claim_opts) == 48, "Incorrect size");
8718 
8719 #undef FIELD_OK
8720 #undef SET_FIELD
8721 	return 0;
8722 }
8723 
8724 /* Returns 0 if a read-write-once claim can be taken. */
8725 static int
8726 claim_verify_rwo(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8727 		 struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8728 {
8729 	struct spdk_bdev *bdev = desc->bdev;
8730 	struct spdk_bdev_desc *open_desc;
8731 
8732 	assert(spdk_spin_held(&bdev->internal.spinlock));
8733 	assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
8734 
8735 	if (opts->shared_claim_key != 0) {
8736 		SPDK_ERRLOG("%s: key option not supported with read-write-once claims\n",
8737 			    bdev->name);
8738 		return -EINVAL;
8739 	}
8740 	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8741 		LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8742 		return -EPERM;
8743 	}
8744 	if (desc->claim != NULL) {
8745 		SPDK_NOTICELOG("%s: descriptor already claimed bdev with module %s\n",
8746 			       bdev->name, desc->claim->module->name);
8747 		return -EPERM;
8748 	}
8749 	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
8750 		if (desc != open_desc && open_desc->write) {
8751 			SPDK_NOTICELOG("%s: Cannot obtain read-write-once claim while "
8752 				       "another descriptor is open for writing\n",
8753 				       bdev->name);
8754 			return -EPERM;
8755 		}
8756 	}
8757 
8758 	return 0;
8759 }
8760 
8761 /* Returns 0 if a read-only-many claim can be taken. */
8762 static int
8763 claim_verify_rom(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8764 		 struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8765 {
8766 	struct spdk_bdev *bdev = desc->bdev;
8767 	struct spdk_bdev_desc *open_desc;
8768 
8769 	assert(spdk_spin_held(&bdev->internal.spinlock));
8770 	assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
8771 	assert(desc->claim == NULL);
8772 
8773 	if (desc->write) {
8774 		SPDK_ERRLOG("%s: Cannot obtain read-only-many claim with writable descriptor\n",
8775 			    bdev->name);
8776 		return -EINVAL;
8777 	}
8778 	if (opts->shared_claim_key != 0) {
8779 		SPDK_ERRLOG("%s: key option not supported with read-only-many claims\n", bdev->name);
8780 		return -EINVAL;
8781 	}
8782 	if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
8783 		TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
8784 			if (open_desc->write) {
8785 				SPDK_NOTICELOG("%s: Cannot obtain read-only-many claim while "
8786 					       "another descriptor is open for writing\n",
8787 					       bdev->name);
8788 				return -EPERM;
8789 			}
8790 		}
8791 	}
8792 
8793 	return 0;
8794 }
8795 
8796 /* Returns 0 if a read-write-many claim can be taken. */
8797 static int
8798 claim_verify_rwm(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8799 		 struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8800 {
8801 	struct spdk_bdev *bdev = desc->bdev;
8802 	struct spdk_bdev_desc *open_desc;
8803 
8804 	assert(spdk_spin_held(&bdev->internal.spinlock));
8805 	assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
8806 	assert(desc->claim == NULL);
8807 
8808 	if (opts->shared_claim_key == 0) {
8809 		SPDK_ERRLOG("%s: shared_claim_key option required with read-write-many claims\n",
8810 			    bdev->name);
8811 		return -EINVAL;
8812 	}
8813 	switch (bdev->internal.claim_type) {
8814 	case SPDK_BDEV_CLAIM_NONE:
8815 		TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
8816 			if (open_desc == desc) {
8817 				continue;
8818 			}
8819 			if (open_desc->write) {
8820 				SPDK_NOTICELOG("%s: Cannot obtain read-write-many claim while "
8821 					       "another descriptor is open for writing without a "
8822 					       "claim\n", bdev->name);
8823 				return -EPERM;
8824 			}
8825 		}
8826 		break;
8827 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8828 		if (opts->shared_claim_key != bdev->internal.claim.v2.key) {
8829 			LOG_ALREADY_CLAIMED_ERROR("already claimed with another key", bdev);
8830 			return -EPERM;
8831 		}
8832 		break;
8833 	default:
8834 		LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8835 		return -EBUSY;
8836 	}
8837 
8838 	return 0;
8839 }
8840 
8841 /* Updates desc and its bdev with a v2 claim. */
8842 static int
8843 claim_bdev(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8844 	   struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8845 {
8846 	struct spdk_bdev *bdev = desc->bdev;
8847 	struct spdk_bdev_module_claim *claim;
8848 
8849 	assert(spdk_spin_held(&bdev->internal.spinlock));
8850 	assert(claim_type_is_v2(type));
8851 	assert(desc->claim == NULL);
8852 
8853 	claim = calloc(1, sizeof(*desc->claim));
8854 	if (claim == NULL) {
8855 		SPDK_ERRLOG("%s: out of memory while allocating claim\n", bdev->name);
8856 		return -ENOMEM;
8857 	}
8858 	claim->module = module;
8859 	claim->desc = desc;
8860 	SPDK_STATIC_ASSERT(sizeof(claim->name) == sizeof(opts->name), "sizes must match");
8861 	memcpy(claim->name, opts->name, sizeof(claim->name));
8862 	desc->claim = claim;
8863 
8864 	if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
8865 		bdev->internal.claim_type = type;
8866 		TAILQ_INIT(&bdev->internal.claim.v2.claims);
8867 		bdev->internal.claim.v2.key = opts->shared_claim_key;
8868 	}
8869 	assert(type == bdev->internal.claim_type);
8870 
8871 	TAILQ_INSERT_TAIL(&bdev->internal.claim.v2.claims, claim, link);
8872 
8873 	if (!desc->write && claim_type_promotes_to_write(type)) {
8874 		desc->write = true;
8875 	}
8876 
8877 	return 0;
8878 }
8879 
8880 int
8881 spdk_bdev_module_claim_bdev_desc(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8882 				 struct spdk_bdev_claim_opts *_opts,
8883 				 struct spdk_bdev_module *module)
8884 {
8885 	struct spdk_bdev *bdev;
8886 	struct spdk_bdev_claim_opts opts;
8887 	int rc = 0;
8888 
8889 	if (desc == NULL) {
8890 		SPDK_ERRLOG("descriptor must not be NULL\n");
8891 		return -EINVAL;
8892 	}
8893 
8894 	bdev = desc->bdev;
8895 
8896 	if (_opts == NULL) {
8897 		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
8898 	} else if (claim_opts_copy(_opts, &opts) != 0) {
8899 		return -EINVAL;
8900 	}
8901 
8902 	spdk_spin_lock(&bdev->internal.spinlock);
8903 
8904 	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE &&
8905 	    bdev->internal.claim_type != type) {
8906 		LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8907 		spdk_spin_unlock(&bdev->internal.spinlock);
8908 		return -EPERM;
8909 	}
8910 
8911 	if (claim_type_is_v2(type) && desc->claim != NULL) {
8912 		SPDK_ERRLOG("%s: descriptor already has %s claim with name '%s'\n",
8913 			    bdev->name, spdk_bdev_claim_get_name(type), desc->claim->name);
8914 		spdk_spin_unlock(&bdev->internal.spinlock);
8915 		return -EPERM;
8916 	}
8917 
8918 	switch (type) {
8919 	case SPDK_BDEV_CLAIM_EXCL_WRITE:
8920 		spdk_spin_unlock(&bdev->internal.spinlock);
8921 		return spdk_bdev_module_claim_bdev(bdev, desc, module);
8922 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8923 		rc = claim_verify_rwo(desc, type, &opts, module);
8924 		break;
8925 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
8926 		rc = claim_verify_rom(desc, type, &opts, module);
8927 		break;
8928 	case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8929 		rc = claim_verify_rwm(desc, type, &opts, module);
8930 		break;
8931 	default:
8932 		SPDK_ERRLOG("%s: claim type %d not supported\n", bdev->name, type);
8933 		rc = -ENOTSUP;
8934 	}
8935 
8936 	if (rc == 0) {
8937 		rc = claim_bdev(desc, type, &opts, module);
8938 	}
8939 
8940 	spdk_spin_unlock(&bdev->internal.spinlock);
8941 	return rc;
8942 }
8943 
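/*
 * Illustrative sketch: a module that only needs shared read access can take a
 * read_many_write_none claim on a read-only descriptor it already opened. The
 * descriptor, module, and claim name below are assumptions.
 *
 *	struct spdk_bdev_claim_opts opts;
 *
 *	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
 *	snprintf(opts.name, sizeof(opts.name), "%s", "my_reader");
 *	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE,
 *					      &opts, &my_module);
 */
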
8944 static void
8945 claim_reset(struct spdk_bdev *bdev)
8946 {
8947 	assert(spdk_spin_held(&bdev->internal.spinlock));
8948 	assert(claim_type_is_v2(bdev->internal.claim_type));
8949 	assert(TAILQ_EMPTY(&bdev->internal.claim.v2.claims));
8950 
8951 	memset(&bdev->internal.claim, 0, sizeof(bdev->internal.claim));
8952 	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
8953 }
8954 
8955 static void
8956 bdev_desc_release_claims(struct spdk_bdev_desc *desc)
8957 {
8958 	struct spdk_bdev *bdev = desc->bdev;
8959 
8960 	assert(spdk_spin_held(&bdev->internal.spinlock));
8961 	assert(claim_type_is_v2(bdev->internal.claim_type));
8962 
8963 	if (bdev->internal.examine_in_progress == 0) {
8964 		TAILQ_REMOVE(&bdev->internal.claim.v2.claims, desc->claim, link);
8965 		free(desc->claim);
8966 		if (TAILQ_EMPTY(&bdev->internal.claim.v2.claims)) {
8967 			claim_reset(bdev);
8968 		}
8969 	} else {
8970 		/* This is a dead claim that will be cleaned up when bdev_examine() is done. */
8971 		desc->claim->module = NULL;
8972 		desc->claim->desc = NULL;
8973 	}
8974 	desc->claim = NULL;
8975 }
8976 
8977 /*
8978  * End claims v2
8979  */
8980 
8981 struct spdk_bdev *
8982 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
8983 {
8984 	assert(desc != NULL);
8985 	return desc->bdev;
8986 }
8987 
8988 int
8989 spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
8990 {
8991 	struct spdk_bdev *bdev, *tmp;
8992 	struct spdk_bdev_desc *desc;
8993 	int rc = 0;
8994 
8995 	assert(fn != NULL);
8996 
8997 	spdk_spin_lock(&g_bdev_mgr.spinlock);
8998 	bdev = spdk_bdev_first();
8999 	while (bdev != NULL) {
9000 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
9001 		if (rc != 0) {
9002 			break;
9003 		}
9004 		rc = bdev_open(bdev, false, desc);
9005 		if (rc != 0) {
9006 			bdev_desc_free(desc);
9007 			if (rc == -ENODEV) {
9008 				/* Ignore the error and move to the next bdev. */
9009 				rc = 0;
9010 				bdev = spdk_bdev_next(bdev);
9011 				continue;
9012 			}
9013 			break;
9014 		}
9015 		spdk_spin_unlock(&g_bdev_mgr.spinlock);
9016 
9017 		rc = fn(ctx, bdev);
9018 
9019 		spdk_spin_lock(&g_bdev_mgr.spinlock);
9020 		tmp = spdk_bdev_next(bdev);
9021 		bdev_close(bdev, desc);
9022 		if (rc != 0) {
9023 			break;
9024 		}
9025 		bdev = tmp;
9026 	}
9027 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
9028 
9029 	return rc;
9030 }
9031 
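/*
 * Illustrative sketch: iterate over all registered bdevs with spdk_for_each_bdev(). The
 * callback runs with a temporary read-only descriptor held, and returning non-zero stops
 * the iteration. The callback name is an assumption.
 *
 *	static int
 *	print_bdev(void *ctx, struct spdk_bdev *bdev)
 *	{
 *		printf("%s\n", spdk_bdev_get_name(bdev));
 *		return 0;
 *	}
 *
 *	rc = spdk_for_each_bdev(NULL, print_bdev);
 */
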
9032 int
9033 spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
9034 {
9035 	struct spdk_bdev *bdev, *tmp;
9036 	struct spdk_bdev_desc *desc;
9037 	int rc = 0;
9038 
9039 	assert(fn != NULL);
9040 
9041 	spdk_spin_lock(&g_bdev_mgr.spinlock);
9042 	bdev = spdk_bdev_first_leaf();
9043 	while (bdev != NULL) {
9044 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
9045 		if (rc != 0) {
9046 			break;
9047 		}
9048 		rc = bdev_open(bdev, false, desc);
9049 		if (rc != 0) {
9050 			bdev_desc_free(desc);
9051 			if (rc == -ENODEV) {
9052 				/* Ignore the error and move to the next bdev. */
9053 				rc = 0;
9054 				bdev = spdk_bdev_next_leaf(bdev);
9055 				continue;
9056 			}
9057 			break;
9058 		}
9059 		spdk_spin_unlock(&g_bdev_mgr.spinlock);
9060 
9061 		rc = fn(ctx, bdev);
9062 
9063 		spdk_spin_lock(&g_bdev_mgr.spinlock);
9064 		tmp = spdk_bdev_next_leaf(bdev);
9065 		bdev_close(bdev, desc);
9066 		if (rc != 0) {
9067 			break;
9068 		}
9069 		bdev = tmp;
9070 	}
9071 	spdk_spin_unlock(&g_bdev_mgr.spinlock);
9072 
9073 	return rc;
9074 }
9075 
9076 void
9077 spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
9078 {
9079 	struct iovec *iovs;
9080 	int iovcnt;
9081 
9082 	if (bdev_io == NULL) {
9083 		return;
9084 	}
9085 
9086 	switch (bdev_io->type) {
9087 	case SPDK_BDEV_IO_TYPE_READ:
9088 	case SPDK_BDEV_IO_TYPE_WRITE:
9089 	case SPDK_BDEV_IO_TYPE_ZCOPY:
9090 		iovs = bdev_io->u.bdev.iovs;
9091 		iovcnt = bdev_io->u.bdev.iovcnt;
9092 		break;
9093 	default:
9094 		iovs = NULL;
9095 		iovcnt = 0;
9096 		break;
9097 	}
9098 
9099 	if (iovp) {
9100 		*iovp = iovs;
9101 	}
9102 	if (iovcntp) {
9103 		*iovcntp = iovcnt;
9104 	}
9105 }
9106 
9107 void *
9108 spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
9109 {
9110 	if (bdev_io == NULL) {
9111 		return NULL;
9112 	}
9113 
9114 	if (!spdk_bdev_is_md_separate(bdev_io->bdev)) {
9115 		return NULL;
9116 	}
9117 
9118 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
9119 	    bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
9120 		return bdev_io->u.bdev.md_buf;
9121 	}
9122 
9123 	return NULL;
9124 }
9125 
9126 void *
9127 spdk_bdev_io_get_cb_arg(struct spdk_bdev_io *bdev_io)
9128 {
9129 	if (bdev_io == NULL) {
9130 		assert(false);
9131 		return NULL;
9132 	}
9133 
9134 	return bdev_io->internal.caller_ctx;
9135 }
9136 
9137 void
9138 spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
9139 {
9140 
9141 	if (spdk_bdev_module_list_find(bdev_module->name)) {
9142 		SPDK_ERRLOG("ERROR: module '%s' already registered.\n", bdev_module->name);
9143 		assert(false);
9144 	}
9145 
9146 	spdk_spin_init(&bdev_module->internal.spinlock);
9147 	TAILQ_INIT(&bdev_module->internal.quiesced_ranges);
9148 
9149 	/*
9150 	 * Modules with examine callbacks must be initialized first, so they are
9151 	 *  ready to handle examine callbacks from later modules that will
9152 	 *  register physical bdevs.
9153 	 */
9154 	if (bdev_module->examine_config != NULL || bdev_module->examine_disk != NULL) {
9155 		TAILQ_INSERT_HEAD(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
9156 	} else {
9157 		TAILQ_INSERT_TAIL(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
9158 	}
9159 }
9160 
9161 struct spdk_bdev_module *
9162 spdk_bdev_module_list_find(const char *name)
9163 {
9164 	struct spdk_bdev_module *bdev_module;
9165 
9166 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
9167 		if (strcmp(name, bdev_module->name) == 0) {
9168 			break;
9169 		}
9170 	}
9171 
9172 	return bdev_module;
9173 }
9174 
9175 static int
9176 bdev_write_zero_buffer(struct spdk_bdev_io *bdev_io)
9177 {
9178 	uint64_t num_blocks;
9179 	void *md_buf = NULL;
9180 
9181 	num_blocks = bdev_io->u.bdev.num_blocks;
9182 
9183 	if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
9184 		md_buf = (char *)g_bdev_mgr.zero_buffer +
9185 			 spdk_bdev_get_block_size(bdev_io->bdev) * num_blocks;
9186 	}
9187 
9188 	return bdev_write_blocks_with_md(bdev_io->internal.desc,
9189 					 spdk_io_channel_from_ctx(bdev_io->internal.ch),
9190 					 g_bdev_mgr.zero_buffer, md_buf,
9191 					 bdev_io->u.bdev.offset_blocks, num_blocks,
9192 					 bdev_write_zero_buffer_done, bdev_io);
9193 }
9194 
9195 static void
9196 bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
9197 {
9198 	struct spdk_bdev_io *parent_io = cb_arg;
9199 
9200 	spdk_bdev_free_io(bdev_io);
9201 
9202 	parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
9203 	parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
9204 }
9205 
9206 static void
9207 bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
9208 {
9209 	spdk_spin_lock(&ctx->bdev->internal.spinlock);
9210 	ctx->bdev->internal.qos_mod_in_progress = false;
9211 	spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9212 
9213 	if (ctx->cb_fn) {
9214 		ctx->cb_fn(ctx->cb_arg, status);
9215 	}
9216 	free(ctx);
9217 }
9218 
9219 static void
9220 bdev_disable_qos_done(void *cb_arg)
9221 {
9222 	struct set_qos_limit_ctx *ctx = cb_arg;
9223 	struct spdk_bdev *bdev = ctx->bdev;
9224 	struct spdk_bdev_qos *qos;
9225 
9226 	spdk_spin_lock(&bdev->internal.spinlock);
9227 	qos = bdev->internal.qos;
9228 	bdev->internal.qos = NULL;
9229 	spdk_spin_unlock(&bdev->internal.spinlock);
9230 
9231 	if (qos->thread != NULL) {
9232 		spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
9233 		spdk_poller_unregister(&qos->poller);
9234 	}
9235 
9236 	free(qos);
9237 
9238 	bdev_set_qos_limit_done(ctx, 0);
9239 }
9240 
9241 static void
9242 bdev_disable_qos_msg_done(struct spdk_bdev *bdev, void *_ctx, int status)
9243 {
9244 	struct set_qos_limit_ctx *ctx = _ctx;
9245 	struct spdk_thread *thread;
9246 
9247 	spdk_spin_lock(&bdev->internal.spinlock);
9248 	thread = bdev->internal.qos->thread;
9249 	spdk_spin_unlock(&bdev->internal.spinlock);
9250 
9251 	if (thread != NULL) {
9252 		spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
9253 	} else {
9254 		bdev_disable_qos_done(ctx);
9255 	}
9256 }
9257 
9258 static void
9259 bdev_disable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9260 		     struct spdk_io_channel *ch, void *_ctx)
9261 {
9262 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9263 	struct spdk_bdev_io *bdev_io;
9264 
9265 	bdev_ch->flags &= ~BDEV_CH_QOS_ENABLED;
9266 
9267 	while (!TAILQ_EMPTY(&bdev_ch->qos_queued_io)) {
9268 		/* Re-submit the queued I/O. */
9269 		bdev_io = TAILQ_FIRST(&bdev_ch->qos_queued_io);
9270 		TAILQ_REMOVE(&bdev_ch->qos_queued_io, bdev_io, internal.link);
9271 		_bdev_io_submit(bdev_io);
9272 	}
9273 
9274 	spdk_bdev_for_each_channel_continue(i, 0);
9275 }
9276 
9277 static void
9278 bdev_update_qos_rate_limit_msg(void *cb_arg)
9279 {
9280 	struct set_qos_limit_ctx *ctx = cb_arg;
9281 	struct spdk_bdev *bdev = ctx->bdev;
9282 
9283 	spdk_spin_lock(&bdev->internal.spinlock);
9284 	bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
9285 	spdk_spin_unlock(&bdev->internal.spinlock);
9286 
9287 	bdev_set_qos_limit_done(ctx, 0);
9288 }
9289 
9290 static void
9291 bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9292 		    struct spdk_io_channel *ch, void *_ctx)
9293 {
9294 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9295 
9296 	spdk_spin_lock(&bdev->internal.spinlock);
9297 	bdev_enable_qos(bdev, bdev_ch);
9298 	spdk_spin_unlock(&bdev->internal.spinlock);
9299 	spdk_bdev_for_each_channel_continue(i, 0);
9300 }
9301 
9302 static void
9303 bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status)
9304 {
9305 	struct set_qos_limit_ctx *ctx = _ctx;
9306 
9307 	bdev_set_qos_limit_done(ctx, status);
9308 }
9309 
9310 static void
9311 bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
9312 {
9313 	int i;
9314 
9315 	assert(bdev->internal.qos != NULL);
9316 
9317 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9318 		if (limits[i] != SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
9319 			bdev->internal.qos->rate_limits[i].limit = limits[i];
9320 
9321 			if (limits[i] == 0) {
9322 				bdev->internal.qos->rate_limits[i].limit =
9323 					SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
9324 			}
9325 		}
9326 	}
9327 }
9328 
9329 void
9330 spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
9331 			      void (*cb_fn)(void *cb_arg, int status), void *cb_arg)
9332 {
9333 	struct set_qos_limit_ctx	*ctx;
9334 	uint32_t			limit_set_complement;
9335 	uint64_t			min_limit_per_sec;
9336 	int				i;
9337 	bool				disable_rate_limit = true;
9338 
9339 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9340 		if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
9341 			continue;
9342 		}
9343 
9344 		if (limits[i] > 0) {
9345 			disable_rate_limit = false;
9346 		}
9347 
9348 		if (bdev_qos_is_iops_rate_limit(i) == true) {
9349 			min_limit_per_sec = SPDK_BDEV_QOS_MIN_IOS_PER_SEC;
9350 		} else {
9351 			if (limits[i] > SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC) {
9352 				SPDK_WARNLOG("Requested rate limit %" PRIu64 " will result in uint64_t overflow, "
9353 					     "reset to %" PRIu64 "\n", limits[i], SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC);
9354 				limits[i] = SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC;
9355 			}
9356 			/* Change from megabyte to byte rate limit */
9357 			limits[i] = limits[i] * 1024 * 1024;
9358 			min_limit_per_sec = SPDK_BDEV_QOS_MIN_BYTES_PER_SEC;
9359 		}
9360 
9361 		limit_set_complement = limits[i] % min_limit_per_sec;
9362 		if (limit_set_complement) {
9363 			SPDK_ERRLOG("Requested rate limit %" PRIu64 " is not a multiple of %" PRIu64 "\n",
9364 				    limits[i], min_limit_per_sec);
9365 			limits[i] += min_limit_per_sec - limit_set_complement;
9366 			SPDK_ERRLOG("Rounding up the rate limit to %" PRIu64 "\n", limits[i]);
9367 		}
9368 	}
9369 
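	/*
	 * Worked example: a requested limit of 1500 IO/s with a minimum granularity of
	 * 1000 IO/s leaves a remainder of 500, so it is rounded up to 2000 IO/s above.
	 */
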
9370 	ctx = calloc(1, sizeof(*ctx));
9371 	if (ctx == NULL) {
9372 		cb_fn(cb_arg, -ENOMEM);
9373 		return;
9374 	}
9375 
9376 	ctx->cb_fn = cb_fn;
9377 	ctx->cb_arg = cb_arg;
9378 	ctx->bdev = bdev;
9379 
9380 	spdk_spin_lock(&bdev->internal.spinlock);
9381 	if (bdev->internal.qos_mod_in_progress) {
9382 		spdk_spin_unlock(&bdev->internal.spinlock);
9383 		free(ctx);
9384 		cb_fn(cb_arg, -EAGAIN);
9385 		return;
9386 	}
9387 	bdev->internal.qos_mod_in_progress = true;
9388 
9389 	if (disable_rate_limit == true && bdev->internal.qos) {
9390 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9391 			if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED &&
9392 			    (bdev->internal.qos->rate_limits[i].limit > 0 &&
9393 			     bdev->internal.qos->rate_limits[i].limit !=
9394 			     SPDK_BDEV_QOS_LIMIT_NOT_DEFINED)) {
9395 				disable_rate_limit = false;
9396 				break;
9397 			}
9398 		}
9399 	}
9400 
9401 	if (disable_rate_limit == false) {
9402 		if (bdev->internal.qos == NULL) {
9403 			bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
9404 			if (!bdev->internal.qos) {
9405 				spdk_spin_unlock(&bdev->internal.spinlock);
9406 				SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
9407 				bdev_set_qos_limit_done(ctx, -ENOMEM);
9408 				return;
9409 			}
9410 		}
9411 
9412 		if (bdev->internal.qos->thread == NULL) {
9413 			/* Enabling */
9414 			bdev_set_qos_rate_limits(bdev, limits);
9415 
9416 			spdk_bdev_for_each_channel(bdev, bdev_enable_qos_msg, ctx,
9417 						   bdev_enable_qos_done);
9418 		} else {
9419 			/* Updating */
9420 			bdev_set_qos_rate_limits(bdev, limits);
9421 
9422 			spdk_thread_send_msg(bdev->internal.qos->thread,
9423 					     bdev_update_qos_rate_limit_msg, ctx);
9424 		}
9425 	} else {
9426 		if (bdev->internal.qos != NULL) {
9427 			bdev_set_qos_rate_limits(bdev, limits);
9428 
9429 			/* Disabling */
9430 			spdk_bdev_for_each_channel(bdev, bdev_disable_qos_msg, ctx,
9431 						   bdev_disable_qos_msg_done);
9432 		} else {
9433 			spdk_spin_unlock(&bdev->internal.spinlock);
9434 			bdev_set_qos_limit_done(ctx, 0);
9435 			return;
9436 		}
9437 	}
9438 
9439 	spdk_spin_unlock(&bdev->internal.spinlock);
9440 }
9441 
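/*
 * Illustrative sketch: cap a bdev at 10000 read+write IO/s and 100 MB/s, leaving the
 * per-direction byte limits unchanged. The completion callback is an assumption. Byte
 * limits are passed in MB/s and converted to bytes per second above.
 *
 *	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {
 *		[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000,
 *		[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100,
 *		[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED,
 *		[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED,
 *	};
 *
 *	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_set_done_cb, NULL);
 */
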
9442 struct spdk_bdev_histogram_ctx {
9443 	spdk_bdev_histogram_status_cb cb_fn;
9444 	void *cb_arg;
9445 	struct spdk_bdev *bdev;
9446 	int status;
9447 };
9448 
9449 static void
9450 bdev_histogram_disable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9451 {
9452 	struct spdk_bdev_histogram_ctx *ctx = _ctx;
9453 
9454 	spdk_spin_lock(&ctx->bdev->internal.spinlock);
9455 	ctx->bdev->internal.histogram_in_progress = false;
9456 	spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9457 	ctx->cb_fn(ctx->cb_arg, ctx->status);
9458 	free(ctx);
9459 }
9460 
9461 static void
9462 bdev_histogram_disable_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9463 			       struct spdk_io_channel *_ch, void *_ctx)
9464 {
9465 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9466 
9467 	if (ch->histogram != NULL) {
9468 		spdk_histogram_data_free(ch->histogram);
9469 		ch->histogram = NULL;
9470 	}
9471 	spdk_bdev_for_each_channel_continue(i, 0);
9472 }
9473 
9474 static void
9475 bdev_histogram_enable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9476 {
9477 	struct spdk_bdev_histogram_ctx *ctx = _ctx;
9478 
9479 	if (status != 0) {
9480 		ctx->status = status;
9481 		ctx->bdev->internal.histogram_enabled = false;
9482 		spdk_bdev_for_each_channel(ctx->bdev, bdev_histogram_disable_channel, ctx,
9483 					   bdev_histogram_disable_channel_cb);
9484 	} else {
9485 		spdk_spin_lock(&ctx->bdev->internal.spinlock);
9486 		ctx->bdev->internal.histogram_in_progress = false;
9487 		spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9488 		ctx->cb_fn(ctx->cb_arg, ctx->status);
9489 		free(ctx);
9490 	}
9491 }
9492 
9493 static void
9494 bdev_histogram_enable_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9495 			      struct spdk_io_channel *_ch, void *_ctx)
9496 {
9497 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9498 	int status = 0;
9499 
9500 	if (ch->histogram == NULL) {
9501 		ch->histogram = spdk_histogram_data_alloc();
9502 		if (ch->histogram == NULL) {
9503 			status = -ENOMEM;
9504 		}
9505 	}
9506 
9507 	spdk_bdev_for_each_channel_continue(i, status);
9508 }
9509 
9510 void
9511 spdk_bdev_histogram_enable_ext(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
9512 			       void *cb_arg, bool enable, struct spdk_bdev_enable_histogram_opts *opts)
9513 {
9514 	struct spdk_bdev_histogram_ctx *ctx;
9515 
9516 	ctx = calloc(1, sizeof(struct spdk_bdev_histogram_ctx));
9517 	if (ctx == NULL) {
9518 		cb_fn(cb_arg, -ENOMEM);
9519 		return;
9520 	}
9521 
9522 	ctx->bdev = bdev;
9523 	ctx->status = 0;
9524 	ctx->cb_fn = cb_fn;
9525 	ctx->cb_arg = cb_arg;
9526 
9527 	spdk_spin_lock(&bdev->internal.spinlock);
9528 	if (bdev->internal.histogram_in_progress) {
9529 		spdk_spin_unlock(&bdev->internal.spinlock);
9530 		free(ctx);
9531 		cb_fn(cb_arg, -EAGAIN);
9532 		return;
9533 	}
9534 
9535 	bdev->internal.histogram_in_progress = true;
9536 	spdk_spin_unlock(&bdev->internal.spinlock);
9537 
9538 	bdev->internal.histogram_enabled = enable;
9539 	bdev->internal.histogram_io_type = opts->io_type;
9540 
9541 	if (enable) {
9542 		/* Allocate histogram for each channel */
9543 		spdk_bdev_for_each_channel(bdev, bdev_histogram_enable_channel, ctx,
9544 					   bdev_histogram_enable_channel_cb);
9545 	} else {
9546 		spdk_bdev_for_each_channel(bdev, bdev_histogram_disable_channel, ctx,
9547 					   bdev_histogram_disable_channel_cb);
9548 	}
9549 }
9550 
9551 void
9552 spdk_bdev_enable_histogram_opts_init(struct spdk_bdev_enable_histogram_opts *opts, size_t size)
9553 {
9554 	if (opts == NULL) {
9555 		SPDK_ERRLOG("opts should not be NULL\n");
9556 		assert(opts != NULL);
9557 		return;
9558 	}
9559 	if (size == 0) {
9560 		SPDK_ERRLOG("size should not be zero\n");
9561 		assert(size != 0);
9562 		return;
9563 	}
9564 
9565 	memset(opts, 0, size);
9566 	opts->size = size;
9567 
9568 #define FIELD_OK(field) \
9569         offsetof(struct spdk_bdev_enable_histogram_opts, field) + sizeof(opts->field) <= size
9570 
9571 #define SET_FIELD(field, value) \
9572         if (FIELD_OK(field)) { \
9573                 opts->field = value; \
9574         } \
9575 
9576 	SET_FIELD(io_type, 0);
9577 
9578 	/* Do not remove this statement. When a new field is added, update the assert
9579 	 * below and add a corresponding SET_FIELD statement. */
9580 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_enable_histogram_opts) == 9, "Incorrect size");
9581 
9582 #undef FIELD_OK
9583 #undef SET_FIELD
9584 }
9585 
9586 void
9587 spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
9588 			   void *cb_arg, bool enable)
9589 {
9590 	struct spdk_bdev_enable_histogram_opts opts;
9591 
9592 	spdk_bdev_enable_histogram_opts_init(&opts, sizeof(opts));
9593 	spdk_bdev_histogram_enable_ext(bdev, cb_fn, cb_arg, enable, &opts);
9594 }
9595 
9596 struct spdk_bdev_histogram_data_ctx {
9597 	spdk_bdev_histogram_data_cb cb_fn;
9598 	void *cb_arg;
9599 	struct spdk_bdev *bdev;
9600 	/** merged histogram data from all channels */
9601 	struct spdk_histogram_data	*histogram;
9602 };
9603 
9604 static void
9605 bdev_histogram_get_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9606 {
9607 	struct spdk_bdev_histogram_data_ctx *ctx = _ctx;
9608 
9609 	ctx->cb_fn(ctx->cb_arg, status, ctx->histogram);
9610 	free(ctx);
9611 }
9612 
9613 static void
9614 bdev_histogram_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9615 			   struct spdk_io_channel *_ch, void *_ctx)
9616 {
9617 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9618 	struct spdk_bdev_histogram_data_ctx *ctx = _ctx;
9619 	int status = 0;
9620 
9621 	if (ch->histogram == NULL) {
9622 		status = -EFAULT;
9623 	} else {
9624 		spdk_histogram_data_merge(ctx->histogram, ch->histogram);
9625 	}
9626 
9627 	spdk_bdev_for_each_channel_continue(i, status);
9628 }
9629 
9630 void
9631 spdk_bdev_histogram_get(struct spdk_bdev *bdev, struct spdk_histogram_data *histogram,
9632 			spdk_bdev_histogram_data_cb cb_fn,
9633 			void *cb_arg)
9634 {
9635 	struct spdk_bdev_histogram_data_ctx *ctx;
9636 
9637 	ctx = calloc(1, sizeof(struct spdk_bdev_histogram_data_ctx));
9638 	if (ctx == NULL) {
9639 		cb_fn(cb_arg, -ENOMEM, NULL);
9640 		return;
9641 	}
9642 
9643 	ctx->bdev = bdev;
9644 	ctx->cb_fn = cb_fn;
9645 	ctx->cb_arg = cb_arg;
9646 
9647 	ctx->histogram = histogram;
9648 
9649 	spdk_bdev_for_each_channel(bdev, bdev_histogram_get_channel, ctx,
9650 				   bdev_histogram_get_channel_cb);
9651 }
9652 
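/*
 * Illustrative sketch: enable per-channel latency histograms on a bdev and later collect
 * the merged data into a caller-owned histogram. The status/data callbacks are
 * assumptions.
 *
 *	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
 *	...
 *	struct spdk_histogram_data *h = spdk_histogram_data_alloc();
 *	spdk_bdev_histogram_get(bdev, h, histogram_data_cb, NULL);
 */
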
9653 void
9654 spdk_bdev_channel_get_histogram(struct spdk_io_channel *ch, spdk_bdev_histogram_data_cb cb_fn,
9655 				void *cb_arg)
9656 {
9657 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9658 	int status = 0;
9659 
9660 	assert(cb_fn != NULL);
9661 
9662 	if (bdev_ch->histogram == NULL) {
9663 		status = -EFAULT;
9664 	}
9665 	cb_fn(cb_arg, status, bdev_ch->histogram);
9666 }
9667 
9668 size_t
9669 spdk_bdev_get_media_events(struct spdk_bdev_desc *desc, struct spdk_bdev_media_event *events,
9670 			   size_t max_events)
9671 {
9672 	struct media_event_entry *entry;
9673 	size_t num_events = 0;
9674 
9675 	for (; num_events < max_events; ++num_events) {
9676 		entry = TAILQ_FIRST(&desc->pending_media_events);
9677 		if (entry == NULL) {
9678 			break;
9679 		}
9680 
9681 		events[num_events] = entry->event;
9682 		TAILQ_REMOVE(&desc->pending_media_events, entry, tailq);
9683 		TAILQ_INSERT_TAIL(&desc->free_media_events, entry, tailq);
9684 	}
9685 
9686 	return num_events;
9687 }
9688 
9689 int
9690 spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media_event *events,
9691 			    size_t num_events)
9692 {
9693 	struct spdk_bdev_desc *desc;
9694 	struct media_event_entry *entry;
9695 	size_t event_id;
9696 	int rc = 0;
9697 
9698 	assert(bdev->media_events);
9699 
9700 	spdk_spin_lock(&bdev->internal.spinlock);
9701 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
9702 		if (desc->write) {
9703 			break;
9704 		}
9705 	}
9706 
9707 	if (desc == NULL || desc->media_events_buffer == NULL) {
9708 		rc = -ENODEV;
9709 		goto out;
9710 	}
9711 
9712 	for (event_id = 0; event_id < num_events; ++event_id) {
9713 		entry = TAILQ_FIRST(&desc->free_media_events);
9714 		if (entry == NULL) {
9715 			break;
9716 		}
9717 
9718 		TAILQ_REMOVE(&desc->free_media_events, entry, tailq);
9719 		TAILQ_INSERT_TAIL(&desc->pending_media_events, entry, tailq);
9720 		entry->event = events[event_id];
9721 	}
9722 
9723 	rc = event_id;
9724 out:
9725 	spdk_spin_unlock(&bdev->internal.spinlock);
9726 	return rc;
9727 }
9728 
9729 static void
9730 _media_management_notify(void *arg)
9731 {
9732 	struct spdk_bdev_desc *desc = arg;
9733 
9734 	_event_notify(desc, SPDK_BDEV_EVENT_MEDIA_MANAGEMENT);
9735 }
9736 
9737 void
9738 spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
9739 {
9740 	struct spdk_bdev_desc *desc;
9741 
9742 	spdk_spin_lock(&bdev->internal.spinlock);
9743 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
9744 		if (!TAILQ_EMPTY(&desc->pending_media_events)) {
9745 			event_notify(desc, _media_management_notify);
9746 		}
9747 	}
9748 	spdk_spin_unlock(&bdev->internal.spinlock);
9749 }
9750 
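/*
 * Illustrative sketch of the media event flow: the bdev module pushes events and notifies
 * open descriptors; a descriptor owner drains them from its SPDK_BDEV_EVENT_MEDIA_MANAGEMENT
 * handler. The event array and its size are assumptions.
 *
 *	// producer (bdev module):
 *	spdk_bdev_push_media_events(bdev, events, num_events);
 *	spdk_bdev_notify_media_management(bdev);
 *
 *	// consumer (descriptor owner, in its event callback):
 *	struct spdk_bdev_media_event buf[16];
 *	size_t n = spdk_bdev_get_media_events(desc, buf, SPDK_COUNTOF(buf));
 */
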
9751 struct locked_lba_range_ctx {
9752 	struct lba_range		range;
9753 	struct lba_range		*current_range;
9754 	struct lba_range		*owner_range;
9755 	struct spdk_poller		*poller;
9756 	lock_range_cb			cb_fn;
9757 	void				*cb_arg;
9758 };
9759 
9760 static void
9761 bdev_lock_error_cleanup_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9762 {
9763 	struct locked_lba_range_ctx *ctx = _ctx;
9764 
9765 	ctx->cb_fn(&ctx->range, ctx->cb_arg, -ENOMEM);
9766 	free(ctx);
9767 }
9768 
9769 static void bdev_unlock_lba_range_get_channel(struct spdk_bdev_channel_iter *i,
9770 		struct spdk_bdev *bdev, struct spdk_io_channel *ch, void *_ctx);
9771 
9772 static void
9773 bdev_lock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9774 {
9775 	struct locked_lba_range_ctx *ctx = _ctx;
9776 
9777 	if (status == -ENOMEM) {
9778 		/* One of the channels could not allocate a range object.
9779 		 * So we have to go back and clean up any ranges that were
9780 		 * allocated successfully before we return error status to
9781 		 * the caller.  We can reuse the unlock function to do that
9782 		 * clean up.
9783 		 */
9784 		spdk_bdev_for_each_channel(bdev, bdev_unlock_lba_range_get_channel, ctx,
9785 					   bdev_lock_error_cleanup_cb);
9786 		return;
9787 	}
9788 
9789 	/* All channels have locked this range and no I/O overlapping the range
9790 	 * is outstanding!  Set the owner_ch for the range object for the
9791 	 * locking channel, so that this channel will know that it is allowed
9792 	 * to write to this range.
9793 	 */
9794 	if (ctx->owner_range != NULL) {
9795 		ctx->owner_range->owner_ch = ctx->range.owner_ch;
9796 	}
9797 
9798 	ctx->cb_fn(&ctx->range, ctx->cb_arg, status);
9799 
9800 	/* Don't free the ctx here.  Its range is in the bdev's global list of
9801 	 * locked ranges still, and will be removed and freed when this range
9802 	 * is later unlocked.
9803 	 */
9804 }
9805 
9806 static int
9807 bdev_lock_lba_range_check_io(void *_i)
9808 {
9809 	struct spdk_bdev_channel_iter *i = _i;
9810 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i->i);
9811 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9812 	struct locked_lba_range_ctx *ctx = i->ctx;
9813 	struct lba_range *range = ctx->current_range;
9814 	struct spdk_bdev_io *bdev_io;
9815 
9816 	spdk_poller_unregister(&ctx->poller);
9817 
9818 	/* The range is now in the locked_ranges, so no new IO can be submitted to this
9819 	 * range.  But we need to wait until any outstanding I/O overlapping with this range
9820 	 * has completed.
9821 	 */
9822 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
9823 		if (bdev_io_range_is_locked(bdev_io, range)) {
9824 			ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
9825 			return SPDK_POLLER_BUSY;
9826 		}
9827 	}
9828 
9829 	spdk_bdev_for_each_channel_continue(i, 0);
9830 	return SPDK_POLLER_BUSY;
9831 }
9832 
9833 static void
9834 bdev_lock_lba_range_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9835 				struct spdk_io_channel *_ch, void *_ctx)
9836 {
9837 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9838 	struct locked_lba_range_ctx *ctx = _ctx;
9839 	struct lba_range *range;
9840 
9841 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
9842 		if (range->length == ctx->range.length &&
9843 		    range->offset == ctx->range.offset &&
9844 		    range->locked_ctx == ctx->range.locked_ctx) {
9845 			/* This range already exists on this channel, so don't add
9846 			 * it again.  This can happen when a new channel is created
9847 			 * while the for_each_channel operation is in progress.
9848 			 * Do not check for outstanding I/O in that case, since the
9849 			 * range was locked before any I/O could be submitted to the
9850 			 * new channel.
9851 			 */
9852 			spdk_bdev_for_each_channel_continue(i, 0);
9853 			return;
9854 		}
9855 	}
9856 
9857 	range = calloc(1, sizeof(*range));
9858 	if (range == NULL) {
9859 		spdk_bdev_for_each_channel_continue(i, -ENOMEM);
9860 		return;
9861 	}
9862 
9863 	range->length = ctx->range.length;
9864 	range->offset = ctx->range.offset;
9865 	range->locked_ctx = ctx->range.locked_ctx;
9866 	range->quiesce = ctx->range.quiesce;
9867 	ctx->current_range = range;
9868 	if (ctx->range.owner_ch == ch) {
9869 		/* This is the range object for the channel that will hold
9870 		 * the lock.  Store it in the ctx object so that we can easily
9871 		 * set its owner_ch after the lock is finally acquired.
9872 		 */
9873 		ctx->owner_range = range;
9874 	}
9875 	TAILQ_INSERT_TAIL(&ch->locked_ranges, range, tailq);
9876 	bdev_lock_lba_range_check_io(i);
9877 }
9878 
9879 static void
9880 bdev_lock_lba_range_ctx(struct spdk_bdev *bdev, struct locked_lba_range_ctx *ctx)
9881 {
9882 	assert(spdk_get_thread() == ctx->range.owner_thread);
9883 	assert(ctx->range.owner_ch == NULL ||
9884 	       spdk_io_channel_get_thread(ctx->range.owner_ch->channel) == ctx->range.owner_thread);
9885 
9886 	/* We will add a copy of this range to each channel now. */
9887 	spdk_bdev_for_each_channel(bdev, bdev_lock_lba_range_get_channel, ctx,
9888 				   bdev_lock_lba_range_cb);
9889 }
9890 
9891 static bool
9892 bdev_lba_range_overlaps_tailq(struct lba_range *range, lba_range_tailq_t *tailq)
9893 {
9894 	struct lba_range *r;
9895 
9896 	TAILQ_FOREACH(r, tailq, tailq) {
9897 		if (bdev_lba_range_overlapped(range, r)) {
9898 			return true;
9899 		}
9900 	}
9901 	return false;
9902 }
9903 
9904 static void bdev_quiesce_range_locked(struct lba_range *range, void *ctx, int status);
9905 
9906 static int
9907 _bdev_lock_lba_range(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch,
9908 		     uint64_t offset, uint64_t length,
9909 		     lock_range_cb cb_fn, void *cb_arg)
9910 {
9911 	struct locked_lba_range_ctx *ctx;
9912 
9913 	ctx = calloc(1, sizeof(*ctx));
9914 	if (ctx == NULL) {
9915 		return -ENOMEM;
9916 	}
9917 
9918 	ctx->range.offset = offset;
9919 	ctx->range.length = length;
9920 	ctx->range.owner_thread = spdk_get_thread();
9921 	ctx->range.owner_ch = ch;
9922 	ctx->range.locked_ctx = cb_arg;
9923 	ctx->range.bdev = bdev;
9924 	ctx->range.quiesce = (cb_fn == bdev_quiesce_range_locked);
9925 	ctx->cb_fn = cb_fn;
9926 	ctx->cb_arg = cb_arg;
9927 
9928 	spdk_spin_lock(&bdev->internal.spinlock);
9929 	if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
9930 		/* There is an active lock overlapping with this range.
9931 		 * Put it on the pending list until this range no
9932 		 * longer overlaps with another.
9933 		 */
9934 		TAILQ_INSERT_TAIL(&bdev->internal.pending_locked_ranges, &ctx->range, tailq);
9935 	} else {
9936 		TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
9937 		bdev_lock_lba_range_ctx(bdev, ctx);
9938 	}
9939 	spdk_spin_unlock(&bdev->internal.spinlock);
9940 	return 0;
9941 }
9942 
9943 static int
9944 bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
9945 		    uint64_t offset, uint64_t length,
9946 		    lock_range_cb cb_fn, void *cb_arg)
9947 {
9948 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
9949 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9950 
9951 	if (cb_arg == NULL) {
9952 		SPDK_ERRLOG("cb_arg must not be NULL\n");
9953 		return -EINVAL;
9954 	}
9955 
9956 	return _bdev_lock_lba_range(bdev, ch, offset, length, cb_fn, cb_arg);
9957 }
9958 
9959 static void
9960 bdev_lock_lba_range_ctx_msg(void *_ctx)
9961 {
9962 	struct locked_lba_range_ctx *ctx = _ctx;
9963 
9964 	bdev_lock_lba_range_ctx(ctx->range.bdev, ctx);
9965 }
9966 
9967 static void
9968 bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9969 {
9970 	struct locked_lba_range_ctx *ctx = _ctx;
9971 	struct locked_lba_range_ctx *pending_ctx;
9972 	struct lba_range *range, *tmp;
9973 
9974 	spdk_spin_lock(&bdev->internal.spinlock);
9975 	/* Check if there are any pending locked ranges that overlap with this range
9976 	 * that was just unlocked.  If there are, check that it doesn't overlap with any
9977 	 * other locked ranges before calling bdev_lock_lba_range_ctx which will start
9978 	 * the lock process.
9979 	 */
9980 	TAILQ_FOREACH_SAFE(range, &bdev->internal.pending_locked_ranges, tailq, tmp) {
9981 		if (bdev_lba_range_overlapped(range, &ctx->range) &&
9982 		    !bdev_lba_range_overlaps_tailq(range, &bdev->internal.locked_ranges)) {
9983 			TAILQ_REMOVE(&bdev->internal.pending_locked_ranges, range, tailq);
9984 			pending_ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
9985 			TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, range, tailq);
9986 			spdk_thread_send_msg(pending_ctx->range.owner_thread,
9987 					     bdev_lock_lba_range_ctx_msg, pending_ctx);
9988 		}
9989 	}
9990 	spdk_spin_unlock(&bdev->internal.spinlock);
9991 
9992 	ctx->cb_fn(&ctx->range, ctx->cb_arg, status);
9993 	free(ctx);
9994 }
9995 
9996 static void
9997 bdev_unlock_lba_range_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9998 				  struct spdk_io_channel *_ch, void *_ctx)
9999 {
10000 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10001 	struct locked_lba_range_ctx *ctx = _ctx;
10002 	TAILQ_HEAD(, spdk_bdev_io) io_locked;
10003 	struct spdk_bdev_io *bdev_io;
10004 	struct lba_range *range;
10005 
10006 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10007 		if (ctx->range.offset == range->offset &&
10008 		    ctx->range.length == range->length &&
10009 		    ctx->range.locked_ctx == range->locked_ctx) {
10010 			TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
10011 			free(range);
10012 			break;
10013 		}
10014 	}
10015 
10016 	/* Note: we should almost always be able to assert that the range specified
10017 	 * was found.  But there are some very rare corner cases where a new channel
10018 	 * gets created simultaneously with a range unlock, where this function
10019 	 * would execute on that new channel and wouldn't have the range.
10020 	 * We also use this to clean up range allocations when a later allocation
10021 	 * fails in the locking path.
10022 	 * So we can't actually assert() here.
10023 	 */
10024 
10025 	/* Swap the locked IO into a temporary list, and then try to submit them again.
10026 	 * We could hyper-optimize this to only resubmit locked I/O that overlap
10027 	 * with the range that was just unlocked, but this isn't a performance path so
10028 	 * we go for simplicity here.
10029 	 */
10030 	TAILQ_INIT(&io_locked);
10031 	TAILQ_SWAP(&ch->io_locked, &io_locked, spdk_bdev_io, internal.ch_link);
10032 	while (!TAILQ_EMPTY(&io_locked)) {
10033 		bdev_io = TAILQ_FIRST(&io_locked);
10034 		TAILQ_REMOVE(&io_locked, bdev_io, internal.ch_link);
10035 		bdev_io_submit(bdev_io);
10036 	}
10037 
10038 	spdk_bdev_for_each_channel_continue(i, 0);
10039 }
10040 
10041 static int
10042 _bdev_unlock_lba_range(struct spdk_bdev *bdev, uint64_t offset, uint64_t length,
10043 		       lock_range_cb cb_fn, void *cb_arg)
10044 {
10045 	struct locked_lba_range_ctx *ctx;
10046 	struct lba_range *range;
10047 
10048 	spdk_spin_lock(&bdev->internal.spinlock);
10049 	/* To start the unlock process, we find the range in the bdev's locked_ranges
10050 	 * and remove it. This ensures new channels don't inherit the locked range.
10051 	 * Then we will send a message to each channel to remove the range from its
10052 	 * per-channel list.
10053 	 */
10054 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
10055 		if (range->offset == offset && range->length == length &&
10056 		    (range->owner_ch == NULL || range->locked_ctx == cb_arg)) {
10057 			break;
10058 		}
10059 	}
10060 	if (range == NULL) {
10061 		assert(false);
10062 		spdk_spin_unlock(&bdev->internal.spinlock);
10063 		return -EINVAL;
10064 	}
10065 	TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
10066 	ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
10067 	spdk_spin_unlock(&bdev->internal.spinlock);
10068 
10069 	ctx->cb_fn = cb_fn;
10070 	ctx->cb_arg = cb_arg;
10071 
10072 	spdk_bdev_for_each_channel(bdev, bdev_unlock_lba_range_get_channel, ctx,
10073 				   bdev_unlock_lba_range_cb);
10074 	return 0;
10075 }
10076 
10077 static int
10078 bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
10079 		      uint64_t offset, uint64_t length,
10080 		      lock_range_cb cb_fn, void *cb_arg)
10081 {
10082 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10083 	struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10084 	struct lba_range *range;
10085 	bool range_found = false;
10086 
10087 	/* Let's make sure the specified channel actually has a lock on
10088 	 * the specified range.  Note that the range must match exactly.
10089 	 */
10090 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10091 		if (range->offset == offset && range->length == length &&
10092 		    range->owner_ch == ch && range->locked_ctx == cb_arg) {
10093 			range_found = true;
10094 			break;
10095 		}
10096 	}
10097 
10098 	if (!range_found) {
10099 		return -EINVAL;
10100 	}
10101 
10102 	return _bdev_unlock_lba_range(bdev, offset, length, cb_fn, cb_arg);
10103 }
10104 
10105 struct bdev_quiesce_ctx {
10106 	spdk_bdev_quiesce_cb cb_fn;
10107 	void *cb_arg;
10108 };
10109 
10110 static void
10111 bdev_unquiesce_range_unlocked(struct lba_range *range, void *ctx, int status)
10112 {
10113 	struct bdev_quiesce_ctx *quiesce_ctx = ctx;
10114 
10115 	if (quiesce_ctx->cb_fn != NULL) {
10116 		quiesce_ctx->cb_fn(quiesce_ctx->cb_arg, status);
10117 	}
10118 
10119 	free(quiesce_ctx);
10120 }
10121 
10122 static void
10123 bdev_quiesce_range_locked(struct lba_range *range, void *ctx, int status)
10124 {
10125 	struct bdev_quiesce_ctx *quiesce_ctx = ctx;
10126 	struct spdk_bdev_module *module = range->bdev->module;
10127 
10128 	if (status != 0) {
10129 		if (quiesce_ctx->cb_fn != NULL) {
10130 			quiesce_ctx->cb_fn(quiesce_ctx->cb_arg, status);
10131 		}
10132 		free(quiesce_ctx);
10133 		return;
10134 	}
10135 
10136 	spdk_spin_lock(&module->internal.spinlock);
10137 	TAILQ_INSERT_TAIL(&module->internal.quiesced_ranges, range, tailq_module);
10138 	spdk_spin_unlock(&module->internal.spinlock);
10139 
10140 	if (quiesce_ctx->cb_fn != NULL) {
10141 		/* copy the context in case the range is unlocked by the callback */
10142 		struct bdev_quiesce_ctx tmp = *quiesce_ctx;
10143 
10144 		quiesce_ctx->cb_fn = NULL;
10145 		quiesce_ctx->cb_arg = NULL;
10146 
10147 		tmp.cb_fn(tmp.cb_arg, status);
10148 	}
10149 	/* quiesce_ctx will be freed on unquiesce */
10150 }
10151 
10152 static int
10153 _spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10154 		   uint64_t offset, uint64_t length,
10155 		   spdk_bdev_quiesce_cb cb_fn, void *cb_arg,
10156 		   bool unquiesce)
10157 {
10158 	struct bdev_quiesce_ctx *quiesce_ctx;
10159 	int rc;
10160 
10161 	if (module != bdev->module) {
10162 		SPDK_ERRLOG("Bdev does not belong to specified module.\n");
10163 		return -EINVAL;
10164 	}
10165 
10166 	if (!bdev_io_valid_blocks(bdev, offset, length)) {
10167 		return -EINVAL;
10168 	}
10169 
10170 	if (unquiesce) {
10171 		struct lba_range *range;
10172 
10173 		/* Make sure the specified range is actually quiesced in the specified module and
10174 		 * then remove it from the list. Note that the range must match exactly.
10175 		 */
10176 		spdk_spin_lock(&module->internal.spinlock);
10177 		TAILQ_FOREACH(range, &module->internal.quiesced_ranges, tailq_module) {
10178 			if (range->bdev == bdev && range->offset == offset && range->length == length) {
10179 				TAILQ_REMOVE(&module->internal.quiesced_ranges, range, tailq_module);
10180 				break;
10181 			}
10182 		}
10183 		spdk_spin_unlock(&module->internal.spinlock);
10184 
10185 		if (range == NULL) {
10186 			SPDK_ERRLOG("The range to unquiesce was not found.\n");
10187 			return -EINVAL;
10188 		}
10189 
10190 		quiesce_ctx = range->locked_ctx;
10191 		quiesce_ctx->cb_fn = cb_fn;
10192 		quiesce_ctx->cb_arg = cb_arg;
10193 
10194 		rc = _bdev_unlock_lba_range(bdev, offset, length, bdev_unquiesce_range_unlocked, quiesce_ctx);
10195 	} else {
10196 		quiesce_ctx = malloc(sizeof(*quiesce_ctx));
10197 		if (quiesce_ctx == NULL) {
10198 			return -ENOMEM;
10199 		}
10200 
10201 		quiesce_ctx->cb_fn = cb_fn;
10202 		quiesce_ctx->cb_arg = cb_arg;
10203 
10204 		rc = _bdev_lock_lba_range(bdev, NULL, offset, length, bdev_quiesce_range_locked, quiesce_ctx);
10205 		if (rc != 0) {
10206 			free(quiesce_ctx);
10207 		}
10208 	}
10209 
10210 	return rc;
10211 }
10212 
10213 int
10214 spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10215 		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10216 {
10217 	return _spdk_bdev_quiesce(bdev, module, 0, bdev->blockcnt, cb_fn, cb_arg, false);
10218 }
10219 
10220 int
10221 spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10222 		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10223 {
10224 	return _spdk_bdev_quiesce(bdev, module, 0, bdev->blockcnt, cb_fn, cb_arg, true);
10225 }
10226 
10227 int
10228 spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10229 			uint64_t offset, uint64_t length,
10230 			spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10231 {
10232 	return _spdk_bdev_quiesce(bdev, module, offset, length, cb_fn, cb_arg, false);
10233 }
10234 
10235 int
10236 spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10237 			  uint64_t offset, uint64_t length,
10238 			  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10239 {
10240 	return _spdk_bdev_quiesce(bdev, module, offset, length, cb_fn, cb_arg, true);
10241 }
10242 
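/*
 * Illustrative sketch: the module that owns a bdev pauses I/O to its first 1024 blocks,
 * performs maintenance in the callback, and resumes I/O when done. The callback names are
 * assumptions.
 *
 *	rc = spdk_bdev_quiesce_range(bdev, &my_module, 0, 1024, range_quiesced_cb, NULL);
 *	...
 *	// once maintenance has finished:
 *	rc = spdk_bdev_unquiesce_range(bdev, &my_module, 0, 1024, range_unquiesced_cb, NULL);
 */
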
10243 int
10244 spdk_bdev_get_memory_domains(struct spdk_bdev *bdev, struct spdk_memory_domain **domains,
10245 			     int array_size)
10246 {
10247 	if (!bdev) {
10248 		return -EINVAL;
10249 	}
10250 
10251 	if (bdev->fn_table->get_memory_domains) {
10252 		return bdev->fn_table->get_memory_domains(bdev->ctxt, domains, array_size);
10253 	}
10254 
10255 	return 0;
10256 }
10257 
10258 struct spdk_bdev_for_each_io_ctx {
10259 	void *ctx;
10260 	spdk_bdev_io_fn fn;
10261 	spdk_bdev_for_each_io_cb cb;
10262 };
10263 
10264 static void
10265 bdev_channel_for_each_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10266 			 struct spdk_io_channel *io_ch, void *_ctx)
10267 {
10268 	struct spdk_bdev_for_each_io_ctx *ctx = _ctx;
10269 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
10270 	struct spdk_bdev_io *bdev_io;
10271 	int rc = 0;
10272 
10273 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
10274 		rc = ctx->fn(ctx->ctx, bdev_io);
10275 		if (rc != 0) {
10276 			break;
10277 		}
10278 	}
10279 
10280 	spdk_bdev_for_each_channel_continue(i, rc);
10281 }
10282 
10283 static void
10284 bdev_for_each_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
10285 {
10286 	struct spdk_bdev_for_each_io_ctx *ctx = _ctx;
10287 
10288 	ctx->cb(ctx->ctx, status);
10289 
10290 	free(ctx);
10291 }
10292 
10293 void
10294 spdk_bdev_for_each_bdev_io(struct spdk_bdev *bdev, void *_ctx, spdk_bdev_io_fn fn,
10295 			   spdk_bdev_for_each_io_cb cb)
10296 {
10297 	struct spdk_bdev_for_each_io_ctx *ctx;
10298 
10299 	assert(fn != NULL && cb != NULL);
10300 
10301 	ctx = calloc(1, sizeof(*ctx));
10302 	if (ctx == NULL) {
10303 		SPDK_ERRLOG("Failed to allocate context.\n");
10304 		cb(_ctx, -ENOMEM);
10305 		return;
10306 	}
10307 
10308 	ctx->ctx = _ctx;
10309 	ctx->fn = fn;
10310 	ctx->cb = cb;
10311 
10312 	spdk_bdev_for_each_channel(bdev, bdev_channel_for_each_io, ctx,
10313 				   bdev_for_each_io_done);
10314 }
10315 
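/*
 * Illustrative sketch: count the I/Os currently submitted across all channels of a bdev.
 * The callback names and the counter are assumptions; a non-zero return from the per-I/O
 * callback stops the walk on that channel.
 *
 *	static int
 *	count_io(void *ctx, struct spdk_bdev_io *bdev_io)
 *	{
 *		(*(uint32_t *)ctx)++;
 *		return 0;
 *	}
 *
 *	spdk_bdev_for_each_bdev_io(bdev, &num_ios, count_io, count_done_cb);
 */
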
10316 void
10317 spdk_bdev_for_each_channel_continue(struct spdk_bdev_channel_iter *iter, int status)
10318 {
10319 	spdk_for_each_channel_continue(iter->i, status);
10320 }
10321 
10322 static struct spdk_bdev *
10323 io_channel_iter_get_bdev(struct spdk_io_channel_iter *i)
10324 {
10325 	void *io_device = spdk_io_channel_iter_get_io_device(i);
10326 
10327 	return __bdev_from_io_dev(io_device);
10328 }
10329 
10330 static void
10331 bdev_each_channel_msg(struct spdk_io_channel_iter *i)
10332 {
10333 	struct spdk_bdev_channel_iter *iter = spdk_io_channel_iter_get_ctx(i);
10334 	struct spdk_bdev *bdev = io_channel_iter_get_bdev(i);
10335 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
10336 
10337 	iter->i = i;
10338 	iter->fn(iter, bdev, ch, iter->ctx);
10339 }
10340 
10341 static void
10342 bdev_each_channel_cpl(struct spdk_io_channel_iter *i, int status)
10343 {
10344 	struct spdk_bdev_channel_iter *iter = spdk_io_channel_iter_get_ctx(i);
10345 	struct spdk_bdev *bdev = io_channel_iter_get_bdev(i);
10346 
10347 	iter->i = i;
10348 	iter->cpl(bdev, iter->ctx, status);
10349 
10350 	free(iter);
10351 }
10352 
10353 void
10354 spdk_bdev_for_each_channel(struct spdk_bdev *bdev, spdk_bdev_for_each_channel_msg fn,
10355 			   void *ctx, spdk_bdev_for_each_channel_done cpl)
10356 {
10357 	struct spdk_bdev_channel_iter *iter;
10358 
10359 	assert(bdev != NULL && fn != NULL && ctx != NULL);
10360 
10361 	iter = calloc(1, sizeof(struct spdk_bdev_channel_iter));
10362 	if (iter == NULL) {
10363 		SPDK_ERRLOG("Unable to allocate iterator\n");
10364 		assert(false);
10365 		return;
10366 	}
10367 
10368 	iter->fn = fn;
10369 	iter->cpl = cpl;
10370 	iter->ctx = ctx;
10371 
10372 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_each_channel_msg,
10373 			      iter, bdev_each_channel_cpl);
10374 }
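/*
 * Illustrative sketch (hypothetical names): visiting every I/O channel of a
 * bdev.  Each per-channel callback runs on the thread that owns the channel
 * and must call spdk_bdev_for_each_channel_continue() exactly once; the
 * completion callback runs on the thread that started the iteration.  Note
 * that ctx must be non-NULL (the function asserts it).
 *
 *	static void
 *	example_per_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
 *			    struct spdk_io_channel *ch, void *ctx)
 *	{
 *		// ... inspect or flush per-channel state here ...
 *		spdk_bdev_for_each_channel_continue(i, 0);
 *	}
 *
 *	static void
 *	example_done(struct spdk_bdev *bdev, void *ctx, int status)
 *	{
 *		SPDK_NOTICELOG("iteration finished with status %d\n", status);
 *	}
 *
 *	spdk_bdev_for_each_channel(bdev, example_per_channel, ctx, example_done);
 */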
10375 
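/*
 * Copy emulation for bdevs that do not support SPDK_BDEV_IO_TYPE_COPY: the
 * request is served through a bounce buffer as get_buf -> bdev_copy_do_read ->
 * bdev_copy_do_write, and the parent copy I/O is completed from the write
 * completion.  Each stage requeues itself via bdev_queue_io_wait_with_cb()
 * when it hits -ENOMEM.
 */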
10376 static void
10377 bdev_copy_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
10378 {
10379 	struct spdk_bdev_io *parent_io = cb_arg;
10380 
10381 	spdk_bdev_free_io(bdev_io);
10382 
10383 	/* Check return status of write */
10384 	parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
10385 	parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
10386 }
10387 
10388 static void
10389 bdev_copy_do_write(void *_bdev_io)
10390 {
10391 	struct spdk_bdev_io *bdev_io = _bdev_io;
10392 	int rc;
10393 
10394 	/* Write blocks */
10395 	rc = spdk_bdev_write_blocks_with_md(bdev_io->internal.desc,
10396 					    spdk_io_channel_from_ctx(bdev_io->internal.ch),
10397 					    bdev_io->u.bdev.iovs[0].iov_base,
10398 					    bdev_io->u.bdev.md_buf, bdev_io->u.bdev.offset_blocks,
10399 					    bdev_io->u.bdev.num_blocks, bdev_copy_do_write_done, bdev_io);
10400 
10401 	if (rc == -ENOMEM) {
10402 		bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_write);
10403 	} else if (rc != 0) {
10404 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10405 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10406 	}
10407 }
10408 
10409 static void
10410 bdev_copy_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
10411 {
10412 	struct spdk_bdev_io *parent_io = cb_arg;
10413 
10414 	spdk_bdev_free_io(bdev_io);
10415 
10416 	/* Check return status of read */
10417 	if (!success) {
10418 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10419 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
10420 		return;
10421 	}
10422 
10423 	/* Do write */
10424 	bdev_copy_do_write(parent_io);
10425 }
10426 
10427 static void
10428 bdev_copy_do_read(void *_bdev_io)
10429 {
10430 	struct spdk_bdev_io *bdev_io = _bdev_io;
10431 	int rc;
10432 
10433 	/* Read blocks */
10434 	rc = spdk_bdev_read_blocks_with_md(bdev_io->internal.desc,
10435 					   spdk_io_channel_from_ctx(bdev_io->internal.ch),
10436 					   bdev_io->u.bdev.iovs[0].iov_base,
10437 					   bdev_io->u.bdev.md_buf, bdev_io->u.bdev.copy.src_offset_blocks,
10438 					   bdev_io->u.bdev.num_blocks, bdev_copy_do_read_done, bdev_io);
10439 
10440 	if (rc == -ENOMEM) {
10441 		bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_read);
10442 	} else if (rc != 0) {
10443 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10444 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10445 	}
10446 }
10447 
10448 static void
10449 bdev_copy_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
10450 {
10451 	if (!success) {
10452 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10453 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10454 		return;
10455 	}
10456 
10457 	bdev_copy_do_read(bdev_io);
10458 }
10459 
10460 int
10461 spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
10462 		      uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
10463 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
10464 {
10465 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10466 	struct spdk_bdev_io *bdev_io;
10467 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
10468 
10469 	if (!desc->write) {
10470 		return -EBADF;
10471 	}
10472 
10473 	if (!bdev_io_valid_blocks(bdev, dst_offset_blocks, num_blocks) ||
10474 	    !bdev_io_valid_blocks(bdev, src_offset_blocks, num_blocks)) {
10475 		SPDK_DEBUGLOG(bdev,
10476 			      "Invalid offset or number of blocks: dst %" PRIu64 ", src %" PRIu64 ", count %" PRIu64 "\n",
10477 			      dst_offset_blocks, src_offset_blocks, num_blocks);
10478 		return -EINVAL;
10479 	}
10480 
10481 	bdev_io = bdev_channel_get_io(channel);
10482 	if (!bdev_io) {
10483 		return -ENOMEM;
10484 	}
10485 
10486 	bdev_io->internal.ch = channel;
10487 	bdev_io->internal.desc = desc;
10488 	bdev_io->type = SPDK_BDEV_IO_TYPE_COPY;
10489 
10490 	bdev_io->u.bdev.offset_blocks = dst_offset_blocks;
10491 	bdev_io->u.bdev.copy.src_offset_blocks = src_offset_blocks;
10492 	bdev_io->u.bdev.num_blocks = num_blocks;
10493 	bdev_io->u.bdev.memory_domain = NULL;
10494 	bdev_io->u.bdev.memory_domain_ctx = NULL;
10495 	bdev_io->u.bdev.iovs = NULL;
10496 	bdev_io->u.bdev.iovcnt = 0;
10497 	bdev_io->u.bdev.md_buf = NULL;
10498 	bdev_io->u.bdev.accel_sequence = NULL;
10499 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
10500 
10501 	if (dst_offset_blocks == src_offset_blocks || num_blocks == 0) {
10502 		spdk_thread_send_msg(spdk_get_thread(), bdev_io_complete_cb, bdev_io);
10503 		return 0;
10504 	}
10505 
10506 
10507 	/* If the copy is large enough to require splitting, go through the generic
10508 	 * split logic regardless of whether SPDK_BDEV_IO_TYPE_COPY is supported.
10509 	 *
10510 	 * Otherwise, submit the copy request directly when SPDK_BDEV_IO_TYPE_COPY is
10511 	 * supported, or emulate it with regular read and write requests when it is not.
10512 	 */
10513 	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY) ||
10514 	    bdev_io->internal.split) {
10515 		bdev_io_submit(bdev_io);
10516 		return 0;
10517 	}
10518 
10519 	spdk_bdev_io_get_buf(bdev_io, bdev_copy_get_buf_cb, num_blocks * spdk_bdev_get_block_size(bdev));
10520 
10521 	return 0;
10522 }
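/*
 * Illustrative sketch (hypothetical names): copying a range of blocks within
 * one bdev.  The descriptor must have been opened writable; a -ENOMEM return
 * is typically retried via spdk_bdev_queue_io_wait().
 *
 *	static void
 *	example_copy_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 *	{
 *		spdk_bdev_free_io(bdev_io);
 *		SPDK_NOTICELOG("copy %s\n", success ? "succeeded" : "failed");
 *	}
 *
 *	// Copy 16 blocks starting at block 0 to the range starting at block 1024.
 *	int rc = spdk_bdev_copy_blocks(desc, ch, 1024, 0, 16, example_copy_done, NULL);
 *
 *	if (rc != 0) {
 *		SPDK_ERRLOG("failed to submit copy: %s\n", spdk_strerror(-rc));
 *	}
 */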
10523 
10524 SPDK_LOG_REGISTER_COMPONENT(bdev)
10525 
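/*
 * Register the bdev tracepoints: BDEV_IO_START/BDEV_IO_DONE on the bdev_io
 * object, per-thread I/O channel create/destroy events, and relations that
 * tie the nvme bdev tracepoints back to the same bdev_io object.
 */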
10526 SPDK_TRACE_REGISTER_FN(bdev_trace, "bdev", TRACE_GROUP_BDEV)
10527 {
10528 	struct spdk_trace_tpoint_opts opts[] = {
10529 		{
10530 			"BDEV_IO_START", TRACE_BDEV_IO_START,
10531 			OWNER_TYPE_BDEV, OBJECT_BDEV_IO, 1,
10532 			{
10533 				{ "type", SPDK_TRACE_ARG_TYPE_INT, 8 },
10534 				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
10535 				{ "offset", SPDK_TRACE_ARG_TYPE_INT, 8 },
10536 				{ "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
10537 			}
10538 		},
10539 		{
10540 			"BDEV_IO_DONE", TRACE_BDEV_IO_DONE,
10541 			OWNER_TYPE_BDEV, OBJECT_BDEV_IO, 0,
10542 			{
10543 				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
10544 				{ "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
10545 			}
10546 		},
10547 		{
10548 			"BDEV_IOCH_CREATE", TRACE_BDEV_IOCH_CREATE,
10549 			OWNER_TYPE_BDEV, OBJECT_NONE, 0,
10550 			{
10551 				{ "tid", SPDK_TRACE_ARG_TYPE_INT, 8 }
10552 			}
10553 		},
10554 		{
10555 			"BDEV_IOCH_DESTROY", TRACE_BDEV_IOCH_DESTROY,
10556 			OWNER_TYPE_BDEV, OBJECT_NONE, 0,
10557 			{
10558 				{ "tid", SPDK_TRACE_ARG_TYPE_INT, 8 }
10559 			}
10560 		},
10561 	};
10562 
10563 
10564 	spdk_trace_register_owner_type(OWNER_TYPE_BDEV, 'b');
10565 	spdk_trace_register_object(OBJECT_BDEV_IO, 'i');
10566 	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
10567 	spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_START, OBJECT_BDEV_IO, 0);
10568 	spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_DONE, OBJECT_BDEV_IO, 0);
10569 }
10570