xref: /spdk/lib/bdev/bdev.c (revision b32cfc467b3f3a6a7bd00631334612f6181f7c08)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/bdev.h"
37 
38 #include "spdk/config.h"
39 #include "spdk/env.h"
40 #include "spdk/thread.h"
41 #include "spdk/likely.h"
42 #include "spdk/queue.h"
43 #include "spdk/nvme_spec.h"
44 #include "spdk/scsi_spec.h"
45 #include "spdk/notify.h"
46 #include "spdk/util.h"
47 #include "spdk/trace.h"
48 
49 #include "spdk/bdev_module.h"
50 #include "spdk/log.h"
51 #include "spdk/string.h"
52 
53 #include "bdev_internal.h"
54 
55 #ifdef SPDK_CONFIG_VTUNE
56 #include "ittnotify.h"
57 #include "ittnotify_types.h"
58 int __itt_init_ittlib(const char *, __itt_group_id);
59 #endif
60 
61 #define SPDK_BDEV_IO_POOL_SIZE			(64 * 1024 - 1)
62 #define SPDK_BDEV_IO_CACHE_SIZE			256
63 #define SPDK_BDEV_AUTO_EXAMINE			true
64 #define BUF_SMALL_POOL_SIZE			8191
65 #define BUF_LARGE_POOL_SIZE			1023
66 #define NOMEM_THRESHOLD_COUNT			8
67 #define ZERO_BUFFER_SIZE			0x100000
68 
69 #define OWNER_BDEV		0x2
70 
71 #define OBJECT_BDEV_IO		0x2
72 
73 #define TRACE_GROUP_BDEV	0x3
74 #define TRACE_BDEV_IO_START	SPDK_TPOINT_ID(TRACE_GROUP_BDEV, 0x0)
75 #define TRACE_BDEV_IO_DONE	SPDK_TPOINT_ID(TRACE_GROUP_BDEV, 0x1)
76 
77 #define SPDK_BDEV_QOS_TIMESLICE_IN_USEC		1000
78 #define SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE	1
79 #define SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE	512
80 #define SPDK_BDEV_QOS_MIN_IOS_PER_SEC		1000
81 #define SPDK_BDEV_QOS_MIN_BYTES_PER_SEC		(1024 * 1024)
82 #define SPDK_BDEV_QOS_LIMIT_NOT_DEFINED		UINT64_MAX
83 #define SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC	1000
84 
85 #define SPDK_BDEV_POOL_ALIGNMENT 512
86 
87 static const char *qos_rpc_type[] = {"rw_ios_per_sec",
88 				     "rw_mbytes_per_sec", "r_mbytes_per_sec", "w_mbytes_per_sec"
89 				    };
90 
91 TAILQ_HEAD(spdk_bdev_list, spdk_bdev);
92 
93 struct spdk_bdev_mgr {
94 	struct spdk_mempool *bdev_io_pool;
95 
96 	struct spdk_mempool *buf_small_pool;
97 	struct spdk_mempool *buf_large_pool;
98 
99 	void *zero_buffer;
100 
101 	TAILQ_HEAD(bdev_module_list, spdk_bdev_module) bdev_modules;
102 
103 	struct spdk_bdev_list bdevs;
104 
105 	bool init_complete;
106 	bool module_init_complete;
107 
108 	pthread_mutex_t mutex;
109 
110 #ifdef SPDK_CONFIG_VTUNE
111 	__itt_domain	*domain;
112 #endif
113 };
114 
115 static struct spdk_bdev_mgr g_bdev_mgr = {
116 	.bdev_modules = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdev_modules),
117 	.bdevs = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdevs),
118 	.init_complete = false,
119 	.module_init_complete = false,
120 	.mutex = PTHREAD_MUTEX_INITIALIZER,
121 };
122 
123 typedef void (*lock_range_cb)(void *ctx, int status);
124 
125 struct lba_range {
126 	uint64_t			offset;
127 	uint64_t			length;
128 	void				*locked_ctx;
129 	struct spdk_bdev_channel	*owner_ch;
130 	TAILQ_ENTRY(lba_range)		tailq;
131 };
132 
133 static struct spdk_bdev_opts	g_bdev_opts = {
134 	.bdev_io_pool_size = SPDK_BDEV_IO_POOL_SIZE,
135 	.bdev_io_cache_size = SPDK_BDEV_IO_CACHE_SIZE,
136 	.bdev_auto_examine = SPDK_BDEV_AUTO_EXAMINE,
137 	.small_buf_pool_size = BUF_SMALL_POOL_SIZE,
138 	.large_buf_pool_size = BUF_LARGE_POOL_SIZE,
139 };
140 
141 static spdk_bdev_init_cb	g_init_cb_fn = NULL;
142 static void			*g_init_cb_arg = NULL;
143 
144 static spdk_bdev_fini_cb	g_fini_cb_fn = NULL;
145 static void			*g_fini_cb_arg = NULL;
146 static struct spdk_thread	*g_fini_thread = NULL;
147 
148 struct spdk_bdev_qos_limit {
149 	/** IOs or bytes allowed per second (i.e., 1s). */
150 	uint64_t limit;
151 
152 	/** Remaining IOs or bytes allowed in current timeslice (e.g., 1ms).
153 	 *  For remaining bytes, allowed to run negative if an I/O is submitted when
154 	 *  some bytes are remaining, but the I/O is bigger than that amount. The
155 	 *  excess will be deducted from the next timeslice.
156 	 */
157 	int64_t remaining_this_timeslice;
158 
159 	/** Minimum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
160 	uint32_t min_per_timeslice;
161 
162 	/** Maximum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
163 	uint32_t max_per_timeslice;
164 
165 	/** Function to check whether to queue the IO. */
166 	bool (*queue_io)(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
167 
168 	/** Function to update for the submitted IO. */
169 	void (*update_quota)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
170 };
171 
172 struct spdk_bdev_qos {
173 	/** Types of structure of rate limits. */
174 	struct spdk_bdev_qos_limit rate_limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
175 
176 	/** The channel that all I/O are funneled through. */
177 	struct spdk_bdev_channel *ch;
178 
179 	/** The thread on which the poller is running. */
180 	struct spdk_thread *thread;
181 
182 	/** Queue of I/O waiting to be issued. */
183 	bdev_io_tailq_t queued;
184 
185 	/** Size of a timeslice in tsc ticks. */
186 	uint64_t timeslice_size;
187 
188 	/** Timestamp of start of last timeslice. */
189 	uint64_t last_timeslice;
190 
191 	/** Poller that processes queued I/O commands each time slice. */
192 	struct spdk_poller *poller;
193 };
194 
195 struct spdk_bdev_mgmt_channel {
196 	bdev_io_stailq_t need_buf_small;
197 	bdev_io_stailq_t need_buf_large;
198 
199 	/*
200 	 * Each thread keeps a cache of bdev_io - this allows
201 	 *  bdev threads which are *not* DPDK threads to still
202 	 *  benefit from a per-thread bdev_io cache.  Without
203 	 *  this, non-DPDK threads fetching from the mempool
204 	 *  incur a cmpxchg on get and put.
205 	 */
206 	bdev_io_stailq_t per_thread_cache;
207 	uint32_t	per_thread_cache_count;
208 	uint32_t	bdev_io_cache_size;
209 
210 	TAILQ_HEAD(, spdk_bdev_shared_resource)	shared_resources;
211 	TAILQ_HEAD(, spdk_bdev_io_wait_entry)	io_wait_queue;
212 };
213 
214 /*
215  * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
216  * will queue here their IO that awaits retry. It makes it possible to retry sending
217  * IO to one bdev after IO from other bdev completes.
218  */
219 struct spdk_bdev_shared_resource {
220 	/* The bdev management channel */
221 	struct spdk_bdev_mgmt_channel *mgmt_ch;
222 
223 	/*
224 	 * Count of I/O submitted to bdev module and waiting for completion.
225 	 * Incremented before submit_request() is called on an spdk_bdev_io.
226 	 */
227 	uint64_t		io_outstanding;
228 
229 	/*
230 	 * Queue of IO awaiting retry because of a previous NOMEM status returned
231 	 *  on this channel.
232 	 */
233 	bdev_io_tailq_t		nomem_io;
234 
235 	/*
236 	 * Threshold which io_outstanding must drop to before retrying nomem_io.
237 	 */
238 	uint64_t		nomem_threshold;
239 
240 	/* I/O channel allocated by a bdev module */
241 	struct spdk_io_channel	*shared_ch;
242 
243 	/* Refcount of bdev channels using this resource */
244 	uint32_t		ref;
245 
246 	TAILQ_ENTRY(spdk_bdev_shared_resource) link;
247 };
248 
249 #define BDEV_CH_RESET_IN_PROGRESS	(1 << 0)
250 #define BDEV_CH_QOS_ENABLED		(1 << 1)
251 
252 struct spdk_bdev_channel {
253 	struct spdk_bdev	*bdev;
254 
255 	/* The channel for the underlying device */
256 	struct spdk_io_channel	*channel;
257 
258 	/* Per io_device per thread data */
259 	struct spdk_bdev_shared_resource *shared_resource;
260 
261 	struct spdk_bdev_io_stat stat;
262 
263 	/*
264 	 * Count of I/O submitted to the underlying dev module through this channel
265 	 * and waiting for completion.
266 	 */
267 	uint64_t		io_outstanding;
268 
269 	/*
270 	 * List of all submitted I/Os including I/O that are generated via splitting.
271 	 */
272 	bdev_io_tailq_t		io_submitted;
273 
274 	/*
275 	 * List of spdk_bdev_io that are currently queued because they write to a locked
276 	 * LBA range.
277 	 */
278 	bdev_io_tailq_t		io_locked;
279 
280 	uint32_t		flags;
281 
282 	struct spdk_histogram_data *histogram;
283 
284 #ifdef SPDK_CONFIG_VTUNE
285 	uint64_t		start_tsc;
286 	uint64_t		interval_tsc;
287 	__itt_string_handle	*handle;
288 	struct spdk_bdev_io_stat prev_stat;
289 #endif
290 
291 	bdev_io_tailq_t		queued_resets;
292 
293 	lba_range_tailq_t	locked_ranges;
294 };
295 
296 struct media_event_entry {
297 	struct spdk_bdev_media_event	event;
298 	TAILQ_ENTRY(media_event_entry)	tailq;
299 };
300 
301 #define MEDIA_EVENT_POOL_SIZE 64
302 
303 struct spdk_bdev_desc {
304 	struct spdk_bdev		*bdev;
305 	struct spdk_thread		*thread;
306 	struct {
307 		spdk_bdev_event_cb_t event_fn;
308 		void *ctx;
309 	}				callback;
310 	bool				closed;
311 	bool				write;
312 	pthread_mutex_t			mutex;
313 	uint32_t			refs;
314 	TAILQ_HEAD(, media_event_entry)	pending_media_events;
315 	TAILQ_HEAD(, media_event_entry)	free_media_events;
316 	struct media_event_entry	*media_events_buffer;
317 	TAILQ_ENTRY(spdk_bdev_desc)	link;
318 
319 	uint64_t		timeout_in_sec;
320 	spdk_bdev_io_timeout_cb	cb_fn;
321 	void			*cb_arg;
322 	struct spdk_poller	*io_timeout_poller;
323 };
324 
325 struct spdk_bdev_iostat_ctx {
326 	struct spdk_bdev_io_stat *stat;
327 	spdk_bdev_get_device_stat_cb cb;
328 	void *cb_arg;
329 };
330 
331 struct set_qos_limit_ctx {
332 	void (*cb_fn)(void *cb_arg, int status);
333 	void *cb_arg;
334 	struct spdk_bdev *bdev;
335 };
336 
337 #define __bdev_to_io_dev(bdev)		(((char *)bdev) + 1)
338 #define __bdev_from_io_dev(io_dev)	((struct spdk_bdev *)(((char *)io_dev) - 1))
339 
340 static void bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
341 static void bdev_write_zero_buffer_next(void *_bdev_io);
342 
343 static void bdev_enable_qos_msg(struct spdk_io_channel_iter *i);
344 static void bdev_enable_qos_done(struct spdk_io_channel_iter *i, int status);
345 
346 static int
347 bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
348 			  struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
349 			  uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg);
350 static int
351 bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
352 			   struct iovec *iov, int iovcnt, void *md_buf,
353 			   uint64_t offset_blocks, uint64_t num_blocks,
354 			   spdk_bdev_io_completion_cb cb, void *cb_arg);
355 
356 static int
357 bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
358 		    uint64_t offset, uint64_t length,
359 		    lock_range_cb cb_fn, void *cb_arg);
360 
361 static int
362 bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
363 		      uint64_t offset, uint64_t length,
364 		      lock_range_cb cb_fn, void *cb_arg);
365 
366 static inline void bdev_io_complete(void *ctx);
367 
368 static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
369 static bool bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort);
370 
371 void
372 spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size)
373 {
374 	if (!opts) {
375 		SPDK_ERRLOG("opts should not be NULL\n");
376 		return;
377 	}
378 
379 	if (!opts_size) {
380 		SPDK_ERRLOG("opts_size should not be zero value\n");
381 		return;
382 	}
383 
384 	opts->opts_size = opts_size;
385 
386 #define SET_FIELD(field) \
387 	if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts_size) { \
388 		opts->field = g_bdev_opts.field; \
389 	} \
390 
391 	SET_FIELD(bdev_io_pool_size);
392 	SET_FIELD(bdev_io_cache_size);
393 	SET_FIELD(bdev_auto_examine);
394 	SET_FIELD(small_buf_pool_size);
395 	SET_FIELD(large_buf_pool_size);
396 
397 	/* Do not remove this statement, you should always update this statement when you adding a new field,
398 	 * and do not forget to add the SET_FIELD statement for your added field. */
399 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_opts) == 32, "Incorrect size");
400 
401 #undef SET_FIELD
402 }
403 
404 int
405 spdk_bdev_set_opts(struct spdk_bdev_opts *opts)
406 {
407 	uint32_t min_pool_size;
408 
409 	if (!opts) {
410 		SPDK_ERRLOG("opts cannot be NULL\n");
411 		return -1;
412 	}
413 
414 	if (!opts->opts_size) {
415 		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
416 		return -1;
417 	}
418 
419 	/*
420 	 * Add 1 to the thread count to account for the extra mgmt_ch that gets created during subsystem
421 	 *  initialization.  A second mgmt_ch will be created on the same thread when the application starts
422 	 *  but before the deferred put_io_channel event is executed for the first mgmt_ch.
423 	 */
424 	min_pool_size = opts->bdev_io_cache_size * (spdk_thread_get_count() + 1);
425 	if (opts->bdev_io_pool_size < min_pool_size) {
426 		SPDK_ERRLOG("bdev_io_pool_size %" PRIu32 " is not compatible with bdev_io_cache_size %" PRIu32
427 			    " and %" PRIu32 " threads\n", opts->bdev_io_pool_size, opts->bdev_io_cache_size,
428 			    spdk_thread_get_count());
429 		SPDK_ERRLOG("bdev_io_pool_size must be at least %" PRIu32 "\n", min_pool_size);
430 		return -1;
431 	}
432 
433 	if (opts->small_buf_pool_size < BUF_SMALL_POOL_SIZE) {
434 		SPDK_ERRLOG("small_buf_pool_size must be at least %" PRIu32 "\n", BUF_SMALL_POOL_SIZE);
435 		return -1;
436 	}
437 
438 	if (opts->large_buf_pool_size < BUF_LARGE_POOL_SIZE) {
439 		SPDK_ERRLOG("large_buf_pool_size must be at least %" PRIu32 "\n", BUF_LARGE_POOL_SIZE);
440 		return -1;
441 	}
442 
443 #define SET_FIELD(field) \
444         if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
445                 g_bdev_opts.field = opts->field; \
446         } \
447 
448 	SET_FIELD(bdev_io_pool_size);
449 	SET_FIELD(bdev_io_cache_size);
450 	SET_FIELD(bdev_auto_examine);
451 	SET_FIELD(small_buf_pool_size);
452 	SET_FIELD(large_buf_pool_size);
453 
454 	g_bdev_opts.opts_size = opts->opts_size;
455 
456 #undef SET_FIELD
457 
458 	return 0;
459 }
460 
461 struct spdk_bdev_wait_for_examine_ctx {
462 	struct spdk_poller              *poller;
463 	spdk_bdev_wait_for_examine_cb	cb_fn;
464 	void				*cb_arg;
465 };
466 
467 static bool
468 bdev_module_all_actions_completed(void);
469 
470 static int
471 bdev_wait_for_examine_cb(void *arg)
472 {
473 	struct spdk_bdev_wait_for_examine_ctx *ctx = arg;
474 
475 	if (!bdev_module_all_actions_completed()) {
476 		return SPDK_POLLER_IDLE;
477 	}
478 
479 	spdk_poller_unregister(&ctx->poller);
480 	ctx->cb_fn(ctx->cb_arg);
481 	free(ctx);
482 
483 	return SPDK_POLLER_BUSY;
484 }
485 
486 int
487 spdk_bdev_wait_for_examine(spdk_bdev_wait_for_examine_cb cb_fn, void *cb_arg)
488 {
489 	struct spdk_bdev_wait_for_examine_ctx *ctx;
490 
491 	ctx = calloc(1, sizeof(*ctx));
492 	if (ctx == NULL) {
493 		return -ENOMEM;
494 	}
495 	ctx->cb_fn = cb_fn;
496 	ctx->cb_arg = cb_arg;
497 	ctx->poller = SPDK_POLLER_REGISTER(bdev_wait_for_examine_cb, ctx, 0);
498 
499 	return 0;
500 }
501 
502 struct spdk_bdev_examine_item {
503 	char *name;
504 	TAILQ_ENTRY(spdk_bdev_examine_item) link;
505 };
506 
507 TAILQ_HEAD(spdk_bdev_examine_allowlist, spdk_bdev_examine_item);
508 
509 struct spdk_bdev_examine_allowlist g_bdev_examine_allowlist = TAILQ_HEAD_INITIALIZER(
510 			g_bdev_examine_allowlist);
511 
512 static inline bool
513 bdev_examine_allowlist_check(const char *name)
514 {
515 	struct spdk_bdev_examine_item *item;
516 	TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
517 		if (strcmp(name, item->name) == 0) {
518 			return true;
519 		}
520 	}
521 	return false;
522 }
523 
524 static inline void
525 bdev_examine_allowlist_free(void)
526 {
527 	struct spdk_bdev_examine_item *item;
528 	while (!TAILQ_EMPTY(&g_bdev_examine_allowlist)) {
529 		item = TAILQ_FIRST(&g_bdev_examine_allowlist);
530 		TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
531 		free(item->name);
532 		free(item);
533 	}
534 }
535 
536 static inline bool
537 bdev_in_examine_allowlist(struct spdk_bdev *bdev)
538 {
539 	struct spdk_bdev_alias *tmp;
540 	if (bdev_examine_allowlist_check(bdev->name)) {
541 		return true;
542 	}
543 	TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
544 		if (bdev_examine_allowlist_check(tmp->alias)) {
545 			return true;
546 		}
547 	}
548 	return false;
549 }
550 
551 static inline bool
552 bdev_ok_to_examine(struct spdk_bdev *bdev)
553 {
554 	if (g_bdev_opts.bdev_auto_examine) {
555 		return true;
556 	} else {
557 		return bdev_in_examine_allowlist(bdev);
558 	}
559 }
560 
561 static void
562 bdev_examine(struct spdk_bdev *bdev)
563 {
564 	struct spdk_bdev_module *module;
565 	uint32_t action;
566 
567 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
568 		if (module->examine_config && bdev_ok_to_examine(bdev)) {
569 			action = module->internal.action_in_progress;
570 			module->internal.action_in_progress++;
571 			module->examine_config(bdev);
572 			if (action != module->internal.action_in_progress) {
573 				SPDK_ERRLOG("examine_config for module %s did not call spdk_bdev_module_examine_done()\n",
574 					    module->name);
575 			}
576 		}
577 	}
578 
579 	if (bdev->internal.claim_module && bdev_ok_to_examine(bdev)) {
580 		if (bdev->internal.claim_module->examine_disk) {
581 			bdev->internal.claim_module->internal.action_in_progress++;
582 			bdev->internal.claim_module->examine_disk(bdev);
583 		}
584 		return;
585 	}
586 
587 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
588 		if (module->examine_disk && bdev_ok_to_examine(bdev)) {
589 			module->internal.action_in_progress++;
590 			module->examine_disk(bdev);
591 		}
592 	}
593 }
594 
595 int
596 spdk_bdev_examine(const char *name)
597 {
598 	struct spdk_bdev *bdev;
599 	struct spdk_bdev_examine_item *item;
600 
601 	if (g_bdev_opts.bdev_auto_examine) {
602 		SPDK_ERRLOG("Manual examine is not allowed if auto examine is enabled");
603 		return -EINVAL;
604 	}
605 
606 	if (bdev_examine_allowlist_check(name)) {
607 		SPDK_ERRLOG("Duplicate bdev name for manual examine: %s\n", name);
608 		return -EEXIST;
609 	}
610 
611 	item = calloc(1, sizeof(*item));
612 	if (!item) {
613 		return -ENOMEM;
614 	}
615 	item->name = strdup(name);
616 	if (!item->name) {
617 		free(item);
618 		return -ENOMEM;
619 	}
620 	TAILQ_INSERT_TAIL(&g_bdev_examine_allowlist, item, link);
621 
622 	bdev = spdk_bdev_get_by_name(name);
623 	if (bdev) {
624 		bdev_examine(bdev);
625 	}
626 	return 0;
627 }
628 
629 static inline void
630 bdev_examine_allowlist_config_json(struct spdk_json_write_ctx *w)
631 {
632 	struct spdk_bdev_examine_item *item;
633 	TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
634 		spdk_json_write_object_begin(w);
635 		spdk_json_write_named_string(w, "method", "bdev_examine");
636 		spdk_json_write_named_object_begin(w, "params");
637 		spdk_json_write_named_string(w, "name", item->name);
638 		spdk_json_write_object_end(w);
639 		spdk_json_write_object_end(w);
640 	}
641 }
642 
643 struct spdk_bdev *
644 spdk_bdev_first(void)
645 {
646 	struct spdk_bdev *bdev;
647 
648 	bdev = TAILQ_FIRST(&g_bdev_mgr.bdevs);
649 	if (bdev) {
650 		SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
651 	}
652 
653 	return bdev;
654 }
655 
656 struct spdk_bdev *
657 spdk_bdev_next(struct spdk_bdev *prev)
658 {
659 	struct spdk_bdev *bdev;
660 
661 	bdev = TAILQ_NEXT(prev, internal.link);
662 	if (bdev) {
663 		SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
664 	}
665 
666 	return bdev;
667 }
668 
669 static struct spdk_bdev *
670 _bdev_next_leaf(struct spdk_bdev *bdev)
671 {
672 	while (bdev != NULL) {
673 		if (bdev->internal.claim_module == NULL) {
674 			return bdev;
675 		} else {
676 			bdev = TAILQ_NEXT(bdev, internal.link);
677 		}
678 	}
679 
680 	return bdev;
681 }
682 
683 struct spdk_bdev *
684 spdk_bdev_first_leaf(void)
685 {
686 	struct spdk_bdev *bdev;
687 
688 	bdev = _bdev_next_leaf(TAILQ_FIRST(&g_bdev_mgr.bdevs));
689 
690 	if (bdev) {
691 		SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
692 	}
693 
694 	return bdev;
695 }
696 
697 struct spdk_bdev *
698 spdk_bdev_next_leaf(struct spdk_bdev *prev)
699 {
700 	struct spdk_bdev *bdev;
701 
702 	bdev = _bdev_next_leaf(TAILQ_NEXT(prev, internal.link));
703 
704 	if (bdev) {
705 		SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
706 	}
707 
708 	return bdev;
709 }
710 
711 struct spdk_bdev *
712 spdk_bdev_get_by_name(const char *bdev_name)
713 {
714 	struct spdk_bdev_alias *tmp;
715 	struct spdk_bdev *bdev = spdk_bdev_first();
716 
717 	while (bdev != NULL) {
718 		if (strcmp(bdev_name, bdev->name) == 0) {
719 			return bdev;
720 		}
721 
722 		TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
723 			if (strcmp(bdev_name, tmp->alias) == 0) {
724 				return bdev;
725 			}
726 		}
727 
728 		bdev = spdk_bdev_next(bdev);
729 	}
730 
731 	return NULL;
732 }
733 
734 void
735 spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
736 {
737 	struct iovec *iovs;
738 
739 	if (bdev_io->u.bdev.iovs == NULL) {
740 		bdev_io->u.bdev.iovs = &bdev_io->iov;
741 		bdev_io->u.bdev.iovcnt = 1;
742 	}
743 
744 	iovs = bdev_io->u.bdev.iovs;
745 
746 	assert(iovs != NULL);
747 	assert(bdev_io->u.bdev.iovcnt >= 1);
748 
749 	iovs[0].iov_base = buf;
750 	iovs[0].iov_len = len;
751 }
752 
753 void
754 spdk_bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
755 {
756 	assert((len / spdk_bdev_get_md_size(bdev_io->bdev)) >= bdev_io->u.bdev.num_blocks);
757 	bdev_io->u.bdev.md_buf = md_buf;
758 }
759 
760 static bool
761 _is_buf_allocated(const struct iovec *iovs)
762 {
763 	if (iovs == NULL) {
764 		return false;
765 	}
766 
767 	return iovs[0].iov_base != NULL;
768 }
769 
770 static bool
771 _are_iovs_aligned(struct iovec *iovs, int iovcnt, uint32_t alignment)
772 {
773 	int i;
774 	uintptr_t iov_base;
775 
776 	if (spdk_likely(alignment == 1)) {
777 		return true;
778 	}
779 
780 	for (i = 0; i < iovcnt; i++) {
781 		iov_base = (uintptr_t)iovs[i].iov_base;
782 		if ((iov_base & (alignment - 1)) != 0) {
783 			return false;
784 		}
785 	}
786 
787 	return true;
788 }
789 
790 static void
791 _copy_iovs_to_buf(void *buf, size_t buf_len, struct iovec *iovs, int iovcnt)
792 {
793 	int i;
794 	size_t len;
795 
796 	for (i = 0; i < iovcnt; i++) {
797 		len = spdk_min(iovs[i].iov_len, buf_len);
798 		memcpy(buf, iovs[i].iov_base, len);
799 		buf += len;
800 		buf_len -= len;
801 	}
802 }
803 
804 static void
805 _copy_buf_to_iovs(struct iovec *iovs, int iovcnt, void *buf, size_t buf_len)
806 {
807 	int i;
808 	size_t len;
809 
810 	for (i = 0; i < iovcnt; i++) {
811 		len = spdk_min(iovs[i].iov_len, buf_len);
812 		memcpy(iovs[i].iov_base, buf, len);
813 		buf += len;
814 		buf_len -= len;
815 	}
816 }
817 
818 static void
819 _bdev_io_set_bounce_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
820 {
821 	/* save original iovec */
822 	bdev_io->internal.orig_iovs = bdev_io->u.bdev.iovs;
823 	bdev_io->internal.orig_iovcnt = bdev_io->u.bdev.iovcnt;
824 	/* set bounce iov */
825 	bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_iov;
826 	bdev_io->u.bdev.iovcnt = 1;
827 	/* set bounce buffer for this operation */
828 	bdev_io->u.bdev.iovs[0].iov_base = buf;
829 	bdev_io->u.bdev.iovs[0].iov_len = len;
830 	/* if this is write path, copy data from original buffer to bounce buffer */
831 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
832 		_copy_iovs_to_buf(buf, len, bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovcnt);
833 	}
834 }
835 
836 static void
837 _bdev_io_set_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
838 {
839 	/* save original md_buf */
840 	bdev_io->internal.orig_md_buf = bdev_io->u.bdev.md_buf;
841 	/* set bounce md_buf */
842 	bdev_io->u.bdev.md_buf = md_buf;
843 
844 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
845 		memcpy(md_buf, bdev_io->internal.orig_md_buf, len);
846 	}
847 }
848 
849 static void
850 bdev_io_get_buf_complete(struct spdk_bdev_io *bdev_io, void *buf, bool status)
851 {
852 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
853 
854 	if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
855 		bdev_io->internal.get_aux_buf_cb(ch, bdev_io, buf);
856 		bdev_io->internal.get_aux_buf_cb = NULL;
857 	} else {
858 		assert(bdev_io->internal.get_buf_cb != NULL);
859 		bdev_io->internal.buf = buf;
860 		bdev_io->internal.get_buf_cb(ch, bdev_io, status);
861 		bdev_io->internal.get_buf_cb = NULL;
862 	}
863 }
864 
865 static void
866 _bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t len)
867 {
868 	struct spdk_bdev *bdev = bdev_io->bdev;
869 	bool buf_allocated;
870 	uint64_t md_len, alignment;
871 	void *aligned_buf;
872 
873 	if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
874 		bdev_io_get_buf_complete(bdev_io, buf, true);
875 		return;
876 	}
877 
878 	alignment = spdk_bdev_get_buf_align(bdev);
879 	buf_allocated = _is_buf_allocated(bdev_io->u.bdev.iovs);
880 	aligned_buf = (void *)(((uintptr_t)buf + (alignment - 1)) & ~(alignment - 1));
881 
882 	if (buf_allocated) {
883 		_bdev_io_set_bounce_buf(bdev_io, aligned_buf, len);
884 	} else {
885 		spdk_bdev_io_set_buf(bdev_io, aligned_buf, len);
886 	}
887 
888 	if (spdk_bdev_is_md_separate(bdev)) {
889 		aligned_buf = (char *)aligned_buf + len;
890 		md_len = bdev_io->u.bdev.num_blocks * bdev->md_len;
891 
892 		assert(((uintptr_t)aligned_buf & (alignment - 1)) == 0);
893 
894 		if (bdev_io->u.bdev.md_buf != NULL) {
895 			_bdev_io_set_bounce_md_buf(bdev_io, aligned_buf, md_len);
896 		} else {
897 			spdk_bdev_io_set_md_buf(bdev_io, aligned_buf, md_len);
898 		}
899 	}
900 	bdev_io_get_buf_complete(bdev_io, buf, true);
901 }
902 
903 static void
904 _bdev_io_put_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t buf_len)
905 {
906 	struct spdk_bdev *bdev = bdev_io->bdev;
907 	struct spdk_mempool *pool;
908 	struct spdk_bdev_io *tmp;
909 	bdev_io_stailq_t *stailq;
910 	struct spdk_bdev_mgmt_channel *ch;
911 	uint64_t md_len, alignment;
912 
913 	md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
914 	alignment = spdk_bdev_get_buf_align(bdev);
915 	ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
916 
917 	if (buf_len + alignment + md_len <= SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_SMALL_BUF_MAX_SIZE) +
918 	    SPDK_BDEV_POOL_ALIGNMENT) {
919 		pool = g_bdev_mgr.buf_small_pool;
920 		stailq = &ch->need_buf_small;
921 	} else {
922 		pool = g_bdev_mgr.buf_large_pool;
923 		stailq = &ch->need_buf_large;
924 	}
925 
926 	if (STAILQ_EMPTY(stailq)) {
927 		spdk_mempool_put(pool, buf);
928 	} else {
929 		tmp = STAILQ_FIRST(stailq);
930 		STAILQ_REMOVE_HEAD(stailq, internal.buf_link);
931 		_bdev_io_set_buf(tmp, buf, tmp->internal.buf_len);
932 	}
933 }
934 
935 static void
936 bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
937 {
938 	assert(bdev_io->internal.buf != NULL);
939 	_bdev_io_put_buf(bdev_io, bdev_io->internal.buf, bdev_io->internal.buf_len);
940 	bdev_io->internal.buf = NULL;
941 }
942 
943 void
944 spdk_bdev_io_put_aux_buf(struct spdk_bdev_io *bdev_io, void *buf)
945 {
946 	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
947 
948 	assert(buf != NULL);
949 	_bdev_io_put_buf(bdev_io, buf, len);
950 }
951 
952 static void
953 _bdev_io_unset_bounce_buf(struct spdk_bdev_io *bdev_io)
954 {
955 	if (spdk_likely(bdev_io->internal.orig_iovcnt == 0)) {
956 		assert(bdev_io->internal.orig_md_buf == NULL);
957 		return;
958 	}
959 
960 	/* if this is read path, copy data from bounce buffer to original buffer */
961 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
962 	    bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
963 		_copy_buf_to_iovs(bdev_io->internal.orig_iovs,
964 				  bdev_io->internal.orig_iovcnt,
965 				  bdev_io->internal.bounce_iov.iov_base,
966 				  bdev_io->internal.bounce_iov.iov_len);
967 	}
968 	/* set original buffer for this io */
969 	bdev_io->u.bdev.iovcnt = bdev_io->internal.orig_iovcnt;
970 	bdev_io->u.bdev.iovs = bdev_io->internal.orig_iovs;
971 	/* disable bouncing buffer for this io */
972 	bdev_io->internal.orig_iovcnt = 0;
973 	bdev_io->internal.orig_iovs = NULL;
974 
975 	/* do the same for metadata buffer */
976 	if (spdk_unlikely(bdev_io->internal.orig_md_buf != NULL)) {
977 		assert(spdk_bdev_is_md_separate(bdev_io->bdev));
978 
979 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
980 		    bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
981 			memcpy(bdev_io->internal.orig_md_buf, bdev_io->u.bdev.md_buf,
982 			       bdev_io->u.bdev.num_blocks * spdk_bdev_get_md_size(bdev_io->bdev));
983 		}
984 
985 		bdev_io->u.bdev.md_buf = bdev_io->internal.orig_md_buf;
986 		bdev_io->internal.orig_md_buf = NULL;
987 	}
988 
989 	/* We want to free the bounce buffer here since we know we're done with it (as opposed
990 	 * to waiting for the conditional free of internal.buf in spdk_bdev_free_io()).
991 	 */
992 	bdev_io_put_buf(bdev_io);
993 }
994 
995 static void
996 bdev_io_get_buf(struct spdk_bdev_io *bdev_io, uint64_t len)
997 {
998 	struct spdk_bdev *bdev = bdev_io->bdev;
999 	struct spdk_mempool *pool;
1000 	bdev_io_stailq_t *stailq;
1001 	struct spdk_bdev_mgmt_channel *mgmt_ch;
1002 	uint64_t alignment, md_len;
1003 	void *buf;
1004 
1005 	alignment = spdk_bdev_get_buf_align(bdev);
1006 	md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
1007 
1008 	if (len + alignment + md_len > SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_LARGE_BUF_MAX_SIZE) +
1009 	    SPDK_BDEV_POOL_ALIGNMENT) {
1010 		SPDK_ERRLOG("Length + alignment %" PRIu64 " is larger than allowed\n",
1011 			    len + alignment);
1012 		bdev_io_get_buf_complete(bdev_io, NULL, false);
1013 		return;
1014 	}
1015 
1016 	mgmt_ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1017 
1018 	bdev_io->internal.buf_len = len;
1019 
1020 	if (len + alignment + md_len <= SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_SMALL_BUF_MAX_SIZE) +
1021 	    SPDK_BDEV_POOL_ALIGNMENT) {
1022 		pool = g_bdev_mgr.buf_small_pool;
1023 		stailq = &mgmt_ch->need_buf_small;
1024 	} else {
1025 		pool = g_bdev_mgr.buf_large_pool;
1026 		stailq = &mgmt_ch->need_buf_large;
1027 	}
1028 
1029 	buf = spdk_mempool_get(pool);
1030 	if (!buf) {
1031 		STAILQ_INSERT_TAIL(stailq, bdev_io, internal.buf_link);
1032 	} else {
1033 		_bdev_io_set_buf(bdev_io, buf, len);
1034 	}
1035 }
1036 
1037 void
1038 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1039 {
1040 	struct spdk_bdev *bdev = bdev_io->bdev;
1041 	uint64_t alignment;
1042 
1043 	assert(cb != NULL);
1044 	bdev_io->internal.get_buf_cb = cb;
1045 
1046 	alignment = spdk_bdev_get_buf_align(bdev);
1047 
1048 	if (_is_buf_allocated(bdev_io->u.bdev.iovs) &&
1049 	    _are_iovs_aligned(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, alignment)) {
1050 		/* Buffer already present and aligned */
1051 		cb(spdk_bdev_io_get_io_channel(bdev_io), bdev_io, true);
1052 		return;
1053 	}
1054 
1055 	bdev_io_get_buf(bdev_io, len);
1056 }
1057 
1058 void
1059 spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
1060 {
1061 	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1062 
1063 	assert(cb != NULL);
1064 	assert(bdev_io->internal.get_aux_buf_cb == NULL);
1065 	bdev_io->internal.get_aux_buf_cb = cb;
1066 	bdev_io_get_buf(bdev_io, len);
1067 }
1068 
1069 static int
1070 bdev_module_get_max_ctx_size(void)
1071 {
1072 	struct spdk_bdev_module *bdev_module;
1073 	int max_bdev_module_size = 0;
1074 
1075 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1076 		if (bdev_module->get_ctx_size && bdev_module->get_ctx_size() > max_bdev_module_size) {
1077 			max_bdev_module_size = bdev_module->get_ctx_size();
1078 		}
1079 	}
1080 
1081 	return max_bdev_module_size;
1082 }
1083 
1084 static void
1085 bdev_qos_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
1086 {
1087 	int i;
1088 	struct spdk_bdev_qos *qos = bdev->internal.qos;
1089 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
1090 
1091 	if (!qos) {
1092 		return;
1093 	}
1094 
1095 	spdk_bdev_get_qos_rate_limits(bdev, limits);
1096 
1097 	spdk_json_write_object_begin(w);
1098 	spdk_json_write_named_string(w, "method", "bdev_set_qos_limit");
1099 
1100 	spdk_json_write_named_object_begin(w, "params");
1101 	spdk_json_write_named_string(w, "name", bdev->name);
1102 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1103 		if (limits[i] > 0) {
1104 			spdk_json_write_named_uint64(w, qos_rpc_type[i], limits[i]);
1105 		}
1106 	}
1107 	spdk_json_write_object_end(w);
1108 
1109 	spdk_json_write_object_end(w);
1110 }
1111 
1112 void
1113 spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
1114 {
1115 	struct spdk_bdev_module *bdev_module;
1116 	struct spdk_bdev *bdev;
1117 
1118 	assert(w != NULL);
1119 
1120 	spdk_json_write_array_begin(w);
1121 
1122 	spdk_json_write_object_begin(w);
1123 	spdk_json_write_named_string(w, "method", "bdev_set_options");
1124 	spdk_json_write_named_object_begin(w, "params");
1125 	spdk_json_write_named_uint32(w, "bdev_io_pool_size", g_bdev_opts.bdev_io_pool_size);
1126 	spdk_json_write_named_uint32(w, "bdev_io_cache_size", g_bdev_opts.bdev_io_cache_size);
1127 	spdk_json_write_named_bool(w, "bdev_auto_examine", g_bdev_opts.bdev_auto_examine);
1128 	spdk_json_write_object_end(w);
1129 	spdk_json_write_object_end(w);
1130 
1131 	bdev_examine_allowlist_config_json(w);
1132 
1133 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1134 		if (bdev_module->config_json) {
1135 			bdev_module->config_json(w);
1136 		}
1137 	}
1138 
1139 	pthread_mutex_lock(&g_bdev_mgr.mutex);
1140 
1141 	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
1142 		if (bdev->fn_table->write_config_json) {
1143 			bdev->fn_table->write_config_json(bdev, w);
1144 		}
1145 
1146 		bdev_qos_config_json(bdev, w);
1147 	}
1148 
1149 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
1150 
1151 	/* This has to be last RPC in array to make sure all bdevs finished examine */
1152 	spdk_json_write_object_begin(w);
1153 	spdk_json_write_named_string(w, "method", "bdev_wait_for_examine");
1154 	spdk_json_write_object_end(w);
1155 
1156 	spdk_json_write_array_end(w);
1157 }
1158 
1159 static int
1160 bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
1161 {
1162 	struct spdk_bdev_mgmt_channel *ch = ctx_buf;
1163 	struct spdk_bdev_io *bdev_io;
1164 	uint32_t i;
1165 
1166 	STAILQ_INIT(&ch->need_buf_small);
1167 	STAILQ_INIT(&ch->need_buf_large);
1168 
1169 	STAILQ_INIT(&ch->per_thread_cache);
1170 	ch->bdev_io_cache_size = g_bdev_opts.bdev_io_cache_size;
1171 
1172 	/* Pre-populate bdev_io cache to ensure this thread cannot be starved. */
1173 	ch->per_thread_cache_count = 0;
1174 	for (i = 0; i < ch->bdev_io_cache_size; i++) {
1175 		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
1176 		assert(bdev_io != NULL);
1177 		ch->per_thread_cache_count++;
1178 		STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
1179 	}
1180 
1181 	TAILQ_INIT(&ch->shared_resources);
1182 	TAILQ_INIT(&ch->io_wait_queue);
1183 
1184 	return 0;
1185 }
1186 
1187 static void
1188 bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
1189 {
1190 	struct spdk_bdev_mgmt_channel *ch = ctx_buf;
1191 	struct spdk_bdev_io *bdev_io;
1192 
1193 	if (!STAILQ_EMPTY(&ch->need_buf_small) || !STAILQ_EMPTY(&ch->need_buf_large)) {
1194 		SPDK_ERRLOG("Pending I/O list wasn't empty on mgmt channel free\n");
1195 	}
1196 
1197 	if (!TAILQ_EMPTY(&ch->shared_resources)) {
1198 		SPDK_ERRLOG("Module channel list wasn't empty on mgmt channel free\n");
1199 	}
1200 
1201 	while (!STAILQ_EMPTY(&ch->per_thread_cache)) {
1202 		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
1203 		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
1204 		ch->per_thread_cache_count--;
1205 		spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
1206 	}
1207 
1208 	assert(ch->per_thread_cache_count == 0);
1209 }
1210 
1211 static void
1212 bdev_init_complete(int rc)
1213 {
1214 	spdk_bdev_init_cb cb_fn = g_init_cb_fn;
1215 	void *cb_arg = g_init_cb_arg;
1216 	struct spdk_bdev_module *m;
1217 
1218 	g_bdev_mgr.init_complete = true;
1219 	g_init_cb_fn = NULL;
1220 	g_init_cb_arg = NULL;
1221 
1222 	/*
1223 	 * For modules that need to know when subsystem init is complete,
1224 	 * inform them now.
1225 	 */
1226 	if (rc == 0) {
1227 		TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
1228 			if (m->init_complete) {
1229 				m->init_complete();
1230 			}
1231 		}
1232 	}
1233 
1234 	cb_fn(cb_arg, rc);
1235 }
1236 
1237 static bool
1238 bdev_module_all_actions_completed(void)
1239 {
1240 	struct spdk_bdev_module *m;
1241 
1242 	TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
1243 		if (m->internal.action_in_progress > 0) {
1244 			return false;
1245 		}
1246 	}
1247 	return true;
1248 }
1249 
1250 static void
1251 bdev_module_action_complete(void)
1252 {
1253 	/*
1254 	 * Don't finish bdev subsystem initialization if
1255 	 * module pre-initialization is still in progress, or
1256 	 * the subsystem been already initialized.
1257 	 */
1258 	if (!g_bdev_mgr.module_init_complete || g_bdev_mgr.init_complete) {
1259 		return;
1260 	}
1261 
1262 	/*
1263 	 * Check all bdev modules for inits/examinations in progress. If any
1264 	 * exist, return immediately since we cannot finish bdev subsystem
1265 	 * initialization until all are completed.
1266 	 */
1267 	if (!bdev_module_all_actions_completed()) {
1268 		return;
1269 	}
1270 
1271 	/*
1272 	 * Modules already finished initialization - now that all
1273 	 * the bdev modules have finished their asynchronous I/O
1274 	 * processing, the entire bdev layer can be marked as complete.
1275 	 */
1276 	bdev_init_complete(0);
1277 }
1278 
1279 static void
1280 bdev_module_action_done(struct spdk_bdev_module *module)
1281 {
1282 	assert(module->internal.action_in_progress > 0);
1283 	module->internal.action_in_progress--;
1284 	bdev_module_action_complete();
1285 }
1286 
1287 void
1288 spdk_bdev_module_init_done(struct spdk_bdev_module *module)
1289 {
1290 	bdev_module_action_done(module);
1291 }
1292 
1293 void
1294 spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
1295 {
1296 	bdev_module_action_done(module);
1297 }
1298 
1299 /** The last initialized bdev module */
1300 static struct spdk_bdev_module *g_resume_bdev_module = NULL;
1301 
1302 static void
1303 bdev_init_failed(void *cb_arg)
1304 {
1305 	struct spdk_bdev_module *module = cb_arg;
1306 
1307 	module->internal.action_in_progress--;
1308 	bdev_init_complete(-1);
1309 }
1310 
1311 static int
1312 bdev_modules_init(void)
1313 {
1314 	struct spdk_bdev_module *module;
1315 	int rc = 0;
1316 
1317 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1318 		g_resume_bdev_module = module;
1319 		if (module->async_init) {
1320 			module->internal.action_in_progress = 1;
1321 		}
1322 		rc = module->module_init();
1323 		if (rc != 0) {
1324 			/* Bump action_in_progress to prevent other modules from completion of modules_init
1325 			 * Send message to defer application shutdown until resources are cleaned up */
1326 			module->internal.action_in_progress = 1;
1327 			spdk_thread_send_msg(spdk_get_thread(), bdev_init_failed, module);
1328 			return rc;
1329 		}
1330 	}
1331 
1332 	g_resume_bdev_module = NULL;
1333 	return 0;
1334 }
1335 
1336 void
1337 spdk_bdev_initialize(spdk_bdev_init_cb cb_fn, void *cb_arg)
1338 {
1339 	int cache_size;
1340 	int rc = 0;
1341 	char mempool_name[32];
1342 
1343 	assert(cb_fn != NULL);
1344 
1345 	g_init_cb_fn = cb_fn;
1346 	g_init_cb_arg = cb_arg;
1347 
1348 	spdk_notify_type_register("bdev_register");
1349 	spdk_notify_type_register("bdev_unregister");
1350 
1351 	snprintf(mempool_name, sizeof(mempool_name), "bdev_io_%d", getpid());
1352 
1353 	g_bdev_mgr.bdev_io_pool = spdk_mempool_create(mempool_name,
1354 				  g_bdev_opts.bdev_io_pool_size,
1355 				  sizeof(struct spdk_bdev_io) +
1356 				  bdev_module_get_max_ctx_size(),
1357 				  0,
1358 				  SPDK_ENV_SOCKET_ID_ANY);
1359 
1360 	if (g_bdev_mgr.bdev_io_pool == NULL) {
1361 		SPDK_ERRLOG("could not allocate spdk_bdev_io pool\n");
1362 		bdev_init_complete(-1);
1363 		return;
1364 	}
1365 
1366 	/**
1367 	 * Ensure no more than half of the total buffers end up local caches, by
1368 	 *   using spdk_env_get_core_count() to determine how many local caches we need
1369 	 *   to account for.
1370 	 */
1371 	cache_size = BUF_SMALL_POOL_SIZE / (2 * spdk_env_get_core_count());
1372 	snprintf(mempool_name, sizeof(mempool_name), "buf_small_pool_%d", getpid());
1373 
1374 	g_bdev_mgr.buf_small_pool = spdk_mempool_create(mempool_name,
1375 				    g_bdev_opts.small_buf_pool_size,
1376 				    SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_SMALL_BUF_MAX_SIZE) +
1377 				    SPDK_BDEV_POOL_ALIGNMENT,
1378 				    cache_size,
1379 				    SPDK_ENV_SOCKET_ID_ANY);
1380 	if (!g_bdev_mgr.buf_small_pool) {
1381 		SPDK_ERRLOG("create rbuf small pool failed\n");
1382 		bdev_init_complete(-1);
1383 		return;
1384 	}
1385 
1386 	cache_size = BUF_LARGE_POOL_SIZE / (2 * spdk_env_get_core_count());
1387 	snprintf(mempool_name, sizeof(mempool_name), "buf_large_pool_%d", getpid());
1388 
1389 	g_bdev_mgr.buf_large_pool = spdk_mempool_create(mempool_name,
1390 				    g_bdev_opts.large_buf_pool_size,
1391 				    SPDK_BDEV_BUF_SIZE_WITH_MD(SPDK_BDEV_LARGE_BUF_MAX_SIZE) +
1392 				    SPDK_BDEV_POOL_ALIGNMENT,
1393 				    cache_size,
1394 				    SPDK_ENV_SOCKET_ID_ANY);
1395 	if (!g_bdev_mgr.buf_large_pool) {
1396 		SPDK_ERRLOG("create rbuf large pool failed\n");
1397 		bdev_init_complete(-1);
1398 		return;
1399 	}
1400 
1401 	g_bdev_mgr.zero_buffer = spdk_zmalloc(ZERO_BUFFER_SIZE, ZERO_BUFFER_SIZE,
1402 					      NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1403 	if (!g_bdev_mgr.zero_buffer) {
1404 		SPDK_ERRLOG("create bdev zero buffer failed\n");
1405 		bdev_init_complete(-1);
1406 		return;
1407 	}
1408 
1409 #ifdef SPDK_CONFIG_VTUNE
1410 	g_bdev_mgr.domain = __itt_domain_create("spdk_bdev");
1411 #endif
1412 
1413 	spdk_io_device_register(&g_bdev_mgr, bdev_mgmt_channel_create,
1414 				bdev_mgmt_channel_destroy,
1415 				sizeof(struct spdk_bdev_mgmt_channel),
1416 				"bdev_mgr");
1417 
1418 	rc = bdev_modules_init();
1419 	g_bdev_mgr.module_init_complete = true;
1420 	if (rc != 0) {
1421 		SPDK_ERRLOG("bdev modules init failed\n");
1422 		return;
1423 	}
1424 
1425 	bdev_module_action_complete();
1426 }
1427 
1428 static void
1429 bdev_mgr_unregister_cb(void *io_device)
1430 {
1431 	spdk_bdev_fini_cb cb_fn = g_fini_cb_fn;
1432 
1433 	if (g_bdev_mgr.bdev_io_pool) {
1434 		if (spdk_mempool_count(g_bdev_mgr.bdev_io_pool) != g_bdev_opts.bdev_io_pool_size) {
1435 			SPDK_ERRLOG("bdev IO pool count is %zu but should be %u\n",
1436 				    spdk_mempool_count(g_bdev_mgr.bdev_io_pool),
1437 				    g_bdev_opts.bdev_io_pool_size);
1438 		}
1439 
1440 		spdk_mempool_free(g_bdev_mgr.bdev_io_pool);
1441 	}
1442 
1443 	if (g_bdev_mgr.buf_small_pool) {
1444 		if (spdk_mempool_count(g_bdev_mgr.buf_small_pool) != g_bdev_opts.small_buf_pool_size) {
1445 			SPDK_ERRLOG("Small buffer pool count is %zu but should be %u\n",
1446 				    spdk_mempool_count(g_bdev_mgr.buf_small_pool),
1447 				    g_bdev_opts.small_buf_pool_size);
1448 			assert(false);
1449 		}
1450 
1451 		spdk_mempool_free(g_bdev_mgr.buf_small_pool);
1452 	}
1453 
1454 	if (g_bdev_mgr.buf_large_pool) {
1455 		if (spdk_mempool_count(g_bdev_mgr.buf_large_pool) != g_bdev_opts.large_buf_pool_size) {
1456 			SPDK_ERRLOG("Large buffer pool count is %zu but should be %u\n",
1457 				    spdk_mempool_count(g_bdev_mgr.buf_large_pool),
1458 				    g_bdev_opts.large_buf_pool_size);
1459 			assert(false);
1460 		}
1461 
1462 		spdk_mempool_free(g_bdev_mgr.buf_large_pool);
1463 	}
1464 
1465 	spdk_free(g_bdev_mgr.zero_buffer);
1466 
1467 	bdev_examine_allowlist_free();
1468 
1469 	cb_fn(g_fini_cb_arg);
1470 	g_fini_cb_fn = NULL;
1471 	g_fini_cb_arg = NULL;
1472 	g_bdev_mgr.init_complete = false;
1473 	g_bdev_mgr.module_init_complete = false;
1474 }
1475 
1476 static void
1477 bdev_module_finish_iter(void *arg)
1478 {
1479 	struct spdk_bdev_module *bdev_module;
1480 
1481 	/* FIXME: Handling initialization failures is broken now,
1482 	 * so we won't even try cleaning up after successfully
1483 	 * initialized modules. if module_init_complete is false,
1484 	 * just call spdk_bdev_mgr_unregister_cb
1485 	 */
1486 	if (!g_bdev_mgr.module_init_complete) {
1487 		bdev_mgr_unregister_cb(NULL);
1488 		return;
1489 	}
1490 
1491 	/* Start iterating from the last touched module */
1492 	if (!g_resume_bdev_module) {
1493 		bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
1494 	} else {
1495 		bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list,
1496 					 internal.tailq);
1497 	}
1498 
1499 	while (bdev_module) {
1500 		if (bdev_module->async_fini) {
1501 			/* Save our place so we can resume later. We must
1502 			 * save the variable here, before calling module_fini()
1503 			 * below, because in some cases the module may immediately
1504 			 * call spdk_bdev_module_finish_done() and re-enter
1505 			 * this function to continue iterating. */
1506 			g_resume_bdev_module = bdev_module;
1507 		}
1508 
1509 		if (bdev_module->module_fini) {
1510 			bdev_module->module_fini();
1511 		}
1512 
1513 		if (bdev_module->async_fini) {
1514 			return;
1515 		}
1516 
1517 		bdev_module = TAILQ_PREV(bdev_module, bdev_module_list,
1518 					 internal.tailq);
1519 	}
1520 
1521 	g_resume_bdev_module = NULL;
1522 	spdk_io_device_unregister(&g_bdev_mgr, bdev_mgr_unregister_cb);
1523 }
1524 
1525 void
1526 spdk_bdev_module_finish_done(void)
1527 {
1528 	if (spdk_get_thread() != g_fini_thread) {
1529 		spdk_thread_send_msg(g_fini_thread, bdev_module_finish_iter, NULL);
1530 	} else {
1531 		bdev_module_finish_iter(NULL);
1532 	}
1533 }
1534 
1535 static void
1536 bdev_finish_unregister_bdevs_iter(void *cb_arg, int bdeverrno)
1537 {
1538 	struct spdk_bdev *bdev = cb_arg;
1539 
1540 	if (bdeverrno && bdev) {
1541 		SPDK_WARNLOG("Unable to unregister bdev '%s' during spdk_bdev_finish()\n",
1542 			     bdev->name);
1543 
1544 		/*
1545 		 * Since the call to spdk_bdev_unregister() failed, we have no way to free this
1546 		 *  bdev; try to continue by manually removing this bdev from the list and continue
1547 		 *  with the next bdev in the list.
1548 		 */
1549 		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
1550 	}
1551 
1552 	if (TAILQ_EMPTY(&g_bdev_mgr.bdevs)) {
1553 		SPDK_DEBUGLOG(bdev, "Done unregistering bdevs\n");
1554 		/*
1555 		 * Bdev module finish need to be deferred as we might be in the middle of some context
1556 		 * (like bdev part free) that will use this bdev (or private bdev driver ctx data)
1557 		 * after returning.
1558 		 */
1559 		spdk_thread_send_msg(spdk_get_thread(), bdev_module_finish_iter, NULL);
1560 		return;
1561 	}
1562 
1563 	/*
1564 	 * Unregister last unclaimed bdev in the list, to ensure that bdev subsystem
1565 	 * shutdown proceeds top-down. The goal is to give virtual bdevs an opportunity
1566 	 * to detect clean shutdown as opposed to run-time hot removal of the underlying
1567 	 * base bdevs.
1568 	 *
1569 	 * Also, walk the list in the reverse order.
1570 	 */
1571 	for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
1572 	     bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
1573 		if (bdev->internal.claim_module != NULL) {
1574 			SPDK_DEBUGLOG(bdev, "Skipping claimed bdev '%s'(<-'%s').\n",
1575 				      bdev->name, bdev->internal.claim_module->name);
1576 			continue;
1577 		}
1578 
1579 		SPDK_DEBUGLOG(bdev, "Unregistering bdev '%s'\n", bdev->name);
1580 		spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
1581 		return;
1582 	}
1583 
1584 	/*
1585 	 * If any bdev fails to unclaim underlying bdev properly, we may face the
1586 	 * case of bdev list consisting of claimed bdevs only (if claims are managed
1587 	 * correctly, this would mean there's a loop in the claims graph which is
1588 	 * clearly impossible). Warn and unregister last bdev on the list then.
1589 	 */
1590 	for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
1591 	     bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
1592 		SPDK_WARNLOG("Unregistering claimed bdev '%s'!\n", bdev->name);
1593 		spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
1594 		return;
1595 	}
1596 }
1597 
1598 void
1599 spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
1600 {
1601 	struct spdk_bdev_module *m;
1602 
1603 	assert(cb_fn != NULL);
1604 
1605 	g_fini_thread = spdk_get_thread();
1606 
1607 	g_fini_cb_fn = cb_fn;
1608 	g_fini_cb_arg = cb_arg;
1609 
1610 	TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
1611 		if (m->fini_start) {
1612 			m->fini_start();
1613 		}
1614 	}
1615 
1616 	bdev_finish_unregister_bdevs_iter(NULL, 0);
1617 }
1618 
1619 struct spdk_bdev_io *
1620 bdev_channel_get_io(struct spdk_bdev_channel *channel)
1621 {
1622 	struct spdk_bdev_mgmt_channel *ch = channel->shared_resource->mgmt_ch;
1623 	struct spdk_bdev_io *bdev_io;
1624 
1625 	if (ch->per_thread_cache_count > 0) {
1626 		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
1627 		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
1628 		ch->per_thread_cache_count--;
1629 	} else if (spdk_unlikely(!TAILQ_EMPTY(&ch->io_wait_queue))) {
1630 		/*
1631 		 * Don't try to look for bdev_ios in the global pool if there are
1632 		 * waiters on bdev_ios - we don't want this caller to jump the line.
1633 		 */
1634 		bdev_io = NULL;
1635 	} else {
1636 		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
1637 	}
1638 
1639 	return bdev_io;
1640 }
1641 
1642 void
1643 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
1644 {
1645 	struct spdk_bdev_mgmt_channel *ch;
1646 
1647 	assert(bdev_io != NULL);
1648 	assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING);
1649 
1650 	ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1651 
1652 	if (bdev_io->internal.buf != NULL) {
1653 		bdev_io_put_buf(bdev_io);
1654 	}
1655 
1656 	if (ch->per_thread_cache_count < ch->bdev_io_cache_size) {
1657 		ch->per_thread_cache_count++;
1658 		STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
1659 		while (ch->per_thread_cache_count > 0 && !TAILQ_EMPTY(&ch->io_wait_queue)) {
1660 			struct spdk_bdev_io_wait_entry *entry;
1661 
1662 			entry = TAILQ_FIRST(&ch->io_wait_queue);
1663 			TAILQ_REMOVE(&ch->io_wait_queue, entry, link);
1664 			entry->cb_fn(entry->cb_arg);
1665 		}
1666 	} else {
1667 		/* We should never have a full cache with entries on the io wait queue. */
1668 		assert(TAILQ_EMPTY(&ch->io_wait_queue));
1669 		spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
1670 	}
1671 }
1672 
1673 static bool
1674 bdev_qos_is_iops_rate_limit(enum spdk_bdev_qos_rate_limit_type limit)
1675 {
1676 	assert(limit != SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
1677 
1678 	switch (limit) {
1679 	case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
1680 		return true;
1681 	case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
1682 	case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
1683 	case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
1684 		return false;
1685 	case SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES:
1686 	default:
1687 		return false;
1688 	}
1689 }
1690 
1691 static bool
1692 bdev_qos_io_to_limit(struct spdk_bdev_io *bdev_io)
1693 {
1694 	switch (bdev_io->type) {
1695 	case SPDK_BDEV_IO_TYPE_NVME_IO:
1696 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
1697 	case SPDK_BDEV_IO_TYPE_READ:
1698 	case SPDK_BDEV_IO_TYPE_WRITE:
1699 		return true;
1700 	case SPDK_BDEV_IO_TYPE_ZCOPY:
1701 		if (bdev_io->u.bdev.zcopy.start) {
1702 			return true;
1703 		} else {
1704 			return false;
1705 		}
1706 	default:
1707 		return false;
1708 	}
1709 }
1710 
1711 static bool
1712 bdev_is_read_io(struct spdk_bdev_io *bdev_io)
1713 {
1714 	switch (bdev_io->type) {
1715 	case SPDK_BDEV_IO_TYPE_NVME_IO:
1716 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
1717 		/* Bit 1 (0x2) set for read operation */
1718 		if (bdev_io->u.nvme_passthru.cmd.opc & SPDK_NVME_OPC_READ) {
1719 			return true;
1720 		} else {
1721 			return false;
1722 		}
1723 	case SPDK_BDEV_IO_TYPE_READ:
1724 		return true;
1725 	case SPDK_BDEV_IO_TYPE_ZCOPY:
1726 		/* Populate to read from disk */
1727 		if (bdev_io->u.bdev.zcopy.populate) {
1728 			return true;
1729 		} else {
1730 			return false;
1731 		}
1732 	default:
1733 		return false;
1734 	}
1735 }
1736 
1737 static uint64_t
1738 bdev_get_io_size_in_byte(struct spdk_bdev_io *bdev_io)
1739 {
1740 	struct spdk_bdev	*bdev = bdev_io->bdev;
1741 
1742 	switch (bdev_io->type) {
1743 	case SPDK_BDEV_IO_TYPE_NVME_IO:
1744 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
1745 		return bdev_io->u.nvme_passthru.nbytes;
1746 	case SPDK_BDEV_IO_TYPE_READ:
1747 	case SPDK_BDEV_IO_TYPE_WRITE:
1748 		return bdev_io->u.bdev.num_blocks * bdev->blocklen;
1749 	case SPDK_BDEV_IO_TYPE_ZCOPY:
1750 		/* Track the data in the start phase only */
1751 		if (bdev_io->u.bdev.zcopy.start) {
1752 			return bdev_io->u.bdev.num_blocks * bdev->blocklen;
1753 		} else {
1754 			return 0;
1755 		}
1756 	default:
1757 		return 0;
1758 	}
1759 }
1760 
1761 static bool
1762 bdev_qos_rw_queue_io(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1763 {
1764 	if (limit->max_per_timeslice > 0 && limit->remaining_this_timeslice <= 0) {
1765 		return true;
1766 	} else {
1767 		return false;
1768 	}
1769 }
1770 
1771 static bool
1772 bdev_qos_r_queue_io(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1773 {
1774 	if (bdev_is_read_io(io) == false) {
1775 		return false;
1776 	}
1777 
1778 	return bdev_qos_rw_queue_io(limit, io);
1779 }
1780 
1781 static bool
1782 bdev_qos_w_queue_io(const struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1783 {
1784 	if (bdev_is_read_io(io) == true) {
1785 		return false;
1786 	}
1787 
1788 	return bdev_qos_rw_queue_io(limit, io);
1789 }
1790 
1791 static void
1792 bdev_qos_rw_iops_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1793 {
1794 	limit->remaining_this_timeslice--;
1795 }
1796 
1797 static void
1798 bdev_qos_rw_bps_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1799 {
1800 	limit->remaining_this_timeslice -= bdev_get_io_size_in_byte(io);
1801 }
1802 
1803 static void
1804 bdev_qos_r_bps_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1805 {
1806 	if (bdev_is_read_io(io) == false) {
1807 		return;
1808 	}
1809 
1810 	return bdev_qos_rw_bps_update_quota(limit, io);
1811 }
1812 
1813 static void
1814 bdev_qos_w_bps_update_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
1815 {
1816 	if (bdev_is_read_io(io) == true) {
1817 		return;
1818 	}
1819 
1820 	return bdev_qos_rw_bps_update_quota(limit, io);
1821 }
1822 
1823 static void
1824 bdev_qos_set_ops(struct spdk_bdev_qos *qos)
1825 {
1826 	int i;
1827 
1828 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1829 		if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
1830 			qos->rate_limits[i].queue_io = NULL;
1831 			qos->rate_limits[i].update_quota = NULL;
1832 			continue;
1833 		}
1834 
1835 		switch (i) {
1836 		case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
1837 			qos->rate_limits[i].queue_io = bdev_qos_rw_queue_io;
1838 			qos->rate_limits[i].update_quota = bdev_qos_rw_iops_update_quota;
1839 			break;
1840 		case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
1841 			qos->rate_limits[i].queue_io = bdev_qos_rw_queue_io;
1842 			qos->rate_limits[i].update_quota = bdev_qos_rw_bps_update_quota;
1843 			break;
1844 		case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
1845 			qos->rate_limits[i].queue_io = bdev_qos_r_queue_io;
1846 			qos->rate_limits[i].update_quota = bdev_qos_r_bps_update_quota;
1847 			break;
1848 		case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
1849 			qos->rate_limits[i].queue_io = bdev_qos_w_queue_io;
1850 			qos->rate_limits[i].update_quota = bdev_qos_w_bps_update_quota;
1851 			break;
1852 		default:
1853 			break;
1854 		}
1855 	}
1856 }
1857 
1858 static void
1859 _bdev_io_complete_in_submit(struct spdk_bdev_channel *bdev_ch,
1860 			    struct spdk_bdev_io *bdev_io,
1861 			    enum spdk_bdev_io_status status)
1862 {
1863 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1864 
1865 	bdev_io->internal.in_submit_request = true;
1866 	bdev_ch->io_outstanding++;
1867 	shared_resource->io_outstanding++;
1868 	spdk_bdev_io_complete(bdev_io, status);
1869 	bdev_io->internal.in_submit_request = false;
1870 }
1871 
1872 static inline void
1873 bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_io)
1874 {
1875 	struct spdk_bdev *bdev = bdev_io->bdev;
1876 	struct spdk_io_channel *ch = bdev_ch->channel;
1877 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1878 
1879 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
1880 		struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
1881 		struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;
1882 
1883 		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
1884 		    bdev_abort_buf_io(&mgmt_channel->need_buf_small, bio_to_abort) ||
1885 		    bdev_abort_buf_io(&mgmt_channel->need_buf_large, bio_to_abort)) {
1886 			_bdev_io_complete_in_submit(bdev_ch, bdev_io,
1887 						    SPDK_BDEV_IO_STATUS_SUCCESS);
1888 			return;
1889 		}
1890 	}
1891 
1892 	if (spdk_likely(TAILQ_EMPTY(&shared_resource->nomem_io))) {
1893 		bdev_ch->io_outstanding++;
1894 		shared_resource->io_outstanding++;
1895 		bdev_io->internal.in_submit_request = true;
1896 		bdev->fn_table->submit_request(ch, bdev_io);
1897 		bdev_io->internal.in_submit_request = false;
1898 	} else {
1899 		TAILQ_INSERT_TAIL(&shared_resource->nomem_io, bdev_io, internal.link);
1900 	}
1901 }
1902 
1903 static int
1904 bdev_qos_io_submit(struct spdk_bdev_channel *ch, struct spdk_bdev_qos *qos)
1905 {
1906 	struct spdk_bdev_io		*bdev_io = NULL, *tmp = NULL;
1907 	int				i, submitted_ios = 0;
1908 
1909 	TAILQ_FOREACH_SAFE(bdev_io, &qos->queued, internal.link, tmp) {
1910 		if (bdev_qos_io_to_limit(bdev_io) == true) {
1911 			for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1912 				if (!qos->rate_limits[i].queue_io) {
1913 					continue;
1914 				}
1915 
1916 				if (qos->rate_limits[i].queue_io(&qos->rate_limits[i],
1917 								 bdev_io) == true) {
1918 					return submitted_ios;
1919 				}
1920 			}
1921 			for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1922 				if (!qos->rate_limits[i].update_quota) {
1923 					continue;
1924 				}
1925 
1926 				qos->rate_limits[i].update_quota(&qos->rate_limits[i], bdev_io);
1927 			}
1928 		}
1929 
1930 		TAILQ_REMOVE(&qos->queued, bdev_io, internal.link);
1931 		bdev_io_do_submit(ch, bdev_io);
1932 		submitted_ios++;
1933 	}
1934 
1935 	return submitted_ios;
1936 }
1937 
1938 static void
1939 bdev_queue_io_wait_with_cb(struct spdk_bdev_io *bdev_io, spdk_bdev_io_wait_cb cb_fn)
1940 {
1941 	int rc;
1942 
1943 	bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
1944 	bdev_io->internal.waitq_entry.cb_fn = cb_fn;
1945 	bdev_io->internal.waitq_entry.cb_arg = bdev_io;
1946 	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
1947 				     &bdev_io->internal.waitq_entry);
1948 	if (rc != 0) {
1949 		SPDK_ERRLOG("Queue IO failed, rc=%d\n", rc);
1950 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1951 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
1952 	}
1953 }
1954 
1955 static bool
1956 bdev_io_type_can_split(uint8_t type)
1957 {
1958 	assert(type != SPDK_BDEV_IO_TYPE_INVALID);
1959 	assert(type < SPDK_BDEV_NUM_IO_TYPES);
1960 
1961 	/* Only split READ and WRITE I/O.  Theoretically other types of I/O like
1962 	 * UNMAP could be split, but these types of I/O are typically much larger
1963 	 * in size (sometimes the size of the entire block device), and the bdev
1964 	 * module can more efficiently split these types of I/O.  Plus those types
1965 	 * of I/O do not have a payload, which makes the splitting process simpler.
1966 	 */
1967 	if (type == SPDK_BDEV_IO_TYPE_READ || type == SPDK_BDEV_IO_TYPE_WRITE) {
1968 		return true;
1969 	} else {
1970 		return false;
1971 	}
1972 }
1973 
1974 static bool
1975 bdev_io_should_split(struct spdk_bdev_io *bdev_io)
1976 {
1977 	uint32_t io_boundary = bdev_io->bdev->optimal_io_boundary;
1978 	uint32_t max_size = bdev_io->bdev->max_segment_size;
1979 	int max_segs = bdev_io->bdev->max_num_segments;
1980 
1981 	io_boundary = bdev_io->bdev->split_on_optimal_io_boundary ? io_boundary : 0;
1982 
1983 	if (spdk_likely(!io_boundary && !max_segs && !max_size)) {
1984 		return false;
1985 	}
1986 
1987 	if (!bdev_io_type_can_split(bdev_io->type)) {
1988 		return false;
1989 	}
1990 
1991 	if (io_boundary) {
1992 		uint64_t start_stripe, end_stripe;
1993 
1994 		start_stripe = bdev_io->u.bdev.offset_blocks;
1995 		end_stripe = start_stripe + bdev_io->u.bdev.num_blocks - 1;
1996 		/* Avoid expensive div operations if possible.  These spdk_u32 functions are very cheap. */
1997 		if (spdk_likely(spdk_u32_is_pow2(io_boundary))) {
1998 			start_stripe >>= spdk_u32log2(io_boundary);
1999 			end_stripe >>= spdk_u32log2(io_boundary);
2000 		} else {
2001 			start_stripe /= io_boundary;
2002 			end_stripe /= io_boundary;
2003 		}
2004 
2005 		if (start_stripe != end_stripe) {
2006 			return true;
2007 		}
2008 	}
2009 
2010 	if (max_segs) {
2011 		if (bdev_io->u.bdev.iovcnt > max_segs) {
2012 			return true;
2013 		}
2014 	}
2015 
2016 	if (max_size) {
2017 		for (int i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
2018 			if (bdev_io->u.bdev.iovs[i].iov_len > max_size) {
2019 				return true;
2020 			}
2021 		}
2022 	}
2023 
2024 	return false;
2025 }
2026 
2027 static uint32_t
2028 _to_next_boundary(uint64_t offset, uint32_t boundary)
2029 {
2030 	return (boundary - (offset % boundary));
2031 }
2032 
2033 static void
2034 bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
2035 
2036 static void
2037 _bdev_io_split(void *_bdev_io)
2038 {
2039 	struct iovec *parent_iov, *iov;
2040 	struct spdk_bdev_io *bdev_io = _bdev_io;
2041 	struct spdk_bdev *bdev = bdev_io->bdev;
2042 	uint64_t parent_offset, current_offset, remaining;
2043 	uint32_t parent_iov_offset, parent_iovcnt, parent_iovpos, child_iovcnt;
2044 	uint32_t to_next_boundary, to_next_boundary_bytes, to_last_block_bytes;
2045 	uint32_t iovcnt, iov_len, child_iovsize;
2046 	uint32_t blocklen = bdev->blocklen;
2047 	uint32_t io_boundary = bdev->optimal_io_boundary;
2048 	uint32_t max_segment_size = bdev->max_segment_size;
2049 	uint32_t max_child_iovcnt = bdev->max_num_segments;
2050 	void *md_buf = NULL;
2051 	int rc;
2052 
2053 	max_segment_size = max_segment_size ? max_segment_size : UINT32_MAX;
2054 	max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, BDEV_IO_NUM_CHILD_IOV) :
2055 			   BDEV_IO_NUM_CHILD_IOV;
2056 	io_boundary = bdev->split_on_optimal_io_boundary ? io_boundary : UINT32_MAX;
2057 
2058 	remaining = bdev_io->u.bdev.split_remaining_num_blocks;
2059 	current_offset = bdev_io->u.bdev.split_current_offset_blocks;
2060 	parent_offset = bdev_io->u.bdev.offset_blocks;
2061 	parent_iov_offset = (current_offset - parent_offset) * blocklen;
2062 	parent_iovcnt = bdev_io->u.bdev.iovcnt;
2063 
2064 	for (parent_iovpos = 0; parent_iovpos < parent_iovcnt; parent_iovpos++) {
2065 		parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
2066 		if (parent_iov_offset < parent_iov->iov_len) {
2067 			break;
2068 		}
2069 		parent_iov_offset -= parent_iov->iov_len;
2070 	}
2071 
2072 	child_iovcnt = 0;
2073 	while (remaining > 0 && parent_iovpos < parent_iovcnt && child_iovcnt < BDEV_IO_NUM_CHILD_IOV) {
2074 		to_next_boundary = _to_next_boundary(current_offset, io_boundary);
2075 		to_next_boundary = spdk_min(remaining, to_next_boundary);
2076 		to_next_boundary_bytes = to_next_boundary * blocklen;
2077 
2078 		iov = &bdev_io->child_iov[child_iovcnt];
2079 		iovcnt = 0;
2080 
2081 		if (bdev_io->u.bdev.md_buf) {
2082 			md_buf = (char *)bdev_io->u.bdev.md_buf +
2083 				 (current_offset - parent_offset) * spdk_bdev_get_md_size(bdev);
2084 		}
2085 
2086 		child_iovsize = spdk_min(BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
2087 		while (to_next_boundary_bytes > 0 && parent_iovpos < parent_iovcnt &&
2088 		       iovcnt < child_iovsize) {
2089 			parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
2090 			iov_len = parent_iov->iov_len - parent_iov_offset;
2091 
2092 			iov_len = spdk_min(iov_len, max_segment_size);
2093 			iov_len = spdk_min(iov_len, to_next_boundary_bytes);
2094 			to_next_boundary_bytes -= iov_len;
2095 
2096 			bdev_io->child_iov[child_iovcnt].iov_base = parent_iov->iov_base + parent_iov_offset;
2097 			bdev_io->child_iov[child_iovcnt].iov_len = iov_len;
2098 
2099 			if (iov_len < parent_iov->iov_len - parent_iov_offset) {
2100 				parent_iov_offset += iov_len;
2101 			} else {
2102 				parent_iovpos++;
2103 				parent_iov_offset = 0;
2104 			}
2105 			child_iovcnt++;
2106 			iovcnt++;
2107 		}
2108 
2109 		if (to_next_boundary_bytes > 0) {
2110 			/* We had to stop this child I/O early because we ran out of
2111 			 * child_iov space or were limited by max_num_segments.
2112 			 * Ensure the iovs to be aligned with block size and
2113 			 * then adjust to_next_boundary before starting the
2114 			 * child I/O.
2115 			 */
2116 			assert(child_iovcnt == BDEV_IO_NUM_CHILD_IOV ||
2117 			       iovcnt == child_iovsize);
2118 			to_last_block_bytes = to_next_boundary_bytes % blocklen;
2119 			if (to_last_block_bytes != 0) {
2120 				uint32_t child_iovpos = child_iovcnt - 1;
2121 				/* don't decrease child_iovcnt when it equals to BDEV_IO_NUM_CHILD_IOV
2122 				 * so the loop will naturally end
2123 				 */
2124 
2125 				to_last_block_bytes = blocklen - to_last_block_bytes;
2126 				to_next_boundary_bytes += to_last_block_bytes;
2127 				while (to_last_block_bytes > 0 && iovcnt > 0) {
2128 					iov_len = spdk_min(to_last_block_bytes,
2129 							   bdev_io->child_iov[child_iovpos].iov_len);
2130 					bdev_io->child_iov[child_iovpos].iov_len -= iov_len;
2131 					if (bdev_io->child_iov[child_iovpos].iov_len == 0) {
2132 						child_iovpos--;
2133 						if (--iovcnt == 0) {
2134 							/* If the child IO is less than a block size just return.
2135 							 * If the first child IO of any split round is less than
2136 							 * a block size, an error exit.
2137 							 */
2138 							if (bdev_io->u.bdev.split_outstanding == 0) {
2139 								SPDK_ERRLOG("The first child io was less than a block size\n");
2140 								bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2141 								spdk_trace_record_tsc(spdk_get_ticks(), TRACE_BDEV_IO_DONE, 0, 0,
2142 										      (uintptr_t)bdev_io, 0);
2143 								TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
2144 								bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2145 							}
2146 
2147 							return;
2148 						}
2149 					}
2150 
2151 					to_last_block_bytes -= iov_len;
2152 
2153 					if (parent_iov_offset == 0) {
2154 						parent_iovpos--;
2155 						parent_iov_offset = bdev_io->u.bdev.iovs[parent_iovpos].iov_len;
2156 					}
2157 					parent_iov_offset -= iov_len;
2158 				}
2159 
2160 				assert(to_last_block_bytes == 0);
2161 			}
2162 			to_next_boundary -= to_next_boundary_bytes / blocklen;
2163 		}
2164 
2165 		bdev_io->u.bdev.split_outstanding++;
2166 
2167 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
2168 			rc = bdev_readv_blocks_with_md(bdev_io->internal.desc,
2169 						       spdk_io_channel_from_ctx(bdev_io->internal.ch),
2170 						       iov, iovcnt, md_buf, current_offset,
2171 						       to_next_boundary,
2172 						       bdev_io_split_done, bdev_io);
2173 		} else {
2174 			rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
2175 							spdk_io_channel_from_ctx(bdev_io->internal.ch),
2176 							iov, iovcnt, md_buf, current_offset,
2177 							to_next_boundary,
2178 							bdev_io_split_done, bdev_io);
2179 		}
2180 
2181 		if (rc == 0) {
2182 			current_offset += to_next_boundary;
2183 			remaining -= to_next_boundary;
2184 			bdev_io->u.bdev.split_current_offset_blocks = current_offset;
2185 			bdev_io->u.bdev.split_remaining_num_blocks = remaining;
2186 		} else {
2187 			bdev_io->u.bdev.split_outstanding--;
2188 			if (rc == -ENOMEM) {
2189 				if (bdev_io->u.bdev.split_outstanding == 0) {
2190 					/* No I/O is outstanding. Hence we should wait here. */
2191 					bdev_queue_io_wait_with_cb(bdev_io, _bdev_io_split);
2192 				}
2193 			} else {
2194 				bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2195 				if (bdev_io->u.bdev.split_outstanding == 0) {
2196 					spdk_trace_record_tsc(spdk_get_ticks(), TRACE_BDEV_IO_DONE, 0, 0,
2197 							      (uintptr_t)bdev_io, 0);
2198 					TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
2199 					bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2200 				}
2201 			}
2202 
2203 			return;
2204 		}
2205 	}
2206 }
2207 
2208 static void
2209 bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2210 {
2211 	struct spdk_bdev_io *parent_io = cb_arg;
2212 
2213 	spdk_bdev_free_io(bdev_io);
2214 
2215 	if (!success) {
2216 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2217 		/* If any child I/O failed, stop further splitting process. */
2218 		parent_io->u.bdev.split_current_offset_blocks += parent_io->u.bdev.split_remaining_num_blocks;
2219 		parent_io->u.bdev.split_remaining_num_blocks = 0;
2220 	}
2221 	parent_io->u.bdev.split_outstanding--;
2222 	if (parent_io->u.bdev.split_outstanding != 0) {
2223 		return;
2224 	}
2225 
2226 	/*
2227 	 * Parent I/O finishes when all blocks are consumed.
2228 	 */
2229 	if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
2230 		assert(parent_io->internal.cb != bdev_io_split_done);
2231 		spdk_trace_record_tsc(spdk_get_ticks(), TRACE_BDEV_IO_DONE, 0, 0,
2232 				      (uintptr_t)parent_io, 0);
2233 		TAILQ_REMOVE(&parent_io->internal.ch->io_submitted, parent_io, internal.ch_link);
2234 		parent_io->internal.cb(parent_io, parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
2235 				       parent_io->internal.caller_ctx);
2236 		return;
2237 	}
2238 
2239 	/*
2240 	 * Continue with the splitting process.  This function will complete the parent I/O if the
2241 	 * splitting is done.
2242 	 */
2243 	_bdev_io_split(parent_io);
2244 }
2245 
2246 static void
2247 bdev_io_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success);
2248 
2249 static void
2250 bdev_io_split(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2251 {
2252 	assert(bdev_io_type_can_split(bdev_io->type));
2253 
2254 	bdev_io->u.bdev.split_current_offset_blocks = bdev_io->u.bdev.offset_blocks;
2255 	bdev_io->u.bdev.split_remaining_num_blocks = bdev_io->u.bdev.num_blocks;
2256 	bdev_io->u.bdev.split_outstanding = 0;
2257 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
2258 
2259 	if (_is_buf_allocated(bdev_io->u.bdev.iovs)) {
2260 		_bdev_io_split(bdev_io);
2261 	} else {
2262 		assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
2263 		spdk_bdev_io_get_buf(bdev_io, bdev_io_split_get_buf_cb,
2264 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
2265 	}
2266 }
2267 
2268 static void
2269 bdev_io_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
2270 {
2271 	if (!success) {
2272 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2273 		return;
2274 	}
2275 
2276 	_bdev_io_split(bdev_io);
2277 }
2278 
2279 /* Explicitly mark this inline, since it's used as a function pointer and otherwise won't
2280  *  be inlined, at least on some compilers.
2281  */
2282 static inline void
2283 _bdev_io_submit(void *ctx)
2284 {
2285 	struct spdk_bdev_io *bdev_io = ctx;
2286 	struct spdk_bdev *bdev = bdev_io->bdev;
2287 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
2288 	uint64_t tsc;
2289 
2290 	tsc = spdk_get_ticks();
2291 	bdev_io->internal.submit_tsc = tsc;
2292 	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_START, 0, 0, (uintptr_t)bdev_io, bdev_io->type);
2293 
2294 	if (spdk_likely(bdev_ch->flags == 0)) {
2295 		bdev_io_do_submit(bdev_ch, bdev_io);
2296 		return;
2297 	}
2298 
2299 	if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
2300 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
2301 	} else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
2302 		if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) &&
2303 		    bdev_abort_queued_io(&bdev->internal.qos->queued, bdev_io->u.abort.bio_to_abort)) {
2304 			_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
2305 		} else {
2306 			TAILQ_INSERT_TAIL(&bdev->internal.qos->queued, bdev_io, internal.link);
2307 			bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
2308 		}
2309 	} else {
2310 		SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
2311 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2312 	}
2313 }
2314 
2315 bool
2316 bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2);
2317 
2318 bool
2319 bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2)
2320 {
2321 	if (range1->length == 0 || range2->length == 0) {
2322 		return false;
2323 	}
2324 
2325 	if (range1->offset + range1->length <= range2->offset) {
2326 		return false;
2327 	}
2328 
2329 	if (range2->offset + range2->length <= range1->offset) {
2330 		return false;
2331 	}
2332 
2333 	return true;
2334 }
2335 
2336 static bool
2337 bdev_io_range_is_locked(struct spdk_bdev_io *bdev_io, struct lba_range *range)
2338 {
2339 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
2340 	struct lba_range r;
2341 
2342 	switch (bdev_io->type) {
2343 	case SPDK_BDEV_IO_TYPE_NVME_IO:
2344 	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2345 		/* Don't try to decode the NVMe command - just assume worst-case and that
2346 		 * it overlaps a locked range.
2347 		 */
2348 		return true;
2349 	case SPDK_BDEV_IO_TYPE_WRITE:
2350 	case SPDK_BDEV_IO_TYPE_UNMAP:
2351 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2352 	case SPDK_BDEV_IO_TYPE_ZCOPY:
2353 		r.offset = bdev_io->u.bdev.offset_blocks;
2354 		r.length = bdev_io->u.bdev.num_blocks;
2355 		if (!bdev_lba_range_overlapped(range, &r)) {
2356 			/* This I/O doesn't overlap the specified LBA range. */
2357 			return false;
2358 		} else if (range->owner_ch == ch && range->locked_ctx == bdev_io->internal.caller_ctx) {
2359 			/* This I/O overlaps, but the I/O is on the same channel that locked this
2360 			 * range, and the caller_ctx is the same as the locked_ctx.  This means
2361 			 * that this I/O is associated with the lock, and is allowed to execute.
2362 			 */
2363 			return false;
2364 		} else {
2365 			return true;
2366 		}
2367 	default:
2368 		return false;
2369 	}
2370 }
2371 
2372 void
2373 bdev_io_submit(struct spdk_bdev_io *bdev_io)
2374 {
2375 	struct spdk_bdev *bdev = bdev_io->bdev;
2376 	struct spdk_thread *thread = spdk_bdev_io_get_thread(bdev_io);
2377 	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
2378 
2379 	assert(thread != NULL);
2380 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
2381 
2382 	if (!TAILQ_EMPTY(&ch->locked_ranges)) {
2383 		struct lba_range *range;
2384 
2385 		TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
2386 			if (bdev_io_range_is_locked(bdev_io, range)) {
2387 				TAILQ_INSERT_TAIL(&ch->io_locked, bdev_io, internal.ch_link);
2388 				return;
2389 			}
2390 		}
2391 	}
2392 
2393 	TAILQ_INSERT_TAIL(&ch->io_submitted, bdev_io, internal.ch_link);
2394 
2395 	if (bdev_io_should_split(bdev_io)) {
2396 		bdev_io->internal.submit_tsc = spdk_get_ticks();
2397 		spdk_trace_record_tsc(bdev_io->internal.submit_tsc, TRACE_BDEV_IO_START, 0, 0,
2398 				      (uintptr_t)bdev_io, bdev_io->type);
2399 		bdev_io_split(NULL, bdev_io);
2400 		return;
2401 	}
2402 
2403 	if (ch->flags & BDEV_CH_QOS_ENABLED) {
2404 		if ((thread == bdev->internal.qos->thread) || !bdev->internal.qos->thread) {
2405 			_bdev_io_submit(bdev_io);
2406 		} else {
2407 			bdev_io->internal.io_submit_ch = ch;
2408 			bdev_io->internal.ch = bdev->internal.qos->ch;
2409 			spdk_thread_send_msg(bdev->internal.qos->thread, _bdev_io_submit, bdev_io);
2410 		}
2411 	} else {
2412 		_bdev_io_submit(bdev_io);
2413 	}
2414 }
2415 
2416 static void
2417 bdev_io_submit_reset(struct spdk_bdev_io *bdev_io)
2418 {
2419 	struct spdk_bdev *bdev = bdev_io->bdev;
2420 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
2421 	struct spdk_io_channel *ch = bdev_ch->channel;
2422 
2423 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
2424 
2425 	bdev_io->internal.in_submit_request = true;
2426 	bdev->fn_table->submit_request(ch, bdev_io);
2427 	bdev_io->internal.in_submit_request = false;
2428 }
2429 
2430 void
2431 bdev_io_init(struct spdk_bdev_io *bdev_io,
2432 	     struct spdk_bdev *bdev, void *cb_arg,
2433 	     spdk_bdev_io_completion_cb cb)
2434 {
2435 	bdev_io->bdev = bdev;
2436 	bdev_io->internal.caller_ctx = cb_arg;
2437 	bdev_io->internal.cb = cb;
2438 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
2439 	bdev_io->internal.in_submit_request = false;
2440 	bdev_io->internal.buf = NULL;
2441 	bdev_io->internal.io_submit_ch = NULL;
2442 	bdev_io->internal.orig_iovs = NULL;
2443 	bdev_io->internal.orig_iovcnt = 0;
2444 	bdev_io->internal.orig_md_buf = NULL;
2445 	bdev_io->internal.error.nvme.cdw0 = 0;
2446 	bdev_io->num_retries = 0;
2447 	bdev_io->internal.get_buf_cb = NULL;
2448 	bdev_io->internal.get_aux_buf_cb = NULL;
2449 }
2450 
2451 static bool
2452 bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
2453 {
2454 	return bdev->fn_table->io_type_supported(bdev->ctxt, io_type);
2455 }
2456 
2457 bool
2458 spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
2459 {
2460 	bool supported;
2461 
2462 	supported = bdev_io_type_supported(bdev, io_type);
2463 
2464 	if (!supported) {
2465 		switch (io_type) {
2466 		case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
2467 			/* The bdev layer will emulate write zeroes as long as write is supported. */
2468 			supported = bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE);
2469 			break;
2470 		case SPDK_BDEV_IO_TYPE_ZCOPY:
2471 			/* Zero copy can be emulated with regular read and write */
2472 			supported = bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ) &&
2473 				    bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE);
2474 			break;
2475 		default:
2476 			break;
2477 		}
2478 	}
2479 
2480 	return supported;
2481 }
2482 
2483 int
2484 spdk_bdev_dump_info_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
2485 {
2486 	if (bdev->fn_table->dump_info_json) {
2487 		return bdev->fn_table->dump_info_json(bdev->ctxt, w);
2488 	}
2489 
2490 	return 0;
2491 }
2492 
2493 static void
2494 bdev_qos_update_max_quota_per_timeslice(struct spdk_bdev_qos *qos)
2495 {
2496 	uint32_t max_per_timeslice = 0;
2497 	int i;
2498 
2499 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2500 		if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
2501 			qos->rate_limits[i].max_per_timeslice = 0;
2502 			continue;
2503 		}
2504 
2505 		max_per_timeslice = qos->rate_limits[i].limit *
2506 				    SPDK_BDEV_QOS_TIMESLICE_IN_USEC / SPDK_SEC_TO_USEC;
2507 
2508 		qos->rate_limits[i].max_per_timeslice = spdk_max(max_per_timeslice,
2509 							qos->rate_limits[i].min_per_timeslice);
2510 
2511 		qos->rate_limits[i].remaining_this_timeslice = qos->rate_limits[i].max_per_timeslice;
2512 	}
2513 
2514 	bdev_qos_set_ops(qos);
2515 }
2516 
2517 static int
2518 bdev_channel_poll_qos(void *arg)
2519 {
2520 	struct spdk_bdev_qos *qos = arg;
2521 	uint64_t now = spdk_get_ticks();
2522 	int i;
2523 
2524 	if (now < (qos->last_timeslice + qos->timeslice_size)) {
2525 		/* We received our callback earlier than expected - return
2526 		 *  immediately and wait to do accounting until at least one
2527 		 *  timeslice has actually expired.  This should never happen
2528 		 *  with a well-behaved timer implementation.
2529 		 */
2530 		return SPDK_POLLER_IDLE;
2531 	}
2532 
2533 	/* Reset for next round of rate limiting */
2534 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2535 		/* We may have allowed the IOs or bytes to slightly overrun in the last
2536 		 * timeslice. remaining_this_timeslice is signed, so if it's negative
2537 		 * here, we'll account for the overrun so that the next timeslice will
2538 		 * be appropriately reduced.
2539 		 */
2540 		if (qos->rate_limits[i].remaining_this_timeslice > 0) {
2541 			qos->rate_limits[i].remaining_this_timeslice = 0;
2542 		}
2543 	}
2544 
2545 	while (now >= (qos->last_timeslice + qos->timeslice_size)) {
2546 		qos->last_timeslice += qos->timeslice_size;
2547 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2548 			qos->rate_limits[i].remaining_this_timeslice +=
2549 				qos->rate_limits[i].max_per_timeslice;
2550 		}
2551 	}
2552 
2553 	return bdev_qos_io_submit(qos->ch, qos);
2554 }
2555 
2556 static void
2557 bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
2558 {
2559 	struct spdk_bdev_shared_resource *shared_resource;
2560 	struct lba_range *range;
2561 
2562 	while (!TAILQ_EMPTY(&ch->locked_ranges)) {
2563 		range = TAILQ_FIRST(&ch->locked_ranges);
2564 		TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
2565 		free(range);
2566 	}
2567 
2568 	spdk_put_io_channel(ch->channel);
2569 
2570 	shared_resource = ch->shared_resource;
2571 
2572 	assert(TAILQ_EMPTY(&ch->io_locked));
2573 	assert(TAILQ_EMPTY(&ch->io_submitted));
2574 	assert(ch->io_outstanding == 0);
2575 	assert(shared_resource->ref > 0);
2576 	shared_resource->ref--;
2577 	if (shared_resource->ref == 0) {
2578 		assert(shared_resource->io_outstanding == 0);
2579 		TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
2580 		spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
2581 		free(shared_resource);
2582 	}
2583 }
2584 
2585 /* Caller must hold bdev->internal.mutex. */
2586 static void
2587 bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
2588 {
2589 	struct spdk_bdev_qos	*qos = bdev->internal.qos;
2590 	int			i;
2591 
2592 	/* Rate limiting on this bdev enabled */
2593 	if (qos) {
2594 		if (qos->ch == NULL) {
2595 			struct spdk_io_channel *io_ch;
2596 
2597 			SPDK_DEBUGLOG(bdev, "Selecting channel %p as QoS channel for bdev %s on thread %p\n", ch,
2598 				      bdev->name, spdk_get_thread());
2599 
2600 			/* No qos channel has been selected, so set one up */
2601 
2602 			/* Take another reference to ch */
2603 			io_ch = spdk_get_io_channel(__bdev_to_io_dev(bdev));
2604 			assert(io_ch != NULL);
2605 			qos->ch = ch;
2606 
2607 			qos->thread = spdk_io_channel_get_thread(io_ch);
2608 
2609 			TAILQ_INIT(&qos->queued);
2610 
2611 			for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2612 				if (bdev_qos_is_iops_rate_limit(i) == true) {
2613 					qos->rate_limits[i].min_per_timeslice =
2614 						SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE;
2615 				} else {
2616 					qos->rate_limits[i].min_per_timeslice =
2617 						SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE;
2618 				}
2619 
2620 				if (qos->rate_limits[i].limit == 0) {
2621 					qos->rate_limits[i].limit = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
2622 				}
2623 			}
2624 			bdev_qos_update_max_quota_per_timeslice(qos);
2625 			qos->timeslice_size =
2626 				SPDK_BDEV_QOS_TIMESLICE_IN_USEC * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
2627 			qos->last_timeslice = spdk_get_ticks();
2628 			qos->poller = SPDK_POLLER_REGISTER(bdev_channel_poll_qos,
2629 							   qos,
2630 							   SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
2631 		}
2632 
2633 		ch->flags |= BDEV_CH_QOS_ENABLED;
2634 	}
2635 }
2636 
2637 struct poll_timeout_ctx {
2638 	struct spdk_bdev_desc	*desc;
2639 	uint64_t		timeout_in_sec;
2640 	spdk_bdev_io_timeout_cb	cb_fn;
2641 	void			*cb_arg;
2642 };
2643 
2644 static void
2645 bdev_desc_free(struct spdk_bdev_desc *desc)
2646 {
2647 	pthread_mutex_destroy(&desc->mutex);
2648 	free(desc->media_events_buffer);
2649 	free(desc);
2650 }
2651 
2652 static void
2653 bdev_channel_poll_timeout_io_done(struct spdk_io_channel_iter *i, int status)
2654 {
2655 	struct poll_timeout_ctx *ctx  = spdk_io_channel_iter_get_ctx(i);
2656 	struct spdk_bdev_desc *desc = ctx->desc;
2657 
2658 	free(ctx);
2659 
2660 	pthread_mutex_lock(&desc->mutex);
2661 	desc->refs--;
2662 	if (desc->closed == true && desc->refs == 0) {
2663 		pthread_mutex_unlock(&desc->mutex);
2664 		bdev_desc_free(desc);
2665 		return;
2666 	}
2667 	pthread_mutex_unlock(&desc->mutex);
2668 }
2669 
2670 static void
2671 bdev_channel_poll_timeout_io(struct spdk_io_channel_iter *i)
2672 {
2673 	struct poll_timeout_ctx *ctx  = spdk_io_channel_iter_get_ctx(i);
2674 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
2675 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(io_ch);
2676 	struct spdk_bdev_desc *desc = ctx->desc;
2677 	struct spdk_bdev_io *bdev_io;
2678 	uint64_t now;
2679 
2680 	pthread_mutex_lock(&desc->mutex);
2681 	if (desc->closed == true) {
2682 		pthread_mutex_unlock(&desc->mutex);
2683 		spdk_for_each_channel_continue(i, -1);
2684 		return;
2685 	}
2686 	pthread_mutex_unlock(&desc->mutex);
2687 
2688 	now = spdk_get_ticks();
2689 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
2690 		/* Exclude any I/O that are generated via splitting. */
2691 		if (bdev_io->internal.cb == bdev_io_split_done) {
2692 			continue;
2693 		}
2694 
2695 		/* Once we find an I/O that has not timed out, we can immediately
2696 		 * exit the loop.
2697 		 */
2698 		if (now < (bdev_io->internal.submit_tsc +
2699 			   ctx->timeout_in_sec * spdk_get_ticks_hz())) {
2700 			goto end;
2701 		}
2702 
2703 		if (bdev_io->internal.desc == desc) {
2704 			ctx->cb_fn(ctx->cb_arg, bdev_io);
2705 		}
2706 	}
2707 
2708 end:
2709 	spdk_for_each_channel_continue(i, 0);
2710 }
2711 
2712 static int
2713 bdev_poll_timeout_io(void *arg)
2714 {
2715 	struct spdk_bdev_desc *desc = arg;
2716 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
2717 	struct poll_timeout_ctx *ctx;
2718 
2719 	ctx = calloc(1, sizeof(struct poll_timeout_ctx));
2720 	if (!ctx) {
2721 		SPDK_ERRLOG("failed to allocate memory\n");
2722 		return SPDK_POLLER_BUSY;
2723 	}
2724 	ctx->desc = desc;
2725 	ctx->cb_arg = desc->cb_arg;
2726 	ctx->cb_fn = desc->cb_fn;
2727 	ctx->timeout_in_sec = desc->timeout_in_sec;
2728 
2729 	/* Take a ref on the descriptor in case it gets closed while we are checking
2730 	 * all of the channels.
2731 	 */
2732 	pthread_mutex_lock(&desc->mutex);
2733 	desc->refs++;
2734 	pthread_mutex_unlock(&desc->mutex);
2735 
2736 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
2737 			      bdev_channel_poll_timeout_io,
2738 			      ctx,
2739 			      bdev_channel_poll_timeout_io_done);
2740 
2741 	return SPDK_POLLER_BUSY;
2742 }
2743 
2744 int
2745 spdk_bdev_set_timeout(struct spdk_bdev_desc *desc, uint64_t timeout_in_sec,
2746 		      spdk_bdev_io_timeout_cb cb_fn, void *cb_arg)
2747 {
2748 	assert(desc->thread == spdk_get_thread());
2749 
2750 	spdk_poller_unregister(&desc->io_timeout_poller);
2751 
2752 	if (timeout_in_sec) {
2753 		assert(cb_fn != NULL);
2754 		desc->io_timeout_poller = SPDK_POLLER_REGISTER(bdev_poll_timeout_io,
2755 					  desc,
2756 					  SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * SPDK_SEC_TO_USEC /
2757 					  1000);
2758 		if (desc->io_timeout_poller == NULL) {
2759 			SPDK_ERRLOG("can not register the desc timeout IO poller\n");
2760 			return -1;
2761 		}
2762 	}
2763 
2764 	desc->cb_fn = cb_fn;
2765 	desc->cb_arg = cb_arg;
2766 	desc->timeout_in_sec = timeout_in_sec;
2767 
2768 	return 0;
2769 }
2770 
2771 static int
2772 bdev_channel_create(void *io_device, void *ctx_buf)
2773 {
2774 	struct spdk_bdev		*bdev = __bdev_from_io_dev(io_device);
2775 	struct spdk_bdev_channel	*ch = ctx_buf;
2776 	struct spdk_io_channel		*mgmt_io_ch;
2777 	struct spdk_bdev_mgmt_channel	*mgmt_ch;
2778 	struct spdk_bdev_shared_resource *shared_resource;
2779 	struct lba_range		*range;
2780 
2781 	ch->bdev = bdev;
2782 	ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
2783 	if (!ch->channel) {
2784 		return -1;
2785 	}
2786 
2787 	assert(ch->histogram == NULL);
2788 	if (bdev->internal.histogram_enabled) {
2789 		ch->histogram = spdk_histogram_data_alloc();
2790 		if (ch->histogram == NULL) {
2791 			SPDK_ERRLOG("Could not allocate histogram\n");
2792 		}
2793 	}
2794 
2795 	mgmt_io_ch = spdk_get_io_channel(&g_bdev_mgr);
2796 	if (!mgmt_io_ch) {
2797 		spdk_put_io_channel(ch->channel);
2798 		return -1;
2799 	}
2800 
2801 	mgmt_ch = spdk_io_channel_get_ctx(mgmt_io_ch);
2802 	TAILQ_FOREACH(shared_resource, &mgmt_ch->shared_resources, link) {
2803 		if (shared_resource->shared_ch == ch->channel) {
2804 			spdk_put_io_channel(mgmt_io_ch);
2805 			shared_resource->ref++;
2806 			break;
2807 		}
2808 	}
2809 
2810 	if (shared_resource == NULL) {
2811 		shared_resource = calloc(1, sizeof(*shared_resource));
2812 		if (shared_resource == NULL) {
2813 			spdk_put_io_channel(ch->channel);
2814 			spdk_put_io_channel(mgmt_io_ch);
2815 			return -1;
2816 		}
2817 
2818 		shared_resource->mgmt_ch = mgmt_ch;
2819 		shared_resource->io_outstanding = 0;
2820 		TAILQ_INIT(&shared_resource->nomem_io);
2821 		shared_resource->nomem_threshold = 0;
2822 		shared_resource->shared_ch = ch->channel;
2823 		shared_resource->ref = 1;
2824 		TAILQ_INSERT_TAIL(&mgmt_ch->shared_resources, shared_resource, link);
2825 	}
2826 
2827 	memset(&ch->stat, 0, sizeof(ch->stat));
2828 	ch->stat.ticks_rate = spdk_get_ticks_hz();
2829 	ch->io_outstanding = 0;
2830 	TAILQ_INIT(&ch->queued_resets);
2831 	TAILQ_INIT(&ch->locked_ranges);
2832 	ch->flags = 0;
2833 	ch->shared_resource = shared_resource;
2834 
2835 	TAILQ_INIT(&ch->io_submitted);
2836 	TAILQ_INIT(&ch->io_locked);
2837 
2838 #ifdef SPDK_CONFIG_VTUNE
2839 	{
2840 		char *name;
2841 		__itt_init_ittlib(NULL, 0);
2842 		name = spdk_sprintf_alloc("spdk_bdev_%s_%p", ch->bdev->name, ch);
2843 		if (!name) {
2844 			bdev_channel_destroy_resource(ch);
2845 			return -1;
2846 		}
2847 		ch->handle = __itt_string_handle_create(name);
2848 		free(name);
2849 		ch->start_tsc = spdk_get_ticks();
2850 		ch->interval_tsc = spdk_get_ticks_hz() / 100;
2851 		memset(&ch->prev_stat, 0, sizeof(ch->prev_stat));
2852 	}
2853 #endif
2854 
2855 	pthread_mutex_lock(&bdev->internal.mutex);
2856 	bdev_enable_qos(bdev, ch);
2857 
2858 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
2859 		struct lba_range *new_range;
2860 
2861 		new_range = calloc(1, sizeof(*new_range));
2862 		if (new_range == NULL) {
2863 			pthread_mutex_unlock(&bdev->internal.mutex);
2864 			bdev_channel_destroy_resource(ch);
2865 			return -1;
2866 		}
2867 		new_range->length = range->length;
2868 		new_range->offset = range->offset;
2869 		new_range->locked_ctx = range->locked_ctx;
2870 		TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
2871 	}
2872 
2873 	pthread_mutex_unlock(&bdev->internal.mutex);
2874 
2875 	return 0;
2876 }
2877 
2878 /*
2879  * Abort I/O that are waiting on a data buffer.  These types of I/O are
2880  *  linked using the spdk_bdev_io internal.buf_link TAILQ_ENTRY.
2881  */
2882 static void
2883 bdev_abort_all_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_channel *ch)
2884 {
2885 	bdev_io_stailq_t tmp;
2886 	struct spdk_bdev_io *bdev_io;
2887 
2888 	STAILQ_INIT(&tmp);
2889 
2890 	while (!STAILQ_EMPTY(queue)) {
2891 		bdev_io = STAILQ_FIRST(queue);
2892 		STAILQ_REMOVE_HEAD(queue, internal.buf_link);
2893 		if (bdev_io->internal.ch == ch) {
2894 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
2895 		} else {
2896 			STAILQ_INSERT_TAIL(&tmp, bdev_io, internal.buf_link);
2897 		}
2898 	}
2899 
2900 	STAILQ_SWAP(&tmp, queue, spdk_bdev_io);
2901 }
2902 
2903 /*
2904  * Abort I/O that are queued waiting for submission.  These types of I/O are
2905  *  linked using the spdk_bdev_io link TAILQ_ENTRY.
2906  */
2907 static void
2908 bdev_abort_all_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
2909 {
2910 	struct spdk_bdev_io *bdev_io, *tmp;
2911 
2912 	TAILQ_FOREACH_SAFE(bdev_io, queue, internal.link, tmp) {
2913 		if (bdev_io->internal.ch == ch) {
2914 			TAILQ_REMOVE(queue, bdev_io, internal.link);
2915 			/*
2916 			 * spdk_bdev_io_complete() assumes that the completed I/O had
2917 			 *  been submitted to the bdev module.  Since in this case it
2918 			 *  hadn't, bump io_outstanding to account for the decrement
2919 			 *  that spdk_bdev_io_complete() will do.
2920 			 */
2921 			if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
2922 				ch->io_outstanding++;
2923 				ch->shared_resource->io_outstanding++;
2924 			}
2925 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
2926 		}
2927 	}
2928 }
2929 
2930 static bool
2931 bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
2932 {
2933 	struct spdk_bdev_io *bdev_io;
2934 
2935 	TAILQ_FOREACH(bdev_io, queue, internal.link) {
2936 		if (bdev_io == bio_to_abort) {
2937 			TAILQ_REMOVE(queue, bio_to_abort, internal.link);
2938 			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
2939 			return true;
2940 		}
2941 	}
2942 
2943 	return false;
2944 }
2945 
2946 static bool
2947 bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort)
2948 {
2949 	struct spdk_bdev_io *bdev_io;
2950 
2951 	STAILQ_FOREACH(bdev_io, queue, internal.buf_link) {
2952 		if (bdev_io == bio_to_abort) {
2953 			STAILQ_REMOVE(queue, bio_to_abort, spdk_bdev_io, internal.buf_link);
2954 			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
2955 			return true;
2956 		}
2957 	}
2958 
2959 	return false;
2960 }
2961 
2962 static void
2963 bdev_qos_channel_destroy(void *cb_arg)
2964 {
2965 	struct spdk_bdev_qos *qos = cb_arg;
2966 
2967 	spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
2968 	spdk_poller_unregister(&qos->poller);
2969 
2970 	SPDK_DEBUGLOG(bdev, "Free QoS %p.\n", qos);
2971 
2972 	free(qos);
2973 }
2974 
2975 static int
2976 bdev_qos_destroy(struct spdk_bdev *bdev)
2977 {
2978 	int i;
2979 
2980 	/*
2981 	 * Cleanly shutting down the QoS poller is tricky, because
2982 	 * during the asynchronous operation the user could open
2983 	 * a new descriptor and create a new channel, spawning
2984 	 * a new QoS poller.
2985 	 *
2986 	 * The strategy is to create a new QoS structure here and swap it
2987 	 * in. The shutdown path then continues to refer to the old one
2988 	 * until it completes and then releases it.
2989 	 */
2990 	struct spdk_bdev_qos *new_qos, *old_qos;
2991 
2992 	old_qos = bdev->internal.qos;
2993 
2994 	new_qos = calloc(1, sizeof(*new_qos));
2995 	if (!new_qos) {
2996 		SPDK_ERRLOG("Unable to allocate memory to shut down QoS.\n");
2997 		return -ENOMEM;
2998 	}
2999 
3000 	/* Copy the old QoS data into the newly allocated structure */
3001 	memcpy(new_qos, old_qos, sizeof(*new_qos));
3002 
3003 	/* Zero out the key parts of the QoS structure */
3004 	new_qos->ch = NULL;
3005 	new_qos->thread = NULL;
3006 	new_qos->poller = NULL;
3007 	TAILQ_INIT(&new_qos->queued);
3008 	/*
3009 	 * The limit member of spdk_bdev_qos_limit structure is not zeroed.
3010 	 * It will be used later for the new QoS structure.
3011 	 */
3012 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3013 		new_qos->rate_limits[i].remaining_this_timeslice = 0;
3014 		new_qos->rate_limits[i].min_per_timeslice = 0;
3015 		new_qos->rate_limits[i].max_per_timeslice = 0;
3016 	}
3017 
3018 	bdev->internal.qos = new_qos;
3019 
3020 	if (old_qos->thread == NULL) {
3021 		free(old_qos);
3022 	} else {
3023 		spdk_thread_send_msg(old_qos->thread, bdev_qos_channel_destroy, old_qos);
3024 	}
3025 
3026 	/* It is safe to continue with destroying the bdev even though the QoS channel hasn't
3027 	 * been destroyed yet. The destruction path will end up waiting for the final
3028 	 * channel to be put before it releases resources. */
3029 
3030 	return 0;
3031 }
3032 
3033 static void
3034 bdev_io_stat_add(struct spdk_bdev_io_stat *total, struct spdk_bdev_io_stat *add)
3035 {
3036 	total->bytes_read += add->bytes_read;
3037 	total->num_read_ops += add->num_read_ops;
3038 	total->bytes_written += add->bytes_written;
3039 	total->num_write_ops += add->num_write_ops;
3040 	total->bytes_unmapped += add->bytes_unmapped;
3041 	total->num_unmap_ops += add->num_unmap_ops;
3042 	total->read_latency_ticks += add->read_latency_ticks;
3043 	total->write_latency_ticks += add->write_latency_ticks;
3044 	total->unmap_latency_ticks += add->unmap_latency_ticks;
3045 }
3046 
3047 static void
3048 bdev_channel_destroy(void *io_device, void *ctx_buf)
3049 {
3050 	struct spdk_bdev_channel	*ch = ctx_buf;
3051 	struct spdk_bdev_mgmt_channel	*mgmt_ch;
3052 	struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
3053 
3054 	SPDK_DEBUGLOG(bdev, "Destroying channel %p for bdev %s on thread %p\n", ch, ch->bdev->name,
3055 		      spdk_get_thread());
3056 
3057 	/* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
3058 	pthread_mutex_lock(&ch->bdev->internal.mutex);
3059 	bdev_io_stat_add(&ch->bdev->internal.stat, &ch->stat);
3060 	pthread_mutex_unlock(&ch->bdev->internal.mutex);
3061 
3062 	mgmt_ch = shared_resource->mgmt_ch;
3063 
3064 	bdev_abort_all_queued_io(&ch->queued_resets, ch);
3065 	bdev_abort_all_queued_io(&shared_resource->nomem_io, ch);
3066 	bdev_abort_all_buf_io(&mgmt_ch->need_buf_small, ch);
3067 	bdev_abort_all_buf_io(&mgmt_ch->need_buf_large, ch);
3068 
3069 	if (ch->histogram) {
3070 		spdk_histogram_data_free(ch->histogram);
3071 	}
3072 
3073 	bdev_channel_destroy_resource(ch);
3074 }
3075 
3076 int
3077 spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
3078 {
3079 	struct spdk_bdev_alias *tmp;
3080 
3081 	if (alias == NULL) {
3082 		SPDK_ERRLOG("Empty alias passed\n");
3083 		return -EINVAL;
3084 	}
3085 
3086 	if (spdk_bdev_get_by_name(alias)) {
3087 		SPDK_ERRLOG("Bdev name/alias: %s already exists\n", alias);
3088 		return -EEXIST;
3089 	}
3090 
3091 	tmp = calloc(1, sizeof(*tmp));
3092 	if (tmp == NULL) {
3093 		SPDK_ERRLOG("Unable to allocate alias\n");
3094 		return -ENOMEM;
3095 	}
3096 
3097 	tmp->alias = strdup(alias);
3098 	if (tmp->alias == NULL) {
3099 		free(tmp);
3100 		SPDK_ERRLOG("Unable to allocate alias\n");
3101 		return -ENOMEM;
3102 	}
3103 
3104 	TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
3105 
3106 	return 0;
3107 }
3108 
3109 int
3110 spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
3111 {
3112 	struct spdk_bdev_alias *tmp;
3113 
3114 	TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
3115 		if (strcmp(alias, tmp->alias) == 0) {
3116 			TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
3117 			free(tmp->alias);
3118 			free(tmp);
3119 			return 0;
3120 		}
3121 	}
3122 
3123 	SPDK_INFOLOG(bdev, "Alias %s does not exists\n", alias);
3124 
3125 	return -ENOENT;
3126 }
3127 
3128 void
3129 spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
3130 {
3131 	struct spdk_bdev_alias *p, *tmp;
3132 
3133 	TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
3134 		TAILQ_REMOVE(&bdev->aliases, p, tailq);
3135 		free(p->alias);
3136 		free(p);
3137 	}
3138 }
3139 
3140 struct spdk_io_channel *
3141 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
3142 {
3143 	return spdk_get_io_channel(__bdev_to_io_dev(spdk_bdev_desc_get_bdev(desc)));
3144 }
3145 
3146 void *
3147 spdk_bdev_get_module_ctx(struct spdk_bdev_desc *desc)
3148 {
3149 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3150 	void *ctx = NULL;
3151 
3152 	if (bdev->fn_table->get_module_ctx) {
3153 		ctx = bdev->fn_table->get_module_ctx(bdev->ctxt);
3154 	}
3155 
3156 	return ctx;
3157 }
3158 
3159 const char *
3160 spdk_bdev_get_module_name(const struct spdk_bdev *bdev)
3161 {
3162 	return bdev->module->name;
3163 }
3164 
3165 const char *
3166 spdk_bdev_get_name(const struct spdk_bdev *bdev)
3167 {
3168 	return bdev->name;
3169 }
3170 
3171 const char *
3172 spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
3173 {
3174 	return bdev->product_name;
3175 }
3176 
3177 const struct spdk_bdev_aliases_list *
3178 spdk_bdev_get_aliases(const struct spdk_bdev *bdev)
3179 {
3180 	return &bdev->aliases;
3181 }
3182 
3183 uint32_t
3184 spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
3185 {
3186 	return bdev->blocklen;
3187 }
3188 
3189 uint32_t
3190 spdk_bdev_get_write_unit_size(const struct spdk_bdev *bdev)
3191 {
3192 	return bdev->write_unit_size;
3193 }
3194 
3195 uint64_t
3196 spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
3197 {
3198 	return bdev->blockcnt;
3199 }
3200 
3201 const char *
3202 spdk_bdev_get_qos_rpc_type(enum spdk_bdev_qos_rate_limit_type type)
3203 {
3204 	return qos_rpc_type[type];
3205 }
3206 
3207 void
3208 spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
3209 {
3210 	int i;
3211 
3212 	memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
3213 
3214 	pthread_mutex_lock(&bdev->internal.mutex);
3215 	if (bdev->internal.qos) {
3216 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3217 			if (bdev->internal.qos->rate_limits[i].limit !=
3218 			    SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
3219 				limits[i] = bdev->internal.qos->rate_limits[i].limit;
3220 				if (bdev_qos_is_iops_rate_limit(i) == false) {
3221 					/* Change from Byte to Megabyte which is user visible. */
3222 					limits[i] = limits[i] / 1024 / 1024;
3223 				}
3224 			}
3225 		}
3226 	}
3227 	pthread_mutex_unlock(&bdev->internal.mutex);
3228 }
3229 
3230 size_t
3231 spdk_bdev_get_buf_align(const struct spdk_bdev *bdev)
3232 {
3233 	return 1 << bdev->required_alignment;
3234 }
3235 
3236 uint32_t
3237 spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
3238 {
3239 	return bdev->optimal_io_boundary;
3240 }
3241 
3242 bool
3243 spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
3244 {
3245 	return bdev->write_cache;
3246 }
3247 
3248 const struct spdk_uuid *
3249 spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
3250 {
3251 	return &bdev->uuid;
3252 }
3253 
3254 uint16_t
3255 spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
3256 {
3257 	return bdev->acwu;
3258 }
3259 
3260 uint32_t
3261 spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
3262 {
3263 	return bdev->md_len;
3264 }
3265 
3266 bool
3267 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
3268 {
3269 	return (bdev->md_len != 0) && bdev->md_interleave;
3270 }
3271 
3272 bool
3273 spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
3274 {
3275 	return (bdev->md_len != 0) && !bdev->md_interleave;
3276 }
3277 
3278 bool
3279 spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
3280 {
3281 	return bdev->zoned;
3282 }
3283 
3284 uint32_t
3285 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
3286 {
3287 	if (spdk_bdev_is_md_interleaved(bdev)) {
3288 		return bdev->blocklen - bdev->md_len;
3289 	} else {
3290 		return bdev->blocklen;
3291 	}
3292 }
3293 
3294 uint32_t
3295 spdk_bdev_get_physical_block_size(const struct spdk_bdev *bdev)
3296 {
3297 	return bdev->phys_blocklen;
3298 }
3299 
3300 static uint32_t
3301 _bdev_get_block_size_with_md(const struct spdk_bdev *bdev)
3302 {
3303 	if (!spdk_bdev_is_md_interleaved(bdev)) {
3304 		return bdev->blocklen + bdev->md_len;
3305 	} else {
3306 		return bdev->blocklen;
3307 	}
3308 }
3309 
3310 enum spdk_dif_type spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
3311 {
3312 	if (bdev->md_len != 0) {
3313 		return bdev->dif_type;
3314 	} else {
3315 		return SPDK_DIF_DISABLE;
3316 	}
3317 }
3318 
3319 bool
3320 spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
3321 {
3322 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
3323 		return bdev->dif_is_head_of_md;
3324 	} else {
3325 		return false;
3326 	}
3327 }
3328 
3329 bool
3330 spdk_bdev_is_dif_check_enabled(const struct spdk_bdev *bdev,
3331 			       enum spdk_dif_check_type check_type)
3332 {
3333 	if (spdk_bdev_get_dif_type(bdev) == SPDK_DIF_DISABLE) {
3334 		return false;
3335 	}
3336 
3337 	switch (check_type) {
3338 	case SPDK_DIF_CHECK_TYPE_REFTAG:
3339 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) != 0;
3340 	case SPDK_DIF_CHECK_TYPE_APPTAG:
3341 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) != 0;
3342 	case SPDK_DIF_CHECK_TYPE_GUARD:
3343 		return (bdev->dif_check_flags & SPDK_DIF_FLAGS_GUARD_CHECK) != 0;
3344 	default:
3345 		return false;
3346 	}
3347 }
3348 
3349 uint64_t
3350 spdk_bdev_get_qd(const struct spdk_bdev *bdev)
3351 {
3352 	return bdev->internal.measured_queue_depth;
3353 }
3354 
3355 uint64_t
3356 spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
3357 {
3358 	return bdev->internal.period;
3359 }
3360 
3361 uint64_t
3362 spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev)
3363 {
3364 	return bdev->internal.weighted_io_time;
3365 }
3366 
3367 uint64_t
3368 spdk_bdev_get_io_time(const struct spdk_bdev *bdev)
3369 {
3370 	return bdev->internal.io_time;
3371 }
3372 
3373 static void
3374 _calculate_measured_qd_cpl(struct spdk_io_channel_iter *i, int status)
3375 {
3376 	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
3377 
3378 	bdev->internal.measured_queue_depth = bdev->internal.temporary_queue_depth;
3379 
3380 	if (bdev->internal.measured_queue_depth) {
3381 		bdev->internal.io_time += bdev->internal.period;
3382 		bdev->internal.weighted_io_time += bdev->internal.period * bdev->internal.measured_queue_depth;
3383 	}
3384 }
3385 
3386 static void
3387 _calculate_measured_qd(struct spdk_io_channel_iter *i)
3388 {
3389 	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
3390 	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
3391 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(io_ch);
3392 
3393 	bdev->internal.temporary_queue_depth += ch->io_outstanding;
3394 	spdk_for_each_channel_continue(i, 0);
3395 }
3396 
3397 static int
3398 bdev_calculate_measured_queue_depth(void *ctx)
3399 {
3400 	struct spdk_bdev *bdev = ctx;
3401 	bdev->internal.temporary_queue_depth = 0;
3402 	spdk_for_each_channel(__bdev_to_io_dev(bdev), _calculate_measured_qd, bdev,
3403 			      _calculate_measured_qd_cpl);
3404 	return SPDK_POLLER_BUSY;
3405 }
3406 
3407 void
3408 spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
3409 {
3410 	bdev->internal.period = period;
3411 
3412 	if (bdev->internal.qd_poller != NULL) {
3413 		spdk_poller_unregister(&bdev->internal.qd_poller);
3414 		bdev->internal.measured_queue_depth = UINT64_MAX;
3415 	}
3416 
3417 	if (period != 0) {
3418 		bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth, bdev,
3419 					   period);
3420 	}
3421 }
3422 
3423 static void
3424 _resize_notify(void *arg)
3425 {
3426 	struct spdk_bdev_desc *desc = arg;
3427 
3428 	pthread_mutex_lock(&desc->mutex);
3429 	desc->refs--;
3430 	if (!desc->closed) {
3431 		pthread_mutex_unlock(&desc->mutex);
3432 		desc->callback.event_fn(SPDK_BDEV_EVENT_RESIZE,
3433 					desc->bdev,
3434 					desc->callback.ctx);
3435 		return;
3436 	} else if (0 == desc->refs) {
3437 		/* This descriptor was closed after this resize_notify message was sent.
3438 		 * spdk_bdev_close() could not free the descriptor since this message was
3439 		 * in flight, so we free it now using bdev_desc_free().
3440 		 */
3441 		pthread_mutex_unlock(&desc->mutex);
3442 		bdev_desc_free(desc);
3443 		return;
3444 	}
3445 	pthread_mutex_unlock(&desc->mutex);
3446 }
3447 
3448 int
3449 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
3450 {
3451 	struct spdk_bdev_desc *desc;
3452 	int ret;
3453 
3454 	if (size == bdev->blockcnt) {
3455 		return 0;
3456 	}
3457 
3458 	pthread_mutex_lock(&bdev->internal.mutex);
3459 
3460 	/* bdev has open descriptors */
3461 	if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
3462 	    bdev->blockcnt > size) {
3463 		ret = -EBUSY;
3464 	} else {
3465 		bdev->blockcnt = size;
3466 		TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
3467 			pthread_mutex_lock(&desc->mutex);
3468 			if (!desc->closed) {
3469 				desc->refs++;
3470 				spdk_thread_send_msg(desc->thread, _resize_notify, desc);
3471 			}
3472 			pthread_mutex_unlock(&desc->mutex);
3473 		}
3474 		ret = 0;
3475 	}
3476 
3477 	pthread_mutex_unlock(&bdev->internal.mutex);
3478 
3479 	return ret;
3480 }
3481 
3482 /*
3483  * Convert I/O offset and length from bytes to blocks.
3484  *
3485  * Returns zero on success or non-zero if the byte parameters aren't divisible by the block size.
3486  */
3487 static uint64_t
3488 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t offset_bytes, uint64_t *offset_blocks,
3489 		     uint64_t num_bytes, uint64_t *num_blocks)
3490 {
3491 	uint32_t block_size = bdev->blocklen;
3492 	uint8_t shift_cnt;
3493 
3494 	/* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
3495 	if (spdk_likely(spdk_u32_is_pow2(block_size))) {
3496 		shift_cnt = spdk_u32log2(block_size);
3497 		*offset_blocks = offset_bytes >> shift_cnt;
3498 		*num_blocks = num_bytes >> shift_cnt;
3499 		return (offset_bytes - (*offset_blocks << shift_cnt)) |
3500 		       (num_bytes - (*num_blocks << shift_cnt));
3501 	} else {
3502 		*offset_blocks = offset_bytes / block_size;
3503 		*num_blocks = num_bytes / block_size;
3504 		return (offset_bytes % block_size) | (num_bytes % block_size);
3505 	}
3506 }
3507 
3508 static bool
3509 bdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t offset_blocks, uint64_t num_blocks)
3510 {
3511 	/* Return failure if offset_blocks + num_blocks is less than offset_blocks; indicates there
3512 	 * has been an overflow and hence the offset has been wrapped around */
3513 	if (offset_blocks + num_blocks < offset_blocks) {
3514 		return false;
3515 	}
3516 
3517 	/* Return failure if offset_blocks + num_blocks exceeds the size of the bdev */
3518 	if (offset_blocks + num_blocks > bdev->blockcnt) {
3519 		return false;
3520 	}
3521 
3522 	return true;
3523 }
3524 
3525 static bool
3526 _bdev_io_check_md_buf(const struct iovec *iovs, const void *md_buf)
3527 {
3528 	return _is_buf_allocated(iovs) == (md_buf != NULL);
3529 }
3530 
3531 static int
3532 bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
3533 			 void *md_buf, int64_t offset_blocks, uint64_t num_blocks,
3534 			 spdk_bdev_io_completion_cb cb, void *cb_arg)
3535 {
3536 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3537 	struct spdk_bdev_io *bdev_io;
3538 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
3539 
3540 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
3541 		return -EINVAL;
3542 	}
3543 
3544 	bdev_io = bdev_channel_get_io(channel);
3545 	if (!bdev_io) {
3546 		return -ENOMEM;
3547 	}
3548 
3549 	bdev_io->internal.ch = channel;
3550 	bdev_io->internal.desc = desc;
3551 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
3552 	bdev_io->u.bdev.iovs = &bdev_io->iov;
3553 	bdev_io->u.bdev.iovs[0].iov_base = buf;
3554 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
3555 	bdev_io->u.bdev.iovcnt = 1;
3556 	bdev_io->u.bdev.md_buf = md_buf;
3557 	bdev_io->u.bdev.num_blocks = num_blocks;
3558 	bdev_io->u.bdev.offset_blocks = offset_blocks;
3559 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
3560 
3561 	bdev_io_submit(bdev_io);
3562 	return 0;
3563 }
3564 
3565 int
3566 spdk_bdev_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3567 	       void *buf, uint64_t offset, uint64_t nbytes,
3568 	       spdk_bdev_io_completion_cb cb, void *cb_arg)
3569 {
3570 	uint64_t offset_blocks, num_blocks;
3571 
3572 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
3573 				 nbytes, &num_blocks) != 0) {
3574 		return -EINVAL;
3575 	}
3576 
3577 	return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
3578 }
3579 
3580 int
3581 spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3582 		      void *buf, uint64_t offset_blocks, uint64_t num_blocks,
3583 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
3584 {
3585 	return bdev_read_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks, cb, cb_arg);
3586 }
3587 
3588 int
3589 spdk_bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3590 			      void *buf, void *md_buf, int64_t offset_blocks, uint64_t num_blocks,
3591 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
3592 {
3593 	struct iovec iov = {
3594 		.iov_base = buf,
3595 	};
3596 
3597 	if (!spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
3598 		return -EINVAL;
3599 	}
3600 
3601 	if (!_bdev_io_check_md_buf(&iov, md_buf)) {
3602 		return -EINVAL;
3603 	}
3604 
3605 	return bdev_read_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
3606 					cb, cb_arg);
3607 }
3608 
3609 int
3610 spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3611 		struct iovec *iov, int iovcnt,
3612 		uint64_t offset, uint64_t nbytes,
3613 		spdk_bdev_io_completion_cb cb, void *cb_arg)
3614 {
3615 	uint64_t offset_blocks, num_blocks;
3616 
3617 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
3618 				 nbytes, &num_blocks) != 0) {
3619 		return -EINVAL;
3620 	}
3621 
3622 	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
3623 }
3624 
3625 static int
3626 bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3627 			  struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
3628 			  uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg)
3629 {
3630 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3631 	struct spdk_bdev_io *bdev_io;
3632 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
3633 
3634 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
3635 		return -EINVAL;
3636 	}
3637 
3638 	bdev_io = bdev_channel_get_io(channel);
3639 	if (!bdev_io) {
3640 		return -ENOMEM;
3641 	}
3642 
3643 	bdev_io->internal.ch = channel;
3644 	bdev_io->internal.desc = desc;
3645 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
3646 	bdev_io->u.bdev.iovs = iov;
3647 	bdev_io->u.bdev.iovcnt = iovcnt;
3648 	bdev_io->u.bdev.md_buf = md_buf;
3649 	bdev_io->u.bdev.num_blocks = num_blocks;
3650 	bdev_io->u.bdev.offset_blocks = offset_blocks;
3651 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
3652 
3653 	bdev_io_submit(bdev_io);
3654 	return 0;
3655 }
3656 
3657 int spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3658 			   struct iovec *iov, int iovcnt,
3659 			   uint64_t offset_blocks, uint64_t num_blocks,
3660 			   spdk_bdev_io_completion_cb cb, void *cb_arg)
3661 {
3662 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
3663 					 num_blocks, cb, cb_arg);
3664 }
3665 
3666 int
3667 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3668 			       struct iovec *iov, int iovcnt, void *md_buf,
3669 			       uint64_t offset_blocks, uint64_t num_blocks,
3670 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
3671 {
3672 	if (!spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
3673 		return -EINVAL;
3674 	}
3675 
3676 	if (!_bdev_io_check_md_buf(iov, md_buf)) {
3677 		return -EINVAL;
3678 	}
3679 
3680 	return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
3681 					 num_blocks, cb, cb_arg);
3682 }
3683 
3684 static int
3685 bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3686 			  void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
3687 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
3688 {
3689 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3690 	struct spdk_bdev_io *bdev_io;
3691 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
3692 
3693 	if (!desc->write) {
3694 		return -EBADF;
3695 	}
3696 
3697 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
3698 		return -EINVAL;
3699 	}
3700 
3701 	bdev_io = bdev_channel_get_io(channel);
3702 	if (!bdev_io) {
3703 		return -ENOMEM;
3704 	}
3705 
3706 	bdev_io->internal.ch = channel;
3707 	bdev_io->internal.desc = desc;
3708 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
3709 	bdev_io->u.bdev.iovs = &bdev_io->iov;
3710 	bdev_io->u.bdev.iovs[0].iov_base = buf;
3711 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
3712 	bdev_io->u.bdev.iovcnt = 1;
3713 	bdev_io->u.bdev.md_buf = md_buf;
3714 	bdev_io->u.bdev.num_blocks = num_blocks;
3715 	bdev_io->u.bdev.offset_blocks = offset_blocks;
3716 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
3717 
3718 	bdev_io_submit(bdev_io);
3719 	return 0;
3720 }
3721 
3722 int
3723 spdk_bdev_write(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3724 		void *buf, uint64_t offset, uint64_t nbytes,
3725 		spdk_bdev_io_completion_cb cb, void *cb_arg)
3726 {
3727 	uint64_t offset_blocks, num_blocks;
3728 
3729 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
3730 				 nbytes, &num_blocks) != 0) {
3731 		return -EINVAL;
3732 	}
3733 
3734 	return spdk_bdev_write_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
3735 }
3736 
3737 int
3738 spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3739 		       void *buf, uint64_t offset_blocks, uint64_t num_blocks,
3740 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
3741 {
3742 	return bdev_write_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
3743 					 cb, cb_arg);
3744 }
3745 
3746 int
3747 spdk_bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3748 			       void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
3749 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
3750 {
3751 	struct iovec iov = {
3752 		.iov_base = buf,
3753 	};
3754 
3755 	if (!spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
3756 		return -EINVAL;
3757 	}
3758 
3759 	if (!_bdev_io_check_md_buf(&iov, md_buf)) {
3760 		return -EINVAL;
3761 	}
3762 
3763 	return bdev_write_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
3764 					 cb, cb_arg);
3765 }
3766 
3767 static int
3768 bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3769 			   struct iovec *iov, int iovcnt, void *md_buf,
3770 			   uint64_t offset_blocks, uint64_t num_blocks,
3771 			   spdk_bdev_io_completion_cb cb, void *cb_arg)
3772 {
3773 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3774 	struct spdk_bdev_io *bdev_io;
3775 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
3776 
3777 	if (!desc->write) {
3778 		return -EBADF;
3779 	}
3780 
3781 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
3782 		return -EINVAL;
3783 	}
3784 
3785 	bdev_io = bdev_channel_get_io(channel);
3786 	if (!bdev_io) {
3787 		return -ENOMEM;
3788 	}
3789 
3790 	bdev_io->internal.ch = channel;
3791 	bdev_io->internal.desc = desc;
3792 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
3793 	bdev_io->u.bdev.iovs = iov;
3794 	bdev_io->u.bdev.iovcnt = iovcnt;
3795 	bdev_io->u.bdev.md_buf = md_buf;
3796 	bdev_io->u.bdev.num_blocks = num_blocks;
3797 	bdev_io->u.bdev.offset_blocks = offset_blocks;
3798 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
3799 
3800 	bdev_io_submit(bdev_io);
3801 	return 0;
3802 }
3803 
3804 int
3805 spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3806 		 struct iovec *iov, int iovcnt,
3807 		 uint64_t offset, uint64_t len,
3808 		 spdk_bdev_io_completion_cb cb, void *cb_arg)
3809 {
3810 	uint64_t offset_blocks, num_blocks;
3811 
3812 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
3813 				 len, &num_blocks) != 0) {
3814 		return -EINVAL;
3815 	}
3816 
3817 	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
3818 }
3819 
3820 int
3821 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3822 			struct iovec *iov, int iovcnt,
3823 			uint64_t offset_blocks, uint64_t num_blocks,
3824 			spdk_bdev_io_completion_cb cb, void *cb_arg)
3825 {
3826 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
3827 					  num_blocks, cb, cb_arg);
3828 }
3829 
3830 int
3831 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3832 				struct iovec *iov, int iovcnt, void *md_buf,
3833 				uint64_t offset_blocks, uint64_t num_blocks,
3834 				spdk_bdev_io_completion_cb cb, void *cb_arg)
3835 {
3836 	if (!spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
3837 		return -EINVAL;
3838 	}
3839 
3840 	if (!_bdev_io_check_md_buf(iov, md_buf)) {
3841 		return -EINVAL;
3842 	}
3843 
3844 	return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
3845 					  num_blocks, cb, cb_arg);
3846 }
3847 
3848 static void
3849 bdev_compare_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
3850 {
3851 	struct spdk_bdev_io *parent_io = cb_arg;
3852 	uint8_t *read_buf = bdev_io->u.bdev.iovs[0].iov_base;
3853 	int i, rc = 0;
3854 
3855 	if (!success) {
3856 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3857 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
3858 		spdk_bdev_free_io(bdev_io);
3859 		return;
3860 	}
3861 
3862 	for (i = 0; i < parent_io->u.bdev.iovcnt; i++) {
3863 		rc = memcmp(read_buf,
3864 			    parent_io->u.bdev.iovs[i].iov_base,
3865 			    parent_io->u.bdev.iovs[i].iov_len);
3866 		if (rc) {
3867 			break;
3868 		}
3869 		read_buf += parent_io->u.bdev.iovs[i].iov_len;
3870 	}
3871 
3872 	spdk_bdev_free_io(bdev_io);
3873 
3874 	if (rc == 0) {
3875 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
3876 		parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
3877 	} else {
3878 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
3879 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
3880 	}
3881 }
3882 
3883 static void
3884 bdev_compare_do_read(void *_bdev_io)
3885 {
3886 	struct spdk_bdev_io *bdev_io = _bdev_io;
3887 	int rc;
3888 
3889 	rc = spdk_bdev_read_blocks(bdev_io->internal.desc,
3890 				   spdk_io_channel_from_ctx(bdev_io->internal.ch), NULL,
3891 				   bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
3892 				   bdev_compare_do_read_done, bdev_io);
3893 
3894 	if (rc == -ENOMEM) {
3895 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_do_read);
3896 	} else if (rc != 0) {
3897 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3898 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3899 	}
3900 }
3901 
3902 static int
3903 bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3904 			     struct iovec *iov, int iovcnt, void *md_buf,
3905 			     uint64_t offset_blocks, uint64_t num_blocks,
3906 			     spdk_bdev_io_completion_cb cb, void *cb_arg)
3907 {
3908 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3909 	struct spdk_bdev_io *bdev_io;
3910 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
3911 
3912 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
3913 		return -EINVAL;
3914 	}
3915 
3916 	bdev_io = bdev_channel_get_io(channel);
3917 	if (!bdev_io) {
3918 		return -ENOMEM;
3919 	}
3920 
3921 	bdev_io->internal.ch = channel;
3922 	bdev_io->internal.desc = desc;
3923 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
3924 	bdev_io->u.bdev.iovs = iov;
3925 	bdev_io->u.bdev.iovcnt = iovcnt;
3926 	bdev_io->u.bdev.md_buf = md_buf;
3927 	bdev_io->u.bdev.num_blocks = num_blocks;
3928 	bdev_io->u.bdev.offset_blocks = offset_blocks;
3929 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
3930 
3931 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
3932 		bdev_io_submit(bdev_io);
3933 		return 0;
3934 	}
3935 
3936 	bdev_compare_do_read(bdev_io);
3937 
3938 	return 0;
3939 }
3940 
3941 int
3942 spdk_bdev_comparev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3943 			  struct iovec *iov, int iovcnt,
3944 			  uint64_t offset_blocks, uint64_t num_blocks,
3945 			  spdk_bdev_io_completion_cb cb, void *cb_arg)
3946 {
3947 	return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
3948 					    num_blocks, cb, cb_arg);
3949 }
3950 
3951 int
3952 spdk_bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3953 				  struct iovec *iov, int iovcnt, void *md_buf,
3954 				  uint64_t offset_blocks, uint64_t num_blocks,
3955 				  spdk_bdev_io_completion_cb cb, void *cb_arg)
3956 {
3957 	if (!spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
3958 		return -EINVAL;
3959 	}
3960 
3961 	if (!_bdev_io_check_md_buf(iov, md_buf)) {
3962 		return -EINVAL;
3963 	}
3964 
3965 	return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
3966 					    num_blocks, cb, cb_arg);
3967 }
3968 
3969 static int
3970 bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
3971 			    void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
3972 			    spdk_bdev_io_completion_cb cb, void *cb_arg)
3973 {
3974 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
3975 	struct spdk_bdev_io *bdev_io;
3976 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
3977 
3978 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
3979 		return -EINVAL;
3980 	}
3981 
3982 	bdev_io = bdev_channel_get_io(channel);
3983 	if (!bdev_io) {
3984 		return -ENOMEM;
3985 	}
3986 
3987 	bdev_io->internal.ch = channel;
3988 	bdev_io->internal.desc = desc;
3989 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
3990 	bdev_io->u.bdev.iovs = &bdev_io->iov;
3991 	bdev_io->u.bdev.iovs[0].iov_base = buf;
3992 	bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
3993 	bdev_io->u.bdev.iovcnt = 1;
3994 	bdev_io->u.bdev.md_buf = md_buf;
3995 	bdev_io->u.bdev.num_blocks = num_blocks;
3996 	bdev_io->u.bdev.offset_blocks = offset_blocks;
3997 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
3998 
3999 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
4000 		bdev_io_submit(bdev_io);
4001 		return 0;
4002 	}
4003 
4004 	bdev_compare_do_read(bdev_io);
4005 
4006 	return 0;
4007 }
4008 
4009 int
4010 spdk_bdev_compare_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4011 			 void *buf, uint64_t offset_blocks, uint64_t num_blocks,
4012 			 spdk_bdev_io_completion_cb cb, void *cb_arg)
4013 {
4014 	return bdev_compare_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
4015 					   cb, cb_arg);
4016 }
4017 
4018 int
4019 spdk_bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4020 				 void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
4021 				 spdk_bdev_io_completion_cb cb, void *cb_arg)
4022 {
4023 	struct iovec iov = {
4024 		.iov_base = buf,
4025 	};
4026 
4027 	if (!spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
4028 		return -EINVAL;
4029 	}
4030 
4031 	if (!_bdev_io_check_md_buf(&iov, md_buf)) {
4032 		return -EINVAL;
4033 	}
4034 
4035 	return bdev_compare_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
4036 					   cb, cb_arg);
4037 }
4038 
4039 static void
4040 bdev_comparev_and_writev_blocks_unlocked(void *ctx, int unlock_status)
4041 {
4042 	struct spdk_bdev_io *bdev_io = ctx;
4043 
4044 	if (unlock_status) {
4045 		SPDK_ERRLOG("LBA range unlock failed\n");
4046 	}
4047 
4048 	bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS ? true :
4049 			     false, bdev_io->internal.caller_ctx);
4050 }
4051 
4052 static void
4053 bdev_comparev_and_writev_blocks_unlock(struct spdk_bdev_io *bdev_io, int status)
4054 {
4055 	bdev_io->internal.status = status;
4056 
4057 	bdev_unlock_lba_range(bdev_io->internal.desc, spdk_io_channel_from_ctx(bdev_io->internal.ch),
4058 			      bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4059 			      bdev_comparev_and_writev_blocks_unlocked, bdev_io);
4060 }
4061 
4062 static void
4063 bdev_compare_and_write_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
4064 {
4065 	struct spdk_bdev_io *parent_io = cb_arg;
4066 
4067 	if (!success) {
4068 		SPDK_ERRLOG("Compare and write operation failed\n");
4069 	}
4070 
4071 	spdk_bdev_free_io(bdev_io);
4072 
4073 	bdev_comparev_and_writev_blocks_unlock(parent_io,
4074 					       success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
4075 }
4076 
4077 static void
4078 bdev_compare_and_write_do_write(void *_bdev_io)
4079 {
4080 	struct spdk_bdev_io *bdev_io = _bdev_io;
4081 	int rc;
4082 
4083 	rc = spdk_bdev_writev_blocks(bdev_io->internal.desc,
4084 				     spdk_io_channel_from_ctx(bdev_io->internal.ch),
4085 				     bdev_io->u.bdev.fused_iovs, bdev_io->u.bdev.fused_iovcnt,
4086 				     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4087 				     bdev_compare_and_write_do_write_done, bdev_io);
4088 
4089 
4090 	if (rc == -ENOMEM) {
4091 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_write);
4092 	} else if (rc != 0) {
4093 		bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
4094 	}
4095 }
4096 
4097 static void
4098 bdev_compare_and_write_do_compare_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
4099 {
4100 	struct spdk_bdev_io *parent_io = cb_arg;
4101 
4102 	spdk_bdev_free_io(bdev_io);
4103 
4104 	if (!success) {
4105 		bdev_comparev_and_writev_blocks_unlock(parent_io, SPDK_BDEV_IO_STATUS_MISCOMPARE);
4106 		return;
4107 	}
4108 
4109 	bdev_compare_and_write_do_write(parent_io);
4110 }
4111 
4112 static void
4113 bdev_compare_and_write_do_compare(void *_bdev_io)
4114 {
4115 	struct spdk_bdev_io *bdev_io = _bdev_io;
4116 	int rc;
4117 
4118 	rc = spdk_bdev_comparev_blocks(bdev_io->internal.desc,
4119 				       spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs,
4120 				       bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
4121 				       bdev_compare_and_write_do_compare_done, bdev_io);
4122 
4123 	if (rc == -ENOMEM) {
4124 		bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_compare);
4125 	} else if (rc != 0) {
4126 		bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED);
4127 	}
4128 }
4129 
4130 static void
4131 bdev_comparev_and_writev_blocks_locked(void *ctx, int status)
4132 {
4133 	struct spdk_bdev_io *bdev_io = ctx;
4134 
4135 	if (status) {
4136 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED;
4137 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
4138 		return;
4139 	}
4140 
4141 	bdev_compare_and_write_do_compare(bdev_io);
4142 }
4143 
4144 int
4145 spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4146 				     struct iovec *compare_iov, int compare_iovcnt,
4147 				     struct iovec *write_iov, int write_iovcnt,
4148 				     uint64_t offset_blocks, uint64_t num_blocks,
4149 				     spdk_bdev_io_completion_cb cb, void *cb_arg)
4150 {
4151 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4152 	struct spdk_bdev_io *bdev_io;
4153 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4154 
4155 	if (!desc->write) {
4156 		return -EBADF;
4157 	}
4158 
4159 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4160 		return -EINVAL;
4161 	}
4162 
4163 	if (num_blocks > bdev->acwu) {
4164 		return -EINVAL;
4165 	}
4166 
4167 	bdev_io = bdev_channel_get_io(channel);
4168 	if (!bdev_io) {
4169 		return -ENOMEM;
4170 	}
4171 
4172 	bdev_io->internal.ch = channel;
4173 	bdev_io->internal.desc = desc;
4174 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
4175 	bdev_io->u.bdev.iovs = compare_iov;
4176 	bdev_io->u.bdev.iovcnt = compare_iovcnt;
4177 	bdev_io->u.bdev.fused_iovs = write_iov;
4178 	bdev_io->u.bdev.fused_iovcnt = write_iovcnt;
4179 	bdev_io->u.bdev.md_buf = NULL;
4180 	bdev_io->u.bdev.num_blocks = num_blocks;
4181 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4182 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4183 
4184 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
4185 		bdev_io_submit(bdev_io);
4186 		return 0;
4187 	}
4188 
4189 	return bdev_lock_lba_range(desc, ch, offset_blocks, num_blocks,
4190 				   bdev_comparev_and_writev_blocks_locked, bdev_io);
4191 }
4192 
4193 static void
4194 bdev_zcopy_get_buf(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
4195 {
4196 	if (!success) {
4197 		/* Don't use spdk_bdev_io_complete here - this bdev_io was never actually submitted. */
4198 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NOMEM;
4199 		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
4200 		return;
4201 	}
4202 
4203 	if (bdev_io->u.bdev.zcopy.populate) {
4204 		/* Read the real data into the buffer */
4205 		bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
4206 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
4207 		bdev_io_submit(bdev_io);
4208 		return;
4209 	}
4210 
4211 	/* Don't use spdk_bdev_io_complete here - this bdev_io was never actually submitted. */
4212 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
4213 	bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
4214 }
4215 
4216 int
4217 spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4218 		      uint64_t offset_blocks, uint64_t num_blocks,
4219 		      bool populate,
4220 		      spdk_bdev_io_completion_cb cb, void *cb_arg)
4221 {
4222 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4223 	struct spdk_bdev_io *bdev_io;
4224 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4225 
4226 	if (!desc->write) {
4227 		return -EBADF;
4228 	}
4229 
4230 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4231 		return -EINVAL;
4232 	}
4233 
4234 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
4235 		return -ENOTSUP;
4236 	}
4237 
4238 	bdev_io = bdev_channel_get_io(channel);
4239 	if (!bdev_io) {
4240 		return -ENOMEM;
4241 	}
4242 
4243 	bdev_io->internal.ch = channel;
4244 	bdev_io->internal.desc = desc;
4245 	bdev_io->type = SPDK_BDEV_IO_TYPE_ZCOPY;
4246 	bdev_io->u.bdev.num_blocks = num_blocks;
4247 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4248 	bdev_io->u.bdev.iovs = NULL;
4249 	bdev_io->u.bdev.iovcnt = 0;
4250 	bdev_io->u.bdev.md_buf = NULL;
4251 	bdev_io->u.bdev.zcopy.populate = populate ? 1 : 0;
4252 	bdev_io->u.bdev.zcopy.commit = 0;
4253 	bdev_io->u.bdev.zcopy.start = 1;
4254 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4255 
4256 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
4257 		bdev_io_submit(bdev_io);
4258 	} else {
4259 		/* Emulate zcopy by allocating a buffer */
4260 		spdk_bdev_io_get_buf(bdev_io, bdev_zcopy_get_buf,
4261 				     bdev_io->u.bdev.num_blocks * bdev->blocklen);
4262 	}
4263 
4264 	return 0;
4265 }
4266 
4267 int
4268 spdk_bdev_zcopy_end(struct spdk_bdev_io *bdev_io, bool commit,
4269 		    spdk_bdev_io_completion_cb cb, void *cb_arg)
4270 {
4271 	struct spdk_bdev *bdev = bdev_io->bdev;
4272 
4273 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
4274 		/* This can happen if the zcopy was emulated in start */
4275 		if (bdev_io->u.bdev.zcopy.start != 1) {
4276 			return -EINVAL;
4277 		}
4278 		bdev_io->type = SPDK_BDEV_IO_TYPE_ZCOPY;
4279 	}
4280 
4281 	if (bdev_io->type != SPDK_BDEV_IO_TYPE_ZCOPY) {
4282 		return -EINVAL;
4283 	}
4284 
4285 	bdev_io->u.bdev.zcopy.commit = commit ? 1 : 0;
4286 	bdev_io->u.bdev.zcopy.start = 0;
4287 	bdev_io->internal.caller_ctx = cb_arg;
4288 	bdev_io->internal.cb = cb;
4289 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
4290 
4291 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
4292 		bdev_io_submit(bdev_io);
4293 		return 0;
4294 	}
4295 
4296 	if (!bdev_io->u.bdev.zcopy.commit) {
4297 		/* Don't use spdk_bdev_io_complete here - this bdev_io was never actually submitted. */
4298 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
4299 		bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
4300 		return 0;
4301 	}
4302 
4303 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
4304 	bdev_io_submit(bdev_io);
4305 
4306 	return 0;
4307 }
4308 
4309 int
4310 spdk_bdev_write_zeroes(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4311 		       uint64_t offset, uint64_t len,
4312 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
4313 {
4314 	uint64_t offset_blocks, num_blocks;
4315 
4316 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4317 				 len, &num_blocks) != 0) {
4318 		return -EINVAL;
4319 	}
4320 
4321 	return spdk_bdev_write_zeroes_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
4322 }
4323 
4324 int
4325 spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4326 			      uint64_t offset_blocks, uint64_t num_blocks,
4327 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
4328 {
4329 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4330 	struct spdk_bdev_io *bdev_io;
4331 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4332 
4333 	if (!desc->write) {
4334 		return -EBADF;
4335 	}
4336 
4337 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4338 		return -EINVAL;
4339 	}
4340 
4341 	if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) &&
4342 	    !bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
4343 		return -ENOTSUP;
4344 	}
4345 
4346 	bdev_io = bdev_channel_get_io(channel);
4347 
4348 	if (!bdev_io) {
4349 		return -ENOMEM;
4350 	}
4351 
4352 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
4353 	bdev_io->internal.ch = channel;
4354 	bdev_io->internal.desc = desc;
4355 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4356 	bdev_io->u.bdev.num_blocks = num_blocks;
4357 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4358 
4359 	if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
4360 		bdev_io_submit(bdev_io);
4361 		return 0;
4362 	}
4363 
4364 	assert(bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE));
4365 	assert(_bdev_get_block_size_with_md(bdev) <= ZERO_BUFFER_SIZE);
4366 	bdev_io->u.bdev.split_remaining_num_blocks = num_blocks;
4367 	bdev_io->u.bdev.split_current_offset_blocks = offset_blocks;
4368 	bdev_write_zero_buffer_next(bdev_io);
4369 
4370 	return 0;
4371 }
4372 
4373 int
4374 spdk_bdev_unmap(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4375 		uint64_t offset, uint64_t nbytes,
4376 		spdk_bdev_io_completion_cb cb, void *cb_arg)
4377 {
4378 	uint64_t offset_blocks, num_blocks;
4379 
4380 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4381 				 nbytes, &num_blocks) != 0) {
4382 		return -EINVAL;
4383 	}
4384 
4385 	return spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
4386 }
4387 
4388 int
4389 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4390 		       uint64_t offset_blocks, uint64_t num_blocks,
4391 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
4392 {
4393 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4394 	struct spdk_bdev_io *bdev_io;
4395 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4396 
4397 	if (!desc->write) {
4398 		return -EBADF;
4399 	}
4400 
4401 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4402 		return -EINVAL;
4403 	}
4404 
4405 	if (num_blocks == 0) {
4406 		SPDK_ERRLOG("Can't unmap 0 bytes\n");
4407 		return -EINVAL;
4408 	}
4409 
4410 	bdev_io = bdev_channel_get_io(channel);
4411 	if (!bdev_io) {
4412 		return -ENOMEM;
4413 	}
4414 
4415 	bdev_io->internal.ch = channel;
4416 	bdev_io->internal.desc = desc;
4417 	bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
4418 
4419 	bdev_io->u.bdev.iovs = &bdev_io->iov;
4420 	bdev_io->u.bdev.iovs[0].iov_base = NULL;
4421 	bdev_io->u.bdev.iovs[0].iov_len = 0;
4422 	bdev_io->u.bdev.iovcnt = 1;
4423 
4424 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4425 	bdev_io->u.bdev.num_blocks = num_blocks;
4426 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4427 
4428 	bdev_io_submit(bdev_io);
4429 	return 0;
4430 }
4431 
4432 int
4433 spdk_bdev_flush(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4434 		uint64_t offset, uint64_t length,
4435 		spdk_bdev_io_completion_cb cb, void *cb_arg)
4436 {
4437 	uint64_t offset_blocks, num_blocks;
4438 
4439 	if (bdev_bytes_to_blocks(spdk_bdev_desc_get_bdev(desc), offset, &offset_blocks,
4440 				 length, &num_blocks) != 0) {
4441 		return -EINVAL;
4442 	}
4443 
4444 	return spdk_bdev_flush_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
4445 }
4446 
4447 int
4448 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4449 		       uint64_t offset_blocks, uint64_t num_blocks,
4450 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
4451 {
4452 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4453 	struct spdk_bdev_io *bdev_io;
4454 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4455 
4456 	if (!desc->write) {
4457 		return -EBADF;
4458 	}
4459 
4460 	if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
4461 		return -EINVAL;
4462 	}
4463 
4464 	bdev_io = bdev_channel_get_io(channel);
4465 	if (!bdev_io) {
4466 		return -ENOMEM;
4467 	}
4468 
4469 	bdev_io->internal.ch = channel;
4470 	bdev_io->internal.desc = desc;
4471 	bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
4472 	bdev_io->u.bdev.iovs = NULL;
4473 	bdev_io->u.bdev.iovcnt = 0;
4474 	bdev_io->u.bdev.offset_blocks = offset_blocks;
4475 	bdev_io->u.bdev.num_blocks = num_blocks;
4476 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4477 
4478 	bdev_io_submit(bdev_io);
4479 	return 0;
4480 }
4481 
4482 static void
4483 bdev_reset_dev(struct spdk_io_channel_iter *i, int status)
4484 {
4485 	struct spdk_bdev_channel *ch = spdk_io_channel_iter_get_ctx(i);
4486 	struct spdk_bdev_io *bdev_io;
4487 
4488 	bdev_io = TAILQ_FIRST(&ch->queued_resets);
4489 	TAILQ_REMOVE(&ch->queued_resets, bdev_io, internal.link);
4490 	bdev_io_submit_reset(bdev_io);
4491 }
4492 
4493 static void
4494 bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
4495 {
4496 	struct spdk_io_channel		*ch;
4497 	struct spdk_bdev_channel	*channel;
4498 	struct spdk_bdev_mgmt_channel	*mgmt_channel;
4499 	struct spdk_bdev_shared_resource *shared_resource;
4500 	bdev_io_tailq_t			tmp_queued;
4501 
4502 	TAILQ_INIT(&tmp_queued);
4503 
4504 	ch = spdk_io_channel_iter_get_channel(i);
4505 	channel = spdk_io_channel_get_ctx(ch);
4506 	shared_resource = channel->shared_resource;
4507 	mgmt_channel = shared_resource->mgmt_ch;
4508 
4509 	channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
4510 
4511 	if ((channel->flags & BDEV_CH_QOS_ENABLED) != 0) {
4512 		/* The QoS object is always valid and readable while
4513 		 * the channel flag is set, so the lock here should not
4514 		 * be necessary. We're not in the fast path though, so
4515 		 * just take it anyway. */
4516 		pthread_mutex_lock(&channel->bdev->internal.mutex);
4517 		if (channel->bdev->internal.qos->ch == channel) {
4518 			TAILQ_SWAP(&channel->bdev->internal.qos->queued, &tmp_queued, spdk_bdev_io, internal.link);
4519 		}
4520 		pthread_mutex_unlock(&channel->bdev->internal.mutex);
4521 	}
4522 
4523 	bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
4524 	bdev_abort_all_buf_io(&mgmt_channel->need_buf_small, channel);
4525 	bdev_abort_all_buf_io(&mgmt_channel->need_buf_large, channel);
4526 	bdev_abort_all_queued_io(&tmp_queued, channel);
4527 
4528 	spdk_for_each_channel_continue(i, 0);
4529 }
4530 
4531 static void
4532 bdev_start_reset(void *ctx)
4533 {
4534 	struct spdk_bdev_channel *ch = ctx;
4535 
4536 	spdk_for_each_channel(__bdev_to_io_dev(ch->bdev), bdev_reset_freeze_channel,
4537 			      ch, bdev_reset_dev);
4538 }
4539 
4540 static void
4541 bdev_channel_start_reset(struct spdk_bdev_channel *ch)
4542 {
4543 	struct spdk_bdev *bdev = ch->bdev;
4544 
4545 	assert(!TAILQ_EMPTY(&ch->queued_resets));
4546 
4547 	pthread_mutex_lock(&bdev->internal.mutex);
4548 	if (bdev->internal.reset_in_progress == NULL) {
4549 		bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
4550 		/*
4551 		 * Take a channel reference for the target bdev for the life of this
4552 		 *  reset.  This guards against the channel getting destroyed while
4553 		 *  spdk_for_each_channel() calls related to this reset IO are in
4554 		 *  progress.  We will release the reference when this reset is
4555 		 *  completed.
4556 		 */
4557 		bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
4558 		bdev_start_reset(ch);
4559 	}
4560 	pthread_mutex_unlock(&bdev->internal.mutex);
4561 }
4562 
4563 int
4564 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4565 		spdk_bdev_io_completion_cb cb, void *cb_arg)
4566 {
4567 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4568 	struct spdk_bdev_io *bdev_io;
4569 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4570 
4571 	bdev_io = bdev_channel_get_io(channel);
4572 	if (!bdev_io) {
4573 		return -ENOMEM;
4574 	}
4575 
4576 	bdev_io->internal.ch = channel;
4577 	bdev_io->internal.desc = desc;
4578 	bdev_io->internal.submit_tsc = spdk_get_ticks();
4579 	bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
4580 	bdev_io->u.reset.ch_ref = NULL;
4581 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4582 
4583 	pthread_mutex_lock(&bdev->internal.mutex);
4584 	TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link);
4585 	pthread_mutex_unlock(&bdev->internal.mutex);
4586 
4587 	TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io,
4588 			  internal.ch_link);
4589 
4590 	bdev_channel_start_reset(channel);
4591 
4592 	return 0;
4593 }
4594 
4595 void
4596 spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
4597 		      struct spdk_bdev_io_stat *stat)
4598 {
4599 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4600 
4601 	*stat = channel->stat;
4602 }
4603 
4604 static void
4605 bdev_get_device_stat_done(struct spdk_io_channel_iter *i, int status)
4606 {
4607 	void *io_device = spdk_io_channel_iter_get_io_device(i);
4608 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = spdk_io_channel_iter_get_ctx(i);
4609 
4610 	bdev_iostat_ctx->cb(__bdev_from_io_dev(io_device), bdev_iostat_ctx->stat,
4611 			    bdev_iostat_ctx->cb_arg, 0);
4612 	free(bdev_iostat_ctx);
4613 }
4614 
4615 static void
4616 bdev_get_each_channel_stat(struct spdk_io_channel_iter *i)
4617 {
4618 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = spdk_io_channel_iter_get_ctx(i);
4619 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
4620 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4621 
4622 	bdev_io_stat_add(bdev_iostat_ctx->stat, &channel->stat);
4623 	spdk_for_each_channel_continue(i, 0);
4624 }
4625 
4626 void
4627 spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
4628 			  spdk_bdev_get_device_stat_cb cb, void *cb_arg)
4629 {
4630 	struct spdk_bdev_iostat_ctx *bdev_iostat_ctx;
4631 
4632 	assert(bdev != NULL);
4633 	assert(stat != NULL);
4634 	assert(cb != NULL);
4635 
4636 	bdev_iostat_ctx = calloc(1, sizeof(struct spdk_bdev_iostat_ctx));
4637 	if (bdev_iostat_ctx == NULL) {
4638 		SPDK_ERRLOG("Unable to allocate memory for spdk_bdev_iostat_ctx\n");
4639 		cb(bdev, stat, cb_arg, -ENOMEM);
4640 		return;
4641 	}
4642 
4643 	bdev_iostat_ctx->stat = stat;
4644 	bdev_iostat_ctx->cb = cb;
4645 	bdev_iostat_ctx->cb_arg = cb_arg;
4646 
4647 	/* Start with the statistics from previously deleted channels. */
4648 	pthread_mutex_lock(&bdev->internal.mutex);
4649 	bdev_io_stat_add(bdev_iostat_ctx->stat, &bdev->internal.stat);
4650 	pthread_mutex_unlock(&bdev->internal.mutex);
4651 
4652 	/* Then iterate and add the statistics from each existing channel. */
4653 	spdk_for_each_channel(__bdev_to_io_dev(bdev),
4654 			      bdev_get_each_channel_stat,
4655 			      bdev_iostat_ctx,
4656 			      bdev_get_device_stat_done);
4657 }
4658 
4659 int
4660 spdk_bdev_nvme_admin_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4661 			      const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
4662 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
4663 {
4664 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4665 	struct spdk_bdev_io *bdev_io;
4666 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4667 
4668 	if (!desc->write) {
4669 		return -EBADF;
4670 	}
4671 
4672 	bdev_io = bdev_channel_get_io(channel);
4673 	if (!bdev_io) {
4674 		return -ENOMEM;
4675 	}
4676 
4677 	bdev_io->internal.ch = channel;
4678 	bdev_io->internal.desc = desc;
4679 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
4680 	bdev_io->u.nvme_passthru.cmd = *cmd;
4681 	bdev_io->u.nvme_passthru.buf = buf;
4682 	bdev_io->u.nvme_passthru.nbytes = nbytes;
4683 	bdev_io->u.nvme_passthru.md_buf = NULL;
4684 	bdev_io->u.nvme_passthru.md_len = 0;
4685 
4686 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4687 
4688 	bdev_io_submit(bdev_io);
4689 	return 0;
4690 }
4691 
4692 int
4693 spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4694 			   const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
4695 			   spdk_bdev_io_completion_cb cb, void *cb_arg)
4696 {
4697 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4698 	struct spdk_bdev_io *bdev_io;
4699 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4700 
4701 	if (!desc->write) {
4702 		/*
4703 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
4704 		 *  to easily determine if the command is a read or write, but for now just
4705 		 *  do not allow io_passthru with a read-only descriptor.
4706 		 */
4707 		return -EBADF;
4708 	}
4709 
4710 	bdev_io = bdev_channel_get_io(channel);
4711 	if (!bdev_io) {
4712 		return -ENOMEM;
4713 	}
4714 
4715 	bdev_io->internal.ch = channel;
4716 	bdev_io->internal.desc = desc;
4717 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO;
4718 	bdev_io->u.nvme_passthru.cmd = *cmd;
4719 	bdev_io->u.nvme_passthru.buf = buf;
4720 	bdev_io->u.nvme_passthru.nbytes = nbytes;
4721 	bdev_io->u.nvme_passthru.md_buf = NULL;
4722 	bdev_io->u.nvme_passthru.md_len = 0;
4723 
4724 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4725 
4726 	bdev_io_submit(bdev_io);
4727 	return 0;
4728 }
4729 
4730 int
4731 spdk_bdev_nvme_io_passthru_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4732 			      const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len,
4733 			      spdk_bdev_io_completion_cb cb, void *cb_arg)
4734 {
4735 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4736 	struct spdk_bdev_io *bdev_io;
4737 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4738 
4739 	if (!desc->write) {
4740 		/*
4741 		 * Do not try to parse the NVMe command - we could maybe use bits in the opcode
4742 		 *  to easily determine if the command is a read or write, but for now just
4743 		 *  do not allow io_passthru with a read-only descriptor.
4744 		 */
4745 		return -EBADF;
4746 	}
4747 
4748 	bdev_io = bdev_channel_get_io(channel);
4749 	if (!bdev_io) {
4750 		return -ENOMEM;
4751 	}
4752 
4753 	bdev_io->internal.ch = channel;
4754 	bdev_io->internal.desc = desc;
4755 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO_MD;
4756 	bdev_io->u.nvme_passthru.cmd = *cmd;
4757 	bdev_io->u.nvme_passthru.buf = buf;
4758 	bdev_io->u.nvme_passthru.nbytes = nbytes;
4759 	bdev_io->u.nvme_passthru.md_buf = md_buf;
4760 	bdev_io->u.nvme_passthru.md_len = md_len;
4761 
4762 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4763 
4764 	bdev_io_submit(bdev_io);
4765 	return 0;
4766 }
4767 
4768 static void bdev_abort_retry(void *ctx);
4769 static void bdev_abort(struct spdk_bdev_io *parent_io);
4770 
4771 static void
4772 bdev_abort_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
4773 {
4774 	struct spdk_bdev_channel *channel = bdev_io->internal.ch;
4775 	struct spdk_bdev_io *parent_io = cb_arg;
4776 	struct spdk_bdev_io *bio_to_abort, *tmp_io;
4777 
4778 	bio_to_abort = bdev_io->u.abort.bio_to_abort;
4779 
4780 	spdk_bdev_free_io(bdev_io);
4781 
4782 	if (!success) {
4783 		/* Check if the target I/O completed in the meantime. */
4784 		TAILQ_FOREACH(tmp_io, &channel->io_submitted, internal.ch_link) {
4785 			if (tmp_io == bio_to_abort) {
4786 				break;
4787 			}
4788 		}
4789 
4790 		/* If the target I/O still exists, set the parent to failed. */
4791 		if (tmp_io != NULL) {
4792 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4793 		}
4794 	}
4795 
4796 	parent_io->u.bdev.split_outstanding--;
4797 	if (parent_io->u.bdev.split_outstanding == 0) {
4798 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
4799 			bdev_abort_retry(parent_io);
4800 		} else {
4801 			bdev_io_complete(parent_io);
4802 		}
4803 	}
4804 }
4805 
4806 static int
4807 bdev_abort_io(struct spdk_bdev_desc *desc, struct spdk_bdev_channel *channel,
4808 	      struct spdk_bdev_io *bio_to_abort,
4809 	      spdk_bdev_io_completion_cb cb, void *cb_arg)
4810 {
4811 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4812 	struct spdk_bdev_io *bdev_io;
4813 
4814 	if (bio_to_abort->type == SPDK_BDEV_IO_TYPE_ABORT ||
4815 	    bio_to_abort->type == SPDK_BDEV_IO_TYPE_RESET) {
4816 		/* TODO: Abort reset or abort request. */
4817 		return -ENOTSUP;
4818 	}
4819 
4820 	bdev_io = bdev_channel_get_io(channel);
4821 	if (bdev_io == NULL) {
4822 		return -ENOMEM;
4823 	}
4824 
4825 	bdev_io->internal.ch = channel;
4826 	bdev_io->internal.desc = desc;
4827 	bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
4828 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4829 
4830 	if (bdev->split_on_optimal_io_boundary && bdev_io_should_split(bio_to_abort)) {
4831 		bdev_io->u.bdev.abort.bio_cb_arg = bio_to_abort;
4832 
4833 		/* Parent abort request is not submitted directly, but to manage its
4834 		 * execution add it to the submitted list here.
4835 		 */
4836 		bdev_io->internal.submit_tsc = spdk_get_ticks();
4837 		TAILQ_INSERT_TAIL(&channel->io_submitted, bdev_io, internal.ch_link);
4838 
4839 		bdev_abort(bdev_io);
4840 
4841 		return 0;
4842 	}
4843 
4844 	bdev_io->u.abort.bio_to_abort = bio_to_abort;
4845 
4846 	/* Submit the abort request to the underlying bdev module. */
4847 	bdev_io_submit(bdev_io);
4848 
4849 	return 0;
4850 }
4851 
4852 static uint32_t
4853 _bdev_abort(struct spdk_bdev_io *parent_io)
4854 {
4855 	struct spdk_bdev_desc *desc = parent_io->internal.desc;
4856 	struct spdk_bdev_channel *channel = parent_io->internal.ch;
4857 	void *bio_cb_arg;
4858 	struct spdk_bdev_io *bio_to_abort;
4859 	uint32_t matched_ios;
4860 	int rc;
4861 
4862 	bio_cb_arg = parent_io->u.bdev.abort.bio_cb_arg;
4863 
4864 	/* matched_ios is returned and will be kept by the caller.
4865 	 *
4866 	 * This funcion will be used for two cases, 1) the same cb_arg is used for
4867 	 * multiple I/Os, 2) a single large I/O is split into smaller ones.
4868 	 * Incrementing split_outstanding directly here may confuse readers especially
4869 	 * for the 1st case.
4870 	 *
4871 	 * Completion of I/O abort is processed after stack unwinding. Hence this trick
4872 	 * works as expected.
4873 	 */
4874 	matched_ios = 0;
4875 	parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
4876 
4877 	TAILQ_FOREACH(bio_to_abort, &channel->io_submitted, internal.ch_link) {
4878 		if (bio_to_abort->internal.caller_ctx != bio_cb_arg) {
4879 			continue;
4880 		}
4881 
4882 		if (bio_to_abort->internal.submit_tsc > parent_io->internal.submit_tsc) {
4883 			/* Any I/O which was submitted after this abort command should be excluded. */
4884 			continue;
4885 		}
4886 
4887 		rc = bdev_abort_io(desc, channel, bio_to_abort, bdev_abort_io_done, parent_io);
4888 		if (rc != 0) {
4889 			if (rc == -ENOMEM) {
4890 				parent_io->internal.status = SPDK_BDEV_IO_STATUS_NOMEM;
4891 			} else {
4892 				parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4893 			}
4894 			break;
4895 		}
4896 		matched_ios++;
4897 	}
4898 
4899 	return matched_ios;
4900 }
4901 
4902 static void
4903 bdev_abort_retry(void *ctx)
4904 {
4905 	struct spdk_bdev_io *parent_io = ctx;
4906 	uint32_t matched_ios;
4907 
4908 	matched_ios = _bdev_abort(parent_io);
4909 
4910 	if (matched_ios == 0) {
4911 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
4912 			bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
4913 		} else {
4914 			/* For retry, the case that no target I/O was found is success
4915 			 * because it means target I/Os completed in the meantime.
4916 			 */
4917 			bdev_io_complete(parent_io);
4918 		}
4919 		return;
4920 	}
4921 
4922 	/* Use split_outstanding to manage the progress of aborting I/Os. */
4923 	parent_io->u.bdev.split_outstanding = matched_ios;
4924 }
4925 
4926 static void
4927 bdev_abort(struct spdk_bdev_io *parent_io)
4928 {
4929 	uint32_t matched_ios;
4930 
4931 	matched_ios = _bdev_abort(parent_io);
4932 
4933 	if (matched_ios == 0) {
4934 		if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
4935 			bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
4936 		} else {
4937 			/* The case the no target I/O was found is failure. */
4938 			parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4939 			bdev_io_complete(parent_io);
4940 		}
4941 		return;
4942 	}
4943 
4944 	/* Use split_outstanding to manage the progress of aborting I/Os. */
4945 	parent_io->u.bdev.split_outstanding = matched_ios;
4946 }
4947 
4948 int
4949 spdk_bdev_abort(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
4950 		void *bio_cb_arg,
4951 		spdk_bdev_io_completion_cb cb, void *cb_arg)
4952 {
4953 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4954 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4955 	struct spdk_bdev_io *bdev_io;
4956 
4957 	if (bio_cb_arg == NULL) {
4958 		return -EINVAL;
4959 	}
4960 
4961 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ABORT)) {
4962 		return -ENOTSUP;
4963 	}
4964 
4965 	bdev_io = bdev_channel_get_io(channel);
4966 	if (bdev_io == NULL) {
4967 		return -ENOMEM;
4968 	}
4969 
4970 	bdev_io->internal.ch = channel;
4971 	bdev_io->internal.desc = desc;
4972 	bdev_io->internal.submit_tsc = spdk_get_ticks();
4973 	bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
4974 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
4975 
4976 	bdev_io->u.bdev.abort.bio_cb_arg = bio_cb_arg;
4977 
4978 	/* Parent abort request is not submitted directly, but to manage its execution,
4979 	 * add it to the submitted list here.
4980 	 */
4981 	TAILQ_INSERT_TAIL(&channel->io_submitted, bdev_io, internal.ch_link);
4982 
4983 	bdev_abort(bdev_io);
4984 
4985 	return 0;
4986 }
4987 
4988 int
4989 spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
4990 			struct spdk_bdev_io_wait_entry *entry)
4991 {
4992 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
4993 	struct spdk_bdev_mgmt_channel *mgmt_ch = channel->shared_resource->mgmt_ch;
4994 
4995 	if (bdev != entry->bdev) {
4996 		SPDK_ERRLOG("bdevs do not match\n");
4997 		return -EINVAL;
4998 	}
4999 
5000 	if (mgmt_ch->per_thread_cache_count > 0) {
5001 		SPDK_ERRLOG("Cannot queue io_wait if spdk_bdev_io available in per-thread cache\n");
5002 		return -EINVAL;
5003 	}
5004 
5005 	TAILQ_INSERT_TAIL(&mgmt_ch->io_wait_queue, entry, link);
5006 	return 0;
5007 }
5008 
5009 static void
5010 bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
5011 {
5012 	struct spdk_bdev *bdev = bdev_ch->bdev;
5013 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
5014 	struct spdk_bdev_io *bdev_io;
5015 
5016 	if (shared_resource->io_outstanding > shared_resource->nomem_threshold) {
5017 		/*
5018 		 * Allow some more I/O to complete before retrying the nomem_io queue.
5019 		 *  Some drivers (such as nvme) cannot immediately take a new I/O in
5020 		 *  the context of a completion, because the resources for the I/O are
5021 		 *  not released until control returns to the bdev poller.  Also, we
5022 		 *  may require several small I/O to complete before a larger I/O
5023 		 *  (that requires splitting) can be submitted.
5024 		 */
5025 		return;
5026 	}
5027 
5028 	while (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
5029 		bdev_io = TAILQ_FIRST(&shared_resource->nomem_io);
5030 		TAILQ_REMOVE(&shared_resource->nomem_io, bdev_io, internal.link);
5031 		bdev_io->internal.ch->io_outstanding++;
5032 		shared_resource->io_outstanding++;
5033 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
5034 		bdev_io->internal.error.nvme.cdw0 = 0;
5035 		bdev_io->num_retries++;
5036 		bdev->fn_table->submit_request(spdk_bdev_io_get_io_channel(bdev_io), bdev_io);
5037 		if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
5038 			break;
5039 		}
5040 	}
5041 }
5042 
5043 static inline void
5044 bdev_io_complete(void *ctx)
5045 {
5046 	struct spdk_bdev_io *bdev_io = ctx;
5047 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
5048 	uint64_t tsc, tsc_diff;
5049 
5050 	if (spdk_unlikely(bdev_io->internal.in_submit_request || bdev_io->internal.io_submit_ch)) {
5051 		/*
5052 		 * Send the completion to the thread that originally submitted the I/O,
5053 		 * which may not be the current thread in the case of QoS.
5054 		 */
5055 		if (bdev_io->internal.io_submit_ch) {
5056 			bdev_io->internal.ch = bdev_io->internal.io_submit_ch;
5057 			bdev_io->internal.io_submit_ch = NULL;
5058 		}
5059 
5060 		/*
5061 		 * Defer completion to avoid potential infinite recursion if the
5062 		 * user's completion callback issues a new I/O.
5063 		 */
5064 		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
5065 				     bdev_io_complete, bdev_io);
5066 		return;
5067 	}
5068 
5069 	tsc = spdk_get_ticks();
5070 	tsc_diff = tsc - bdev_io->internal.submit_tsc;
5071 	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io, 0);
5072 
5073 	TAILQ_REMOVE(&bdev_ch->io_submitted, bdev_io, internal.ch_link);
5074 
5075 	if (bdev_io->internal.ch->histogram) {
5076 		spdk_histogram_data_tally(bdev_io->internal.ch->histogram, tsc_diff);
5077 	}
5078 
5079 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
5080 		switch (bdev_io->type) {
5081 		case SPDK_BDEV_IO_TYPE_READ:
5082 			bdev_io->internal.ch->stat.bytes_read += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5083 			bdev_io->internal.ch->stat.num_read_ops++;
5084 			bdev_io->internal.ch->stat.read_latency_ticks += tsc_diff;
5085 			break;
5086 		case SPDK_BDEV_IO_TYPE_WRITE:
5087 			bdev_io->internal.ch->stat.bytes_written += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5088 			bdev_io->internal.ch->stat.num_write_ops++;
5089 			bdev_io->internal.ch->stat.write_latency_ticks += tsc_diff;
5090 			break;
5091 		case SPDK_BDEV_IO_TYPE_UNMAP:
5092 			bdev_io->internal.ch->stat.bytes_unmapped += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5093 			bdev_io->internal.ch->stat.num_unmap_ops++;
5094 			bdev_io->internal.ch->stat.unmap_latency_ticks += tsc_diff;
5095 			break;
5096 		case SPDK_BDEV_IO_TYPE_ZCOPY:
5097 			/* Track the data in the start phase only */
5098 			if (bdev_io->u.bdev.zcopy.start) {
5099 				if (bdev_io->u.bdev.zcopy.populate) {
5100 					bdev_io->internal.ch->stat.bytes_read +=
5101 						bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5102 					bdev_io->internal.ch->stat.num_read_ops++;
5103 					bdev_io->internal.ch->stat.read_latency_ticks += tsc_diff;
5104 				} else {
5105 					bdev_io->internal.ch->stat.bytes_written +=
5106 						bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
5107 					bdev_io->internal.ch->stat.num_write_ops++;
5108 					bdev_io->internal.ch->stat.write_latency_ticks += tsc_diff;
5109 				}
5110 			}
5111 			break;
5112 		default:
5113 			break;
5114 		}
5115 	}
5116 
5117 #ifdef SPDK_CONFIG_VTUNE
5118 	uint64_t now_tsc = spdk_get_ticks();
5119 	if (now_tsc > (bdev_io->internal.ch->start_tsc + bdev_io->internal.ch->interval_tsc)) {
5120 		uint64_t data[5];
5121 
5122 		data[0] = bdev_io->internal.ch->stat.num_read_ops - bdev_io->internal.ch->prev_stat.num_read_ops;
5123 		data[1] = bdev_io->internal.ch->stat.bytes_read - bdev_io->internal.ch->prev_stat.bytes_read;
5124 		data[2] = bdev_io->internal.ch->stat.num_write_ops - bdev_io->internal.ch->prev_stat.num_write_ops;
5125 		data[3] = bdev_io->internal.ch->stat.bytes_written - bdev_io->internal.ch->prev_stat.bytes_written;
5126 		data[4] = bdev_io->bdev->fn_table->get_spin_time ?
5127 			  bdev_io->bdev->fn_table->get_spin_time(spdk_bdev_io_get_io_channel(bdev_io)) : 0;
5128 
5129 		__itt_metadata_add(g_bdev_mgr.domain, __itt_null, bdev_io->internal.ch->handle,
5130 				   __itt_metadata_u64, 5, data);
5131 
5132 		bdev_io->internal.ch->prev_stat = bdev_io->internal.ch->stat;
5133 		bdev_io->internal.ch->start_tsc = now_tsc;
5134 	}
5135 #endif
5136 
5137 	assert(bdev_io->internal.cb != NULL);
5138 	assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
5139 
5140 	bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
5141 			     bdev_io->internal.caller_ctx);
5142 }
5143 
5144 static void
5145 bdev_reset_complete(struct spdk_io_channel_iter *i, int status)
5146 {
5147 	struct spdk_bdev_io *bdev_io = spdk_io_channel_iter_get_ctx(i);
5148 
5149 	if (bdev_io->u.reset.ch_ref != NULL) {
5150 		spdk_put_io_channel(bdev_io->u.reset.ch_ref);
5151 		bdev_io->u.reset.ch_ref = NULL;
5152 	}
5153 
5154 	bdev_io_complete(bdev_io);
5155 }
5156 
5157 static void
5158 bdev_unfreeze_channel(struct spdk_io_channel_iter *i)
5159 {
5160 	struct spdk_bdev_io *bdev_io = spdk_io_channel_iter_get_ctx(i);
5161 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
5162 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
5163 	struct spdk_bdev_io *queued_reset;
5164 
5165 	ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
5166 	while (!TAILQ_EMPTY(&ch->queued_resets)) {
5167 		queued_reset = TAILQ_FIRST(&ch->queued_resets);
5168 		TAILQ_REMOVE(&ch->queued_resets, queued_reset, internal.link);
5169 		spdk_bdev_io_complete(queued_reset, bdev_io->internal.status);
5170 	}
5171 
5172 	spdk_for_each_channel_continue(i, 0);
5173 }
5174 
5175 void
5176 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
5177 {
5178 	struct spdk_bdev *bdev = bdev_io->bdev;
5179 	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
5180 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
5181 
5182 	bdev_io->internal.status = status;
5183 
5184 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) {
5185 		bool unlock_channels = false;
5186 
5187 		if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
5188 			SPDK_ERRLOG("NOMEM returned for reset\n");
5189 		}
5190 		pthread_mutex_lock(&bdev->internal.mutex);
5191 		if (bdev_io == bdev->internal.reset_in_progress) {
5192 			bdev->internal.reset_in_progress = NULL;
5193 			unlock_channels = true;
5194 		}
5195 		pthread_mutex_unlock(&bdev->internal.mutex);
5196 
5197 		if (unlock_channels) {
5198 			spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_unfreeze_channel,
5199 					      bdev_io, bdev_reset_complete);
5200 			return;
5201 		}
5202 	} else {
5203 		_bdev_io_unset_bounce_buf(bdev_io);
5204 
5205 		assert(bdev_ch->io_outstanding > 0);
5206 		assert(shared_resource->io_outstanding > 0);
5207 		bdev_ch->io_outstanding--;
5208 		shared_resource->io_outstanding--;
5209 
5210 		if (spdk_unlikely(status == SPDK_BDEV_IO_STATUS_NOMEM)) {
5211 			TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
5212 			/*
5213 			 * Wait for some of the outstanding I/O to complete before we
5214 			 *  retry any of the nomem_io.  Normally we will wait for
5215 			 *  NOMEM_THRESHOLD_COUNT I/O to complete but for low queue
5216 			 *  depth channels we will instead wait for half to complete.
5217 			 */
5218 			shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
5219 							   (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
5220 			return;
5221 		}
5222 
5223 		if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
5224 			bdev_ch_retry_io(bdev_ch);
5225 		}
5226 	}
5227 
5228 	bdev_io_complete(bdev_io);
5229 }
5230 
5231 void
5232 spdk_bdev_io_complete_scsi_status(struct spdk_bdev_io *bdev_io, enum spdk_scsi_status sc,
5233 				  enum spdk_scsi_sense sk, uint8_t asc, uint8_t ascq)
5234 {
5235 	if (sc == SPDK_SCSI_STATUS_GOOD) {
5236 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5237 	} else {
5238 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
5239 		bdev_io->internal.error.scsi.sc = sc;
5240 		bdev_io->internal.error.scsi.sk = sk;
5241 		bdev_io->internal.error.scsi.asc = asc;
5242 		bdev_io->internal.error.scsi.ascq = ascq;
5243 	}
5244 
5245 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
5246 }
5247 
5248 void
5249 spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
5250 			     int *sc, int *sk, int *asc, int *ascq)
5251 {
5252 	assert(sc != NULL);
5253 	assert(sk != NULL);
5254 	assert(asc != NULL);
5255 	assert(ascq != NULL);
5256 
5257 	switch (bdev_io->internal.status) {
5258 	case SPDK_BDEV_IO_STATUS_SUCCESS:
5259 		*sc = SPDK_SCSI_STATUS_GOOD;
5260 		*sk = SPDK_SCSI_SENSE_NO_SENSE;
5261 		*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
5262 		*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
5263 		break;
5264 	case SPDK_BDEV_IO_STATUS_NVME_ERROR:
5265 		spdk_scsi_nvme_translate(bdev_io, sc, sk, asc, ascq);
5266 		break;
5267 	case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
5268 		*sc = bdev_io->internal.error.scsi.sc;
5269 		*sk = bdev_io->internal.error.scsi.sk;
5270 		*asc = bdev_io->internal.error.scsi.asc;
5271 		*ascq = bdev_io->internal.error.scsi.ascq;
5272 		break;
5273 	default:
5274 		*sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
5275 		*sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
5276 		*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
5277 		*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
5278 		break;
5279 	}
5280 }
5281 
5282 void
5283 spdk_bdev_io_complete_aio_status(struct spdk_bdev_io *bdev_io, int aio_result)
5284 {
5285 	if (aio_result == 0) {
5286 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5287 	} else {
5288 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_AIO_ERROR;
5289 	}
5290 
5291 	bdev_io->internal.error.aio_result = aio_result;
5292 
5293 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
5294 }
5295 
5296 void
5297 spdk_bdev_io_get_aio_status(const struct spdk_bdev_io *bdev_io, int *aio_result)
5298 {
5299 	assert(aio_result != NULL);
5300 
5301 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_AIO_ERROR) {
5302 		*aio_result = bdev_io->internal.error.aio_result;
5303 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
5304 		*aio_result = 0;
5305 	} else {
5306 		*aio_result = -EIO;
5307 	}
5308 }
5309 
5310 void
5311 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
5312 {
5313 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
5314 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5315 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
5316 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
5317 	} else {
5318 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
5319 	}
5320 
5321 	bdev_io->internal.error.nvme.cdw0 = cdw0;
5322 	bdev_io->internal.error.nvme.sct = sct;
5323 	bdev_io->internal.error.nvme.sc = sc;
5324 
5325 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
5326 }
5327 
5328 void
5329 spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc)
5330 {
5331 	assert(sct != NULL);
5332 	assert(sc != NULL);
5333 	assert(cdw0 != NULL);
5334 
5335 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
5336 		*sct = bdev_io->internal.error.nvme.sct;
5337 		*sc = bdev_io->internal.error.nvme.sc;
5338 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
5339 		*sct = SPDK_NVME_SCT_GENERIC;
5340 		*sc = SPDK_NVME_SC_SUCCESS;
5341 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
5342 		*sct = SPDK_NVME_SCT_GENERIC;
5343 		*sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5344 	} else {
5345 		*sct = SPDK_NVME_SCT_GENERIC;
5346 		*sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5347 	}
5348 
5349 	*cdw0 = bdev_io->internal.error.nvme.cdw0;
5350 }
5351 
5352 void
5353 spdk_bdev_io_get_nvme_fused_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
5354 				   int *first_sct, int *first_sc, int *second_sct, int *second_sc)
5355 {
5356 	assert(first_sct != NULL);
5357 	assert(first_sc != NULL);
5358 	assert(second_sct != NULL);
5359 	assert(second_sc != NULL);
5360 	assert(cdw0 != NULL);
5361 
5362 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
5363 		if (bdev_io->internal.error.nvme.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
5364 		    bdev_io->internal.error.nvme.sc == SPDK_NVME_SC_COMPARE_FAILURE) {
5365 			*first_sct = bdev_io->internal.error.nvme.sct;
5366 			*first_sc = bdev_io->internal.error.nvme.sc;
5367 			*second_sct = SPDK_NVME_SCT_GENERIC;
5368 			*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
5369 		} else {
5370 			*first_sct = SPDK_NVME_SCT_GENERIC;
5371 			*first_sc = SPDK_NVME_SC_SUCCESS;
5372 			*second_sct = bdev_io->internal.error.nvme.sct;
5373 			*second_sc = bdev_io->internal.error.nvme.sc;
5374 		}
5375 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
5376 		*first_sct = SPDK_NVME_SCT_GENERIC;
5377 		*first_sc = SPDK_NVME_SC_SUCCESS;
5378 		*second_sct = SPDK_NVME_SCT_GENERIC;
5379 		*second_sc = SPDK_NVME_SC_SUCCESS;
5380 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED) {
5381 		*first_sct = SPDK_NVME_SCT_GENERIC;
5382 		*first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5383 		*second_sct = SPDK_NVME_SCT_GENERIC;
5384 		*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
5385 	} else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_MISCOMPARE) {
5386 		*first_sct = SPDK_NVME_SCT_MEDIA_ERROR;
5387 		*first_sc = SPDK_NVME_SC_COMPARE_FAILURE;
5388 		*second_sct = SPDK_NVME_SCT_GENERIC;
5389 		*second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
5390 	} else {
5391 		*first_sct = SPDK_NVME_SCT_GENERIC;
5392 		*first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5393 		*second_sct = SPDK_NVME_SCT_GENERIC;
5394 		*second_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5395 	}
5396 
5397 	*cdw0 = bdev_io->internal.error.nvme.cdw0;
5398 }
5399 
5400 struct spdk_thread *
5401 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
5402 {
5403 	return spdk_io_channel_get_thread(bdev_io->internal.ch->channel);
5404 }
5405 
5406 struct spdk_io_channel *
5407 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
5408 {
5409 	return bdev_io->internal.ch->channel;
5410 }
5411 
5412 static int
5413 bdev_init(struct spdk_bdev *bdev)
5414 {
5415 	char *bdev_name;
5416 
5417 	assert(bdev->module != NULL);
5418 
5419 	if (!bdev->name) {
5420 		SPDK_ERRLOG("Bdev name is NULL\n");
5421 		return -EINVAL;
5422 	}
5423 
5424 	if (!strlen(bdev->name)) {
5425 		SPDK_ERRLOG("Bdev name must not be an empty string\n");
5426 		return -EINVAL;
5427 	}
5428 
5429 	if (spdk_bdev_get_by_name(bdev->name)) {
5430 		SPDK_ERRLOG("Bdev name:%s already exists\n", bdev->name);
5431 		return -EEXIST;
5432 	}
5433 
5434 	/* Users often register their own I/O devices using the bdev name. In
5435 	 * order to avoid conflicts, prepend bdev_. */
5436 	bdev_name = spdk_sprintf_alloc("bdev_%s", bdev->name);
5437 	if (!bdev_name) {
5438 		SPDK_ERRLOG("Unable to allocate memory for internal bdev name.\n");
5439 		return -ENOMEM;
5440 	}
5441 
5442 	bdev->internal.status = SPDK_BDEV_STATUS_READY;
5443 	bdev->internal.measured_queue_depth = UINT64_MAX;
5444 	bdev->internal.claim_module = NULL;
5445 	bdev->internal.qd_poller = NULL;
5446 	bdev->internal.qos = NULL;
5447 
5448 	/* If the user didn't specify a uuid, generate one. */
5449 	if (spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))) {
5450 		spdk_uuid_generate(&bdev->uuid);
5451 	}
5452 
5453 	if (spdk_bdev_get_buf_align(bdev) > 1) {
5454 		if (bdev->split_on_optimal_io_boundary) {
5455 			bdev->optimal_io_boundary = spdk_min(bdev->optimal_io_boundary,
5456 							     SPDK_BDEV_LARGE_BUF_MAX_SIZE / bdev->blocklen);
5457 		} else {
5458 			bdev->split_on_optimal_io_boundary = true;
5459 			bdev->optimal_io_boundary = SPDK_BDEV_LARGE_BUF_MAX_SIZE / bdev->blocklen;
5460 		}
5461 	}
5462 
5463 	/* If the user didn't specify a write unit size, set it to one. */
5464 	if (bdev->write_unit_size == 0) {
5465 		bdev->write_unit_size = 1;
5466 	}
5467 
5468 	/* Set ACWU value to 1 if bdev module did not set it (does not support it natively) */
5469 	if (bdev->acwu == 0) {
5470 		bdev->acwu = 1;
5471 	}
5472 
5473 	if (bdev->phys_blocklen == 0) {
5474 		bdev->phys_blocklen = spdk_bdev_get_data_block_size(bdev);
5475 	}
5476 
5477 	TAILQ_INIT(&bdev->internal.open_descs);
5478 	TAILQ_INIT(&bdev->internal.locked_ranges);
5479 	TAILQ_INIT(&bdev->internal.pending_locked_ranges);
5480 
5481 	TAILQ_INIT(&bdev->aliases);
5482 
5483 	bdev->internal.reset_in_progress = NULL;
5484 
5485 	spdk_io_device_register(__bdev_to_io_dev(bdev),
5486 				bdev_channel_create, bdev_channel_destroy,
5487 				sizeof(struct spdk_bdev_channel),
5488 				bdev_name);
5489 
5490 	free(bdev_name);
5491 
5492 	pthread_mutex_init(&bdev->internal.mutex, NULL);
5493 	return 0;
5494 }
5495 
5496 static void
5497 bdev_destroy_cb(void *io_device)
5498 {
5499 	int			rc;
5500 	struct spdk_bdev	*bdev;
5501 	spdk_bdev_unregister_cb	cb_fn;
5502 	void			*cb_arg;
5503 
5504 	bdev = __bdev_from_io_dev(io_device);
5505 	cb_fn = bdev->internal.unregister_cb;
5506 	cb_arg = bdev->internal.unregister_ctx;
5507 
5508 	pthread_mutex_destroy(&bdev->internal.mutex);
5509 	free(bdev->internal.qos);
5510 
5511 	rc = bdev->fn_table->destruct(bdev->ctxt);
5512 	if (rc < 0) {
5513 		SPDK_ERRLOG("destruct failed\n");
5514 	}
5515 	if (rc <= 0 && cb_fn != NULL) {
5516 		cb_fn(cb_arg, rc);
5517 	}
5518 }
5519 
5520 static void
5521 bdev_start_finished(void *arg)
5522 {
5523 	struct spdk_bdev *bdev = arg;
5524 
5525 	spdk_notify_send("bdev_register", spdk_bdev_get_name(bdev));
5526 }
5527 
5528 static void
5529 bdev_start(struct spdk_bdev *bdev)
5530 {
5531 	SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
5532 	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
5533 
5534 	/* Examine configuration before initializing I/O */
5535 	bdev_examine(bdev);
5536 
5537 	spdk_bdev_wait_for_examine(bdev_start_finished, bdev);
5538 }
5539 
5540 int
5541 spdk_bdev_register(struct spdk_bdev *bdev)
5542 {
5543 	int rc = bdev_init(bdev);
5544 
5545 	if (rc == 0) {
5546 		bdev_start(bdev);
5547 	}
5548 
5549 	return rc;
5550 }
5551 
5552 void
5553 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
5554 {
5555 	if (bdev->internal.unregister_cb != NULL) {
5556 		bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
5557 	}
5558 }
5559 
5560 static void
5561 _remove_notify(void *arg)
5562 {
5563 	struct spdk_bdev_desc *desc = arg;
5564 
5565 	pthread_mutex_lock(&desc->mutex);
5566 	desc->refs--;
5567 
5568 	if (!desc->closed) {
5569 		pthread_mutex_unlock(&desc->mutex);
5570 		desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx);
5571 		return;
5572 	} else if (0 == desc->refs) {
5573 		/* This descriptor was closed after this remove_notify message was sent.
5574 		 * spdk_bdev_close() could not free the descriptor since this message was
5575 		 * in flight, so we free it now using bdev_desc_free().
5576 		 */
5577 		pthread_mutex_unlock(&desc->mutex);
5578 		bdev_desc_free(desc);
5579 		return;
5580 	}
5581 	pthread_mutex_unlock(&desc->mutex);
5582 }
5583 
5584 /* Must be called while holding bdev->internal.mutex.
5585  * returns: 0 - bdev removed and ready to be destructed.
5586  *          -EBUSY - bdev can't be destructed yet.  */
5587 static int
5588 bdev_unregister_unsafe(struct spdk_bdev *bdev)
5589 {
5590 	struct spdk_bdev_desc	*desc, *tmp;
5591 	int			rc = 0;
5592 
5593 	/* Notify each descriptor about hotremoval */
5594 	TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
5595 		rc = -EBUSY;
5596 		pthread_mutex_lock(&desc->mutex);
5597 		/*
5598 		 * Defer invocation of the event_cb to a separate message that will
5599 		 *  run later on its thread.  This ensures this context unwinds and
5600 		 *  we don't recursively unregister this bdev again if the event_cb
5601 		 *  immediately closes its descriptor.
5602 		 */
5603 		desc->refs++;
5604 		spdk_thread_send_msg(desc->thread, _remove_notify, desc);
5605 		pthread_mutex_unlock(&desc->mutex);
5606 	}
5607 
5608 	/* If there are no descriptors, proceed removing the bdev */
5609 	if (rc == 0) {
5610 		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
5611 		SPDK_DEBUGLOG(bdev, "Removing bdev %s from list done\n", bdev->name);
5612 		spdk_notify_send("bdev_unregister", spdk_bdev_get_name(bdev));
5613 	}
5614 
5615 	return rc;
5616 }
5617 
5618 void
5619 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
5620 {
5621 	struct spdk_thread	*thread;
5622 	int			rc;
5623 
5624 	SPDK_DEBUGLOG(bdev, "Removing bdev %s from list\n", bdev->name);
5625 
5626 	thread = spdk_get_thread();
5627 	if (!thread) {
5628 		/* The user called this from a non-SPDK thread. */
5629 		if (cb_fn != NULL) {
5630 			cb_fn(cb_arg, -ENOTSUP);
5631 		}
5632 		return;
5633 	}
5634 
5635 	pthread_mutex_lock(&g_bdev_mgr.mutex);
5636 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
5637 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
5638 		if (cb_fn) {
5639 			cb_fn(cb_arg, -EBUSY);
5640 		}
5641 		return;
5642 	}
5643 
5644 	pthread_mutex_lock(&bdev->internal.mutex);
5645 	bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
5646 	bdev->internal.unregister_cb = cb_fn;
5647 	bdev->internal.unregister_ctx = cb_arg;
5648 
5649 	/* Call under lock. */
5650 	rc = bdev_unregister_unsafe(bdev);
5651 	pthread_mutex_unlock(&bdev->internal.mutex);
5652 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
5653 
5654 	if (rc == 0) {
5655 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
5656 	}
5657 }
5658 
5659 static int
5660 bdev_start_qos(struct spdk_bdev *bdev)
5661 {
5662 	struct set_qos_limit_ctx *ctx;
5663 
5664 	/* Enable QoS */
5665 	if (bdev->internal.qos && bdev->internal.qos->thread == NULL) {
5666 		ctx = calloc(1, sizeof(*ctx));
5667 		if (ctx == NULL) {
5668 			SPDK_ERRLOG("Failed to allocate memory for QoS context\n");
5669 			return -ENOMEM;
5670 		}
5671 		ctx->bdev = bdev;
5672 		spdk_for_each_channel(__bdev_to_io_dev(bdev),
5673 				      bdev_enable_qos_msg, ctx,
5674 				      bdev_enable_qos_done);
5675 	}
5676 
5677 	return 0;
5678 }
5679 
5680 static int
5681 bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
5682 {
5683 	struct spdk_thread *thread;
5684 	int rc = 0;
5685 
5686 	thread = spdk_get_thread();
5687 	if (!thread) {
5688 		SPDK_ERRLOG("Cannot open bdev from non-SPDK thread.\n");
5689 		return -ENOTSUP;
5690 	}
5691 
5692 	SPDK_DEBUGLOG(bdev, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
5693 		      spdk_get_thread());
5694 
5695 	desc->bdev = bdev;
5696 	desc->thread = thread;
5697 	desc->write = write;
5698 
5699 	pthread_mutex_lock(&bdev->internal.mutex);
5700 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
5701 		pthread_mutex_unlock(&bdev->internal.mutex);
5702 		return -ENODEV;
5703 	}
5704 
5705 	if (write && bdev->internal.claim_module) {
5706 		SPDK_ERRLOG("Could not open %s - %s module already claimed it\n",
5707 			    bdev->name, bdev->internal.claim_module->name);
5708 		pthread_mutex_unlock(&bdev->internal.mutex);
5709 		return -EPERM;
5710 	}
5711 
5712 	rc = bdev_start_qos(bdev);
5713 	if (rc != 0) {
5714 		SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
5715 		pthread_mutex_unlock(&bdev->internal.mutex);
5716 		return rc;
5717 	}
5718 
5719 	TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
5720 
5721 	pthread_mutex_unlock(&bdev->internal.mutex);
5722 
5723 	return 0;
5724 }
5725 
5726 int
5727 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
5728 		   void *event_ctx, struct spdk_bdev_desc **_desc)
5729 {
5730 	struct spdk_bdev_desc *desc;
5731 	struct spdk_bdev *bdev;
5732 	unsigned int event_id;
5733 	int rc;
5734 
5735 	if (event_cb == NULL) {
5736 		SPDK_ERRLOG("Missing event callback function\n");
5737 		return -EINVAL;
5738 	}
5739 
5740 	pthread_mutex_lock(&g_bdev_mgr.mutex);
5741 
5742 	bdev = spdk_bdev_get_by_name(bdev_name);
5743 
5744 	if (bdev == NULL) {
5745 		SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
5746 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
5747 		return -ENODEV;
5748 	}
5749 
5750 	desc = calloc(1, sizeof(*desc));
5751 	if (desc == NULL) {
5752 		SPDK_ERRLOG("Failed to allocate memory for bdev descriptor\n");
5753 		pthread_mutex_unlock(&g_bdev_mgr.mutex);
5754 		return -ENOMEM;
5755 	}
5756 
5757 	TAILQ_INIT(&desc->pending_media_events);
5758 	TAILQ_INIT(&desc->free_media_events);
5759 
5760 	desc->callback.event_fn = event_cb;
5761 	desc->callback.ctx = event_ctx;
5762 	pthread_mutex_init(&desc->mutex, NULL);
5763 
5764 	if (bdev->media_events) {
5765 		desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
5766 						   sizeof(*desc->media_events_buffer));
5767 		if (desc->media_events_buffer == NULL) {
5768 			SPDK_ERRLOG("Failed to initialize media event pool\n");
5769 			bdev_desc_free(desc);
5770 			pthread_mutex_unlock(&g_bdev_mgr.mutex);
5771 			return -ENOMEM;
5772 		}
5773 
5774 		for (event_id = 0; event_id < MEDIA_EVENT_POOL_SIZE; ++event_id) {
5775 			TAILQ_INSERT_TAIL(&desc->free_media_events,
5776 					  &desc->media_events_buffer[event_id], tailq);
5777 		}
5778 	}
5779 
5780 	rc = bdev_open(bdev, write, desc);
5781 	if (rc != 0) {
5782 		bdev_desc_free(desc);
5783 		desc = NULL;
5784 	}
5785 
5786 	*_desc = desc;
5787 
5788 	pthread_mutex_unlock(&g_bdev_mgr.mutex);
5789 
5790 	return rc;
5791 }
5792 
5793 void
5794 spdk_bdev_close(struct spdk_bdev_desc *desc)
5795 {
5796 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5797 	int rc;
5798 
5799 	SPDK_DEBUGLOG(bdev, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
5800 		      spdk_get_thread());
5801 
5802 	assert(desc->thread == spdk_get_thread());
5803 
5804 	spdk_poller_unregister(&desc->io_timeout_poller);
5805 
5806 	pthread_mutex_lock(&bdev->internal.mutex);
5807 	pthread_mutex_lock(&desc->mutex);
5808 
5809 	TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
5810 
5811 	desc->closed = true;
5812 
5813 	if (0 == desc->refs) {
5814 		pthread_mutex_unlock(&desc->mutex);
5815 		bdev_desc_free(desc);
5816 	} else {
5817 		pthread_mutex_unlock(&desc->mutex);
5818 	}
5819 
5820 	/* If no more descriptors, kill QoS channel */
5821 	if (bdev->internal.qos && TAILQ_EMPTY(&bdev->internal.open_descs)) {
5822 		SPDK_DEBUGLOG(bdev, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
5823 			      bdev->name, spdk_get_thread());
5824 
5825 		if (bdev_qos_destroy(bdev)) {
5826 			/* There isn't anything we can do to recover here. Just let the
5827 			 * old QoS poller keep running. The QoS handling won't change
5828 			 * cores when the user allocates a new channel, but it won't break. */
5829 			SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
5830 		}
5831 	}
5832 
5833 	spdk_bdev_set_qd_sampling_period(bdev, 0);
5834 
5835 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
5836 		rc = bdev_unregister_unsafe(bdev);
5837 		pthread_mutex_unlock(&bdev->internal.mutex);
5838 
5839 		if (rc == 0) {
5840 			spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
5841 		}
5842 	} else {
5843 		pthread_mutex_unlock(&bdev->internal.mutex);
5844 	}
5845 }
5846 
5847 int
5848 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
5849 			    struct spdk_bdev_module *module)
5850 {
5851 	if (bdev->internal.claim_module != NULL) {
5852 		SPDK_ERRLOG("bdev %s already claimed by module %s\n", bdev->name,
5853 			    bdev->internal.claim_module->name);
5854 		return -EPERM;
5855 	}
5856 
5857 	if (desc && !desc->write) {
5858 		desc->write = true;
5859 	}
5860 
5861 	bdev->internal.claim_module = module;
5862 	return 0;
5863 }
5864 
5865 void
5866 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
5867 {
5868 	assert(bdev->internal.claim_module != NULL);
5869 	bdev->internal.claim_module = NULL;
5870 }
5871 
5872 struct spdk_bdev *
5873 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
5874 {
5875 	assert(desc != NULL);
5876 	return desc->bdev;
5877 }
5878 
5879 void
5880 spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
5881 {
5882 	struct iovec *iovs;
5883 	int iovcnt;
5884 
5885 	if (bdev_io == NULL) {
5886 		return;
5887 	}
5888 
5889 	switch (bdev_io->type) {
5890 	case SPDK_BDEV_IO_TYPE_READ:
5891 	case SPDK_BDEV_IO_TYPE_WRITE:
5892 	case SPDK_BDEV_IO_TYPE_ZCOPY:
5893 		iovs = bdev_io->u.bdev.iovs;
5894 		iovcnt = bdev_io->u.bdev.iovcnt;
5895 		break;
5896 	default:
5897 		iovs = NULL;
5898 		iovcnt = 0;
5899 		break;
5900 	}
5901 
5902 	if (iovp) {
5903 		*iovp = iovs;
5904 	}
5905 	if (iovcntp) {
5906 		*iovcntp = iovcnt;
5907 	}
5908 }
5909 
5910 void *
5911 spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
5912 {
5913 	if (bdev_io == NULL) {
5914 		return NULL;
5915 	}
5916 
5917 	if (!spdk_bdev_is_md_separate(bdev_io->bdev)) {
5918 		return NULL;
5919 	}
5920 
5921 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
5922 	    bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
5923 		return bdev_io->u.bdev.md_buf;
5924 	}
5925 
5926 	return NULL;
5927 }
5928 
5929 void *
5930 spdk_bdev_io_get_cb_arg(struct spdk_bdev_io *bdev_io)
5931 {
5932 	if (bdev_io == NULL) {
5933 		assert(false);
5934 		return NULL;
5935 	}
5936 
5937 	return bdev_io->internal.caller_ctx;
5938 }
5939 
5940 void
5941 spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
5942 {
5943 
5944 	if (spdk_bdev_module_list_find(bdev_module->name)) {
5945 		SPDK_ERRLOG("ERROR: module '%s' already registered.\n", bdev_module->name);
5946 		assert(false);
5947 	}
5948 
5949 	/*
5950 	 * Modules with examine callbacks must be initialized first, so they are
5951 	 *  ready to handle examine callbacks from later modules that will
5952 	 *  register physical bdevs.
5953 	 */
5954 	if (bdev_module->examine_config != NULL || bdev_module->examine_disk != NULL) {
5955 		TAILQ_INSERT_HEAD(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
5956 	} else {
5957 		TAILQ_INSERT_TAIL(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
5958 	}
5959 }
5960 
5961 struct spdk_bdev_module *
5962 spdk_bdev_module_list_find(const char *name)
5963 {
5964 	struct spdk_bdev_module *bdev_module;
5965 
5966 	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
5967 		if (strcmp(name, bdev_module->name) == 0) {
5968 			break;
5969 		}
5970 	}
5971 
5972 	return bdev_module;
5973 }
5974 
5975 static void
5976 bdev_write_zero_buffer_next(void *_bdev_io)
5977 {
5978 	struct spdk_bdev_io *bdev_io = _bdev_io;
5979 	uint64_t num_bytes, num_blocks;
5980 	void *md_buf = NULL;
5981 	int rc;
5982 
5983 	num_bytes = spdk_min(_bdev_get_block_size_with_md(bdev_io->bdev) *
5984 			     bdev_io->u.bdev.split_remaining_num_blocks,
5985 			     ZERO_BUFFER_SIZE);
5986 	num_blocks = num_bytes / _bdev_get_block_size_with_md(bdev_io->bdev);
5987 
5988 	if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
5989 		md_buf = (char *)g_bdev_mgr.zero_buffer +
5990 			 spdk_bdev_get_block_size(bdev_io->bdev) * num_blocks;
5991 	}
5992 
5993 	rc = bdev_write_blocks_with_md(bdev_io->internal.desc,
5994 				       spdk_io_channel_from_ctx(bdev_io->internal.ch),
5995 				       g_bdev_mgr.zero_buffer, md_buf,
5996 				       bdev_io->u.bdev.split_current_offset_blocks, num_blocks,
5997 				       bdev_write_zero_buffer_done, bdev_io);
5998 	if (rc == 0) {
5999 		bdev_io->u.bdev.split_remaining_num_blocks -= num_blocks;
6000 		bdev_io->u.bdev.split_current_offset_blocks += num_blocks;
6001 	} else if (rc == -ENOMEM) {
6002 		bdev_queue_io_wait_with_cb(bdev_io, bdev_write_zero_buffer_next);
6003 	} else {
6004 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6005 		bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
6006 	}
6007 }
6008 
6009 static void
6010 bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6011 {
6012 	struct spdk_bdev_io *parent_io = cb_arg;
6013 
6014 	spdk_bdev_free_io(bdev_io);
6015 
6016 	if (!success) {
6017 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6018 		parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
6019 		return;
6020 	}
6021 
6022 	if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
6023 		parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6024 		parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
6025 		return;
6026 	}
6027 
6028 	bdev_write_zero_buffer_next(parent_io);
6029 }
6030 
6031 static void
6032 bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
6033 {
6034 	pthread_mutex_lock(&ctx->bdev->internal.mutex);
6035 	ctx->bdev->internal.qos_mod_in_progress = false;
6036 	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
6037 
6038 	if (ctx->cb_fn) {
6039 		ctx->cb_fn(ctx->cb_arg, status);
6040 	}
6041 	free(ctx);
6042 }
6043 
6044 static void
6045 bdev_disable_qos_done(void *cb_arg)
6046 {
6047 	struct set_qos_limit_ctx *ctx = cb_arg;
6048 	struct spdk_bdev *bdev = ctx->bdev;
6049 	struct spdk_bdev_io *bdev_io;
6050 	struct spdk_bdev_qos *qos;
6051 
6052 	pthread_mutex_lock(&bdev->internal.mutex);
6053 	qos = bdev->internal.qos;
6054 	bdev->internal.qos = NULL;
6055 	pthread_mutex_unlock(&bdev->internal.mutex);
6056 
6057 	while (!TAILQ_EMPTY(&qos->queued)) {
6058 		/* Send queued I/O back to their original thread for resubmission. */
6059 		bdev_io = TAILQ_FIRST(&qos->queued);
6060 		TAILQ_REMOVE(&qos->queued, bdev_io, internal.link);
6061 
6062 		if (bdev_io->internal.io_submit_ch) {
6063 			/*
6064 			 * Channel was changed when sending it to the QoS thread - change it back
6065 			 *  before sending it back to the original thread.
6066 			 */
6067 			bdev_io->internal.ch = bdev_io->internal.io_submit_ch;
6068 			bdev_io->internal.io_submit_ch = NULL;
6069 		}
6070 
6071 		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
6072 				     _bdev_io_submit, bdev_io);
6073 	}
6074 
6075 	if (qos->thread != NULL) {
6076 		spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
6077 		spdk_poller_unregister(&qos->poller);
6078 	}
6079 
6080 	free(qos);
6081 
6082 	bdev_set_qos_limit_done(ctx, 0);
6083 }
6084 
6085 static void
6086 bdev_disable_qos_msg_done(struct spdk_io_channel_iter *i, int status)
6087 {
6088 	void *io_device = spdk_io_channel_iter_get_io_device(i);
6089 	struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
6090 	struct set_qos_limit_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6091 	struct spdk_thread *thread;
6092 
6093 	pthread_mutex_lock(&bdev->internal.mutex);
6094 	thread = bdev->internal.qos->thread;
6095 	pthread_mutex_unlock(&bdev->internal.mutex);
6096 
6097 	if (thread != NULL) {
6098 		spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
6099 	} else {
6100 		bdev_disable_qos_done(ctx);
6101 	}
6102 }
6103 
6104 static void
6105 bdev_disable_qos_msg(struct spdk_io_channel_iter *i)
6106 {
6107 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
6108 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(ch);
6109 
6110 	bdev_ch->flags &= ~BDEV_CH_QOS_ENABLED;
6111 
6112 	spdk_for_each_channel_continue(i, 0);
6113 }
6114 
6115 static void
6116 bdev_update_qos_rate_limit_msg(void *cb_arg)
6117 {
6118 	struct set_qos_limit_ctx *ctx = cb_arg;
6119 	struct spdk_bdev *bdev = ctx->bdev;
6120 
6121 	pthread_mutex_lock(&bdev->internal.mutex);
6122 	bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
6123 	pthread_mutex_unlock(&bdev->internal.mutex);
6124 
6125 	bdev_set_qos_limit_done(ctx, 0);
6126 }
6127 
6128 static void
6129 bdev_enable_qos_msg(struct spdk_io_channel_iter *i)
6130 {
6131 	void *io_device = spdk_io_channel_iter_get_io_device(i);
6132 	struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
6133 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
6134 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(ch);
6135 
6136 	pthread_mutex_lock(&bdev->internal.mutex);
6137 	bdev_enable_qos(bdev, bdev_ch);
6138 	pthread_mutex_unlock(&bdev->internal.mutex);
6139 	spdk_for_each_channel_continue(i, 0);
6140 }
6141 
6142 static void
6143 bdev_enable_qos_done(struct spdk_io_channel_iter *i, int status)
6144 {
6145 	struct set_qos_limit_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6146 
6147 	bdev_set_qos_limit_done(ctx, status);
6148 }
6149 
6150 static void
6151 bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
6152 {
6153 	int i;
6154 
6155 	assert(bdev->internal.qos != NULL);
6156 
6157 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
6158 		if (limits[i] != SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
6159 			bdev->internal.qos->rate_limits[i].limit = limits[i];
6160 
6161 			if (limits[i] == 0) {
6162 				bdev->internal.qos->rate_limits[i].limit =
6163 					SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
6164 			}
6165 		}
6166 	}
6167 }
6168 
6169 void
6170 spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
6171 			      void (*cb_fn)(void *cb_arg, int status), void *cb_arg)
6172 {
6173 	struct set_qos_limit_ctx	*ctx;
6174 	uint32_t			limit_set_complement;
6175 	uint64_t			min_limit_per_sec;
6176 	int				i;
6177 	bool				disable_rate_limit = true;
6178 
6179 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
6180 		if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
6181 			continue;
6182 		}
6183 
6184 		if (limits[i] > 0) {
6185 			disable_rate_limit = false;
6186 		}
6187 
6188 		if (bdev_qos_is_iops_rate_limit(i) == true) {
6189 			min_limit_per_sec = SPDK_BDEV_QOS_MIN_IOS_PER_SEC;
6190 		} else {
6191 			/* Change from megabyte to byte rate limit */
6192 			limits[i] = limits[i] * 1024 * 1024;
6193 			min_limit_per_sec = SPDK_BDEV_QOS_MIN_BYTES_PER_SEC;
6194 		}
6195 
6196 		limit_set_complement = limits[i] % min_limit_per_sec;
6197 		if (limit_set_complement) {
6198 			SPDK_ERRLOG("Requested rate limit %" PRIu64 " is not a multiple of %" PRIu64 "\n",
6199 				    limits[i], min_limit_per_sec);
6200 			limits[i] += min_limit_per_sec - limit_set_complement;
6201 			SPDK_ERRLOG("Round up the rate limit to %" PRIu64 "\n", limits[i]);
6202 		}
6203 	}
6204 
6205 	ctx = calloc(1, sizeof(*ctx));
6206 	if (ctx == NULL) {
6207 		cb_fn(cb_arg, -ENOMEM);
6208 		return;
6209 	}
6210 
6211 	ctx->cb_fn = cb_fn;
6212 	ctx->cb_arg = cb_arg;
6213 	ctx->bdev = bdev;
6214 
6215 	pthread_mutex_lock(&bdev->internal.mutex);
6216 	if (bdev->internal.qos_mod_in_progress) {
6217 		pthread_mutex_unlock(&bdev->internal.mutex);
6218 		free(ctx);
6219 		cb_fn(cb_arg, -EAGAIN);
6220 		return;
6221 	}
6222 	bdev->internal.qos_mod_in_progress = true;
6223 
6224 	if (disable_rate_limit == true && bdev->internal.qos) {
6225 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
6226 			if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED &&
6227 			    (bdev->internal.qos->rate_limits[i].limit > 0 &&
6228 			     bdev->internal.qos->rate_limits[i].limit !=
6229 			     SPDK_BDEV_QOS_LIMIT_NOT_DEFINED)) {
6230 				disable_rate_limit = false;
6231 				break;
6232 			}
6233 		}
6234 	}
6235 
6236 	if (disable_rate_limit == false) {
6237 		if (bdev->internal.qos == NULL) {
6238 			bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
6239 			if (!bdev->internal.qos) {
6240 				pthread_mutex_unlock(&bdev->internal.mutex);
6241 				SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
6242 				bdev_set_qos_limit_done(ctx, -ENOMEM);
6243 				return;
6244 			}
6245 		}
6246 
6247 		if (bdev->internal.qos->thread == NULL) {
6248 			/* Enabling */
6249 			bdev_set_qos_rate_limits(bdev, limits);
6250 
6251 			spdk_for_each_channel(__bdev_to_io_dev(bdev),
6252 					      bdev_enable_qos_msg, ctx,
6253 					      bdev_enable_qos_done);
6254 		} else {
6255 			/* Updating */
6256 			bdev_set_qos_rate_limits(bdev, limits);
6257 
6258 			spdk_thread_send_msg(bdev->internal.qos->thread,
6259 					     bdev_update_qos_rate_limit_msg, ctx);
6260 		}
6261 	} else {
6262 		if (bdev->internal.qos != NULL) {
6263 			bdev_set_qos_rate_limits(bdev, limits);
6264 
6265 			/* Disabling */
6266 			spdk_for_each_channel(__bdev_to_io_dev(bdev),
6267 					      bdev_disable_qos_msg, ctx,
6268 					      bdev_disable_qos_msg_done);
6269 		} else {
6270 			pthread_mutex_unlock(&bdev->internal.mutex);
6271 			bdev_set_qos_limit_done(ctx, 0);
6272 			return;
6273 		}
6274 	}
6275 
6276 	pthread_mutex_unlock(&bdev->internal.mutex);
6277 }
6278 
6279 struct spdk_bdev_histogram_ctx {
6280 	spdk_bdev_histogram_status_cb cb_fn;
6281 	void *cb_arg;
6282 	struct spdk_bdev *bdev;
6283 	int status;
6284 };
6285 
6286 static void
6287 bdev_histogram_disable_channel_cb(struct spdk_io_channel_iter *i, int status)
6288 {
6289 	struct spdk_bdev_histogram_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6290 
6291 	pthread_mutex_lock(&ctx->bdev->internal.mutex);
6292 	ctx->bdev->internal.histogram_in_progress = false;
6293 	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
6294 	ctx->cb_fn(ctx->cb_arg, ctx->status);
6295 	free(ctx);
6296 }
6297 
6298 static void
6299 bdev_histogram_disable_channel(struct spdk_io_channel_iter *i)
6300 {
6301 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
6302 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6303 
6304 	if (ch->histogram != NULL) {
6305 		spdk_histogram_data_free(ch->histogram);
6306 		ch->histogram = NULL;
6307 	}
6308 	spdk_for_each_channel_continue(i, 0);
6309 }
6310 
6311 static void
6312 bdev_histogram_enable_channel_cb(struct spdk_io_channel_iter *i, int status)
6313 {
6314 	struct spdk_bdev_histogram_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6315 
6316 	if (status != 0) {
6317 		ctx->status = status;
6318 		ctx->bdev->internal.histogram_enabled = false;
6319 		spdk_for_each_channel(__bdev_to_io_dev(ctx->bdev), bdev_histogram_disable_channel, ctx,
6320 				      bdev_histogram_disable_channel_cb);
6321 	} else {
6322 		pthread_mutex_lock(&ctx->bdev->internal.mutex);
6323 		ctx->bdev->internal.histogram_in_progress = false;
6324 		pthread_mutex_unlock(&ctx->bdev->internal.mutex);
6325 		ctx->cb_fn(ctx->cb_arg, ctx->status);
6326 		free(ctx);
6327 	}
6328 }
6329 
6330 static void
6331 bdev_histogram_enable_channel(struct spdk_io_channel_iter *i)
6332 {
6333 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
6334 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6335 	int status = 0;
6336 
6337 	if (ch->histogram == NULL) {
6338 		ch->histogram = spdk_histogram_data_alloc();
6339 		if (ch->histogram == NULL) {
6340 			status = -ENOMEM;
6341 		}
6342 	}
6343 
6344 	spdk_for_each_channel_continue(i, status);
6345 }
6346 
6347 void
6348 spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
6349 			   void *cb_arg, bool enable)
6350 {
6351 	struct spdk_bdev_histogram_ctx *ctx;
6352 
6353 	ctx = calloc(1, sizeof(struct spdk_bdev_histogram_ctx));
6354 	if (ctx == NULL) {
6355 		cb_fn(cb_arg, -ENOMEM);
6356 		return;
6357 	}
6358 
6359 	ctx->bdev = bdev;
6360 	ctx->status = 0;
6361 	ctx->cb_fn = cb_fn;
6362 	ctx->cb_arg = cb_arg;
6363 
6364 	pthread_mutex_lock(&bdev->internal.mutex);
6365 	if (bdev->internal.histogram_in_progress) {
6366 		pthread_mutex_unlock(&bdev->internal.mutex);
6367 		free(ctx);
6368 		cb_fn(cb_arg, -EAGAIN);
6369 		return;
6370 	}
6371 
6372 	bdev->internal.histogram_in_progress = true;
6373 	pthread_mutex_unlock(&bdev->internal.mutex);
6374 
6375 	bdev->internal.histogram_enabled = enable;
6376 
6377 	if (enable) {
6378 		/* Allocate histogram for each channel */
6379 		spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_histogram_enable_channel, ctx,
6380 				      bdev_histogram_enable_channel_cb);
6381 	} else {
6382 		spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_histogram_disable_channel, ctx,
6383 				      bdev_histogram_disable_channel_cb);
6384 	}
6385 }
6386 
6387 struct spdk_bdev_histogram_data_ctx {
6388 	spdk_bdev_histogram_data_cb cb_fn;
6389 	void *cb_arg;
6390 	struct spdk_bdev *bdev;
6391 	/** merged histogram data from all channels */
6392 	struct spdk_histogram_data	*histogram;
6393 };
6394 
6395 static void
6396 bdev_histogram_get_channel_cb(struct spdk_io_channel_iter *i, int status)
6397 {
6398 	struct spdk_bdev_histogram_data_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6399 
6400 	ctx->cb_fn(ctx->cb_arg, status, ctx->histogram);
6401 	free(ctx);
6402 }
6403 
6404 static void
6405 bdev_histogram_get_channel(struct spdk_io_channel_iter *i)
6406 {
6407 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
6408 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6409 	struct spdk_bdev_histogram_data_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6410 	int status = 0;
6411 
6412 	if (ch->histogram == NULL) {
6413 		status = -EFAULT;
6414 	} else {
6415 		spdk_histogram_data_merge(ctx->histogram, ch->histogram);
6416 	}
6417 
6418 	spdk_for_each_channel_continue(i, status);
6419 }
6420 
6421 void
6422 spdk_bdev_histogram_get(struct spdk_bdev *bdev, struct spdk_histogram_data *histogram,
6423 			spdk_bdev_histogram_data_cb cb_fn,
6424 			void *cb_arg)
6425 {
6426 	struct spdk_bdev_histogram_data_ctx *ctx;
6427 
6428 	ctx = calloc(1, sizeof(struct spdk_bdev_histogram_data_ctx));
6429 	if (ctx == NULL) {
6430 		cb_fn(cb_arg, -ENOMEM, NULL);
6431 		return;
6432 	}
6433 
6434 	ctx->bdev = bdev;
6435 	ctx->cb_fn = cb_fn;
6436 	ctx->cb_arg = cb_arg;
6437 
6438 	ctx->histogram = histogram;
6439 
6440 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_histogram_get_channel, ctx,
6441 			      bdev_histogram_get_channel_cb);
6442 }
6443 
6444 size_t
6445 spdk_bdev_get_media_events(struct spdk_bdev_desc *desc, struct spdk_bdev_media_event *events,
6446 			   size_t max_events)
6447 {
6448 	struct media_event_entry *entry;
6449 	size_t num_events = 0;
6450 
6451 	for (; num_events < max_events; ++num_events) {
6452 		entry = TAILQ_FIRST(&desc->pending_media_events);
6453 		if (entry == NULL) {
6454 			break;
6455 		}
6456 
6457 		events[num_events] = entry->event;
6458 		TAILQ_REMOVE(&desc->pending_media_events, entry, tailq);
6459 		TAILQ_INSERT_TAIL(&desc->free_media_events, entry, tailq);
6460 	}
6461 
6462 	return num_events;
6463 }
6464 
6465 int
6466 spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media_event *events,
6467 			    size_t num_events)
6468 {
6469 	struct spdk_bdev_desc *desc;
6470 	struct media_event_entry *entry;
6471 	size_t event_id;
6472 	int rc = 0;
6473 
6474 	assert(bdev->media_events);
6475 
6476 	pthread_mutex_lock(&bdev->internal.mutex);
6477 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
6478 		if (desc->write) {
6479 			break;
6480 		}
6481 	}
6482 
6483 	if (desc == NULL || desc->media_events_buffer == NULL) {
6484 		rc = -ENODEV;
6485 		goto out;
6486 	}
6487 
6488 	for (event_id = 0; event_id < num_events; ++event_id) {
6489 		entry = TAILQ_FIRST(&desc->free_media_events);
6490 		if (entry == NULL) {
6491 			break;
6492 		}
6493 
6494 		TAILQ_REMOVE(&desc->free_media_events, entry, tailq);
6495 		TAILQ_INSERT_TAIL(&desc->pending_media_events, entry, tailq);
6496 		entry->event = events[event_id];
6497 	}
6498 
6499 	rc = event_id;
6500 out:
6501 	pthread_mutex_unlock(&bdev->internal.mutex);
6502 	return rc;
6503 }
6504 
6505 void
6506 spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
6507 {
6508 	struct spdk_bdev_desc *desc;
6509 
6510 	pthread_mutex_lock(&bdev->internal.mutex);
6511 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
6512 		if (!TAILQ_EMPTY(&desc->pending_media_events)) {
6513 			desc->callback.event_fn(SPDK_BDEV_EVENT_MEDIA_MANAGEMENT, bdev,
6514 						desc->callback.ctx);
6515 		}
6516 	}
6517 	pthread_mutex_unlock(&bdev->internal.mutex);
6518 }
6519 
6520 struct locked_lba_range_ctx {
6521 	struct lba_range		range;
6522 	struct spdk_bdev		*bdev;
6523 	struct lba_range		*current_range;
6524 	struct lba_range		*owner_range;
6525 	struct spdk_poller		*poller;
6526 	lock_range_cb			cb_fn;
6527 	void				*cb_arg;
6528 };
6529 
6530 static void
6531 bdev_lock_error_cleanup_cb(struct spdk_io_channel_iter *i, int status)
6532 {
6533 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6534 
6535 	ctx->cb_fn(ctx->cb_arg, -ENOMEM);
6536 	free(ctx);
6537 }
6538 
6539 static void
6540 bdev_unlock_lba_range_get_channel(struct spdk_io_channel_iter *i);
6541 
6542 static void
6543 bdev_lock_lba_range_cb(struct spdk_io_channel_iter *i, int status)
6544 {
6545 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6546 	struct spdk_bdev *bdev = ctx->bdev;
6547 
6548 	if (status == -ENOMEM) {
6549 		/* One of the channels could not allocate a range object.
6550 		 * So we have to go back and clean up any ranges that were
6551 		 * allocated successfully before we return error status to
6552 		 * the caller.  We can reuse the unlock function to do that
6553 		 * clean up.
6554 		 */
6555 		spdk_for_each_channel(__bdev_to_io_dev(bdev),
6556 				      bdev_unlock_lba_range_get_channel, ctx,
6557 				      bdev_lock_error_cleanup_cb);
6558 		return;
6559 	}
6560 
6561 	/* All channels have locked this range and no I/O overlapping the range
6562 	 * are outstanding!  Set the owner_ch for the range object for the
6563 	 * locking channel, so that this channel will know that it is allowed
6564 	 * to write to this range.
6565 	 */
6566 	ctx->owner_range->owner_ch = ctx->range.owner_ch;
6567 	ctx->cb_fn(ctx->cb_arg, status);
6568 
6569 	/* Don't free the ctx here.  Its range is in the bdev's global list of
6570 	 * locked ranges still, and will be removed and freed when this range
6571 	 * is later unlocked.
6572 	 */
6573 }
6574 
6575 static int
6576 bdev_lock_lba_range_check_io(void *_i)
6577 {
6578 	struct spdk_io_channel_iter *i = _i;
6579 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
6580 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6581 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6582 	struct lba_range *range = ctx->current_range;
6583 	struct spdk_bdev_io *bdev_io;
6584 
6585 	spdk_poller_unregister(&ctx->poller);
6586 
6587 	/* The range is now in the locked_ranges, so no new IO can be submitted to this
6588 	 * range.  But we need to wait until any outstanding IO overlapping with this range
6589 	 * are completed.
6590 	 */
6591 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
6592 		if (bdev_io_range_is_locked(bdev_io, range)) {
6593 			ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
6594 			return SPDK_POLLER_BUSY;
6595 		}
6596 	}
6597 
6598 	spdk_for_each_channel_continue(i, 0);
6599 	return SPDK_POLLER_BUSY;
6600 }
6601 
6602 static void
6603 bdev_lock_lba_range_get_channel(struct spdk_io_channel_iter *i)
6604 {
6605 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
6606 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6607 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6608 	struct lba_range *range;
6609 
6610 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
6611 		if (range->length == ctx->range.length &&
6612 		    range->offset == ctx->range.offset &&
6613 		    range->locked_ctx == ctx->range.locked_ctx) {
6614 			/* This range already exists on this channel, so don't add
6615 			 * it again.  This can happen when a new channel is created
6616 			 * while the for_each_channel operation is in progress.
6617 			 * Do not check for outstanding I/O in that case, since the
6618 			 * range was locked before any I/O could be submitted to the
6619 			 * new channel.
6620 			 */
6621 			spdk_for_each_channel_continue(i, 0);
6622 			return;
6623 		}
6624 	}
6625 
6626 	range = calloc(1, sizeof(*range));
6627 	if (range == NULL) {
6628 		spdk_for_each_channel_continue(i, -ENOMEM);
6629 		return;
6630 	}
6631 
6632 	range->length = ctx->range.length;
6633 	range->offset = ctx->range.offset;
6634 	range->locked_ctx = ctx->range.locked_ctx;
6635 	ctx->current_range = range;
6636 	if (ctx->range.owner_ch == ch) {
6637 		/* This is the range object for the channel that will hold
6638 		 * the lock.  Store it in the ctx object so that we can easily
6639 		 * set its owner_ch after the lock is finally acquired.
6640 		 */
6641 		ctx->owner_range = range;
6642 	}
6643 	TAILQ_INSERT_TAIL(&ch->locked_ranges, range, tailq);
6644 	bdev_lock_lba_range_check_io(i);
6645 }
6646 
6647 static void
6648 bdev_lock_lba_range_ctx(struct spdk_bdev *bdev, struct locked_lba_range_ctx *ctx)
6649 {
6650 	assert(spdk_get_thread() == ctx->range.owner_ch->channel->thread);
6651 
6652 	/* We will add a copy of this range to each channel now. */
6653 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_lock_lba_range_get_channel, ctx,
6654 			      bdev_lock_lba_range_cb);
6655 }
6656 
6657 static bool
6658 bdev_lba_range_overlaps_tailq(struct lba_range *range, lba_range_tailq_t *tailq)
6659 {
6660 	struct lba_range *r;
6661 
6662 	TAILQ_FOREACH(r, tailq, tailq) {
6663 		if (bdev_lba_range_overlapped(range, r)) {
6664 			return true;
6665 		}
6666 	}
6667 	return false;
6668 }
6669 
6670 static int
6671 bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
6672 		    uint64_t offset, uint64_t length,
6673 		    lock_range_cb cb_fn, void *cb_arg)
6674 {
6675 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6676 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6677 	struct locked_lba_range_ctx *ctx;
6678 
6679 	if (cb_arg == NULL) {
6680 		SPDK_ERRLOG("cb_arg must not be NULL\n");
6681 		return -EINVAL;
6682 	}
6683 
6684 	ctx = calloc(1, sizeof(*ctx));
6685 	if (ctx == NULL) {
6686 		return -ENOMEM;
6687 	}
6688 
6689 	ctx->range.offset = offset;
6690 	ctx->range.length = length;
6691 	ctx->range.owner_ch = ch;
6692 	ctx->range.locked_ctx = cb_arg;
6693 	ctx->bdev = bdev;
6694 	ctx->cb_fn = cb_fn;
6695 	ctx->cb_arg = cb_arg;
6696 
6697 	pthread_mutex_lock(&bdev->internal.mutex);
6698 	if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
6699 		/* There is an active lock overlapping with this range.
6700 		 * Put it on the pending list until this range no
6701 		 * longer overlaps with another.
6702 		 */
6703 		TAILQ_INSERT_TAIL(&bdev->internal.pending_locked_ranges, &ctx->range, tailq);
6704 	} else {
6705 		TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
6706 		bdev_lock_lba_range_ctx(bdev, ctx);
6707 	}
6708 	pthread_mutex_unlock(&bdev->internal.mutex);
6709 	return 0;
6710 }
6711 
6712 static void
6713 bdev_lock_lba_range_ctx_msg(void *_ctx)
6714 {
6715 	struct locked_lba_range_ctx *ctx = _ctx;
6716 
6717 	bdev_lock_lba_range_ctx(ctx->bdev, ctx);
6718 }
6719 
6720 static void
6721 bdev_unlock_lba_range_cb(struct spdk_io_channel_iter *i, int status)
6722 {
6723 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6724 	struct locked_lba_range_ctx *pending_ctx;
6725 	struct spdk_bdev_channel *ch = ctx->range.owner_ch;
6726 	struct spdk_bdev *bdev = ch->bdev;
6727 	struct lba_range *range, *tmp;
6728 
6729 	pthread_mutex_lock(&bdev->internal.mutex);
6730 	/* Check if there are any pending locked ranges that overlap with this range
6731 	 * that was just unlocked.  If there are, check that it doesn't overlap with any
6732 	 * other locked ranges before calling bdev_lock_lba_range_ctx which will start
6733 	 * the lock process.
6734 	 */
6735 	TAILQ_FOREACH_SAFE(range, &bdev->internal.pending_locked_ranges, tailq, tmp) {
6736 		if (bdev_lba_range_overlapped(range, &ctx->range) &&
6737 		    !bdev_lba_range_overlaps_tailq(range, &bdev->internal.locked_ranges)) {
6738 			TAILQ_REMOVE(&bdev->internal.pending_locked_ranges, range, tailq);
6739 			pending_ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
6740 			TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, range, tailq);
6741 			spdk_thread_send_msg(pending_ctx->range.owner_ch->channel->thread,
6742 					     bdev_lock_lba_range_ctx_msg, pending_ctx);
6743 		}
6744 	}
6745 	pthread_mutex_unlock(&bdev->internal.mutex);
6746 
6747 	ctx->cb_fn(ctx->cb_arg, status);
6748 	free(ctx);
6749 }
6750 
6751 static void
6752 bdev_unlock_lba_range_get_channel(struct spdk_io_channel_iter *i)
6753 {
6754 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
6755 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6756 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
6757 	TAILQ_HEAD(, spdk_bdev_io) io_locked;
6758 	struct spdk_bdev_io *bdev_io;
6759 	struct lba_range *range;
6760 
6761 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
6762 		if (ctx->range.offset == range->offset &&
6763 		    ctx->range.length == range->length &&
6764 		    ctx->range.locked_ctx == range->locked_ctx) {
6765 			TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
6766 			free(range);
6767 			break;
6768 		}
6769 	}
6770 
6771 	/* Note: we should almost always be able to assert that the range specified
6772 	 * was found.  But there are some very rare corner cases where a new channel
6773 	 * gets created simultaneously with a range unlock, where this function
6774 	 * would execute on that new channel and wouldn't have the range.
6775 	 * We also use this to clean up range allocations when a later allocation
6776 	 * fails in the locking path.
6777 	 * So we can't actually assert() here.
6778 	 */
6779 
6780 	/* Swap the locked IO into a temporary list, and then try to submit them again.
6781 	 * We could hyper-optimize this to only resubmit locked I/O that overlap
6782 	 * with the range that was just unlocked, but this isn't a performance path so
6783 	 * we go for simplicity here.
6784 	 */
6785 	TAILQ_INIT(&io_locked);
6786 	TAILQ_SWAP(&ch->io_locked, &io_locked, spdk_bdev_io, internal.ch_link);
6787 	while (!TAILQ_EMPTY(&io_locked)) {
6788 		bdev_io = TAILQ_FIRST(&io_locked);
6789 		TAILQ_REMOVE(&io_locked, bdev_io, internal.ch_link);
6790 		bdev_io_submit(bdev_io);
6791 	}
6792 
6793 	spdk_for_each_channel_continue(i, 0);
6794 }
6795 
6796 static int
6797 bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
6798 		      uint64_t offset, uint64_t length,
6799 		      lock_range_cb cb_fn, void *cb_arg)
6800 {
6801 	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6802 	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
6803 	struct locked_lba_range_ctx *ctx;
6804 	struct lba_range *range;
6805 	bool range_found = false;
6806 
6807 	/* Let's make sure the specified channel actually has a lock on
6808 	 * the specified range.  Note that the range must match exactly.
6809 	 */
6810 	TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
6811 		if (range->offset == offset && range->length == length &&
6812 		    range->owner_ch == ch && range->locked_ctx == cb_arg) {
6813 			range_found = true;
6814 			break;
6815 		}
6816 	}
6817 
6818 	if (!range_found) {
6819 		return -EINVAL;
6820 	}
6821 
6822 	pthread_mutex_lock(&bdev->internal.mutex);
6823 	/* We confirmed that this channel has locked the specified range.  To
6824 	 * start the unlock the process, we find the range in the bdev's locked_ranges
6825 	 * and remove it.  This ensures new channels don't inherit the locked range.
6826 	 * Then we will send a message to each channel (including the one specified
6827 	 * here) to remove the range from its per-channel list.
6828 	 */
6829 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
6830 		if (range->offset == offset && range->length == length &&
6831 		    range->locked_ctx == cb_arg) {
6832 			break;
6833 		}
6834 	}
6835 	if (range == NULL) {
6836 		assert(false);
6837 		pthread_mutex_unlock(&bdev->internal.mutex);
6838 		return -EINVAL;
6839 	}
6840 	TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
6841 	ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
6842 	pthread_mutex_unlock(&bdev->internal.mutex);
6843 
6844 	ctx->cb_fn = cb_fn;
6845 	ctx->cb_arg = cb_arg;
6846 
6847 	spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_unlock_lba_range_get_channel, ctx,
6848 			      bdev_unlock_lba_range_cb);
6849 	return 0;
6850 }
6851 
6852 SPDK_LOG_REGISTER_COMPONENT(bdev)
6853 
6854 SPDK_TRACE_REGISTER_FN(bdev_trace, "bdev", TRACE_GROUP_BDEV)
6855 {
6856 	spdk_trace_register_owner(OWNER_BDEV, 'b');
6857 	spdk_trace_register_object(OBJECT_BDEV_IO, 'i');
6858 	spdk_trace_register_description("BDEV_IO_START", TRACE_BDEV_IO_START, OWNER_BDEV,
6859 					OBJECT_BDEV_IO, 1, 0, "type:   ");
6860 	spdk_trace_register_description("BDEV_IO_DONE", TRACE_BDEV_IO_DONE, OWNER_BDEV,
6861 					OBJECT_BDEV_IO, 0, 0, "");
6862 }
6863