/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef SPDK_BLOBSTORE_H
#define SPDK_BLOBSTORE_H

#include "spdk/assert.h"
#include "spdk/blob.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/tree.h"
#include "spdk/thread.h"

#include "request.h"

/* In Memory Data Structures
 *
 * The following data structures exist only in memory.
 */

#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
#define SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS 512
#define SPDK_BLOB_BLOBID_HIGH_BIT (1ULL << 32)

struct spdk_xattr {
	uint32_t	index;
	uint16_t	value_len;
	char		*name;
	void		*value;
	TAILQ_ENTRY(spdk_xattr)	link;
};

/* The mutable part of the blob data that is sync'd to
 * disk. The data in here is both mutable and persistent.
 */
struct spdk_blob_mut_data {
	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	/* Array of LBAs that are the beginning of a cluster, in
	 * the order they appear in the blob.
	 */
	uint64_t	*clusters;

	/* The size of the clusters array. This is greater than or
	 * equal to 'num_clusters'.
	 */
	size_t		cluster_array_size;

	/* The number of allocated clusters in the clusters array */
	uint64_t	num_allocated_clusters;

	/* Number of extent pages */
	uint64_t	num_extent_pages;

	/* Array of page offsets into the metadata region,
	 * containing extents. Can contain entries for pages
	 * that have not yet been allocated. */
	uint32_t	*extent_pages;

	/* The size of the extent page array. This is greater than or
	 * equal to 'num_extent_pages'. */
	size_t		extent_pages_array_size;

	/* Number of metadata pages */
	uint32_t	num_pages;

	/* Array of page offsets into the metadata region, in
	 * the order of the metadata page sequence.
	 */
	uint32_t	*pages;
};

enum spdk_blob_state {
	/* The in-memory version of the blob does not match the
	 * on-disk version.
	 */
	SPDK_BLOB_STATE_DIRTY,

	/* The in-memory version of the blob matches the on-disk
	 * version.
	 */
	SPDK_BLOB_STATE_CLEAN,

	/* The in-memory state is being synchronized with the
	 * on-disk blob state. */
	SPDK_BLOB_STATE_LOADING,
};

TAILQ_HEAD(spdk_xattr_tailq, spdk_xattr);

struct spdk_blob_list {
	spdk_blob_id id;
	size_t clone_count;
	TAILQ_HEAD(, spdk_blob_list) clones;
	TAILQ_ENTRY(spdk_blob_list) link;
};

struct spdk_blob {
	struct spdk_blob_store *bs;

	uint32_t	open_ref;

	spdk_blob_id	id;
	spdk_blob_id	parent_id;

	enum spdk_blob_state		state;

	/* Two copies of the mutable data. One is a version
	 * that matches the last known data on disk (clean).
	 * The other (active) is the current data. Syncing
	 * a blob makes the clean match the active.
	 */
	struct spdk_blob_mut_data	clean;
	struct spdk_blob_mut_data	active;

	bool		invalid;
	bool		data_ro;
	bool		md_ro;

	uint64_t	invalid_flags;
	uint64_t	data_ro_flags;
	uint64_t	md_ro_flags;

	struct spdk_bs_dev *back_bs_dev;

	/* TODO: The xattrs are mutable, but we don't want to be
	 * copying them unnecessarily. Figure this out.
	 */
	struct spdk_xattr_tailq xattrs;
	struct spdk_xattr_tailq xattrs_internal;

	RB_ENTRY(spdk_blob) link;

	uint32_t frozen_refcnt;
	bool locked_operation_in_progress;
	enum blob_clear_method clear_method;
	bool extent_rle_found;
	bool extent_table_found;
	bool use_extent_table;

	/* Lists of pending metadata persist operations */
	TAILQ_HEAD(, spdk_blob_persist_ctx) pending_persists;
	TAILQ_HEAD(, spdk_blob_persist_ctx) persists_to_complete;

	/* Number of data clusters, as given by the extent table, whose
	 * entries still have to be read from extent pages. */
	uint64_t	remaining_clusters_in_et;
};

struct spdk_blob_store {
	uint64_t			md_start; /* Offset from beginning of disk, in pages */
	uint32_t			md_len; /* Count, in pages */

	struct spdk_io_channel		*md_channel;
	uint32_t			max_channel_ops;

	struct spdk_thread		*md_thread;

	struct spdk_bs_dev		*dev;

	struct spdk_bit_array		*used_md_pages;		/* Protected by used_lock */
	struct spdk_bit_pool		*used_clusters;		/* Protected by used_lock */
	struct spdk_bit_array		*used_blobids;
	struct spdk_bit_array		*open_blobids;

	struct spdk_spinlock		used_lock;

	uint32_t			cluster_sz;
	uint64_t			total_clusters;
	uint64_t			total_data_clusters;
	uint64_t			num_free_clusters;	/* Protected by used_lock */
	uint64_t			pages_per_cluster;
	uint64_t			io_units_per_cluster;
	uint8_t				pages_per_cluster_shift;
	uint8_t				io_units_per_cluster_shift;
	uint32_t			io_unit_size;

	spdk_blob_id			super_blob;
	struct spdk_bs_type		bstype;

	struct spdk_bs_cpl		unload_cpl;
	int				unload_err;

	RB_HEAD(spdk_blob_tree, spdk_blob) open_blobs;
	TAILQ_HEAD(, spdk_blob_list)	snapshots;

	bool				clean;

	spdk_bs_esnap_dev_create	esnap_bs_dev_create;
	void				*esnap_ctx;

	/* If external snapshot channels are being destroyed while
	 * the blobstore is being unloaded, the unload is deferred
	 * until after the channel destruction completes.
	 */
	uint32_t			esnap_channels_unloading;
	spdk_bs_op_complete		esnap_unload_cb_fn;
	void				*esnap_unload_cb_arg;
};

struct spdk_bs_channel {
	struct spdk_bs_request_set	*req_mem;
	TAILQ_HEAD(, spdk_bs_request_set) reqs;

	struct spdk_blob_store		*bs;

	struct spdk_bs_dev		*dev;
	struct spdk_io_channel		*dev_channel;

	/* This page is only used during insert of a new cluster. */
	struct spdk_blob_md_page	*new_cluster_page;

	TAILQ_HEAD(, spdk_bs_request_set) need_cluster_alloc;
	TAILQ_HEAD(, spdk_bs_request_set) queued_io;

	RB_HEAD(blob_esnap_channel_tree, blob_esnap_channel) esnap_channels;
};

/** operation type */
enum spdk_blob_op_type {
	SPDK_BLOB_WRITE,
	SPDK_BLOB_READ,
	SPDK_BLOB_UNMAP,
	SPDK_BLOB_WRITE_ZEROES,
	SPDK_BLOB_WRITEV,
	SPDK_BLOB_READV,
};

/* back bs_dev */

#define BLOB_SNAPSHOT "SNAP"
#define SNAPSHOT_IN_PROGRESS "SNAPTMP"
#define SNAPSHOT_PENDING_REMOVAL "SNAPRM"
#define BLOB_EXTERNAL_SNAPSHOT_ID "EXTSNAP"

struct spdk_blob_bs_dev {
	struct spdk_bs_dev bs_dev;
	struct spdk_blob *blob;
};

/* On-Disk Data Structures
 *
 * The following data structures exist on disk.
 */
#define SPDK_BS_INITIAL_VERSION 1
#define SPDK_BS_VERSION 3 /* current version */

#pragma pack(push, 1)

#define SPDK_MD_MASK_TYPE_USED_PAGES 0
#define SPDK_MD_MASK_TYPE_USED_CLUSTERS 1
#define SPDK_MD_MASK_TYPE_USED_BLOBIDS 2

struct spdk_bs_md_mask {
	uint8_t		type;
	uint32_t	length; /* In bits */
	uint8_t		mask[0];
};

#define SPDK_MD_DESCRIPTOR_TYPE_PADDING 0
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR 2
#define SPDK_MD_DESCRIPTOR_TYPE_FLAGS 3
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL 4

/* The following descriptors define the cluster layout in a blob.
 * An EXTENT_RLE descriptor cannot be present in a blob's metadata
 * at the same time as EXTENT_TABLE and EXTENT_PAGE descriptors. */

/* An EXTENT_RLE descriptor holds an array of LBAs that point to the
 * beginning of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is part of the serialized
 * metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE 1
/* An EXTENT_TABLE descriptor holds an array of md page offsets that
 * point to pages containing EXTENT_PAGE descriptors. Runs of 0's in
 * the array are run-length encoded and represent extent pages that are
 * not yet allocated; non-zero values are offsets of allocated extent
 * pages. It is part of the serialized metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE 5
/* An EXTENT_PAGE descriptor holds an array of LBAs that point to the
 * beginning of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is NOT part of the
 * serialized metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE 6

struct spdk_blob_md_descriptor_xattr {
	uint8_t		type;
	uint32_t	length;

	uint16_t	name_length;
	uint16_t	value_length;

	char		name[0];
	/* String name immediately followed by string value. */
};

struct spdk_blob_md_descriptor_extent_rle {
	uint8_t		type;
	uint32_t	length;

	struct {
		uint32_t	cluster_idx;
		uint32_t	length; /* In units of clusters */
	} extents[0];
};
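
/* Illustrative EXTENT_RLE encoding (hypothetical values, not taken from any
 * particular blob): a blob whose first two clusters are allocated back to
 * back starting at cluster_idx 100, followed by three unallocated clusters,
 * could be described by two extents:
 *   extents[0] = { .cluster_idx = 100, .length = 2 };
 *   extents[1] = { .cluster_idx = 0,   .length = 3 };
 */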

struct spdk_blob_md_descriptor_extent_table {
	uint8_t		type;
	uint32_t	length;

	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	struct {
		uint32_t	page_idx;
		uint32_t	num_pages; /* In units of pages */
	} extent_page[0];
};
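
/* Illustrative EXTENT_TABLE encoding (hypothetical values): one allocated
 * extent page stored at md page offset 37 followed by a run of four extent
 * pages that are not yet allocated might appear as
 *   extent_page[0] = { .page_idx = 37, .num_pages = 1 };
 *   extent_page[1] = { .page_idx = 0,  .num_pages = 4 };
 */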

struct spdk_blob_md_descriptor_extent_page {
	uint8_t		type;
	uint32_t	length;

	/* First cluster index in this extent page */
	uint32_t	start_cluster_idx;

	uint32_t	cluster_idx[0];
};

#define SPDK_BLOB_THIN_PROV		(1ULL << 0)
#define SPDK_BLOB_INTERNAL_XATTR	(1ULL << 1)
#define SPDK_BLOB_EXTENT_TABLE		(1ULL << 2)
#define SPDK_BLOB_EXTERNAL_SNAPSHOT	(1ULL << 3)
#define SPDK_BLOB_INVALID_FLAGS_MASK	(SPDK_BLOB_THIN_PROV | SPDK_BLOB_INTERNAL_XATTR | \
					 SPDK_BLOB_EXTENT_TABLE | SPDK_BLOB_EXTERNAL_SNAPSHOT)

#define SPDK_BLOB_READ_ONLY (1ULL << 0)
#define SPDK_BLOB_DATA_RO_FLAGS_MASK	SPDK_BLOB_READ_ONLY

#define SPDK_BLOB_CLEAR_METHOD_SHIFT 0
#define SPDK_BLOB_CLEAR_METHOD (3ULL << SPDK_BLOB_CLEAR_METHOD_SHIFT)
#define SPDK_BLOB_MD_RO_FLAGS_MASK	SPDK_BLOB_CLEAR_METHOD

struct spdk_blob_md_descriptor_flags {
	uint8_t		type;
	uint32_t	length;

	/*
	 * If a flag in invalid_flags is set that the application is not aware of,
	 * the blob will not be allowed to be opened.
	 */
	uint64_t	invalid_flags;

	/*
	 * If a flag in data_ro_flags is set that the application is not aware of,
	 * the blob may only be opened in data_read_only and md_read_only mode.
	 */
	uint64_t	data_ro_flags;

	/*
	 * If a flag in md_ro_flags is set that the application is not aware of,
	 * the blob may only be opened in md_read_only mode.
	 */
	uint64_t	md_ro_flags;
};
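
/* A sketch of how the clear method could be unpacked from md_ro_flags using
 * the masks defined above (illustrative only; this snippet is not a helper
 * defined by this header):
 *
 *   enum blob_clear_method method =
 *           (md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT;
 */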

struct spdk_blob_md_descriptor {
	uint8_t		type;
	uint32_t	length;
};

#define SPDK_INVALID_MD_PAGE UINT32_MAX

struct spdk_blob_md_page {
	spdk_blob_id     id;

	uint32_t	sequence_num;
	uint32_t	reserved0;

	/* Descriptors here */
	uint8_t		descriptors[4072];

	uint32_t	next;
	uint32_t	crc;
};
#define SPDK_BS_PAGE_SIZE 0x1000
SPDK_STATIC_ASSERT(SPDK_BS_PAGE_SIZE == sizeof(struct spdk_blob_md_page), "Invalid md page size");
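
/* Metadata pages belonging to one blob are read as a chain: `next` is
 * expected to hold the md page offset of the following page, with the
 * SPDK_INVALID_MD_PAGE sentinel defined above marking the end of the chain,
 * and `crc` covering the page contents (see blobstore.c for the
 * authoritative serialization logic).
 */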

#define SPDK_BS_MAX_DESC_SIZE SPDK_SIZEOF_MEMBER(struct spdk_blob_md_page, descriptors)

/* Maximum number of extents a single Extent Page can hold.
 * For an SPDK_BS_PAGE_SIZE of 4K, SPDK_EXTENTS_PER_EP would be 512. */
#define SPDK_EXTENTS_PER_EP_MAX ((SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_extent_page)) / sizeof(uint32_t))
#define SPDK_EXTENTS_PER_EP (spdk_align64pow2(SPDK_EXTENTS_PER_EP_MAX + 1) >> 1u)
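
/* Worked example, assuming the 4KiB metadata page defined above:
 *   SPDK_BS_MAX_DESC_SIZE                              = 4072 bytes
 *   sizeof(struct spdk_blob_md_descriptor_extent_page) = 9 bytes (packed)
 *   SPDK_EXTENTS_PER_EP_MAX = (4072 - 9) / sizeof(uint32_t) = 1015
 *   SPDK_EXTENTS_PER_EP     = largest power of two <= 1015  = 512
 */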

#define SPDK_BS_SUPER_BLOCK_SIG "SPDKBLOB"

struct spdk_bs_super_block {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	struct spdk_bs_type	bstype; /* blobstore type */

	uint32_t	used_blobid_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_blobid_mask_len; /* Count, in pages */

	uint64_t	size; /* size of blobstore in bytes */
	uint32_t	io_unit_size; /* Size of io unit in bytes */

	uint8_t		reserved[4000];
	uint32_t	crc;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block) == 0x1000, "Invalid super block size");

#pragma pack(pop)

struct spdk_bs_dev *bs_create_zeroes_dev(void);
struct spdk_bs_dev *bs_create_blob_bs_dev(struct spdk_blob *blob);
struct spdk_io_channel *blob_esnap_get_io_channel(struct spdk_io_channel *ch,
		struct spdk_blob *blob);
bool blob_backed_with_zeroes_dev(struct spdk_blob *blob);

/* Unit Conversions
 *
 * The blobstore works with several different units:
 * - Byte: Self-explanatory
 * - LBA: The logical blocks on the backing storage device.
 * - Page: The read/write units of blobs and metadata. This is
 *         an offset into a blob in units of 4KiB.
 * - Cluster Index: The disk is broken into a sequential list of
 *                  clusters. This is the index of a cluster, counted
 *                  from the beginning of the disk.
 *
 * NOTE: These conversions all act on simple magnitudes, not with any sort
 *       of knowledge about the blobs themselves. For instance, converting
 *       a page to an LBA with the conversion function below simply converts
 *       a number of pages to an equivalent number of LBAs, but that
 *       LBA certainly isn't the right LBA that corresponds to a page offset
 *       for a particular blob.
 */
static inline uint64_t
bs_byte_to_lba(struct spdk_blob_store *bs, uint64_t length)
{
	assert(length % bs->dev->blocklen == 0);

	return length / bs->dev->blocklen;
}

static inline uint64_t
bs_dev_byte_to_lba(struct spdk_bs_dev *bs_dev, uint64_t length)
{
	assert(length % bs_dev->blocklen == 0);

	return length / bs_dev->blocklen;
}

static inline uint64_t
bs_page_to_lba(struct spdk_blob_store *bs, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs->dev->blocklen;
}

static inline uint64_t
bs_md_page_to_lba(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < bs->md_len);
	return bs_page_to_lba(bs, page + bs->md_start);
}

static inline uint64_t
bs_dev_io_unit_to_lba(struct spdk_blob *blob, struct spdk_bs_dev *bs_dev, uint64_t io_unit)
{
	return io_unit * blob->bs->io_unit_size / bs_dev->blocklen;
}

static inline uint64_t
bs_cluster_to_io_unit(struct spdk_blob_store *bs, uint32_t cluster)
{
	return (uint64_t)cluster * bs->io_units_per_cluster;
}

static inline uint32_t
bs_io_unit_to_cluster(struct spdk_blob_store *bs, uint64_t io_unit)
{
	assert(io_unit % bs->io_units_per_cluster == 0);

	return io_unit / bs->io_units_per_cluster;
}

static inline uint64_t
bs_cluster_to_lba(struct spdk_blob_store *bs, uint32_t cluster)
{
	assert(bs->cluster_sz / bs->dev->blocklen > 0);

	return (uint64_t)cluster * (bs->cluster_sz / bs->dev->blocklen);
}
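
/* For example (illustrative numbers): with the default 1MiB cluster_sz and a
 * 4KiB device block, bs_cluster_to_lba(bs, 3) == 3 * (1048576 / 4096) == 768,
 * i.e. the LBA at which the fourth cluster of the disk begins.
 */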

static inline uint32_t
bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
{
	assert(lba % (bs->cluster_sz / bs->dev->blocklen) == 0);

	return lba / (bs->cluster_sz / bs->dev->blocklen);
}

static inline uint64_t
bs_io_unit_to_back_dev_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	return io_unit * (blob->bs->io_unit_size / blob->back_bs_dev->blocklen);
}

static inline uint64_t
bs_cluster_to_extent_table_id(uint64_t cluster_num)
{
	return cluster_num / SPDK_EXTENTS_PER_EP;
}

static inline uint32_t *
bs_cluster_to_extent_page(struct spdk_blob *blob, uint64_t cluster_num)
{
	uint64_t extent_table_id = bs_cluster_to_extent_table_id(cluster_num);

	assert(blob->use_extent_table);
	assert(extent_table_id < blob->active.extent_pages_array_size);

	return &blob->active.extent_pages[extent_table_id];
}

static inline uint64_t
bs_io_units_per_cluster(struct spdk_blob *blob)
{
	return blob->bs->io_units_per_cluster;
}

/* End basic conversions */

static inline uint64_t
bs_blobid_to_page(spdk_blob_id id)
{
	return id & 0xFFFFFFFF;
}

/* The blob id is a 64 bit number. The lower 32 bits are the page_idx. The upper
 * 32 bits are not currently used. Stick a 1 there just to catch bugs where the
 * code assumes blob id == page_idx.
 */
static inline spdk_blob_id
bs_page_to_blobid(uint64_t page_idx)
{
	if (page_idx > UINT32_MAX) {
		return SPDK_BLOBID_INVALID;
	}
	return SPDK_BLOB_BLOBID_HIGH_BIT | page_idx;
}
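
/* For example (illustrative value): page_idx 10 maps to blob id 0x10000000A
 * (SPDK_BLOB_BLOBID_HIGH_BIT | 10), and bs_blobid_to_page() recovers 10 by
 * masking off the upper 32 bits.
 */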

/* Given an io unit offset into a blob, look up the LBA for the
 * start of that io unit.
 */
static inline uint64_t
bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint8_t		shift;
	uint64_t	io_units_per_cluster = blob->bs->io_units_per_cluster;

	shift = blob->bs->io_units_per_cluster_shift;
	assert(io_unit < blob->active.num_clusters * io_units_per_cluster);
	if (shift != 0) {
		lba = blob->active.clusters[io_unit >> shift];
	} else {
		lba = blob->active.clusters[io_unit / io_units_per_cluster];
	}
	if (lba == 0) {
		return 0;
	} else {
		return lba + io_unit % io_units_per_cluster;
	}
}
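
/* A worked example with assumed geometry (1MiB clusters and 4KiB io units,
 * so io_units_per_cluster == 256 and the shift is 8): for io_unit 600,
 * 600 >> 8 == 2 selects active.clusters[2]; if that cluster starts at
 * LBA 10240, the result is 10240 + (600 % 256) == 10328. An LBA of 0 in
 * the clusters array means the cluster is unallocated.
 */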

/* Given an io_unit offset into a blob, look up the number of io_units until the
 * next cluster boundary.
 */
static inline uint32_t
bs_num_io_units_to_cluster_boundary(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	io_units_per_cluster;

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	return io_units_per_cluster - (io_unit % io_units_per_cluster);
}

/* Given an io_unit offset into a blob, look up the io_unit offset into the blob of the beginning of its cluster */
static inline uint64_t
bs_io_unit_to_cluster_start(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	io_units_per_cluster = blob->bs->io_units_per_cluster;

	return io_unit - (io_unit % io_units_per_cluster);
}

/* Given an io_unit offset into a blob, look up the index of the cluster within the blob that contains it */
static inline uint32_t
bs_io_unit_to_cluster_number(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	io_units_per_cluster = blob->bs->io_units_per_cluster;
	uint8_t		shift = blob->bs->io_units_per_cluster_shift;

	if (shift != 0) {
		return io_unit >> shift;
	} else {
		return io_unit / io_units_per_cluster;
	}
}

/* Given an io unit offset into a blob, check whether it falls within an allocated cluster. */
static inline bool
bs_io_unit_is_allocated(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t lba = bs_blob_io_unit_to_lba(blob, io_unit);

	if (lba == 0) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return false;
	} else {
		return true;
	}
}

#endif