/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef SPDK_BLOBSTORE_H
#define SPDK_BLOBSTORE_H

#include "spdk/assert.h"
#include "spdk/blob.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/tree.h"
#include "spdk/thread.h"

#include "request.h"

/* In Memory Data Structures
 *
 * The following data structures exist only in memory.
 */

#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
#define SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS 512
#define SPDK_BLOB_BLOBID_HIGH_BIT (1ULL << 32)

struct spdk_xattr {
	uint32_t	index;
	uint16_t	value_len;
	char		*name;
	void		*value;
	TAILQ_ENTRY(spdk_xattr)	link;
};

/* The mutable part of the blob data that is sync'd to
 * disk. The data in here is both mutable and persistent.
 */
struct spdk_blob_mut_data {
	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	/* Array of LBAs that are the beginning of a cluster, in
	 * the order they appear in the blob.
	 */
	uint64_t	*clusters;

	/* The size of the clusters array. This is greater than or
	 * equal to 'num_clusters'.
	 */
	size_t		cluster_array_size;

	/* Number of extent pages */
	uint64_t	num_extent_pages;

	/* Array of page offsets into the metadata region,
	 * containing extents. May contain entries for pages that
	 * are not yet allocated. */
	uint32_t	*extent_pages;

	/* The size of the extent page array. This is greater than or
	 * equal to 'num_extent_pages'. */
	size_t		extent_pages_array_size;

	/* Number of metadata pages */
	uint32_t	num_pages;

	/* Array of page offsets into the metadata region, in
	 * the order of the metadata page sequence.
	 */
	uint32_t	*pages;
};

enum spdk_blob_state {
	/* The in-memory version of the blob does not match the
	 * on-disk version.
	 */
	SPDK_BLOB_STATE_DIRTY,

	/* The in-memory version of the blob matches the on-disk
	 * version.
	 */
	SPDK_BLOB_STATE_CLEAN,

	/* The in-memory state is being synchronized with the
	 * on-disk blob state. */
	SPDK_BLOB_STATE_LOADING,
};

TAILQ_HEAD(spdk_xattr_tailq, spdk_xattr);

struct spdk_blob_list {
	spdk_blob_id id;
	size_t clone_count;
	TAILQ_HEAD(, spdk_blob_list) clones;
	TAILQ_ENTRY(spdk_blob_list) link;
};

struct spdk_blob {
	struct spdk_blob_store *bs;

	uint32_t	open_ref;

	spdk_blob_id	id;
	spdk_blob_id	parent_id;

	enum spdk_blob_state		state;

	/* Two copies of the mutable data. One is a version
	 * that matches the last known data on disk (clean).
	 * The other (active) is the current data. Syncing
	 * a blob makes the clean match the active.
	 */
	struct spdk_blob_mut_data	clean;
	struct spdk_blob_mut_data	active;

	bool		invalid;
	bool		data_ro;
	bool		md_ro;

	uint64_t	invalid_flags;
	uint64_t	data_ro_flags;
	uint64_t	md_ro_flags;

	struct spdk_bs_dev *back_bs_dev;

	/* TODO: The xattrs are mutable, but we don't want to be
	 * copying them unnecessarily. Figure this out.
	 */
	struct spdk_xattr_tailq xattrs;
	struct spdk_xattr_tailq xattrs_internal;

	RB_ENTRY(spdk_blob) link;

	uint32_t frozen_refcnt;
	bool locked_operation_in_progress;
	enum blob_clear_method clear_method;
	bool extent_rle_found;
	bool extent_table_found;
	bool use_extent_table;
	/* Lists of pending and in-flight metadata persist operations */
	TAILQ_HEAD(, spdk_blob_persist_ctx) pending_persists;
	TAILQ_HEAD(, spdk_blob_persist_ctx) persists_to_complete;

	/* Number of data clusters reported by the extent table;
	 * that many cluster entries remain to be read from extent pages. */
	uint64_t	remaining_clusters_in_et;
};

struct spdk_blob_store {
	uint64_t			md_start; /* Offset from beginning of disk, in pages */
	uint32_t			md_len; /* Count, in pages */

	struct spdk_io_channel		*md_channel;
	uint32_t			max_channel_ops;

	struct spdk_thread		*md_thread;

	struct spdk_bs_dev		*dev;

	struct spdk_bit_array		*used_md_pages;		/* Protected by used_lock */
	struct spdk_bit_pool		*used_clusters;		/* Protected by used_lock */
	struct spdk_bit_array		*used_blobids;
	struct spdk_bit_array		*open_blobids;

	struct spdk_spinlock		used_lock;

	uint32_t			cluster_sz;
	uint64_t			total_clusters;
	uint64_t			total_data_clusters;
	uint64_t			num_free_clusters;	/* Protected by used_lock */
	uint64_t			pages_per_cluster;
	uint8_t				pages_per_cluster_shift;
	uint32_t			io_unit_size;

	spdk_blob_id			super_blob;
	struct spdk_bs_type		bstype;

	struct spdk_bs_cpl		unload_cpl;
	int				unload_err;

	RB_HEAD(spdk_blob_tree, spdk_blob) open_blobs;
	TAILQ_HEAD(, spdk_blob_list)	snapshots;

	bool				clean;

	spdk_bs_esnap_dev_create	esnap_bs_dev_create;
};

struct spdk_bs_channel {
	struct spdk_bs_request_set	*req_mem;
	TAILQ_HEAD(, spdk_bs_request_set) reqs;

	struct spdk_blob_store		*bs;

	struct spdk_bs_dev		*dev;
	struct spdk_io_channel		*dev_channel;

	/* This page is only used during insert of a new cluster. */
	struct spdk_blob_md_page	*new_cluster_page;

	TAILQ_HEAD(, spdk_bs_request_set) need_cluster_alloc;
	TAILQ_HEAD(, spdk_bs_request_set) queued_io;
};

/** operation type */
enum spdk_blob_op_type {
	SPDK_BLOB_WRITE,
	SPDK_BLOB_READ,
	SPDK_BLOB_UNMAP,
	SPDK_BLOB_WRITE_ZEROES,
	SPDK_BLOB_WRITEV,
	SPDK_BLOB_READV,
};

/* back bs_dev */

#define BLOB_SNAPSHOT "SNAP"
#define SNAPSHOT_IN_PROGRESS "SNAPTMP"
#define SNAPSHOT_PENDING_REMOVAL "SNAPRM"
#define BLOB_EXTERNAL_SNAPSHOT_ID "EXTSNAP"

struct spdk_blob_bs_dev {
	struct spdk_bs_dev bs_dev;
	struct spdk_blob *blob;
};
/* On-Disk Data Structures
 *
 * The following data structures exist on disk.
 */
#define SPDK_BS_INITIAL_VERSION 1
#define SPDK_BS_VERSION 3 /* current version */

#pragma pack(push, 1)

#define SPDK_MD_MASK_TYPE_USED_PAGES 0
#define SPDK_MD_MASK_TYPE_USED_CLUSTERS 1
#define SPDK_MD_MASK_TYPE_USED_BLOBIDS 2

struct spdk_bs_md_mask {
	uint8_t		type;
	uint32_t	length; /* In bits */
	uint8_t		mask[0];
};

#define SPDK_MD_DESCRIPTOR_TYPE_PADDING 0
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR 2
#define SPDK_MD_DESCRIPTOR_TYPE_FLAGS 3
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL 4

/* The following descriptors define the cluster layout in a blob.
 * EXTENT_RLE cannot be present in a blob's metadata at the same time
 * as the EXTENT_TABLE and EXTENT_PAGE descriptors. */

/* The EXTENT_RLE descriptor holds an array of LBAs that point to the
 * beginning of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is part of the serialized
 * metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE 1
/* The EXTENT_TABLE descriptor holds an array of md page offsets that
 * point to pages with an EXTENT_PAGE descriptor. The 0's in the array,
 * representing unallocated extent pages, are run-length encoded;
 * non-zero values are the offsets of allocated pages. It is part of
 * the serialized metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE 5
/* The EXTENT_PAGE descriptor holds an array of LBAs that point to the
 * beginning of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is NOT part of the
 * serialized metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE 6

struct spdk_blob_md_descriptor_xattr {
	uint8_t		type;
	uint32_t	length;

	uint16_t	name_length;
	uint16_t	value_length;

	char		name[0];
	/* String name immediately followed by string value. */
};

struct spdk_blob_md_descriptor_extent_rle {
	uint8_t		type;
	uint32_t	length;

	struct {
		uint32_t	cluster_idx;
		uint32_t	length; /* In units of clusters */
	} extents[0];
};

struct spdk_blob_md_descriptor_extent_table {
	uint8_t		type;
	uint32_t	length;

	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	struct {
		uint32_t	page_idx;
		uint32_t	num_pages; /* In units of pages */
	} extent_page[0];
};

struct spdk_blob_md_descriptor_extent_page {
	uint8_t		type;
	uint32_t	length;

	/* First cluster index in this extent page */
	uint32_t	start_cluster_idx;

	uint32_t	cluster_idx[0];
};

#define SPDK_BLOB_THIN_PROV		(1ULL << 0)
#define SPDK_BLOB_INTERNAL_XATTR	(1ULL << 1)
#define SPDK_BLOB_EXTENT_TABLE		(1ULL << 2)
#define SPDK_BLOB_EXTERNAL_SNAPSHOT	(1ULL << 3)
#define SPDK_BLOB_INVALID_FLAGS_MASK	(SPDK_BLOB_THIN_PROV | SPDK_BLOB_INTERNAL_XATTR | \
					 SPDK_BLOB_EXTENT_TABLE | SPDK_BLOB_EXTERNAL_SNAPSHOT)

#define SPDK_BLOB_READ_ONLY (1ULL << 0)
#define SPDK_BLOB_DATA_RO_FLAGS_MASK	SPDK_BLOB_READ_ONLY

#define SPDK_BLOB_CLEAR_METHOD_SHIFT 0
#define SPDK_BLOB_CLEAR_METHOD (3ULL << SPDK_BLOB_CLEAR_METHOD_SHIFT)
#define SPDK_BLOB_MD_RO_FLAGS_MASK	SPDK_BLOB_CLEAR_METHOD

struct spdk_blob_md_descriptor_flags {
	uint8_t		type;
	uint32_t	length;

	/*
	 * If a flag in invalid_flags is set that the application is not aware of,
	 *  it will not allow the blob to be opened.
	 */
	uint64_t	invalid_flags;

	/*
	 * If a flag in data_ro_flags is set that the application is not aware of,
	 *  allow the blob to be opened in data_read_only and md_read_only mode.
	 */
	uint64_t	data_ro_flags;

	/*
	 * If a flag in md_ro_flags is set that the application is not aware of,
	 *  allow the blob to be opened in md_read_only mode.
	 */
	uint64_t	md_ro_flags;
};

struct spdk_blob_md_descriptor {
	uint8_t		type;
	uint32_t	length;
};

#define SPDK_INVALID_MD_PAGE UINT32_MAX

struct spdk_blob_md_page {
	spdk_blob_id     id;

	uint32_t	sequence_num;
	uint32_t	reserved0;

	/* Descriptors here */
	uint8_t		descriptors[4072];

	uint32_t	next;
	uint32_t	crc;
};
#define SPDK_BS_PAGE_SIZE 0x1000
SPDK_STATIC_ASSERT(SPDK_BS_PAGE_SIZE == sizeof(struct spdk_blob_md_page), "Invalid md page size");
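
/* Layout check for the page above: the fixed fields occupy 8 (id) +
 * 4 (sequence_num) + 4 (reserved0) + 4 (next) + 4 (crc) = 24 bytes,
 * which is why the descriptors array is sized 4096 - 24 = 4072 bytes.
 */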

#define SPDK_BS_MAX_DESC_SIZE SPDK_SIZEOF_MEMBER(struct spdk_blob_md_page, descriptors)

/* Maximum number of extents a single Extent Page can hold.
 * For an SPDK_BS_PAGE_SIZE of 4K, SPDK_EXTENTS_PER_EP would be 512. */
#define SPDK_EXTENTS_PER_EP_MAX ((SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_extent_page)) / sizeof(uint32_t))
#define SPDK_EXTENTS_PER_EP (spdk_align64pow2(SPDK_EXTENTS_PER_EP_MAX + 1) >> 1u)
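
/* Worked example for the two defines above, assuming a 4KiB page: the packed
 * extent page descriptor header is 1 (type) + 4 (length) +
 * 4 (start_cluster_idx) = 9 bytes, so SPDK_EXTENTS_PER_EP_MAX =
 * (4072 - 9) / 4 = 1015, and SPDK_EXTENTS_PER_EP rounds that down to a
 * power of two: spdk_align64pow2(1016) >> 1 = 1024 >> 1 = 512.
 */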

#define SPDK_BS_SUPER_BLOCK_SIG "SPDKBLOB"

struct spdk_bs_super_block {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	struct spdk_bs_type	bstype; /* blobstore type */

	uint32_t	used_blobid_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_blobid_mask_len; /* Count, in pages */

	uint64_t	size; /* size of blobstore in bytes */
	uint32_t	io_unit_size; /* Size of io unit in bytes */

	uint8_t		reserved[4000];
	uint32_t	crc;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block) == 0x1000, "Invalid super block size");
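
/* Size sketch for the super block, assuming struct spdk_bs_type is 16 bytes:
 * the fields preceding 'reserved' sum to 92 bytes, and
 * 92 + 4000 (reserved) + 4 (crc) = 4096, matching the assert above.
 */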

#pragma pack(pop)

struct spdk_bs_dev *bs_create_zeroes_dev(void);
struct spdk_bs_dev *bs_create_blob_bs_dev(struct spdk_blob *blob);

/* Unit Conversions
 *
 * The blobstore works with several different units:
 * - Byte: Self-explanatory
 * - LBA: The logical blocks on the backing storage device.
 * - Page: The read/write units of blobs and metadata. This is
 *         an offset into a blob in units of 4KiB.
 * - Cluster Index: The disk is broken into a sequential list of
 *		    clusters. This is the offset from the beginning
 *		    of that list.
 *
 * NOTE: These conversions all act on simple magnitudes, not with any sort
 *        of knowledge about the blobs themselves. For instance, converting
 *        a page to an LBA with the conversion function below simply converts
 *        a number of pages to an equivalent number of LBAs, but that
 *        LBA certainly isn't the right LBA that corresponds to a page offset
 *        for a particular blob.
 */
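
/* For example, assuming a 512-byte blocklen, 4KiB pages, and a 1MiB cluster:
 * byte offset 8192 is LBA 16 (8192 / 512), page 10 is LBA 80
 * (10 * 4096 / 512), and cluster 3 starts at LBA 6144 (3 * 2048). These are
 * the kinds of magnitude conversions the helpers below perform.
 */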
static inline uint64_t
bs_byte_to_lba(struct spdk_blob_store *bs, uint64_t length)
{
	assert(length % bs->dev->blocklen == 0);

	return length / bs->dev->blocklen;
}

static inline uint64_t
bs_dev_byte_to_lba(struct spdk_bs_dev *bs_dev, uint64_t length)
{
	assert(length % bs_dev->blocklen == 0);

	return length / bs_dev->blocklen;
}

static inline uint64_t
bs_page_to_lba(struct spdk_blob_store *bs, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs->dev->blocklen;
}

static inline uint64_t
bs_md_page_to_lba(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < bs->md_len);
	return bs_page_to_lba(bs, page + bs->md_start);
}

static inline uint64_t
bs_dev_page_to_lba(struct spdk_bs_dev *bs_dev, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs_dev->blocklen;
}

static inline uint64_t
bs_io_unit_per_page(struct spdk_blob_store *bs)
{
	return SPDK_BS_PAGE_SIZE / bs->io_unit_size;
}

static inline uint64_t
bs_io_unit_to_page(struct spdk_blob_store *bs, uint64_t io_unit)
{
	return io_unit / bs_io_unit_per_page(bs);
}

static inline uint64_t
bs_cluster_to_page(struct spdk_blob_store *bs, uint32_t cluster)
{
	return (uint64_t)cluster * bs->pages_per_cluster;
}

static inline uint32_t
bs_page_to_cluster(struct spdk_blob_store *bs, uint64_t page)
{
	assert(page % bs->pages_per_cluster == 0);

	return page / bs->pages_per_cluster;
}

static inline uint64_t
bs_cluster_to_lba(struct spdk_blob_store *bs, uint32_t cluster)
{
	assert(bs->cluster_sz / bs->dev->blocklen > 0);

	return (uint64_t)cluster * (bs->cluster_sz / bs->dev->blocklen);
}

static inline uint32_t
bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
{
	assert(lba % (bs->cluster_sz / bs->dev->blocklen) == 0);

	return lba / (bs->cluster_sz / bs->dev->blocklen);
}

static inline uint64_t
bs_io_unit_to_back_dev_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	return io_unit * (blob->bs->io_unit_size / blob->back_bs_dev->blocklen);
}

static inline uint64_t
bs_cluster_to_extent_table_id(uint64_t cluster_num)
{
	return cluster_num / SPDK_EXTENTS_PER_EP;
}

static inline uint32_t *
bs_cluster_to_extent_page(struct spdk_blob *blob, uint64_t cluster_num)
{
	uint64_t extent_table_id = bs_cluster_to_extent_table_id(cluster_num);

	assert(blob->use_extent_table);
	assert(extent_table_id < blob->active.extent_pages_array_size);

	return &blob->active.extent_pages[extent_table_id];
}
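
/* Example: with SPDK_EXTENTS_PER_EP == 512, cluster 600 belongs to extent
 * table entry 600 / 512 = 1, so bs_cluster_to_extent_page() returns
 * &blob->active.extent_pages[1].
 */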

static inline uint64_t
bs_io_units_per_cluster(struct spdk_blob *blob)
{
	uint64_t	io_units_per_cluster;
	uint8_t		shift = blob->bs->pages_per_cluster_shift;

	if (shift != 0) {
		io_units_per_cluster = bs_io_unit_per_page(blob->bs) << shift;
	} else {
		io_units_per_cluster = bs_io_unit_per_page(blob->bs) * blob->bs->pages_per_cluster;
	}

	return io_units_per_cluster;
}
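
/* For instance, with a 512-byte io_unit_size and a 1MiB cluster
 * (pages_per_cluster == 256, shift == 8), this computes
 * 8 io units per page << 8 == 2048 io units per cluster.
 */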

/* End basic conversions */

static inline uint64_t
bs_blobid_to_page(spdk_blob_id id)
{
	return id & 0xFFFFFFFF;
}

/* The blob id is a 64-bit number. The lower 32 bits are the page_idx. The upper
 * 32 bits are not currently used. Stick a 1 there just to catch bugs where the
 * code assumes blob id == page_idx.
 */
static inline spdk_blob_id
bs_page_to_blobid(uint64_t page_idx)
{
	if (page_idx > UINT32_MAX) {
		return SPDK_BLOBID_INVALID;
	}
	return SPDK_BLOB_BLOBID_HIGH_BIT | page_idx;
}
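
/* Example: page_idx 7 maps to blob id 0x100000007 (SPDK_BLOB_BLOBID_HIGH_BIT
 * plus the page index), and bs_blobid_to_page() recovers 7 by masking off
 * the upper 32 bits.
 */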

/* Given an io unit offset into a blob, look up the LBA for the
 * start of that io unit.
 */
static inline uint64_t
bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint64_t	pages_per_cluster;
	uint8_t		shift;
	uint64_t	io_units_per_cluster;
	uint64_t	io_units_per_page;
	uint64_t	page;

	page = bs_io_unit_to_page(blob->bs, io_unit);

	pages_per_cluster = blob->bs->pages_per_cluster;
	shift = blob->bs->pages_per_cluster_shift;
	io_units_per_page = bs_io_unit_per_page(blob->bs);

	assert(page < blob->active.num_clusters * pages_per_cluster);

	if (shift != 0) {
		io_units_per_cluster = io_units_per_page << shift;
		lba = blob->active.clusters[page >> shift];
	} else {
		io_units_per_cluster = io_units_per_page * pages_per_cluster;
		lba = blob->active.clusters[page / pages_per_cluster];
	}
	lba += io_unit % io_units_per_cluster;
	return lba;
}
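
/* Worked example, assuming 512-byte io units, 4KiB pages, and a 1MiB cluster
 * (pages_per_cluster == 256, shift == 8): io_unit 5000 is page 5000 / 8 = 625,
 * which falls in cluster 625 >> 8 = 2, so the result is
 * blob->active.clusters[2] plus 5000 % 2048 = 904 io units into that cluster.
 */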

/* Given an io_unit offset into a blob, look up the number of io_units until the
 * next cluster boundary.
 */
static inline uint32_t
bs_num_io_units_to_cluster_boundary(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	io_units_per_cluster;

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	return io_units_per_cluster - (io_unit % io_units_per_cluster);
}

/* Given an io_unit offset into a blob, look up the number of pages into the
 * blob to the beginning of the current cluster. */
static inline uint32_t
bs_io_unit_to_cluster_start(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	pages_per_cluster;
	uint64_t	page;

	pages_per_cluster = blob->bs->pages_per_cluster;
	page = bs_io_unit_to_page(blob->bs, io_unit);

	return page - (page % pages_per_cluster);
}

/* Given an io_unit offset into a blob, look up the number of the cluster
 * that contains it. */
static inline uint32_t
bs_io_unit_to_cluster_number(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	pages_per_cluster = blob->bs->pages_per_cluster;
	uint8_t		shift = blob->bs->pages_per_cluster_shift;
	uint32_t	page_offset;

	page_offset = io_unit / bs_io_unit_per_page(blob->bs);
	if (shift != 0) {
		return page_offset >> shift;
	} else {
		return page_offset / pages_per_cluster;
	}
}

/* Given an io unit offset into a blob, look up whether it falls within an
 * allocated cluster. */
static inline bool
bs_io_unit_is_allocated(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint64_t	page;
	uint64_t	pages_per_cluster;
	uint8_t		shift;

	shift = blob->bs->pages_per_cluster_shift;
	pages_per_cluster = blob->bs->pages_per_cluster;
	page = bs_io_unit_to_page(blob->bs, io_unit);

	assert(page < blob->active.num_clusters * pages_per_cluster);

	if (shift != 0) {
		lba = blob->active.clusters[page >> shift];
	} else {
		lba = blob->active.clusters[page / pages_per_cluster];
	}

	if (lba == 0) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return false;
	} else {
		return true;
	}
}

#endif