/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef SPDK_BLOBSTORE_H
#define SPDK_BLOBSTORE_H

#include "spdk/assert.h"
#include "spdk/blob.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/tree.h"
#include "spdk/thread.h"

#include "request.h"

/* In Memory Data Structures
 *
 * The following data structures exist only in memory.
 */

#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
#define SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS 512
#define SPDK_BLOB_BLOBID_HIGH_BIT (1ULL << 32)
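
/* Illustrative note: a blob id carries the index of the blob's first metadata
 * page in its low 32 bits, with SPDK_BLOB_BLOBID_HIGH_BIT set on top (see
 * bs_page_to_blobid() below). For example, the blob whose metadata chain
 * starts at md page 5 gets id 0x100000005. */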

struct spdk_xattr {
	uint32_t	index;
	uint16_t	value_len;
	char		*name;
	void		*value;
	TAILQ_ENTRY(spdk_xattr)	link;
};

/* The mutable part of the blob data that is sync'd to
 * disk. The data in here is both mutable and persistent.
 */
struct spdk_blob_mut_data {
	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	/* Array of LBAs that are the beginning of a cluster, in
	 * the order they appear in the blob.
	 */
	uint64_t	*clusters;

	/* The size of the clusters array. This is greater than or
	 * equal to 'num_clusters'.
	 */
	size_t		cluster_array_size;

	/* Number of extent pages */
	uint64_t	num_extent_pages;

	/* Array of page offsets into the metadata region,
	 * containing extents. May contain entries for pages
	 * that are not yet allocated. */
	uint32_t	*extent_pages;

	/* The size of the extent page array. This is greater than or
	 * equal to 'num_extent_pages'. */
	size_t		extent_pages_array_size;

	/* Number of metadata pages */
	uint32_t	num_pages;

	/* Array of page offsets into the metadata region, in
	 * the order of the metadata page sequence.
	 */
	uint32_t	*pages;
};

enum spdk_blob_state {
	/* The in-memory version of the blob does not match the
	 * on-disk version.
	 */
	SPDK_BLOB_STATE_DIRTY,

	/* The in-memory version of the blob matches the on-disk
	 * version.
	 */
	SPDK_BLOB_STATE_CLEAN,

	/* The in-memory state is being synchronized with the
	 * on-disk blob state. */
	SPDK_BLOB_STATE_LOADING,
};

TAILQ_HEAD(spdk_xattr_tailq, spdk_xattr);

struct spdk_blob_list {
	spdk_blob_id id;
	size_t clone_count;
	TAILQ_HEAD(, spdk_blob_list) clones;
	TAILQ_ENTRY(spdk_blob_list) link;
};

struct spdk_blob {
	struct spdk_blob_store *bs;

	uint32_t	open_ref;

	spdk_blob_id	id;
	spdk_blob_id	parent_id;

	enum spdk_blob_state		state;

	/* Two copies of the mutable data. One is a version
	 * that matches the last known data on disk (clean).
	 * The other (active) is the current data. Syncing
	 * a blob makes the clean match the active.
	 */
	struct spdk_blob_mut_data	clean;
	struct spdk_blob_mut_data	active;

	bool		invalid;
	bool		data_ro;
	bool		md_ro;

	uint64_t	invalid_flags;
	uint64_t	data_ro_flags;
	uint64_t	md_ro_flags;

	struct spdk_bs_dev *back_bs_dev;

	/* TODO: The xattrs are mutable, but we don't want to be
	 * copying them unnecessarily. Figure this out.
	 */
	struct spdk_xattr_tailq xattrs;
	struct spdk_xattr_tailq xattrs_internal;

	RB_ENTRY(spdk_blob) link;

	uint32_t frozen_refcnt;
	bool locked_operation_in_progress;
	enum blob_clear_method clear_method;
	bool extent_rle_found;
	bool extent_table_found;
	bool use_extent_table;

	/* Lists of pending metadata persist operations */
	TAILQ_HEAD(, spdk_blob_persist_ctx) pending_persists;
	TAILQ_HEAD(, spdk_blob_persist_ctx) persists_to_complete;

	/* Number of data clusters accounted for in the extent table
	 * whose LBAs still have to be read from extent pages. */
	uint64_t	remaining_clusters_in_et;
};
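
/* Illustrative example of the clean/active split above: resizing a blob
 * updates active.num_clusters right away, while clean.num_clusters keeps the
 * last on-disk value; only a successful metadata sync (spdk_blob_sync_md())
 * makes the clean copy match the active one again. */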

struct spdk_blob_store {
	uint64_t			md_start; /* Offset from beginning of disk, in pages */
	uint32_t			md_len; /* Count, in pages */

	struct spdk_io_channel		*md_channel;
	uint32_t			max_channel_ops;

	struct spdk_thread		*md_thread;

	struct spdk_bs_dev		*dev;

	struct spdk_bit_array		*used_md_pages;		/* Protected by used_lock */
	struct spdk_bit_pool		*used_clusters;		/* Protected by used_lock */
	struct spdk_bit_array		*used_blobids;
	struct spdk_bit_array		*open_blobids;

	struct spdk_spinlock		used_lock;

	uint32_t			cluster_sz;
	uint64_t			total_clusters;
	uint64_t			total_data_clusters;
	uint64_t			num_free_clusters;	/* Protected by used_lock */
	uint64_t			pages_per_cluster;
	uint8_t				pages_per_cluster_shift;
	uint32_t			io_unit_size;

	spdk_blob_id			super_blob;
	struct spdk_bs_type		bstype;

	struct spdk_bs_cpl		unload_cpl;
	int				unload_err;

	RB_HEAD(spdk_blob_tree, spdk_blob) open_blobs;
	TAILQ_HEAD(, spdk_blob_list)	snapshots;

	bool				clean;

	spdk_bs_esnap_dev_create	esnap_bs_dev_create;
	void				*esnap_ctx;
};

struct spdk_bs_channel {
	struct spdk_bs_request_set	*req_mem;
	TAILQ_HEAD(, spdk_bs_request_set) reqs;

	struct spdk_blob_store		*bs;

	struct spdk_bs_dev		*dev;
	struct spdk_io_channel		*dev_channel;

	/* This page is only used during insert of a new cluster. */
	struct spdk_blob_md_page	*new_cluster_page;

	TAILQ_HEAD(, spdk_bs_request_set) need_cluster_alloc;
	TAILQ_HEAD(, spdk_bs_request_set) queued_io;
};

/** operation type */
enum spdk_blob_op_type {
	SPDK_BLOB_WRITE,
	SPDK_BLOB_READ,
	SPDK_BLOB_UNMAP,
	SPDK_BLOB_WRITE_ZEROES,
	SPDK_BLOB_WRITEV,
	SPDK_BLOB_READV,
};

/* back bs_dev */

#define BLOB_SNAPSHOT "SNAP"
#define SNAPSHOT_IN_PROGRESS "SNAPTMP"
#define SNAPSHOT_PENDING_REMOVAL "SNAPRM"
#define BLOB_EXTERNAL_SNAPSHOT_ID "EXTSNAP"

struct spdk_blob_bs_dev {
	struct spdk_bs_dev bs_dev;
	struct spdk_blob *blob;
};

/* On-Disk Data Structures
 *
 * The following data structures exist on disk.
 */
#define SPDK_BS_INITIAL_VERSION 1
#define SPDK_BS_VERSION 3 /* current version */

#pragma pack(push, 1)

#define SPDK_MD_MASK_TYPE_USED_PAGES 0
#define SPDK_MD_MASK_TYPE_USED_CLUSTERS 1
#define SPDK_MD_MASK_TYPE_USED_BLOBIDS 2

struct spdk_bs_md_mask {
	uint8_t		type;
	uint32_t	length; /* In bits */
	uint8_t		mask[0];
};
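
/* Illustrative example: a used-clusters mask for a 10-cluster blobstore is
 * written with type = SPDK_MD_MASK_TYPE_USED_CLUSTERS and length = 10, the
 * 10 bits occupying the first 2 bytes of mask[]. */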

#define SPDK_MD_DESCRIPTOR_TYPE_PADDING 0
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR 2
#define SPDK_MD_DESCRIPTOR_TYPE_FLAGS 3
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL 4

/* The following descriptors define the cluster layout in a blob.
 * EXTENT_RLE cannot be present in a blob's metadata at the same time
 * as the EXTENT_TABLE and EXTENT_PAGE descriptors. */

/* EXTENT_RLE descriptor holds an array of cluster indexes that point to
 * beginnings of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is part of the serialized
 * metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE 1
/* EXTENT_TABLE descriptor holds an array of md page offsets that
 * point to pages with an EXTENT_PAGE descriptor. Runs of 0's (extent
 * pages not yet allocated) are run-length encoded; non-zero values
 * are offsets of allocated pages. It is part of the serialized
 * metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE 5
/* EXTENT_PAGE descriptor holds an array of cluster indexes that point
 * to beginnings of allocated clusters, one entry per cluster, with 0
 * marking an unallocated cluster. It is NOT part of the serialized
 * metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE 6

struct spdk_blob_md_descriptor_xattr {
	uint8_t		type;
	uint32_t	length;

	uint16_t	name_length;
	uint16_t	value_length;

	char		name[0];
	/* String name immediately followed by string value. */
};

struct spdk_blob_md_descriptor_extent_rle {
	uint8_t		type;
	uint32_t	length;

	struct {
		uint32_t	cluster_idx;
		uint32_t	length; /* In units of clusters */
	} extents[0];
};

struct spdk_blob_md_descriptor_extent_table {
	uint8_t		type;
	uint32_t	length;

	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	struct {
		uint32_t	page_idx;
		uint32_t	num_pages; /* In units of pages */
	} extent_page[0];
};

struct spdk_blob_md_descriptor_extent_page {
	uint8_t		type;
	uint32_t	length;

	/* First cluster index in this extent page */
	uint32_t	start_cluster_idx;

	uint32_t	cluster_idx[0];
};
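
/* Illustrative example: with SPDK_EXTENTS_PER_EP == 512 (see below), a blob
 * with 2000 clusters needs ceil(2000 / 512) == 4 extent pages. Its
 * EXTENT_TABLE carries num_clusters = 2000 and extent_page[] runs describing
 * those 4 md pages; each EXTENT_PAGE then lists up to 512 cluster indexes
 * starting at its start_cluster_idx. */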

#define SPDK_BLOB_THIN_PROV		(1ULL << 0)
#define SPDK_BLOB_INTERNAL_XATTR	(1ULL << 1)
#define SPDK_BLOB_EXTENT_TABLE		(1ULL << 2)
#define SPDK_BLOB_EXTERNAL_SNAPSHOT	(1ULL << 3)
#define SPDK_BLOB_INVALID_FLAGS_MASK	(SPDK_BLOB_THIN_PROV | SPDK_BLOB_INTERNAL_XATTR | \
					 SPDK_BLOB_EXTENT_TABLE | SPDK_BLOB_EXTERNAL_SNAPSHOT)

#define SPDK_BLOB_READ_ONLY (1ULL << 0)
#define SPDK_BLOB_DATA_RO_FLAGS_MASK	SPDK_BLOB_READ_ONLY

#define SPDK_BLOB_CLEAR_METHOD_SHIFT 0
#define SPDK_BLOB_CLEAR_METHOD (3ULL << SPDK_BLOB_CLEAR_METHOD_SHIFT)
#define SPDK_BLOB_MD_RO_FLAGS_MASK	SPDK_BLOB_CLEAR_METHOD

struct spdk_blob_md_descriptor_flags {
	uint8_t		type;
	uint32_t	length;

	/*
	 * If a flag in invalid_flags is set that the application is not aware of,
	 *  it will not allow the blob to be opened.
	 */
	uint64_t	invalid_flags;

	/*
	 * If a flag in data_ro_flags is set that the application is not aware of,
	 *  allow the blob to be opened in data_read_only and md_read_only mode.
	 */
	uint64_t	data_ro_flags;

	/*
	 * If a flag in md_ro_flags is set that the application is not aware of,
	 *  allow the blob to be opened in md_read_only mode.
	 */
	uint64_t	md_ro_flags;
};
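
/* A minimal sketch of the unknown-flag policy described above (illustrative,
 * not the actual load path):
 *
 *	if (desc->invalid_flags & ~SPDK_BLOB_INVALID_FLAGS_MASK) {
 *		return -EINVAL;				// refuse to open the blob
 *	}
 *	if (desc->data_ro_flags & ~SPDK_BLOB_DATA_RO_FLAGS_MASK) {
 *		blob->data_ro = blob->md_ro = true;	// fall back to read-only
 *	}
 *	if (desc->md_ro_flags & ~SPDK_BLOB_MD_RO_FLAGS_MASK) {
 *		blob->md_ro = true;			// metadata read-only
 *	}
 */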

struct spdk_blob_md_descriptor {
	uint8_t		type;
	uint32_t	length;
};

#define SPDK_INVALID_MD_PAGE UINT32_MAX

struct spdk_blob_md_page {
	spdk_blob_id     id;

	uint32_t	sequence_num;
	uint32_t	reserved0;

	/* Descriptors here */
	uint8_t		descriptors[4072];

	uint32_t	next;
	uint32_t	crc;
};
#define SPDK_BS_PAGE_SIZE 0x1000
SPDK_STATIC_ASSERT(SPDK_BS_PAGE_SIZE == sizeof(struct spdk_blob_md_page), "Invalid md page size");

#define SPDK_BS_MAX_DESC_SIZE SPDK_SIZEOF_MEMBER(struct spdk_blob_md_page, descriptors)

/* Maximum number of extents that fit in a single Extent Page.
 * For an SPDK_BS_PAGE_SIZE of 4K, SPDK_EXTENTS_PER_EP is 512. */
#define SPDK_EXTENTS_PER_EP_MAX ((SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_extent_page)) / sizeof(uint32_t))
#define SPDK_EXTENTS_PER_EP (spdk_align64pow2(SPDK_EXTENTS_PER_EP_MAX + 1) >> 1u)
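
/* Worked through with the structures above: SPDK_BS_MAX_DESC_SIZE is 4072,
 * sizeof(struct spdk_blob_md_descriptor_extent_page) is 9 under pack(1), so
 * SPDK_EXTENTS_PER_EP_MAX = (4072 - 9) / 4 = 1015, and rounding down to a
 * power of two yields SPDK_EXTENTS_PER_EP = 512. */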

#define SPDK_BS_SUPER_BLOCK_SIG "SPDKBLOB"

struct spdk_bs_super_block {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	struct spdk_bs_type	bstype; /* blobstore type */

	uint32_t	used_blobid_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_blobid_mask_len; /* Count, in pages */

	uint64_t	size; /* size of blobstore in bytes */
	uint32_t	io_unit_size; /* Size of io unit in bytes */

	uint8_t		reserved[4000];
	uint32_t	crc;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block) == 0x1000, "Invalid super block size");
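
/* Size check, assuming the 16-byte struct spdk_bs_type from spdk/blob.h:
 * the fields above total 92 bytes, and 92 + 4000 reserved + 4 crc = 4096,
 * so the super block occupies exactly one 4 KiB page. */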

#pragma pack(pop)

struct spdk_bs_dev *bs_create_zeroes_dev(void);
struct spdk_bs_dev *bs_create_blob_bs_dev(struct spdk_blob *blob);

/* Unit Conversions
 *
 * The blobstore works with several different units:
 * - Byte: Self explanatory
 * - LBA: The logical blocks on the backing storage device.
 * - Page: The read/write units of blobs and metadata. This is
 *         an offset into a blob in units of 4KiB.
 * - Cluster Index: The disk is broken into a sequential list of
 *		    clusters. This is an index into that list,
 *		    counting from the beginning of the disk.
 *
 * NOTE: These conversions all act on simple magnitudes, not with any sort
 *        of knowledge about the blobs themselves. For instance, converting
 *        a page to an lba with the conversion function below simply converts
 *        a number of pages to an equivalent number of lbas, but that
 *        lba certainly isn't the right lba that corresponds to a page offset
 *        for a particular blob.
 */
static inline uint64_t
bs_byte_to_lba(struct spdk_blob_store *bs, uint64_t length)
{
	assert(length % bs->dev->blocklen == 0);

	return length / bs->dev->blocklen;
}
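
/* Illustrative example: with a 512-byte blocklen, bs_byte_to_lba(bs, 4096)
 * returns 8; byte counts that are not block-aligned trip the assert. */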

static inline uint64_t
bs_dev_byte_to_lba(struct spdk_bs_dev *bs_dev, uint64_t length)
{
	assert(length % bs_dev->blocklen == 0);

	return length / bs_dev->blocklen;
}

static inline uint64_t
bs_page_to_lba(struct spdk_blob_store *bs, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs->dev->blocklen;
}

static inline uint64_t
bs_md_page_to_lba(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < bs->md_len);
	return bs_page_to_lba(bs, page + bs->md_start);
}

static inline uint64_t
bs_dev_page_to_lba(struct spdk_bs_dev *bs_dev, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs_dev->blocklen;
}

static inline uint64_t
bs_io_unit_per_page(struct spdk_blob_store *bs)
{
	return SPDK_BS_PAGE_SIZE / bs->io_unit_size;
}
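
/* Illustrative example: with a 512-byte io_unit_size, each 4 KiB page holds
 * bs_io_unit_per_page(bs) == 8 io units, and bs_io_unit_to_page() below maps
 * io units 0-7 to page 0, 8-15 to page 1, and so on. */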

static inline uint64_t
bs_io_unit_to_page(struct spdk_blob_store *bs, uint64_t io_unit)
{
	return io_unit / bs_io_unit_per_page(bs);
}

static inline uint64_t
bs_cluster_to_page(struct spdk_blob_store *bs, uint32_t cluster)
{
	return (uint64_t)cluster * bs->pages_per_cluster;
}

static inline uint32_t
bs_page_to_cluster(struct spdk_blob_store *bs, uint64_t page)
{
	assert(page % bs->pages_per_cluster == 0);

	return page / bs->pages_per_cluster;
}

static inline uint64_t
bs_cluster_to_lba(struct spdk_blob_store *bs, uint32_t cluster)
{
	assert(bs->cluster_sz / bs->dev->blocklen > 0);

	return (uint64_t)cluster * (bs->cluster_sz / bs->dev->blocklen);
}
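
/* Illustrative example: with 1 MiB clusters on a 512-byte-block device,
 * each cluster spans 2048 LBAs, so bs_cluster_to_lba(bs, 3) == 6144. */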

static inline uint32_t
bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
{
	assert(lba % (bs->cluster_sz / bs->dev->blocklen) == 0);

	return lba / (bs->cluster_sz / bs->dev->blocklen);
}

static inline uint64_t
bs_io_unit_to_back_dev_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	return io_unit * (blob->bs->io_unit_size / blob->back_bs_dev->blocklen);
}

static inline uint64_t
bs_cluster_to_extent_table_id(uint64_t cluster_num)
{
	return cluster_num / SPDK_EXTENTS_PER_EP;
}

static inline uint32_t *
bs_cluster_to_extent_page(struct spdk_blob *blob, uint64_t cluster_num)
{
	uint64_t extent_table_id = bs_cluster_to_extent_table_id(cluster_num);

	assert(blob->use_extent_table);
	assert(extent_table_id < blob->active.extent_pages_array_size);

	return &blob->active.extent_pages[extent_table_id];
}
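
/* Illustrative example: with SPDK_EXTENTS_PER_EP == 512, cluster 1000 of a
 * blob lives in extent table entry 1000 / 512 == 1, so
 * bs_cluster_to_extent_page() returns &blob->active.extent_pages[1]. */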

static inline uint64_t
bs_io_units_per_cluster(struct spdk_blob *blob)
{
	uint64_t	io_units_per_cluster;
	uint8_t		shift = blob->bs->pages_per_cluster_shift;

	if (shift != 0) {
		io_units_per_cluster = bs_io_unit_per_page(blob->bs) << shift;
	} else {
		io_units_per_cluster = bs_io_unit_per_page(blob->bs) * blob->bs->pages_per_cluster;
	}

	return io_units_per_cluster;
}
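
/* Illustrative example: pages_per_cluster_shift is non-zero only when
 * pages_per_cluster is a power of two, letting a shift replace the multiply.
 * For 1 MiB clusters (256 pages, shift 8) and 512-byte io units,
 * bs_io_units_per_cluster() == 8 << 8 == 2048. */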

/* End basic conversions */

static inline uint64_t
bs_blobid_to_page(spdk_blob_id id)
{
	return id & 0xFFFFFFFF;
}

/* The blob id is a 64 bit number. The lower 32 bits are the page_idx. The upper
 * 32 bits are not currently used. Stick a 1 there just to catch bugs where the
 * code assumes blob id == page_idx.
 */
static inline spdk_blob_id
bs_page_to_blobid(uint64_t page_idx)
{
	if (page_idx > UINT32_MAX) {
		return SPDK_BLOBID_INVALID;
	}
	return SPDK_BLOB_BLOBID_HIGH_BIT | page_idx;
}
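
/* Illustrative round trip: bs_page_to_blobid(5) == 0x100000005, and
 * bs_blobid_to_page(0x100000005) == 5. Page indexes above UINT32_MAX cannot
 * be encoded and yield SPDK_BLOBID_INVALID. */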

/* Given an io unit offset into a blob, look up the LBA for the
 * start of that io unit.
 */
static inline uint64_t
bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint64_t	pages_per_cluster;
	uint8_t		shift;
	uint64_t	io_units_per_cluster;
	uint64_t	io_units_per_page;
	uint64_t	page;

	page = bs_io_unit_to_page(blob->bs, io_unit);

	pages_per_cluster = blob->bs->pages_per_cluster;
	shift = blob->bs->pages_per_cluster_shift;
	io_units_per_page = bs_io_unit_per_page(blob->bs);

	assert(page < blob->active.num_clusters * pages_per_cluster);

	if (shift != 0) {
		io_units_per_cluster = io_units_per_page << shift;
		lba = blob->active.clusters[page >> shift];
	} else {
		io_units_per_cluster = io_units_per_page * pages_per_cluster;
		lba = blob->active.clusters[page / pages_per_cluster];
	}
	lba += io_unit % io_units_per_cluster;
	return lba;
}
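
/* Worked example (4 KiB pages, 512-byte io units, 1 MiB clusters, shift 8):
 * io_unit 2050 maps to page 2050 / 8 == 256, cluster 256 >> 8 == 1, and
 * io_units_per_cluster == 2048, so the result is
 * blob->active.clusters[1] + (2050 % 2048) == clusters[1] + 2. */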

/* Given an io_unit offset into a blob, look up the number of io_units until the
 * next cluster boundary.
 */
static inline uint32_t
bs_num_io_units_to_cluster_boundary(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	io_units_per_cluster;

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	return io_units_per_cluster - (io_unit % io_units_per_cluster);
}
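
/* Illustrative example: with 2048 io units per cluster, io_unit 2050 sits
 * 2 io units into its cluster, so 2048 - 2 == 2046 io units remain before
 * the next cluster boundary. This bounds how much of an I/O can be issued
 * before crossing into the next (possibly differently placed) cluster. */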

/* Given an io_unit offset into a blob, look up the page offset into the blob
 * of the beginning of the current cluster.
 */
static inline uint32_t
bs_io_unit_to_cluster_start(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	pages_per_cluster;
	uint64_t	page;

	pages_per_cluster = blob->bs->pages_per_cluster;
	page = bs_io_unit_to_page(blob->bs, io_unit);

	return page - (page % pages_per_cluster);
}

/* Given an io_unit offset into a blob, look up the number of the cluster it
 * falls within.
 */
static inline uint32_t
bs_io_unit_to_cluster_number(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	pages_per_cluster = blob->bs->pages_per_cluster;
	uint8_t		shift = blob->bs->pages_per_cluster_shift;
	uint32_t	page_offset;

	page_offset = io_unit / bs_io_unit_per_page(blob->bs);
	if (shift != 0) {
		return page_offset >> shift;
	} else {
		return page_offset / pages_per_cluster;
	}
}

/* Given an io unit offset into a blob, look up whether it falls within an
 * allocated cluster. */
static inline bool
bs_io_unit_is_allocated(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint64_t	page;
	uint64_t	pages_per_cluster;
	uint8_t		shift;

	shift = blob->bs->pages_per_cluster_shift;
	pages_per_cluster = blob->bs->pages_per_cluster;
	page = bs_io_unit_to_page(blob->bs, io_unit);

	assert(page < blob->active.num_clusters * pages_per_cluster);

	if (shift != 0) {
		lba = blob->active.clusters[page >> shift];
	} else {
		lba = blob->active.clusters[page / pages_per_cluster];
	}

	if (lba == 0) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return false;
	} else {
		return true;
	}
}
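
/* Note: an LBA of 0 in the clusters array marks a cluster that has not been
 * allocated yet, which can only happen for thin-provisioned blobs; hence the
 * assert above. */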

#endif