/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef SPDK_BLOBSTORE_H
#define SPDK_BLOBSTORE_H

#include "spdk/assert.h"
#include "spdk/blob.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/tree.h"
#include "spdk/thread.h"

#include "request.h"

/* In Memory Data Structures
 *
 * The following data structures exist only in memory.
 */

#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
#define SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS 512
#define SPDK_BLOB_BLOBID_HIGH_BIT (1ULL << 32)

struct spdk_xattr {
	uint32_t	index;
	uint16_t	value_len;
	char		*name;
	void		*value;
	TAILQ_ENTRY(spdk_xattr)	link;
};

/* The mutable part of the blob data that is sync'd to
 * disk. The data in here is both mutable and persistent.
 */
struct spdk_blob_mut_data {
	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	/* Array of LBAs that are the beginning of a cluster, in
	 * the order they appear in the blob.
	 */
	uint64_t	*clusters;

	/* The size of the clusters array. This is greater than or
	 * equal to 'num_clusters'.
	 */
	size_t		cluster_array_size;

	/* The number of allocated clusters in the clusters array */
	uint64_t	num_allocated_clusters;

	/* Number of extent pages */
	uint64_t	num_extent_pages;

	/* Array of page offsets into the metadata region,
	 * containing extents. Can contain entries for pages
	 * that are not yet allocated. */
	uint32_t	*extent_pages;

	/* The size of the extent page array. This is greater than or
	 * equal to 'num_extent_pages'. */
	size_t		extent_pages_array_size;

	/* Number of metadata pages */
	uint32_t	num_pages;

	/* Array of page offsets into the metadata region, in
	 * the order of the metadata page sequence.
	 */
	uint32_t	*pages;
};

enum spdk_blob_state {
	/* The in-memory version of the blob does not match the
	 * on-disk version.
	 */
	SPDK_BLOB_STATE_DIRTY,

	/* The in-memory version of the blob matches the on-disk
	 * version.
	 */
	SPDK_BLOB_STATE_CLEAN,

	/* The in-memory state is being synchronized with the
	 * on-disk blob state. */
	SPDK_BLOB_STATE_LOADING,
};

TAILQ_HEAD(spdk_xattr_tailq, spdk_xattr);

struct spdk_blob_list {
	spdk_blob_id id;
	size_t clone_count;
	TAILQ_HEAD(, spdk_blob_list) clones;
	TAILQ_ENTRY(spdk_blob_list) link;
};

struct spdk_blob {
	struct spdk_blob_store *bs;

	uint32_t	open_ref;

	spdk_blob_id	id;
	spdk_blob_id	parent_id;

	enum spdk_blob_state		state;

	/* Two copies of the mutable data. One is a version
	 * that matches the last known data on disk (clean).
	 * The other (active) is the current data. Syncing
	 * a blob makes the clean match the active.
	 */
	struct spdk_blob_mut_data	clean;
	struct spdk_blob_mut_data	active;

	bool		invalid;
	bool		data_ro;
	bool		md_ro;

	uint64_t	invalid_flags;
	uint64_t	data_ro_flags;
	uint64_t	md_ro_flags;

	struct spdk_bs_dev *back_bs_dev;

	/* TODO: The xattrs are mutable, but we don't want to be
	 * copying them unnecessarily. Figure this out.
	 */
	struct spdk_xattr_tailq xattrs;
	struct spdk_xattr_tailq xattrs_internal;

	RB_ENTRY(spdk_blob) link;

	uint32_t frozen_refcnt;
	bool locked_operation_in_progress;
	enum blob_clear_method clear_method;
	bool extent_rle_found;
	bool extent_table_found;
	bool use_extent_table;

	/* Lists of pending metadata persist operations */
	TAILQ_HEAD(, spdk_blob_persist_ctx) pending_persists;
	TAILQ_HEAD(, spdk_blob_persist_ctx) persists_to_complete;

	/* Number of data clusters retrieved from the extent table;
	 * that many still have to be read from extent pages. */
	uint64_t	remaining_clusters_in_et;
};

struct spdk_blob_store {
	uint64_t			md_start; /* Offset from beginning of disk, in pages */
	uint32_t			md_len; /* Count, in pages */

	struct spdk_io_channel		*md_channel;
	uint32_t			max_channel_ops;

	struct spdk_thread		*md_thread;

	struct spdk_bs_dev		*dev;

	struct spdk_bit_array		*used_md_pages;		/* Protected by used_lock */
	struct spdk_bit_pool		*used_clusters;		/* Protected by used_lock */
	struct spdk_bit_array		*used_blobids;
	struct spdk_bit_array		*open_blobids;

	struct spdk_spinlock		used_lock;

	uint32_t			cluster_sz;
	uint64_t			total_clusters;
	uint64_t			total_data_clusters;
	uint64_t			num_free_clusters;	/* Protected by used_lock */
	uint64_t			pages_per_cluster;
	uint8_t				pages_per_cluster_shift;
	uint32_t			io_unit_size;

	spdk_blob_id			super_blob;
	struct spdk_bs_type		bstype;

	struct spdk_bs_cpl		unload_cpl;
	int				unload_err;

	RB_HEAD(spdk_blob_tree, spdk_blob) open_blobs;
	TAILQ_HEAD(, spdk_blob_list)	snapshots;

	bool				clean;

	spdk_bs_esnap_dev_create	esnap_bs_dev_create;
	void				*esnap_ctx;

	/* If external snapshot channels are being destroyed while
	 * the blobstore is being unloaded, the unload is deferred
	 * until after the channel destruction completes.
	 */
	uint32_t			esnap_channels_unloading;
	spdk_bs_op_complete		esnap_unload_cb_fn;
	void				*esnap_unload_cb_arg;
};

struct spdk_bs_channel {
	struct spdk_bs_request_set	*req_mem;
	TAILQ_HEAD(, spdk_bs_request_set) reqs;

	struct spdk_blob_store		*bs;

	struct spdk_bs_dev		*dev;
	struct spdk_io_channel		*dev_channel;

	/* This page is only used during insert of a new cluster. */
	struct spdk_blob_md_page	*new_cluster_page;

	TAILQ_HEAD(, spdk_bs_request_set) need_cluster_alloc;
	TAILQ_HEAD(, spdk_bs_request_set) queued_io;

	RB_HEAD(blob_esnap_channel_tree, blob_esnap_channel) esnap_channels;
};

/** operation type */
enum spdk_blob_op_type {
	SPDK_BLOB_WRITE,
	SPDK_BLOB_READ,
	SPDK_BLOB_UNMAP,
	SPDK_BLOB_WRITE_ZEROES,
	SPDK_BLOB_WRITEV,
	SPDK_BLOB_READV,
};

/* back bs_dev */

#define BLOB_SNAPSHOT "SNAP"
#define SNAPSHOT_IN_PROGRESS "SNAPTMP"
#define SNAPSHOT_PENDING_REMOVAL "SNAPRM"
#define BLOB_EXTERNAL_SNAPSHOT_ID "EXTSNAP"

struct spdk_blob_bs_dev {
	struct spdk_bs_dev bs_dev;
	struct spdk_blob *blob;
};

/* On-Disk Data Structures
 *
 * The following data structures exist on disk.
 */
#define SPDK_BS_INITIAL_VERSION 1
#define SPDK_BS_VERSION 3 /* current version */

#pragma pack(push, 1)

#define SPDK_MD_MASK_TYPE_USED_PAGES 0
#define SPDK_MD_MASK_TYPE_USED_CLUSTERS 1
#define SPDK_MD_MASK_TYPE_USED_BLOBIDS 2

struct spdk_bs_md_mask {
	uint8_t		type;
	uint32_t	length; /* In bits */
	uint8_t		mask[0];
};

#define SPDK_MD_DESCRIPTOR_TYPE_PADDING 0
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR 2
#define SPDK_MD_DESCRIPTOR_TYPE_FLAGS 3
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL 4

/* The following descriptors define the cluster layout in a blob.
 * EXTENT_RLE cannot be present in a blob's metadata at the same
 * time as the EXTENT_TABLE and EXTENT_PAGE descriptors. */

/* EXTENT_RLE descriptor holds an array of LBAs that point to the
 * beginning of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is part of the serialized
 * metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE 1
/* EXTENT_TABLE descriptor holds an array of md page offsets that
 * point to pages with an EXTENT_PAGE descriptor. The 0's in the array
 * are run-length encoded and represent extent pages that are not yet
 * allocated; non-zero values are offsets of allocated extent pages.
 * It is part of the serialized metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE 5
/* EXTENT_PAGE descriptor holds an array of LBAs that point to the
 * beginning of allocated clusters. The array is run-length encoded,
 * with 0's being unallocated clusters. It is NOT part of the
 * serialized metadata chain for a blob. */
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE 6

struct spdk_blob_md_descriptor_xattr {
	uint8_t		type;
	uint32_t	length;

	uint16_t	name_length;
	uint16_t	value_length;

	char		name[0];
	/* String name immediately followed by string value. */
};

struct spdk_blob_md_descriptor_extent_rle {
	uint8_t		type;
	uint32_t	length;

	struct {
		uint32_t	cluster_idx;
		uint32_t	length; /* In units of clusters */
	} extents[0];
};

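/*
 * Illustrative encoding example (an assumption drawn from the RLE description
 * above, not copied from the serializer): a blob whose first two clusters live
 * at cluster indexes 10 and 11 and whose next three clusters are unallocated
 * could be described by two extents[] entries,
 * { .cluster_idx = 10, .length = 2 } followed by { .cluster_idx = 0, .length = 3 }.
 */
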
struct spdk_blob_md_descriptor_extent_table {
	uint8_t		type;
	uint32_t	length;

	/* Number of data clusters in the blob */
	uint64_t	num_clusters;

	struct {
		uint32_t	page_idx;
		uint32_t	num_pages; /* In units of pages */
	} extent_page[0];
};

struct spdk_blob_md_descriptor_extent_page {
	uint8_t		type;
	uint32_t	length;

	/* First cluster index in this extent page */
	uint32_t	start_cluster_idx;

	uint32_t	cluster_idx[0];
};

#define SPDK_BLOB_THIN_PROV		(1ULL << 0)
#define SPDK_BLOB_INTERNAL_XATTR	(1ULL << 1)
#define SPDK_BLOB_EXTENT_TABLE		(1ULL << 2)
#define SPDK_BLOB_EXTERNAL_SNAPSHOT	(1ULL << 3)
#define SPDK_BLOB_INVALID_FLAGS_MASK	(SPDK_BLOB_THIN_PROV | SPDK_BLOB_INTERNAL_XATTR | \
					 SPDK_BLOB_EXTENT_TABLE | SPDK_BLOB_EXTERNAL_SNAPSHOT)

#define SPDK_BLOB_READ_ONLY (1ULL << 0)
#define SPDK_BLOB_DATA_RO_FLAGS_MASK	SPDK_BLOB_READ_ONLY

#define SPDK_BLOB_CLEAR_METHOD_SHIFT 0
#define SPDK_BLOB_CLEAR_METHOD (3ULL << SPDK_BLOB_CLEAR_METHOD_SHIFT)
#define SPDK_BLOB_MD_RO_FLAGS_MASK	SPDK_BLOB_CLEAR_METHOD

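/*
 * Hedged sketch of the clear-method encoding (the authoritative logic lives in
 * blobstore.c): a clear method value of 2 would be stored in md_ro_flags as
 * (2ULL << SPDK_BLOB_CLEAR_METHOD_SHIFT) and recovered with
 * (md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT.
 * The value-to-method mapping itself comes from enum blob_clear_method in the
 * public spdk/blob.h header, not from this file.
 */
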
struct spdk_blob_md_descriptor_flags {
	uint8_t		type;
	uint32_t	length;

	/*
	 * If a flag in invalid_flags is set that the application is not aware of,
	 *  the blob will not be allowed to be opened.
	 */
	uint64_t	invalid_flags;

	/*
	 * If a flag in data_ro_flags is set that the application is not aware of,
	 *  allow the blob to be opened in data_read_only and md_read_only mode.
	 */
	uint64_t	data_ro_flags;

	/*
	 * If a flag in md_ro_flags is set that the application is not aware of,
	 *  allow the blob to be opened in md_read_only mode.
	 */
	uint64_t	md_ro_flags;
};

struct spdk_blob_md_descriptor {
	uint8_t		type;
	uint32_t	length;
};

#define SPDK_INVALID_MD_PAGE UINT32_MAX

struct spdk_blob_md_page {
	spdk_blob_id     id;

	uint32_t	sequence_num;
	uint32_t	reserved0;

	/* Descriptors here */
	uint8_t		descriptors[4072];

	uint32_t	next;
	uint32_t	crc;
};
#define SPDK_BS_PAGE_SIZE 0x1000
SPDK_STATIC_ASSERT(SPDK_BS_PAGE_SIZE == sizeof(struct spdk_blob_md_page), "Invalid md page size");

#define SPDK_BS_MAX_DESC_SIZE SPDK_SIZEOF_MEMBER(struct spdk_blob_md_page, descriptors)

/* Maximum number of extents a single Extent Page can fit.
 * For an SPDK_BS_PAGE_SIZE of 4K, SPDK_EXTENTS_PER_EP would be 512. */
#define SPDK_EXTENTS_PER_EP_MAX ((SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_extent_page)) / sizeof(uint32_t))
#define SPDK_EXTENTS_PER_EP (spdk_align64pow2(SPDK_EXTENTS_PER_EP_MAX + 1) >> 1u)

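/*
 * Worked example of the arithmetic above for the 4 KiB metadata page: the
 * descriptors[] area is 4072 bytes and the fixed part of
 * struct spdk_blob_md_descriptor_extent_page is 9 bytes under pack(1)
 * (type + length + start_cluster_idx), so SPDK_EXTENTS_PER_EP_MAX is
 * (4072 - 9) / 4 = 1015. Rounding 1015 + 1 up to a power of two gives 1024,
 * and the shift right by one yields SPDK_EXTENTS_PER_EP = 512, as noted above.
 */
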
#define SPDK_BS_SUPER_BLOCK_SIG "SPDKBLOB"

struct spdk_bs_super_block {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	struct spdk_bs_type	bstype; /* blobstore type */

	uint32_t	used_blobid_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_blobid_mask_len; /* Count, in pages */

	uint64_t	size; /* size of blobstore in bytes */
	uint32_t	io_unit_size; /* Size of io unit in bytes */

	uint8_t		reserved[4000];
	uint32_t	crc;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block) == 0x1000, "Invalid super block size");

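/*
 * Minimal read-side sketch (an illustration only; the real checks live in
 * blobstore.c and also cover the CRC): after reading the first metadata page
 * of the device into 'sb', a loader could reject foreign or unsupported data
 * with something like:
 *
 *	if (memcmp(sb->signature, SPDK_BS_SUPER_BLOCK_SIG, sizeof(sb->signature)) != 0 ||
 *	    sb->version > SPDK_BS_VERSION || sb->version < SPDK_BS_INITIAL_VERSION) {
 *		return -EILSEQ;
 *	}
 */
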
#pragma pack(pop)

struct spdk_bs_dev *bs_create_zeroes_dev(void);
struct spdk_bs_dev *bs_create_blob_bs_dev(struct spdk_blob *blob);
struct spdk_io_channel *blob_esnap_get_io_channel(struct spdk_io_channel *ch,
		struct spdk_blob *blob);
bool blob_backed_with_zeroes_dev(struct spdk_blob *blob);

/* Unit Conversions
 *
 * The blobstore works with several different units:
 * - Byte: Self-explanatory
 * - LBA: The logical blocks on the backing storage device.
 * - Page: The read/write units of blobs and metadata. This is
 *         an offset into a blob in units of 4KiB.
 * - Cluster Index: The disk is broken into a sequential list of
 *		    clusters. This is the offset from the beginning
 *		    of the disk, in clusters.
 *
 * NOTE: These conversions all act on simple magnitudes, not with any sort
 *        of knowledge about the blobs themselves. For instance, converting
 *        a page to an lba with the conversion function below simply converts
 *        a number of pages to an equivalent number of lbas, but that
 *        lba certainly isn't the right lba that corresponds to a page offset
 *        for a particular blob.
 */
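/*
 * Usage sketch under assumed sizes (a 512-byte block device and an
 * io_unit_size of 512; these are examples, not guaranteed defaults):
 * bs_byte_to_lba(bs, 8192) returns 16, bs_page_to_lba(bs, 3) returns
 * 3 * 4096 / 512 = 24, and bs_io_unit_per_page(bs) returns 8.
 */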
static inline uint64_t
bs_byte_to_lba(struct spdk_blob_store *bs, uint64_t length)
{
	assert(length % bs->dev->blocklen == 0);

	return length / bs->dev->blocklen;
}

static inline uint64_t
bs_dev_byte_to_lba(struct spdk_bs_dev *bs_dev, uint64_t length)
{
	assert(length % bs_dev->blocklen == 0);

	return length / bs_dev->blocklen;
}

static inline uint64_t
bs_page_to_lba(struct spdk_blob_store *bs, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs->dev->blocklen;
}

static inline uint64_t
bs_md_page_to_lba(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < bs->md_len);
	return bs_page_to_lba(bs, page + bs->md_start);
}

static inline uint64_t
bs_dev_page_to_lba(struct spdk_bs_dev *bs_dev, uint64_t page)
{
	return page * SPDK_BS_PAGE_SIZE / bs_dev->blocklen;
}

static inline uint64_t
bs_io_unit_per_page(struct spdk_blob_store *bs)
{
	return SPDK_BS_PAGE_SIZE / bs->io_unit_size;
}

static inline uint64_t
bs_io_unit_to_page(struct spdk_blob_store *bs, uint64_t io_unit)
{
	return io_unit / bs_io_unit_per_page(bs);
}

static inline uint64_t
bs_cluster_to_page(struct spdk_blob_store *bs, uint32_t cluster)
{
	return (uint64_t)cluster * bs->pages_per_cluster;
}

static inline uint32_t
bs_page_to_cluster(struct spdk_blob_store *bs, uint64_t page)
{
	assert(page % bs->pages_per_cluster == 0);

	return page / bs->pages_per_cluster;
}

static inline uint64_t
bs_cluster_to_lba(struct spdk_blob_store *bs, uint32_t cluster)
{
	assert(bs->cluster_sz / bs->dev->blocklen > 0);

	return (uint64_t)cluster * (bs->cluster_sz / bs->dev->blocklen);
}

static inline uint32_t
bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
{
	assert(lba % (bs->cluster_sz / bs->dev->blocklen) == 0);

	return lba / (bs->cluster_sz / bs->dev->blocklen);
}

static inline uint64_t
bs_io_unit_to_back_dev_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	return io_unit * (blob->bs->io_unit_size / blob->back_bs_dev->blocklen);
}

static inline uint64_t
bs_cluster_to_extent_table_id(uint64_t cluster_num)
{
	return cluster_num / SPDK_EXTENTS_PER_EP;
}

static inline uint32_t *
bs_cluster_to_extent_page(struct spdk_blob *blob, uint64_t cluster_num)
{
	uint64_t extent_table_id = bs_cluster_to_extent_table_id(cluster_num);

	assert(blob->use_extent_table);
	assert(extent_table_id < blob->active.extent_pages_array_size);

	return &blob->active.extent_pages[extent_table_id];
}

static inline uint64_t
bs_io_units_per_cluster(struct spdk_blob *blob)
{
	uint64_t	io_units_per_cluster;
	uint8_t		shift = blob->bs->pages_per_cluster_shift;

	if (shift != 0) {
		io_units_per_cluster = bs_io_unit_per_page(blob->bs) << shift;
	} else {
		io_units_per_cluster = bs_io_unit_per_page(blob->bs) * blob->bs->pages_per_cluster;
	}

	return io_units_per_cluster;
}

/* End basic conversions */

static inline uint64_t
bs_blobid_to_page(spdk_blob_id id)
{
	return id & 0xFFFFFFFF;
}

/* The blob id is a 64 bit number. The lower 32 bits are the page_idx. The upper
 * 32 bits are not currently used. Stick a 1 there just to catch bugs where the
 * code assumes blob id == page_idx.
 */
static inline spdk_blob_id
bs_page_to_blobid(uint64_t page_idx)
{
	if (page_idx > UINT32_MAX) {
		return SPDK_BLOBID_INVALID;
	}
	return SPDK_BLOB_BLOBID_HIGH_BIT | page_idx;
}

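/*
 * Worked example: page_idx 5 maps to blob id 0x100000005
 * (SPDK_BLOB_BLOBID_HIGH_BIT | 5), and bs_blobid_to_page(0x100000005)
 * recovers 5 by masking off the upper 32 bits.
 */
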
/* Given an io unit offset into a blob, look up the LBA for the
 * start of that io unit.
 */
static inline uint64_t
bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint64_t	pages_per_cluster;
	uint8_t		shift;
	uint64_t	io_units_per_cluster;
	uint64_t	io_units_per_page;
	uint64_t	page;

	page = bs_io_unit_to_page(blob->bs, io_unit);

	pages_per_cluster = blob->bs->pages_per_cluster;
	shift = blob->bs->pages_per_cluster_shift;
	io_units_per_page = bs_io_unit_per_page(blob->bs);

	assert(page < blob->active.num_clusters * pages_per_cluster);

	if (shift != 0) {
		io_units_per_cluster = io_units_per_page << shift;
		lba = blob->active.clusters[page >> shift];
	} else {
		io_units_per_cluster = io_units_per_page * pages_per_cluster;
		lba = blob->active.clusters[page / pages_per_cluster];
	}
	lba += io_unit % io_units_per_cluster;
	return lba;
}

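/*
 * Worked example under assumed sizes (1 MiB clusters, 4 KiB pages, 512-byte
 * io units, so 256 pages and 2048 io units per cluster): io_unit 2050 falls
 * in logical cluster 1, so the function above returns
 * blob->active.clusters[1] + (2050 % 2048) = clusters[1] + 2.
 */
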
/* Given an io_unit offset into a blob, look up the number of io_units until the
 * next cluster boundary.
 */
static inline uint32_t
bs_num_io_units_to_cluster_boundary(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	io_units_per_cluster;

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	return io_units_per_cluster - (io_unit % io_units_per_cluster);
}

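/*
 * Continuing the worked example above (2048 io units per cluster): for
 * io_unit 2050, bs_num_io_units_to_cluster_boundary() returns
 * 2048 - (2050 % 2048) = 2046, the number of io units left before the next
 * cluster boundary.
 */
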
/* Given an io_unit offset into a blob, look up the page offset into the blob
 * of the beginning of the cluster containing that io_unit. */
static inline uint32_t
bs_io_unit_to_cluster_start(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	pages_per_cluster;
	uint64_t	page;

	pages_per_cluster = blob->bs->pages_per_cluster;
	page = bs_io_unit_to_page(blob->bs, io_unit);

	return page - (page % pages_per_cluster);
}

/* Given an io_unit offset into a blob, look up the number of the cluster
 * containing that io_unit. */
static inline uint32_t
bs_io_unit_to_cluster_number(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	pages_per_cluster = blob->bs->pages_per_cluster;
	uint8_t		shift = blob->bs->pages_per_cluster_shift;
	uint32_t	page_offset;

	page_offset = io_unit / bs_io_unit_per_page(blob->bs);
	if (shift != 0) {
		return page_offset >> shift;
	} else {
		return page_offset / pages_per_cluster;
	}
}

/* Given an io unit offset into a blob, look up whether it falls within an
 * allocated cluster. */
static inline bool
bs_io_unit_is_allocated(struct spdk_blob *blob, uint64_t io_unit)
{
	uint64_t	lba;
	uint64_t	page;
	uint64_t	pages_per_cluster;
	uint8_t		shift;

	shift = blob->bs->pages_per_cluster_shift;
	pages_per_cluster = blob->bs->pages_per_cluster;
	page = bs_io_unit_to_page(blob->bs, io_unit);

	assert(page < blob->active.num_clusters * pages_per_cluster);

	if (shift != 0) {
		lba = blob->active.clusters[page >> shift];
	} else {
		lba = blob->active.clusters[page / pages_per_cluster];
	}

	if (lba == 0) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return false;
	} else {
		return true;
	}
}

#endif