xref: /freebsd-src/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c (revision 87bf66d4a7488c496af110d4d05cc0273d49f82e)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the ARC buffered data (ABD)
 * infrastructure.
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation:
 * when we are at the limit of allocatable space, equal-size chunks let us
 * quickly reclaim enough space for a new large allocation (assuming it is
 * also scattered).
 *
 * ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#include <sys/vm.h>

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
} abd_sums;

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's for.  Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. a 2KB scatter allocation wastes
 * half of each page).  Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations.
 *
 * Linear ABD's for multi-page allocations are easier to use, and in some
 * cases they allow buffer copying to be avoided.  But allocating and
 * especially freeing multi-page linear ABD's are expensive operations due to
 * KVA mapping and unmapping, and over time they cause KVA fragmentation.
 */
static size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;

SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
	&zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_scatter_min_size, CTLFLAG_RWTUN,
	&zfs_abd_scatter_min_size, 0, "Minimum size of scatter allocations.");

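/*
 * Both knobs are exposed under vfs.zfs and may be set as loader tunables or
 * at runtime.  As an illustrative (not recommended) example, forcing all
 * allocations of up to 64KB to be linear:
 *
 *	sysctl vfs.zfs.abd_scatter_min_size=65537
 */
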
kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
 * just a single zero'd page-sized buffer. This allows us to conserve
 * memory by only using a single zero buffer for the scatter chunks.
 */
abd_t *abd_zero_scatter = NULL;

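/*
 * Convert a byte count into the number of page-sized chunks needed to
 * hold it, rounding up.
 */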
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return ((size + PAGE_MASK) >> PAGE_SHIFT);
}

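/*
 * Number of chunks currently backing a scatter ABD, accounting for the
 * offset of the data within the first chunk.
 */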
static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
	ASSERT(!abd_is_linear(abd));
	return (abd_chunkcnt_for_bytes(
	    ABD_SCATTER(abd).abd_offset + abd->abd_size));
}

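/*
 * Decide whether an allocation of the given size should be linear; see
 * the discussion of zfs_abd_scatter_min_size above.
 */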
boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}

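/*
 * Update the scatter kstats when a scatter ABD is allocated or freed.
 * The "waste" is the slack between the ABD's payload and the whole pages
 * backing it; e.g. with 4KB pages, a 5KB ABD occupies two pages and
 * wastes 3KB of the second one.
 */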
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	uint_t n;

	n = abd_scatter_chunkcnt(abd);
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = (n << PAGE_SHIFT) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	uint_t i, n;

	/*
	 * There are no scatter linear pages in FreeBSD, so it is an
	 * error if the ABD has been marked as a linear page.
	 */
	ASSERT(!abd_is_linear_page(abd));
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <, PAGE_SIZE);
	n = abd_scatter_chunkcnt(abd);
	for (i = 0; i < n; i++) {
		ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
	}
}

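/*
 * Allocate the page-sized chunks backing a scatter ABD from the
 * abd_chunk kmem cache.
 */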
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	uint_t i, n;

	n = abd_chunkcnt_for_bytes(size);
	for (i = 0; i < n; i++) {
		ABD_SCATTER(abd).abd_chunks[i] =
		    kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
	}
}

void
abd_free_chunks(abd_t *abd)
{
	uint_t i, n;

	/*
	 * Scatter ABDs may be constructed by abd_alloc_from_pages() from
	 * an array of pages, in which case the pages should not be freed
	 * here.
	 */
	if (!abd_is_from_pages(abd)) {
		n = abd_scatter_chunkcnt(abd);
		for (i = 0; i < n; i++) {
			kmem_cache_free(abd_chunk_cache,
			    ABD_SCATTER(abd).abd_chunks[i]);
		}
	}
}

abd_t *
abd_alloc_struct_impl(size_t size)
{
	uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
	/*
	 * In the event we are allocating a gang ABD, the size passed in
	 * will be 0. We must make sure to set abd_size to the size of an
	 * ABD struct as opposed to an ABD scatter with 0 chunks. The gang
	 * ABD struct allocation accounts for an additional 24 bytes over
	 * a scatter ABD with 0 chunks.
	 */
	size_t abd_size = MAX(sizeof (abd_t),
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
	abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, abd_size);

	return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
	uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
	    abd_scatter_chunkcnt(abd);
	ssize_t size = MAX(sizeof (abd_t),
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
	kmem_free(abd, size);
	ABDSTAT_INCR(abdstat_struct_size, -size);
}

/*
 * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each chunk in
 * the scatterlist points at the same zero'd region.
 */
_Static_assert(ZERO_REGION_SIZE >= PAGE_SIZE, "zero_region too small");
static void
abd_alloc_zero_scatter(void)
{
	uint_t i, n;

	n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;

	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;

	for (i = 0; i < n; i++) {
		ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
		    __DECONST(void *, zero_region);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGE_SIZE);
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGE_SIZE);

	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
}

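/*
 * kstat update callback: fold the current values of the wmsum counters
 * into the named kstats.  Writes are rejected.
 */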
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	return (0);
}

void
abd_init(void)
{
	abd_chunk_cache = kmem_cache_create("abd_chunk", PAGE_SIZE, 0,
	    NULL, NULL, NULL, NULL, 0, KMC_NODEBUG | KMC_RECLAIMABLE);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);

	kmem_cache_destroy(abd_chunk_cache);
	abd_chunk_cache = NULL;
}

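/*
 * A linear-page ABD wraps a single page mapped through an sf_buf by
 * abd_alloc_from_pages(); tear down that mapping here.
 */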
void
abd_free_linear_page(abd_t *abd)
{
	ASSERT3P(abd->abd_u.abd_linear.sf, !=, NULL);
	zfs_unmap_page(abd->abd_u.abd_linear.sf);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs, however if ldi_strategy() can ever issue I/Os
 * using a scatter/gather list we should switch to that and replace this call
 * with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}

static abd_t *
abd_get_offset_from_pages(abd_t *abd, abd_t *sabd, size_t chunkcnt,
    size_t new_offset)
{
	ASSERT(abd_is_from_pages(sabd));

	/*
	 * Set the child chunks to point at the parent chunks, since
	 * the chunks are just pages and we don't want to copy them.
	 */
	size_t parent_offset = new_offset / PAGE_SIZE;
	ASSERT3U(parent_offset, <, abd_scatter_chunkcnt(sabd));
	for (int i = 0; i < chunkcnt; i++)
		ABD_SCATTER(abd).abd_chunks[i] =
		    ABD_SCATTER(sabd).abd_chunks[parent_offset + i];

	abd->abd_flags |= ABD_FLAG_FROM_PAGES;
	return (abd);
}

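/*
 * Create (or complete) a child ABD referencing a subrange of the scatter
 * ABD sabd, starting at offset off and spanning size bytes, without
 * copying any data.
 */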
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
	size_t chunkcnt = abd_chunkcnt_for_bytes(
	    (new_offset & PAGE_MASK) + size);

	ASSERT3U(chunkcnt, <=, abd_scatter_chunkcnt(sabd));

	/*
	 * If an abd struct is provided, it is only the minimum size.  If we
	 * need additional chunks, we need to allocate a new struct.
	 */
	if (abd != NULL &&
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]) >
	    sizeof (abd_t)) {
		abd = NULL;
	}

	if (abd == NULL)
		abd = abd_alloc_struct(chunkcnt << PAGE_SHIFT);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	ABD_SCATTER(abd).abd_offset = new_offset & PAGE_MASK;

	if (abd_is_from_pages(sabd)) {
		return (abd_get_offset_from_pages(abd, sabd, chunkcnt,
		    new_offset));
	}

	/* Copy the scatterlist starting at the correct offset */
	(void) memcpy(&ABD_SCATTER(abd).abd_chunks,
	    &ABD_SCATTER(sabd).abd_chunks[new_offset >> PAGE_SHIFT],
	    chunkcnt * sizeof (void *));

	return (abd);
}

/*
 * Allocate a scatter ABD structure from user pages.
 */
abd_t *
abd_alloc_from_pages(vm_page_t *pages, unsigned long offset, uint64_t size)
{
	VERIFY3U(size, <=, DMU_MAX_ACCESS);
	ASSERT3U(offset, <, PAGE_SIZE);
	ASSERT3P(pages, !=, NULL);

	abd_t *abd = abd_alloc_struct(size);
	abd->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_FROM_PAGES;
	abd->abd_size = size;

	if ((offset + size) <= PAGE_SIZE) {
		/*
		 * There is only a single page worth of data, so we will just
		 * use a linear ABD. We have to make sure to take into account
		 * the offset though. In all other cases our offset will be 0
		 * as we are always PAGE_SIZE aligned.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_LINEAR_PAGE;
		ABD_LINEAR_BUF(abd) = (char *)zfs_map_page(pages[0],
		    &abd->abd_u.abd_linear.sf) + offset;
	} else {
		ABD_SCATTER(abd).abd_offset = offset;
		ASSERT0(ABD_SCATTER(abd).abd_offset);

		/*
		 * Set the ABD's abd_chunks to point at the user pages.
		 */
		for (int i = 0; i < abd_chunkcnt_for_bytes(size); i++)
			ABD_SCATTER(abd).abd_chunks[i] = pages[i];
	}

	return (abd);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	memset(aiter, 0, sizeof (struct abd_iter));
	aiter->iter_abd = abd;
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the iterator is already exhausted,
 * in which case it does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
}

/*
 * Map the current chunk into aiter. This can be safely called when the
 * iterator is already exhausted, in which case it does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	abd_t *abd = aiter->iter_abd;
	size_t offset = aiter->iter_pos;
	if (abd_is_linear(abd)) {
		aiter->iter_mapsize = abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(abd);
	} else if (abd_is_from_pages(abd)) {
		aiter->sf = NULL;
		offset += ABD_SCATTER(abd).abd_offset;
		size_t index = offset / PAGE_SIZE;
		offset &= PAGE_MASK;
		aiter->iter_mapsize = MIN(PAGE_SIZE - offset,
		    abd->abd_size - aiter->iter_pos);
		paddr = zfs_map_page(
		    ABD_SCATTER(aiter->iter_abd).abd_chunks[index],
		    &aiter->sf);
	} else {
		offset += ABD_SCATTER(abd).abd_offset;
		paddr = ABD_SCATTER(abd).abd_chunks[offset >> PAGE_SHIFT];
		offset &= PAGE_MASK;
		aiter->iter_mapsize = MIN(PAGE_SIZE - offset,
		    abd->abd_size - aiter->iter_pos);
	}
	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the
 * iterator is already exhausted, in which case it does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	if (!abd_iter_at_end(aiter)) {
		ASSERT3P(aiter->iter_mapaddr, !=, NULL);
		ASSERT3U(aiter->iter_mapsize, >, 0);
	}

	if (abd_is_from_pages(aiter->iter_abd) &&
	    !abd_is_linear_page(aiter->iter_abd)) {
		ASSERT3P(aiter->sf, !=, NULL);
		zfs_unmap_page(aiter->sf);
	}

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

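/*
 * A typical iteration over an ABD with this interface looks like the
 * following sketch (illustrative only; the real consumers live in the
 * common abd.c code):
 *
 *	struct abd_iter aiter;
 *	size_t len;
 *
 *	abd_iter_init(&aiter, abd);
 *	while (!abd_iter_at_end(&aiter)) {
 *		abd_iter_map(&aiter);
 *		len = aiter.iter_mapsize;
 *		... operate on len bytes at aiter.iter_mapaddr ...
 *		abd_iter_unmap(&aiter);
 *		abd_iter_advance(&aiter, len);
 *	}
 */
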
void
abd_cache_reap_now(void)
{
	kmem_cache_reap_soon(abd_chunk_cache);
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
#ifdef ZFS_DEBUG
	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
#endif
	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

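/*
 * Typical borrow/return usage (an illustrative sketch; consume_buf() is a
 * hypothetical routine that requires a contiguous buffer):
 *
 *	void *buf = abd_borrow_buf_copy(abd, size);
 *	consume_buf(buf, size);
 *	abd_return_buf_copy(abd, buf, size);
 */
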
/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD. If you want any changes you made to
 * buf to be copied back to abd, use abd_return_buf_copy() instead. If the
 * ABD is not constructed from user pages for Direct I/O, then an ASSERT
 * checks that the contents of the buffer have not changed since it was
 * borrowed. We cannot ASSERT that the contents of the buffer have not
 * changed if it is composed of user pages. While Direct I/O write pages are
 * placed under write protection and cannot be changed, this is not the case
 * for Direct I/O reads. The pages of a Direct I/O read could be manipulated
 * at any time. Checksum verifications in the ZIO pipeline check for this
 * issue and handle it by returning an error on checksum verification failure.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
#ifdef ZFS_DEBUG
	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
#endif
	if (abd_is_from_pages(abd)) {
		if (!abd_is_linear_page(abd))
			zio_buf_free(buf, n);
	} else if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else if (abd_is_gang(abd)) {
#ifdef ZFS_DEBUG
		/*
		 * We have to be careful with gang ABD's that we do not ASSERT
		 * for any ABD's that contain user pages from Direct I/O. See
		 * the comment above about Direct I/O read buffers possibly
		 * being manipulated. In order to handle this, we just iterate
		 * through the gang ABD and only verify ABD's that are not from
		 * user pages.
		 */
		void *cmp_buf = buf;

		for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
		    cabd != NULL;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			if (!abd_is_from_pages(cabd)) {
				ASSERT0(abd_cmp_buf(cabd, cmp_buf,
				    cabd->abd_size));
			}
			cmp_buf = (char *)cmp_buf + cabd->abd_size;
		}
#endif
		zio_buf_free(buf, n);
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}