/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the ARC buffered data (ABD).
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation:
 * when we are at the limit of allocatable space, equal-size chunks allow us
 * to quickly reclaim enough space for a new large allocation (assuming it
 * is also scattered).
 *
 * ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
} abd_sums;

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABDs for.  Smaller allocations will use linear ABDs, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABDs use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. a 2KB scatter allocation
 * wastes half of each page).  Using linear ABDs for small allocations means
 * that they will be put on slabs which contain many allocations.
 *
 * Linear ABDs for multi-page allocations are easier to use, and in some
 * cases they allow buffer copying to be avoided.  But allocating and
 * especially freeing multi-page linear ABDs are expensive operations due to
 * KVA mapping and unmapping, and over time they cause KVA fragmentation.
 */
static size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;

SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
	&zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_scatter_min_size, CTLFLAG_RWTUN,
	&zfs_abd_scatter_min_size, 0, "Minimum size of scatter allocations.");
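
/*
 * Both knobs above are loader tunables as well as runtime sysctls
 * (CTLFLAG_RWTUN).  For example (illustrative value, not a recommendation,
 * assuming 4 KB pages):
 *
 *	sysctl vfs.zfs.abd_scatter_min_size=8193
 *
 * would make all allocations of two pages or less use linear ABDs.
 */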

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

/*
 * We use a scattered SPA_MAXBLOCKSIZE-sized ABD whose chunks all point to
 * a single zeroed page-sized buffer.  This allows us to conserve memory by
 * only using a single zero buffer for the scatter chunks.
 */
abd_t *abd_zero_scatter = NULL;

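/*
 * Convert a byte count into the number of page-sized chunks needed to hold
 * it, rounding up.  E.g. with 4 KB pages, sizes 1..4096 need one chunk and
 * 4097 needs two.
 */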
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return ((size + PAGE_MASK) >> PAGE_SHIFT);
}

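/*
 * Number of chunks backing a scatter ABD.  The first chunk may be used
 * starting at abd_offset bytes in, so the count must cover
 * abd_offset + abd_size bytes.
 */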
static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
	ASSERT(!abd_is_linear(abd));
	return (abd_chunkcnt_for_bytes(
	    ABD_SCATTER(abd).abd_offset + abd->abd_size));
}

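/*
 * Decide whether an allocation of the given size should be linear: either
 * scatter ABDs are disabled entirely, or the request falls below the
 * zfs_abd_scatter_min_size threshold described above.
 */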
boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	uint_t n = abd_scatter_chunkcnt(abd);
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
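	/*
	 * Waste is the space allocated in whole pages (n << PAGE_SHIFT)
	 * beyond what the ABD actually stores (abd_size).
	 */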
	int waste = (n << PAGE_SHIFT) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	uint_t i, n;

	/*
	 * There are no scatter linear pages in FreeBSD, so it is an
	 * error if the ABD has been marked as a linear page.
	 */
	ASSERT(!abd_is_linear_page(abd));
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <, PAGE_SIZE);
	n = abd_scatter_chunkcnt(abd);
	for (i = 0; i < n; i++) {
		ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
	}
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	uint_t i, n;

	n = abd_chunkcnt_for_bytes(size);
	for (i = 0; i < n; i++) {
		ABD_SCATTER(abd).abd_chunks[i] =
		    kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
	}
}

void
abd_free_chunks(abd_t *abd)
{
	uint_t i, n;

	n = abd_scatter_chunkcnt(abd);
	for (i = 0; i < n; i++) {
		kmem_cache_free(abd_chunk_cache,
		    ABD_SCATTER(abd).abd_chunks[i]);
	}
}

abd_t *
abd_alloc_struct_impl(size_t size)
{
	uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
	/*
	 * In the event we are allocating a gang ABD, the size passed in
	 * will be 0. We must make sure to set abd_size to the size of an
	 * ABD struct as opposed to an ABD scatter with 0 chunks. The gang
	 * ABD struct allocation accounts for an additional 24 bytes over
	 * a scatter ABD with 0 chunks.
	 */
	size_t abd_size = MAX(sizeof (abd_t),
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
	abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, abd_size);

	return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
	uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
	    abd_scatter_chunkcnt(abd);
	ssize_t size = MAX(sizeof (abd_t),
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
	kmem_free(abd, size);
	ABDSTAT_INCR(abdstat_struct_size, -size);
}

/*
 * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each chunk in
 * the scatterlist points at the same zeroed region.
 */
_Static_assert(ZERO_REGION_SIZE >= PAGE_SIZE, "zero_region too small");
static void
abd_alloc_zero_scatter(void)
{
	uint_t i, n;

	n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;

	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;

	for (i = 0; i < n; i++) {
		ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
		    __DECONST(void *, zero_region);
	}

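	/*
	 * Every chunk aliases the kernel's shared zero_region, so only a
	 * single page of data is charged to the scatter statistics.
	 */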
	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGE_SIZE);
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGE_SIZE);

	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
}

static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	return (0);
}

void
abd_init(void)
{
	abd_chunk_cache = kmem_cache_create("abd_chunk", PAGE_SIZE, 0,
	    NULL, NULL, NULL, NULL, 0, KMC_NODEBUG | KMC_RECLAIMABLE);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);

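	/*
	 * Publish the sums as a named kstat; on FreeBSD these are typically
	 * visible under the kstat.zfs.misc.abdstats sysctl tree.
	 */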
	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);

	kmem_cache_destroy(abd_chunk_cache);
	abd_chunk_cache = NULL;
}

void
abd_free_linear_page(abd_t *abd)
{
	/*
	 * FreeBSD does not have scatter linear pages, so reaching
	 * this is an error.
	 */
	VERIFY(0);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs; however, if ldi_strategy() can ever issue
 * I/Os using a scatter/gather list, we should switch to that and replace this
 * call with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

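	/*
	 * The new view may begin partway into a chunk: fold the parent's
	 * first-chunk offset into 'off', then count how many chunks the
	 * requested window spans.
	 */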
	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
	size_t chunkcnt = abd_chunkcnt_for_bytes(
	    (new_offset & PAGE_MASK) + size);

	ASSERT3U(chunkcnt, <=, abd_scatter_chunkcnt(sabd));

	/*
	 * If an abd struct is provided, it is only the minimum size.  If we
	 * need additional chunks, we need to allocate a new struct.
	 */
	if (abd != NULL &&
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]) >
	    sizeof (abd_t)) {
		abd = NULL;
	}

	if (abd == NULL)
		abd = abd_alloc_struct(chunkcnt << PAGE_SHIFT);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	ABD_SCATTER(abd).abd_offset = new_offset & PAGE_MASK;

	/* Copy the scatterlist starting at the correct offset */
	(void) memcpy(&ABD_SCATTER(abd).abd_chunks,
	    &ABD_SCATTER(sabd).abd_chunks[new_offset >> PAGE_SHIFT],
	    chunkcnt * sizeof (void *));

	return (abd);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	memset(aiter, 0, sizeof (struct abd_iter));
	aiter->iter_abd = abd;
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the iterator is already exhausted,
 * in which case it does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
}

/*
 * Map the current chunk into aiter. This can be safely called when the
 * iterator is already exhausted, in which case it does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	abd_t *abd = aiter->iter_abd;
	size_t offset = aiter->iter_pos;
	if (abd_is_linear(abd)) {
		aiter->iter_mapsize = abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(abd);
	} else {
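		/*
		 * Translate the iterator position into a chunk index and an
		 * intra-page offset; the mapping never crosses a page
		 * boundary.
		 */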
		offset += ABD_SCATTER(abd).abd_offset;
		paddr = ABD_SCATTER(abd).abd_chunks[offset >> PAGE_SHIFT];
		offset &= PAGE_MASK;
		aiter->iter_mapsize = MIN(PAGE_SIZE - offset,
		    abd->abd_size - aiter->iter_pos);
	}
	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the
 * iterator is already exhausted, in which case it does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	if (!abd_iter_at_end(aiter)) {
		ASSERT3P(aiter->iter_mapaddr, !=, NULL);
		ASSERT3U(aiter->iter_mapsize, >, 0);
	}

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
	kmem_cache_reap_soon(abd_chunk_cache);
}