/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation:
 * when we are at the limit of allocatable space, equal-size chunks allow us
 * to quickly reclaim enough space for a new large allocation (assuming it
 * is also scattered).
 *
 * ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
} abd_sums;

/*
 * The size of the chunks ABD allocates. Because the sizes allocated from the
 * kmem_cache can't change, this tunable can only be modified at boot. Changing
 * it at runtime would cause ABD iteration to work incorrectly for ABDs which
 * were allocated with the old size, so a safeguard has been put in place which
 * will cause the machine to panic if you change it and try to access the data
 * within a scattered ABD.
 */
size_t zfs_abd_chunk_size = 4096;

#if defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
	&zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN,
	&zfs_abd_chunk_size, 0, "The size of the chunks ABD allocates");
#endif

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
 * all a single zeroed buffer of zfs_abd_chunk_size bytes. This
 * allows us to conserve memory by only using a single zero buffer
 * for the scatter chunks.
 */
abd_t *abd_zero_scatter = NULL;
static char *abd_zero_buf = NULL;

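/* Return a single chunk to the chunk kmem cache. */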
static void
abd_free_chunk(void *c)
{
	kmem_cache_free(abd_chunk_cache, c);
}

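/*
 * Convert a byte count to a chunk count, rounding up: with the default
 * 4096-byte chunks, a 10000-byte request needs three chunks.
 */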
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}

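/*
 * Number of chunks backing a scatter ABD, including the partial chunk
 * implied by abd_offset.
 */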
static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
	ASSERT(!abd_is_linear(abd));
	return (abd_chunkcnt_for_bytes(
	    ABD_SCATTER(abd).abd_offset + abd->abd_size));
}

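/*
 * Report whether an allocation of `size` bytes should be linear:
 * anything that fits in a single chunk is.
 */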
boolean_t
abd_size_alloc_linear(size_t size)
{
	return (size <= zfs_abd_chunk_size ? B_TRUE : B_FALSE);
}

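/*
 * Adjust the scatter kstats and ARC space accounting when a scatter ABD
 * is allocated (ABDSTAT_INCR) or freed (ABDSTAT_DECR); the waste is the
 * unused tail of the ABD's last chunk.
 */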
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	uint_t n = abd_scatter_chunkcnt(abd);
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = n * zfs_abd_chunk_size - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}

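/* Adjust the linear kstats when a linear ABD is allocated or freed. */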
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	uint_t i, n;

	/*
	 * There are no scatter linear pages in FreeBSD, so it is an
	 * error if the ABD has been marked as a linear page.
	 */
	ASSERT(!abd_is_linear_page(abd));
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <, zfs_abd_chunk_size);
	n = abd_scatter_chunkcnt(abd);
	for (i = 0; i < n; i++) {
		ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
	}
}

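/* Allocate the backing chunks for a scatter ABD of `size` bytes. */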
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	uint_t i, n;

	n = abd_chunkcnt_for_bytes(size);
	for (i = 0; i < n; i++) {
		void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
		ASSERT3P(c, !=, NULL);
		ABD_SCATTER(abd).abd_chunks[i] = c;
	}
	ABD_SCATTER(abd).abd_chunk_size = zfs_abd_chunk_size;
}

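/* Return every chunk backing a scatter ABD to the chunk cache. */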
void
abd_free_chunks(abd_t *abd)
{
	uint_t i, n;

	n = abd_scatter_chunkcnt(abd);
	for (i = 0; i < n; i++) {
		abd_free_chunk(ABD_SCATTER(abd).abd_chunks[i]);
	}
}

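/*
 * Allocate an abd_t with room for the chunk pointer array implied by
 * `size`; gang ABDs pass a size of 0 and get a bare abd_t.
 */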
abd_t *
abd_alloc_struct_impl(size_t size)
{
	uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
	/*
	 * In the event we are allocating a gang ABD, the size passed in
	 * will be 0. We must make sure to set abd_size to the size of an
	 * ABD struct as opposed to an ABD scatter with 0 chunks. The gang
	 * ABD struct allocation accounts for an additional 24 bytes over
	 * a scatter ABD with 0 chunks.
	 */
	size_t abd_size = MAX(sizeof (abd_t),
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
	abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, abd_size);

	return (abd);
}

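/* Free an abd_t, recomputing the size that abd_alloc_struct_impl() used. */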
void
abd_free_struct_impl(abd_t *abd)
{
	uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
	    abd_scatter_chunkcnt(abd);
	ssize_t size = MAX(sizeof (abd_t),
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
	kmem_free(abd, size);
	ABDSTAT_INCR(abdstat_struct_size, -size);
}

/*
 * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each chunk in
 * the scatterlist will be set to abd_zero_buf.
 */
static void
abd_alloc_zero_scatter(void)
{
	uint_t i, n;

	n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);

	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;

	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
	    zfs_abd_chunk_size;

	for (i = 0; i < n; i++) {
		ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
		    abd_zero_buf;
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, zfs_abd_chunk_size);
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)zfs_abd_chunk_size);

	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	kmem_free(abd_zero_buf, zfs_abd_chunk_size);
}

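/*
 * kstat update callback: fold the wmsum counters into the named kstats.
 * The stats are read-only; writes return EACCES.
 */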
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	return (0);
}

void
abd_init(void)
{
	abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
	    NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);

	kmem_cache_destroy(abd_chunk_cache);
	abd_chunk_cache = NULL;
}

void
abd_free_linear_page(abd_t *abd)
{
	/*
	 * FreeBSD does not have scatter linear pages,
	 * so reaching this is an error.
	 */
	VERIFY(0);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs; however, if ldi_strategy() can ever issue
 * I/Os using a scatter/gather list, we should switch to that and replace this
 * call with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}

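/*
 * Produce an ABD viewing `sabd` from byte offset `off` onward. Only the
 * chunk pointers are copied; the underlying data is shared, not copied.
 */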
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off)
{
	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
	uint_t chunkcnt = abd_scatter_chunkcnt(sabd) -
	    (new_offset / zfs_abd_chunk_size);

	/*
	 * If an abd struct is provided, it is only the minimum size.  If we
	 * need additional chunks, we need to allocate a new struct.
	 */
	if (abd != NULL &&
	    offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]) >
	    sizeof (abd_t)) {
		abd = NULL;
	}

	if (abd == NULL)
		abd = abd_alloc_struct(chunkcnt * zfs_abd_chunk_size);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	ABD_SCATTER(abd).abd_offset = new_offset % zfs_abd_chunk_size;
	ABD_SCATTER(abd).abd_chunk_size = zfs_abd_chunk_size;

	/* Copy the scatterlist starting at the correct offset */
	(void) memcpy(&ABD_SCATTER(abd).abd_chunks,
	    &ABD_SCATTER(sabd).abd_chunks[new_offset /
	    zfs_abd_chunk_size],
	    chunkcnt * sizeof (void *));

	return (abd);
}

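/*
 * Byte offset within, and index of, the chunk containing the iterator's
 * current position.
 */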
static inline size_t
abd_iter_scatter_chunk_offset(struct abd_iter *aiter)
{
	ASSERT(!abd_is_linear(aiter->iter_abd));
	return ((ABD_SCATTER(aiter->iter_abd).abd_offset +
	    aiter->iter_pos) % zfs_abd_chunk_size);
}

static inline size_t
abd_iter_scatter_chunk_index(struct abd_iter *aiter)
{
	ASSERT(!abd_is_linear(aiter->iter_abd));
	return ((ABD_SCATTER(aiter->iter_abd).abd_offset +
	    aiter->iter_pos) / zfs_abd_chunk_size);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_pos = 0;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been exhausted,
 * in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* Panic if someone has changed zfs_abd_chunk_size */
	IMPLY(!abd_is_linear(aiter->iter_abd), zfs_abd_chunk_size ==
	    ABD_SCATTER(aiter->iter_abd).abd_chunk_size);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		offset = aiter->iter_pos;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		size_t index = abd_iter_scatter_chunk_index(aiter);
		offset = abd_iter_scatter_chunk_offset(aiter);
		aiter->iter_mapsize = MIN(zfs_abd_chunk_size - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);
		paddr = ABD_SCATTER(aiter->iter_abd).abd_chunks[index];
	}
	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

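/* Ask the kmem layer to reclaim idle chunk-cache memory soon. */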
void
abd_cache_reap_now(void)
{
	kmem_cache_reap_soon(abd_chunk_cache);
}