/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation since
 * when we are at the limit of allocatable space, using equal-size chunks will
 * allow us to quickly reclaim enough space for a new large allocation (assuming
 * it is also scattered).
 *
 * ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
        kstat_named_t abdstat_struct_size;
        kstat_named_t abdstat_scatter_cnt;
        kstat_named_t abdstat_scatter_data_size;
        kstat_named_t abdstat_scatter_chunk_waste;
        kstat_named_t abdstat_linear_cnt;
        kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
        /* Amount of memory occupied by all of the abd_t struct allocations */
        { "struct_size",                KSTAT_DATA_UINT64 },
        /*
         * The number of scatter ABDs which are currently allocated, excluding
         * ABDs which don't own their data (for instance the ones which were
         * allocated through abd_get_offset()).
         */
        { "scatter_cnt",                KSTAT_DATA_UINT64 },
        /* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
        { "scatter_data_size",          KSTAT_DATA_UINT64 },
        /*
         * The amount of space wasted at the end of the last chunk across all
         * scatter ABDs tracked by scatter_cnt.
         */
        { "scatter_chunk_waste",        KSTAT_DATA_UINT64 },
        /*
         * The number of linear ABDs which are currently allocated, excluding
         * ABDs which don't own their data (for instance the ones which were
         * allocated through abd_get_offset() and abd_get_from_buf()). If an
         * ABD takes ownership of its buf then it will become tracked.
         */
        { "linear_cnt",                 KSTAT_DATA_UINT64 },
        /* Amount of data stored in all linear ABDs tracked by linear_cnt */
        { "linear_data_size",           KSTAT_DATA_UINT64 },
};

/*
 * The size of the chunks ABD allocates. Because the sizes allocated from the
 * kmem_cache can't change, this tunable can only be modified at boot. Changing
 * it at runtime would cause ABD iteration to work incorrectly for ABDs which
 * were allocated with the old size, so a safeguard has been put in place which
 * will cause the machine to panic if you change it and try to access the data
 * within a scattered ABD.
 */
size_t zfs_abd_chunk_size = 4096;
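
/*
 * Illustrative arithmetic (not compiled): with the default 4096-byte chunk
 * size, abd_chunkcnt_for_bytes() below rounds a request up to a whole
 * number of chunks, e.g.:
 *
 *	abd_chunkcnt_for_bytes(512)    == 1	(4096 - 512 = 3584 bytes wasted)
 *	abd_chunkcnt_for_bytes(4096)   == 1	(no waste)
 *	abd_chunkcnt_for_bytes(10000)  == 3	(12288 - 10000 = 2288 wasted)
 *	abd_chunkcnt_for_bytes(131072) == 32	(no waste)
 *
 * The per-ABD waste is what the scatter_chunk_waste kstat accounts for.
 */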

#if defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
        &zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN,
        &zfs_abd_chunk_size, 0, "The size of the chunks ABD allocates");
#endif

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks all point to
 * a single zeroed buffer of zfs_abd_chunk_size bytes. This allows us to
 * conserve memory by using a single zero buffer for all of the scatter
 * chunks.
 */
abd_t *abd_zero_scatter = NULL;
static char *abd_zero_buf = NULL;

static void
abd_free_chunk(void *c)
{
        kmem_cache_free(abd_chunk_cache, c);
}

static size_t
abd_chunkcnt_for_bytes(size_t size)
{
        return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}

static inline size_t
abd_scatter_chunkcnt(abd_t *abd)
{
        ASSERT(!abd_is_linear(abd));
        return (abd_chunkcnt_for_bytes(
            ABD_SCATTER(abd).abd_offset + abd->abd_size));
}

boolean_t
abd_size_alloc_linear(size_t size)
{
        return (size <= zfs_abd_chunk_size ? B_TRUE : B_FALSE);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
        size_t n = abd_scatter_chunkcnt(abd);
        ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
        int waste = n * zfs_abd_chunk_size - abd->abd_size;
        if (op == ABDSTAT_INCR) {
                ABDSTAT_BUMP(abdstat_scatter_cnt);
                ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
                ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
                arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
        } else {
                ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
                ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
                ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
                arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
        }
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
        ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
        if (op == ABDSTAT_INCR) {
                ABDSTAT_BUMP(abdstat_linear_cnt);
                ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
        } else {
                ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
                ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
        }
}
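
/*
 * Illustrative arithmetic (not compiled): abd_scatter_chunkcnt() counts
 * chunks from the start of the first chunk, so a nonzero abd_offset can
 * add a chunk relative to abd_chunkcnt_for_bytes(abd_size) alone.
 * Assuming 4096-byte chunks:
 *
 *	abd_offset == 0,    abd_size == 8192
 *	    -> abd_chunkcnt_for_bytes(8192)  == 2 chunks
 *	abd_offset == 1904, abd_size == 8192
 *	    -> abd_chunkcnt_for_bytes(10096) == 3 chunks
 */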

void
abd_verify_scatter(abd_t *abd)
{
        /*
         * There are no scatter linear pages in FreeBSD, so it is an
         * error if the ABD has been marked as a linear page.
         */
        VERIFY(!abd_is_linear_page(abd));
        ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
            zfs_abd_chunk_size);
        size_t n = abd_scatter_chunkcnt(abd);
        for (int i = 0; i < n; i++) {
                ASSERT3P(
                    ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
        }
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
        size_t n = abd_chunkcnt_for_bytes(size);
        for (int i = 0; i < n; i++) {
                void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
                ASSERT3P(c, !=, NULL);
                ABD_SCATTER(abd).abd_chunks[i] = c;
        }
        ABD_SCATTER(abd).abd_chunk_size = zfs_abd_chunk_size;
}

void
abd_free_chunks(abd_t *abd)
{
        size_t n = abd_scatter_chunkcnt(abd);
        for (int i = 0; i < n; i++) {
                abd_free_chunk(ABD_SCATTER(abd).abd_chunks[i]);
        }
}

abd_t *
abd_alloc_struct(size_t size)
{
        size_t chunkcnt = abd_chunkcnt_for_bytes(size);
        /*
         * In the event we are allocating a gang ABD, the size passed in
         * will be 0. We must make sure to set abd_size to the size of an
         * ABD struct as opposed to an ABD scatter with 0 chunks. The gang
         * ABD struct allocation accounts for an additional 24 bytes over
         * a scatter ABD with 0 chunks.
         */
        size_t abd_size = MAX(sizeof (abd_t),
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
        abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
        ASSERT3P(abd, !=, NULL);
        list_link_init(&abd->abd_gang_link);
        mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
        ABDSTAT_INCR(abdstat_struct_size, abd_size);

        return (abd);
}

void
abd_free_struct(abd_t *abd)
{
        size_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
            abd_scatter_chunkcnt(abd);
        int size = MAX(sizeof (abd_t),
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
        mutex_destroy(&abd->abd_mtx);
        ASSERT(!list_link_active(&abd->abd_gang_link));
        kmem_free(abd, size);
        ABDSTAT_INCR(abdstat_struct_size, -size);
}
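
/*
 * Illustrative sketch (not compiled; the concrete byte counts are
 * assumptions for an LP64 machine, not measured values): the MAX()
 * expression above sizes the allocation from the chunk count, falling
 * back to the full struct for the chunkcnt == 0 case:
 *
 *	chunkcnt == 0  -> MAX(sizeof (abd_t), offsetof(..., abd_chunks[0]))
 *	                  == sizeof (abd_t), large enough for a gang ABD
 *	chunkcnt == 32 -> offsetof(..., abd_chunks[32]), i.e. the ABD
 *	                  header plus 32 chunk pointers (32 * 8 bytes)
 */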

/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each chunk in
 * the scatterlist will be set to abd_zero_buf.
 */
static void
abd_alloc_zero_scatter(void)
{
        size_t n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
        abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
        abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);

        abd_zero_scatter->abd_flags = ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
        abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
        abd_zero_scatter->abd_parent = NULL;
        zfs_refcount_create(&abd_zero_scatter->abd_children);

        ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
        ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
            zfs_abd_chunk_size;

        for (int i = 0; i < n; i++) {
                ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
                    abd_zero_buf;
        }

        ABDSTAT_BUMP(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, zfs_abd_chunk_size);
}

static void
abd_free_zero_scatter(void)
{
        zfs_refcount_destroy(&abd_zero_scatter->abd_children);
        ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, -(int)zfs_abd_chunk_size);

        abd_free_struct(abd_zero_scatter);
        abd_zero_scatter = NULL;
        kmem_free(abd_zero_buf, zfs_abd_chunk_size);
}

void
abd_init(void)
{
        abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
            NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);

        abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
            sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
        if (abd_ksp != NULL) {
                abd_ksp->ks_data = &abd_stats;
                kstat_install(abd_ksp);
        }

        abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
        abd_free_zero_scatter();

        if (abd_ksp != NULL) {
                kstat_delete(abd_ksp);
                abd_ksp = NULL;
        }

        kmem_cache_destroy(abd_chunk_cache);
        abd_chunk_cache = NULL;
}

void
abd_free_linear_page(abd_t *abd)
{
        /*
         * FreeBSD does not have scatter linear pages, so reaching
         * this function is an error.
         */
        VERIFY(0);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs, however if ldi_strategy() can ever issue I/Os
 * using a scatter/gather list we should switch to that and replace this call
 * with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
        return (abd_alloc_linear(size, is_metadata));
}
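
/*
 * Usage sketch (illustrative only; abd_free() lives in abd.c): a caller
 * doing a short-lived block-layer I/O might pair this with abd_free():
 *
 *	abd_t *abd = abd_alloc_for_io(size, B_FALSE);
 *	// ... fill or read into abd, issue the I/O, wait for it ...
 *	abd_free(abd);
 */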

/*
 * This is just a helper function for abd_get_offset_scatter() to allocate
 * a scatter ABD using the chunkcnt calculated from the offset within the
 * parent ABD.
 */
static abd_t *
abd_alloc_scatter_offset_chunkcnt(size_t chunkcnt)
{
        size_t abd_size = offsetof(abd_t,
            abd_u.abd_scatter.abd_chunks[chunkcnt]);
        abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
        ASSERT3P(abd, !=, NULL);
        list_link_init(&abd->abd_gang_link);
        mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
        ABDSTAT_INCR(abdstat_struct_size, abd_size);

        return (abd);
}

abd_t *
abd_get_offset_scatter(abd_t *sabd, size_t off)
{
        abd_t *abd = NULL;

        abd_verify(sabd);
        ASSERT3U(off, <=, sabd->abd_size);

        size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
        size_t chunkcnt = abd_scatter_chunkcnt(sabd) -
            (new_offset / zfs_abd_chunk_size);

        abd = abd_alloc_scatter_offset_chunkcnt(chunkcnt);

        /*
         * Even if this buf is filesystem metadata, we only track that
         * if we own the underlying data buffer, which is not true in
         * this case. Therefore, we don't ever use ABD_FLAG_META here.
         */
        abd->abd_flags = 0;

        ABD_SCATTER(abd).abd_offset = new_offset % zfs_abd_chunk_size;
        ABD_SCATTER(abd).abd_chunk_size = zfs_abd_chunk_size;

        /* Copy the scatterlist starting at the correct offset */
        (void) memcpy(&ABD_SCATTER(abd).abd_chunks,
            &ABD_SCATTER(sabd).abd_chunks[new_offset /
            zfs_abd_chunk_size],
            chunkcnt * sizeof (void *));

        return (abd);
}

static inline size_t
abd_iter_scatter_chunk_offset(struct abd_iter *aiter)
{
        ASSERT(!abd_is_linear(aiter->iter_abd));
        return ((ABD_SCATTER(aiter->iter_abd).abd_offset +
            aiter->iter_pos) % zfs_abd_chunk_size);
}

static inline size_t
abd_iter_scatter_chunk_index(struct abd_iter *aiter)
{
        ASSERT(!abd_is_linear(aiter->iter_abd));
        return ((ABD_SCATTER(aiter->iter_abd).abd_offset +
            aiter->iter_pos) / zfs_abd_chunk_size);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
        ASSERT(!abd_is_gang(abd));
        abd_verify(abd);
        aiter->iter_abd = abd;
        aiter->iter_pos = 0;
        aiter->iter_mapaddr = NULL;
        aiter->iter_mapsize = 0;
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
        return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been exhausted,
 * in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
        ASSERT3P(aiter->iter_mapaddr, ==, NULL);
        ASSERT0(aiter->iter_mapsize);

        /* There's nothing left to advance to, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        aiter->iter_pos += amount;
}
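
/*
 * Illustrative arithmetic (not compiled): the two chunk helpers above
 * locate an iterator position within the scatterlist. Assuming 4096-byte
 * chunks and a scatter ABD with abd_offset == 1904:
 *
 *	iter_pos == 0    -> chunk index (1904 + 0) / 4096 == 0,
 *	                    chunk offset (1904 + 0) % 4096 == 1904
 *	iter_pos == 3000 -> chunk index 4904 / 4096 == 1,
 *	                    chunk offset 4904 % 4096 == 808
 *
 * abd_get_offset_scatter() performs the same split: the chunk-aligned
 * part of new_offset drops leading chunks from the scatterlist and the
 * remainder becomes the new abd_offset.
 */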

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
        void *paddr;
        size_t offset = 0;

        ASSERT3P(aiter->iter_mapaddr, ==, NULL);
        ASSERT0(aiter->iter_mapsize);

        /* Panic if someone has changed zfs_abd_chunk_size */
        IMPLY(!abd_is_linear(aiter->iter_abd), zfs_abd_chunk_size ==
            ABD_SCATTER(aiter->iter_abd).abd_chunk_size);

        /* There's nothing left to iterate over, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        if (abd_is_linear(aiter->iter_abd)) {
                offset = aiter->iter_pos;
                aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
                paddr = ABD_LINEAR_BUF(aiter->iter_abd);
        } else {
                size_t index = abd_iter_scatter_chunk_index(aiter);
                offset = abd_iter_scatter_chunk_offset(aiter);
                aiter->iter_mapsize = MIN(zfs_abd_chunk_size - offset,
                    aiter->iter_abd->abd_size - aiter->iter_pos);
                paddr = ABD_SCATTER(aiter->iter_abd).abd_chunks[index];
        }
        aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
        /* There's nothing left to unmap, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        ASSERT3P(aiter->iter_mapaddr, !=, NULL);
        ASSERT3U(aiter->iter_mapsize, >, 0);

        aiter->iter_mapaddr = NULL;
        aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
        kmem_cache_reap_soon(abd_chunk_cache);
}
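
/*
 * Usage sketch (illustrative only; the real consumers are the iterate
 * functions in abd.c): a caller walking an ABD with the iterator
 * primitives above would look roughly like this. Note the map is
 * released before advancing, since abd_iter_advance() asserts that no
 * chunk is currently mapped:
 *
 *	struct abd_iter aiter;
 *	abd_iter_init(&aiter, abd);
 *	while (!abd_iter_at_end(&aiter)) {
 *		abd_iter_map(&aiter);
 *		// ... operate on aiter.iter_mapaddr for
 *		// aiter.iter_mapsize bytes ...
 *		size_t len = aiter.iter_mapsize;
 *		abd_iter_unmap(&aiter);
 *		abd_iter_advance(&aiter, len);
 *	}
 */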