1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2012, Joyent, Inc. All rights reserved. 24 * Copyright (c) 2011, 2016 by Delphix. All rights reserved. 25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved. 26 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 27 */ 28 29 /* 30 * DVA-based Adjustable Replacement Cache 31 * 32 * While much of the theory of operation used here is 33 * based on the self-tuning, low overhead replacement cache 34 * presented by Megiddo and Modha at FAST 2003, there are some 35 * significant differences: 36 * 37 * 1. The Megiddo and Modha model assumes any page is evictable. 38 * Pages in its cache cannot be "locked" into memory. This makes 39 * the eviction algorithm simple: evict the last page in the list. 40 * This also makes the performance characteristics easy to reason 41 * about. Our cache is not so simple. At any given moment, some 42 * subset of the blocks in the cache are un-evictable because we 43 * have handed out a reference to them. Blocks are only evictable 44 * when there are no external references active. This makes 45 * eviction far more problematic: we choose to evict the evictable 46 * blocks that are the "lowest" in the list. 47 * 48 * There are times when it is not possible to evict the requested 49 * space. In these circumstances we are unable to adjust the cache 50 * size. To prevent the cache growing unbounded at these times we 51 * implement a "cache throttle" that slows the flow of new data 52 * into the cache until we can make space available. 53 * 54 * 2. The Megiddo and Modha model assumes a fixed cache size. 55 * Pages are evicted when the cache is full and there is a cache 56 * miss. Our model has a variable sized cache. It grows with 57 * high use, but also tries to react to memory pressure from the 58 * operating system: decreasing its size when system memory is 59 * tight. 60 * 61 * 3. The Megiddo and Modha model assumes a fixed page size. All 62 * elements of the cache are therefore exactly the same size. So 63 * when adjusting the cache size following a cache miss, it's simply 64 * a matter of choosing a single page to evict. In our model, we 65 * have variable sized cache blocks (ranging from 512 bytes to 66 * 128K bytes). We therefore choose a set of blocks to evict to make 67 * space for a cache miss that approximates as closely as possible 68 * the space used by the new block. 69 * 70 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" 71 * by N. Megiddo & D.
Modha, FAST 2003 72 */ 73 74 /* 75 * The locking model: 76 * 77 * A new reference to a cache buffer can be obtained in two 78 * ways: 1) via a hash table lookup using the DVA as a key, 79 * or 2) via one of the ARC lists. The arc_read() interface 80 * uses method 1, while the internal arc algorithms for 81 * adjusting the cache use method 2. We therefore provide two 82 * types of locks: 1) the hash table lock array, and 2) the 83 * arc list locks. 84 * 85 * Buffers do not have their own mutexes, rather they rely on the 86 * hash table mutexes for the bulk of their protection (i.e. most 87 * fields in the arc_buf_hdr_t are protected by these mutexes). 88 * 89 * buf_hash_find() returns the appropriate mutex (held) when it 90 * locates the requested buffer in the hash table. It returns 91 * NULL for the mutex if the buffer was not in the table. 92 * 93 * buf_hash_remove() expects the appropriate hash mutex to be 94 * already held before it is invoked. 95 * 96 * Each arc state also has a mutex which is used to protect the 97 * buffer list associated with the state. When attempting to 98 * obtain a hash table lock while holding an arc list lock you 99 * must use: mutex_tryenter() to avoid deadlock. Also note that 100 * the active state mutex must be held before the ghost state mutex. 101 * 102 * Arc buffers may have an associated eviction callback function. 103 * This function will be invoked prior to removing the buffer (e.g. 104 * in arc_do_user_evicts()). Note however that the data associated 105 * with the buffer may be evicted prior to the callback. The callback 106 * must be made with *no locks held* (to prevent deadlock). Additionally, 107 * the users of callbacks must ensure that their private data is 108 * protected from simultaneous callbacks from arc_clear_callback() 109 * and arc_do_user_evicts(). 110 * 111 * Note that the majority of the performance stats are manipulated 112 * with atomic operations. 113 * 114 * The L2ARC uses the l2ad_mtx on each vdev for the following: 115 * 116 * - L2ARC buflist creation 117 * - L2ARC buflist eviction 118 * - L2ARC write completion, which walks L2ARC buflists 119 * - ARC header destruction, as it removes from L2ARC buflists 120 * - ARC header release, as it removes from L2ARC buflists 121 */ 122 123 /* 124 * ARC operation: 125 * 126 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure. 127 * This structure can point either to a block that is still in the cache or to 128 * one that is only accessible in an L2 ARC device, or it can provide 129 * information about a block that was recently evicted. If a block is 130 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough 131 * information to retrieve it from the L2ARC device. This information is 132 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block 133 * that is in this state cannot access the data directly. 134 * 135 * Blocks that are actively being referenced or have not been evicted 136 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within 137 * the arc_buf_hdr_t that will point to the data block in memory. A block can 138 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC 139 * caches data in two ways -- in a list of arc buffers (arc_buf_t) and 140 * also in the arc_buf_hdr_t's private physical data block pointer (b_pdata). 141 * Each arc buffer (arc_buf_t) is being actively accessed by a specific ARC 142 * consumer, and always contains uncompressed data. 
The ARC will provide 143 * references to this data and will keep it cached until it is no longer in 144 * use. Typically, the arc will try to cache only the L1ARC's physical data 145 * block and will aggressively evict any arc_buf_t that is no longer referenced. 146 * The amount of memory consumed by the arc_buf_t's can be seen via the 147 * "overhead_size" kstat. 148 * 149 * 150 * arc_buf_hdr_t 151 * +-----------+ 152 * | | 153 * | | 154 * | | 155 * +-----------+ 156 * l2arc_buf_hdr_t| | 157 * | | 158 * +-----------+ 159 * l1arc_buf_hdr_t| | 160 * | | arc_buf_t 161 * | b_buf +------------>+---------+ arc_buf_t 162 * | | |b_next +---->+---------+ 163 * | b_pdata +-+ |---------| |b_next +-->NULL 164 * +-----------+ | | | +---------+ 165 * | |b_data +-+ | | 166 * | +---------+ | |b_data +-+ 167 * +->+------+ | +---------+ | 168 * (potentially) | | | | 169 * compressed | | | | 170 * data +------+ | v 171 * +->+------+ +------+ 172 * uncompressed | | | | 173 * data | | | | 174 * +------+ +------+ 175 * 176 * The L1ARC's data pointer, however, may or may not be uncompressed. The 177 * ARC has the ability to store the physical data (b_pdata) associated with 178 * the DVA of the arc_buf_hdr_t. Since the b_pdata is a copy of the on-disk 179 * physical block, it will match its on-disk compression characteristics. 180 * If the block on-disk is compressed, then the physical data block 181 * in the cache will also be compressed and vice-versa. This behavior 182 * can be disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the 183 * compressed ARC functionality is disabled, the b_pdata will point to an 184 * uncompressed version of the on-disk data. 185 * 186 * When a consumer reads a block, the ARC must first look to see if the 187 * arc_buf_hdr_t is cached. If the hdr is cached and already has an arc_buf_t, 188 * then an additional arc_buf_t is allocated and the uncompressed data is 189 * bcopied from the existing arc_buf_t. If the hdr is cached but does not 190 * have an arc_buf_t, then the ARC allocates a new arc_buf_t and decompresses 191 * the b_pdata contents into the arc_buf_t's b_data. If the arc_buf_hdr_t's 192 * b_pdata is not compressed, then the block is shared with the newly 193 * allocated arc_buf_t. This block sharing only occurs with one arc_buf_t 194 * in the arc buffer chain. Sharing the block reduces the memory overhead 195 * required when the hdr is caching uncompressed blocks or the compressed 196 * arc functionality has been disabled via 'zfs_compressed_arc_enabled'. 197 * 198 * The diagram below shows an example of an uncompressed ARC hdr that is 199 * sharing its data with an arc_buf_t: 200 * 201 * arc_buf_hdr_t 202 * +-----------+ 203 * | | 204 * | | 205 * | | 206 * +-----------+ 207 * l2arc_buf_hdr_t| | 208 * | | 209 * +-----------+ 210 * l1arc_buf_hdr_t| | 211 * | | arc_buf_t (shared) 212 * | b_buf +------------>+---------+ arc_buf_t 213 * | | |b_next +---->+---------+ 214 * | b_pdata +-+ |---------| |b_next +-->NULL 215 * +-----------+ | | | +---------+ 216 * | |b_data +-+ | | 217 * | +---------+ | |b_data +-+ 218 * +->+------+ | +---------+ | 219 * | | | | 220 * uncompressed | | | | 221 * data +------+ | | 222 * ^ +->+------+ | 223 * | uncompressed | | | 224 * | data | | | 225 * | +------+ | 226 * +---------------------------------+ 227 * 228 * Writing to the arc requires that the ARC first discard the b_pdata 229 * since the physical block is about to be rewritten. The new data contents 230 * will be contained in the arc_buf_t (uncompressed). 
As the I/O pipeline 231 * performs the write, it may compress the data before writing it to disk. 232 * The ARC will be called with the transformed data and will bcopy the 233 * transformed on-disk block into a newly allocated b_pdata. 234 * 235 * When the L2ARC is in use, it will also take advantage of the b_pdata. The 236 * L2ARC will always write the contents of b_pdata to the L2ARC. This means 237 * that when compressed arc is enabled, the L2ARC blocks are identical 238 * to the on-disk block in the main data pool. This provides a significant 239 * advantage since the ARC can leverage the bp's checksum when reading from the 240 * L2ARC to determine if the contents are valid. However, if the compressed 241 * arc is disabled, then the L2ARC's block must be transformed to look 242 * like the physical block in the main data pool before comparing the 243 * checksum and determining its validity. 244 */ 245 246 #include <sys/spa.h> 247 #include <sys/zio.h> 248 #include <sys/spa_impl.h> 249 #include <sys/zio_compress.h> 250 #include <sys/zio_checksum.h> 251 #include <sys/zfs_context.h> 252 #include <sys/arc.h> 253 #include <sys/refcount.h> 254 #include <sys/vdev.h> 255 #include <sys/vdev_impl.h> 256 #include <sys/dsl_pool.h> 257 #include <sys/multilist.h> 258 #ifdef _KERNEL 259 #include <sys/dnlc.h> 260 #include <sys/racct.h> 261 #endif 262 #include <sys/callb.h> 263 #include <sys/kstat.h> 264 #include <sys/trim_map.h> 265 #include <zfs_fletcher.h> 266 #include <sys/sdt.h> 267 268 #include <machine/vmparam.h> 269 270 #ifdef illumos 271 #ifndef _KERNEL 272 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ 273 boolean_t arc_watch = B_FALSE; 274 int arc_procfd; 275 #endif 276 #endif /* illumos */ 277 278 #ifdef __NetBSD__ 279 #include <uvm/uvm.h> 280 #ifndef btop 281 #define btop(x) ((x) / PAGE_SIZE) 282 #endif 283 //#define needfree (uvmexp.free < uvmexp.freetarg ? uvmexp.freetarg : 0) 284 #define buf_init arc_buf_init 285 #define freemem uvmexp.free 286 #define minfree uvmexp.freemin 287 #define desfree uvmexp.freetarg 288 #define lotsfree (desfree * 2) 289 #define availrmem desfree 290 #define swapfs_minfree 0 291 #define swapfs_reserve 0 292 #undef curproc 293 #define curproc curlwp 294 #define proc_pageout uvm.pagedaemon_lwp 295 296 static void *zio_arena; 297 298 #include <sys/callback.h> 299 /* Structures used for memory and kva space reclaim. */ 300 static struct callback_entry arc_kva_reclaim_entry; 301 302 #endif /* __NetBSD__ */ 303 304 static kmutex_t arc_reclaim_lock; 305 static kcondvar_t arc_reclaim_thread_cv; 306 static boolean_t arc_reclaim_thread_exit; 307 static kcondvar_t arc_reclaim_waiters_cv; 308 309 #ifdef __FreeBSD__ 310 static kmutex_t arc_dnlc_evicts_lock; 311 static kcondvar_t arc_dnlc_evicts_cv; 312 static boolean_t arc_dnlc_evicts_thread_exit; 313 314 uint_t arc_reduce_dnlc_percent = 3; 315 #endif 316 317 /* 318 * The number of headers to evict in arc_evict_state_impl() before 319 * dropping the sublist lock and evicting from another sublist. A lower 320 * value means we're more likely to evict the "correct" header (i.e. the 321 * oldest header in the arc state), but comes with higher overhead 322 * (i.e. more invocations of arc_evict_state_impl()). 323 */ 324 int zfs_arc_evict_batch_limit = 10; 325 326 /* 327 * The number of sublists used for each of the arc state lists. If this 328 * is not set to a suitable value by the user, it will be configured to 329 * the number of CPUs on the system in arc_init().
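 *
 * For example (an illustrative figure, not a tuning recommendation): with
 * 16 CPUs and the default auto-sizing, each arc state keeps 16 sublists
 * per buffer type, each protected by its own lock, so concurrent inserts
 * and evictions tend to spread across sublist locks rather than
 * serializing on a single list-wide lock.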
330 */ 331 int zfs_arc_num_sublists_per_state = 0; 332 333 /* number of seconds before growing cache again */ 334 static int arc_grow_retry = 60; 335 336 /* shift of arc_c for calculating overflow limit in arc_get_data_buf */ 337 int zfs_arc_overflow_shift = 8; 338 339 /* shift of arc_c for calculating both min and max arc_p */ 340 static int arc_p_min_shift = 4; 341 342 /* log2(fraction of arc to reclaim) */ 343 static int arc_shrink_shift = 7; 344 345 /* 346 * log2(fraction of ARC which must be free to allow growing). 347 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory, 348 * when reading a new block into the ARC, we will evict an equal-sized block 349 * from the ARC. 350 * 351 * This must be less than arc_shrink_shift, so that when we shrink the ARC, 352 * we will still not allow it to grow. 353 */ 354 int arc_no_grow_shift = 5; 355 356 357 /* 358 * minimum lifespan of a prefetch block in clock ticks 359 * (initialized in arc_init()) 360 */ 361 static int arc_min_prefetch_lifespan; 362 363 /* 364 * If this percent of memory is free, don't throttle. 365 */ 366 int arc_lotsfree_percent = 10; 367 368 static int arc_dead; 369 extern boolean_t zfs_prefetch_disable; 370 371 /* 372 * The arc has filled available memory and has now warmed up. 373 */ 374 static boolean_t arc_warm; 375 376 /* 377 * These tunables are for performance analysis. 378 */ 379 uint64_t zfs_arc_max; 380 uint64_t zfs_arc_min; 381 uint64_t zfs_arc_meta_limit = 0; 382 uint64_t zfs_arc_meta_min = 0; 383 int zfs_arc_grow_retry = 0; 384 int zfs_arc_shrink_shift = 0; 385 int zfs_arc_p_min_shift = 0; 386 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */ 387 u_int zfs_arc_free_target = 0; 388 389 /* Absolute min for arc min / max is 16MB. */ 390 static uint64_t arc_abs_min = 16 << 20; 391 392 boolean_t zfs_compressed_arc_enabled = B_TRUE; 393 394 #if defined(__FreeBSD__) && defined(_KERNEL) 395 static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS); 396 static int sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS); 397 static int sysctl_vfs_zfs_arc_max(SYSCTL_HANDLER_ARGS); 398 static int sysctl_vfs_zfs_arc_min(SYSCTL_HANDLER_ARGS); 399 400 static void 401 arc_free_target_init(void *unused __unused) 402 { 403 404 zfs_arc_free_target = vm_pageout_wakeup_thresh; 405 } 406 SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY, 407 arc_free_target_init, NULL); 408 409 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit); 410 TUNABLE_QUAD("vfs.zfs.arc_meta_min", &zfs_arc_meta_min); 411 TUNABLE_INT("vfs.zfs.arc_shrink_shift", &zfs_arc_shrink_shift); 412 SYSCTL_DECL(_vfs_zfs); 413 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max, CTLTYPE_U64 | CTLFLAG_RWTUN, 414 0, sizeof(uint64_t), sysctl_vfs_zfs_arc_max, "QU", "Maximum ARC size"); 415 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min, CTLTYPE_U64 | CTLFLAG_RWTUN, 416 0, sizeof(uint64_t), sysctl_vfs_zfs_arc_min, "QU", "Minimum ARC size"); 417 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN, 418 &zfs_arc_average_blocksize, 0, 419 "ARC average blocksize"); 420 SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW, 421 &arc_shrink_shift, 0, 422 "log2(fraction of arc to reclaim)"); 423 SYSCTL_INT(_vfs_zfs, OID_AUTO, compressed_arc_enabled, CTLFLAG_RDTUN, 424 &zfs_compressed_arc_enabled, 0, "Enable compressed ARC"); 425 426 /* 427 * We don't have a tunable for arc_free_target due to the dependency on 428 * pagedaemon initialisation. 
429 */ 430 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target, 431 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int), 432 sysctl_vfs_zfs_arc_free_target, "IU", 433 "Desired number of free pages below which ARC triggers reclaim"); 434 435 static int 436 sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS) 437 { 438 u_int val; 439 int err; 440 441 val = zfs_arc_free_target; 442 err = sysctl_handle_int(oidp, &val, 0, req); 443 if (err != 0 || req->newptr == NULL) 444 return (err); 445 446 if (val < minfree) 447 return (EINVAL); 448 if (val > vm_cnt.v_page_count) 449 return (EINVAL); 450 451 zfs_arc_free_target = val; 452 453 return (0); 454 } 455 456 /* 457 * Must be declared here, before the definition of the corresponding kstat 458 * macro; that macro uses the same names and would otherwise confuse the compiler. 459 */ 460 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_meta_limit, 461 CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t), 462 sysctl_vfs_zfs_arc_meta_limit, "QU", 463 "ARC metadata limit"); 464 #endif 465 466 /* 467 * Note that buffers can be in one of 6 states: 468 * ARC_anon - anonymous (discussed below) 469 * ARC_mru - recently used, currently cached 470 * ARC_mru_ghost - recently used, no longer in cache 471 * ARC_mfu - frequently used, currently cached 472 * ARC_mfu_ghost - frequently used, no longer in cache 473 * ARC_l2c_only - exists in L2ARC but not other states 474 * When there are no active references to a buffer, it is 475 * linked onto a list in one of these arc states. These are 476 * the only buffers that can be evicted or deleted. Within each 477 * state there are multiple lists, one for meta-data and one for 478 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes, 479 * etc.) is tracked separately so that it can be managed more 480 * explicitly: favored over data, limited explicitly. 481 * 482 * Anonymous buffers are buffers that are not associated with 483 * a DVA. These are buffers that hold dirty block copies 484 * before they are written to stable storage. By definition, 485 * they are "ref'd" and are considered part of arc_mru 486 * that cannot be freed. Generally, they will acquire a DVA 487 * as they are written and migrate onto the arc_mru list. 488 * 489 * The ARC_l2c_only state is for buffers that are in the second 490 * level ARC but no longer in any of the ARC_m* lists. The second 491 * level ARC itself may also contain buffers that are in any of 492 * the ARC_m* states - meaning that a buffer can exist in two 493 * places. The reason for the ARC_l2c_only state is to keep the 494 * buffer header in the hash table, so that reads that hit the 495 * second level ARC benefit from these fast lookups. 496 */ 497 498 typedef struct arc_state { 499 /* 500 * list of evictable buffers 501 */ 502 multilist_t arcs_list[ARC_BUFC_NUMTYPES]; 503 /* 504 * total amount of evictable data in this state 505 */ 506 refcount_t arcs_esize[ARC_BUFC_NUMTYPES]; 507 /* 508 * total amount of data in this state; this includes: evictable, 509 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
510 */ 511 refcount_t arcs_size; 512 } arc_state_t; 513 514 /* The 6 states: */ 515 static arc_state_t ARC_anon; 516 static arc_state_t ARC_mru; 517 static arc_state_t ARC_mru_ghost; 518 static arc_state_t ARC_mfu; 519 static arc_state_t ARC_mfu_ghost; 520 static arc_state_t ARC_l2c_only; 521 522 typedef struct arc_stats { 523 kstat_named_t arcstat_hits; 524 kstat_named_t arcstat_misses; 525 kstat_named_t arcstat_demand_data_hits; 526 kstat_named_t arcstat_demand_data_misses; 527 kstat_named_t arcstat_demand_metadata_hits; 528 kstat_named_t arcstat_demand_metadata_misses; 529 kstat_named_t arcstat_prefetch_data_hits; 530 kstat_named_t arcstat_prefetch_data_misses; 531 kstat_named_t arcstat_prefetch_metadata_hits; 532 kstat_named_t arcstat_prefetch_metadata_misses; 533 kstat_named_t arcstat_mru_hits; 534 kstat_named_t arcstat_mru_ghost_hits; 535 kstat_named_t arcstat_mfu_hits; 536 kstat_named_t arcstat_mfu_ghost_hits; 537 kstat_named_t arcstat_allocated; 538 kstat_named_t arcstat_deleted; 539 /* 540 * Number of buffers that could not be evicted because the hash lock 541 * was held by another thread. The lock may not necessarily be held 542 * by something using the same buffer, since hash locks are shared 543 * by multiple buffers. 544 */ 545 kstat_named_t arcstat_mutex_miss; 546 /* 547 * Number of buffers skipped because they have I/O in progress, are 548 * indirect prefetch buffers that have not lived long enough, or are 549 * not from the spa we're trying to evict from. 550 */ 551 kstat_named_t arcstat_evict_skip; 552 /* 553 * Number of times arc_evict_state() was unable to evict enough 554 * buffers to reach its target amount. 555 */ 556 kstat_named_t arcstat_evict_not_enough; 557 kstat_named_t arcstat_evict_l2_cached; 558 kstat_named_t arcstat_evict_l2_eligible; 559 kstat_named_t arcstat_evict_l2_ineligible; 560 kstat_named_t arcstat_evict_l2_skip; 561 kstat_named_t arcstat_hash_elements; 562 kstat_named_t arcstat_hash_elements_max; 563 kstat_named_t arcstat_hash_collisions; 564 kstat_named_t arcstat_hash_chains; 565 kstat_named_t arcstat_hash_chain_max; 566 kstat_named_t arcstat_p; 567 kstat_named_t arcstat_c; 568 kstat_named_t arcstat_c_min; 569 kstat_named_t arcstat_c_max; 570 kstat_named_t arcstat_size; 571 /* 572 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pdata. 573 * Note that the compressed bytes may match the uncompressed bytes 574 * if the block is either not compressed or compressed arc is disabled. 575 */ 576 kstat_named_t arcstat_compressed_size; 577 /* 578 * Uncompressed size of the data stored in b_pdata. If compressed 579 * arc is disabled then this value will be identical to the stat 580 * above. 581 */ 582 kstat_named_t arcstat_uncompressed_size; 583 /* 584 * Number of bytes stored in all the arc_buf_t's. This is classified 585 * as "overhead" since this data is typically short-lived and will 586 * be evicted from the arc when it becomes unreferenced unless the 587 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level 588 * values have been set (see comment in dbuf.c for more information). 589 */ 590 kstat_named_t arcstat_overhead_size; 591 /* 592 * Number of bytes consumed by internal ARC structures necessary 593 * for tracking purposes; these structures are not actually 594 * backed by ARC buffers. This includes arc_buf_hdr_t structures 595 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only 596 * caches), and arc_buf_t structures (allocated via arc_buf_t 597 * cache).
598 */ 599 kstat_named_t arcstat_hdr_size; 600 /* 601 * Number of bytes consumed by ARC buffers of type equal to 602 * ARC_BUFC_DATA. This is generally consumed by buffers backing 603 * on disk user data (e.g. plain file contents). 604 */ 605 kstat_named_t arcstat_data_size; 606 /* 607 * Number of bytes consumed by ARC buffers of type equal to 608 * ARC_BUFC_METADATA. This is generally consumed by buffers 609 * backing on disk data that is used for internal ZFS 610 * structures (e.g. ZAP, dnode, indirect blocks, etc). 611 */ 612 kstat_named_t arcstat_metadata_size; 613 /* 614 * Number of bytes consumed by various buffers and structures 615 * not actually backed with ARC buffers. This includes bonus 616 * buffers (allocated directly via zio_buf_* functions), 617 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t 618 * cache), and dnode_t structures (allocated via dnode_t cache). 619 */ 620 kstat_named_t arcstat_other_size; 621 /* 622 * Total number of bytes consumed by ARC buffers residing in the 623 * arc_anon state. This includes *all* buffers in the arc_anon 624 * state; e.g. data, metadata, evictable, and unevictable buffers 625 * are all included in this value. 626 */ 627 kstat_named_t arcstat_anon_size; 628 /* 629 * Number of bytes consumed by ARC buffers that meet the 630 * following criteria: backing buffers of type ARC_BUFC_DATA, 631 * residing in the arc_anon state, and are eligible for eviction 632 * (e.g. have no outstanding holds on the buffer). 633 */ 634 kstat_named_t arcstat_anon_evictable_data; 635 /* 636 * Number of bytes consumed by ARC buffers that meet the 637 * following criteria: backing buffers of type ARC_BUFC_METADATA, 638 * residing in the arc_anon state, and are eligible for eviction 639 * (e.g. have no outstanding holds on the buffer). 640 */ 641 kstat_named_t arcstat_anon_evictable_metadata; 642 /* 643 * Total number of bytes consumed by ARC buffers residing in the 644 * arc_mru state. This includes *all* buffers in the arc_mru 645 * state; e.g. data, metadata, evictable, and unevictable buffers 646 * are all included in this value. 647 */ 648 kstat_named_t arcstat_mru_size; 649 /* 650 * Number of bytes consumed by ARC buffers that meet the 651 * following criteria: backing buffers of type ARC_BUFC_DATA, 652 * residing in the arc_mru state, and are eligible for eviction 653 * (e.g. have no outstanding holds on the buffer). 654 */ 655 kstat_named_t arcstat_mru_evictable_data; 656 /* 657 * Number of bytes consumed by ARC buffers that meet the 658 * following criteria: backing buffers of type ARC_BUFC_METADATA, 659 * residing in the arc_mru state, and are eligible for eviction 660 * (e.g. have no outstanding holds on the buffer). 661 */ 662 kstat_named_t arcstat_mru_evictable_metadata; 663 /* 664 * Total number of bytes that *would have been* consumed by ARC 665 * buffers in the arc_mru_ghost state. The key thing to note 666 * here, is the fact that this size doesn't actually indicate 667 * RAM consumption. The ghost lists only consist of headers and 668 * don't actually have ARC buffers linked off of these headers. 669 * Thus, *if* the headers had associated ARC buffers, these 670 * buffers *would have* consumed this number of bytes. 671 */ 672 kstat_named_t arcstat_mru_ghost_size; 673 /* 674 * Number of bytes that *would have been* consumed by ARC 675 * buffers that are eligible for eviction, of type 676 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state. 
677 */ 678 kstat_named_t arcstat_mru_ghost_evictable_data; 679 /* 680 * Number of bytes that *would have been* consumed by ARC 681 * buffers that are eligible for eviction, of type 682 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state. 683 */ 684 kstat_named_t arcstat_mru_ghost_evictable_metadata; 685 /* 686 * Total number of bytes consumed by ARC buffers residing in the 687 * arc_mfu state. This includes *all* buffers in the arc_mfu 688 * state; e.g. data, metadata, evictable, and unevictable buffers 689 * are all included in this value. 690 */ 691 kstat_named_t arcstat_mfu_size; 692 /* 693 * Number of bytes consumed by ARC buffers that are eligible for 694 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu 695 * state. 696 */ 697 kstat_named_t arcstat_mfu_evictable_data; 698 /* 699 * Number of bytes consumed by ARC buffers that are eligible for 700 * eviction, of type ARC_BUFC_METADATA, and reside in the 701 * arc_mfu state. 702 */ 703 kstat_named_t arcstat_mfu_evictable_metadata; 704 /* 705 * Total number of bytes that *would have been* consumed by ARC 706 * buffers in the arc_mfu_ghost state. See the comment above 707 * arcstat_mru_ghost_size for more details. 708 */ 709 kstat_named_t arcstat_mfu_ghost_size; 710 /* 711 * Number of bytes that *would have been* consumed by ARC 712 * buffers that are eligible for eviction, of type 713 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state. 714 */ 715 kstat_named_t arcstat_mfu_ghost_evictable_data; 716 /* 717 * Number of bytes that *would have been* consumed by ARC 718 * buffers that are eligible for eviction, of type 719 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state. 720 */ 721 kstat_named_t arcstat_mfu_ghost_evictable_metadata; 722 kstat_named_t arcstat_l2_hits; 723 kstat_named_t arcstat_l2_misses; 724 kstat_named_t arcstat_l2_feeds; 725 kstat_named_t arcstat_l2_rw_clash; 726 kstat_named_t arcstat_l2_read_bytes; 727 kstat_named_t arcstat_l2_write_bytes; 728 kstat_named_t arcstat_l2_writes_sent; 729 kstat_named_t arcstat_l2_writes_done; 730 kstat_named_t arcstat_l2_writes_error; 731 kstat_named_t arcstat_l2_writes_lock_retry; 732 kstat_named_t arcstat_l2_evict_lock_retry; 733 kstat_named_t arcstat_l2_evict_reading; 734 kstat_named_t arcstat_l2_evict_l1cached; 735 kstat_named_t arcstat_l2_free_on_write; 736 kstat_named_t arcstat_l2_abort_lowmem; 737 kstat_named_t arcstat_l2_cksum_bad; 738 kstat_named_t arcstat_l2_io_error; 739 kstat_named_t arcstat_l2_size; 740 kstat_named_t arcstat_l2_asize; 741 kstat_named_t arcstat_l2_hdr_size; 742 kstat_named_t arcstat_l2_write_trylock_fail; 743 kstat_named_t arcstat_l2_write_passed_headroom; 744 kstat_named_t arcstat_l2_write_spa_mismatch; 745 kstat_named_t arcstat_l2_write_in_l2; 746 kstat_named_t arcstat_l2_write_hdr_io_in_progress; 747 kstat_named_t arcstat_l2_write_not_cacheable; 748 kstat_named_t arcstat_l2_write_full; 749 kstat_named_t arcstat_l2_write_buffer_iter; 750 kstat_named_t arcstat_l2_write_pios; 751 kstat_named_t arcstat_l2_write_buffer_bytes_scanned; 752 kstat_named_t arcstat_l2_write_buffer_list_iter; 753 kstat_named_t arcstat_l2_write_buffer_list_null_iter; 754 kstat_named_t arcstat_memory_throttle_count; 755 kstat_named_t arcstat_meta_used; 756 kstat_named_t arcstat_meta_limit; 757 kstat_named_t arcstat_meta_max; 758 kstat_named_t arcstat_meta_min; 759 kstat_named_t arcstat_sync_wait_for_async; 760 kstat_named_t arcstat_demand_hit_predictive_prefetch; 761 } arc_stats_t; 762 763 static arc_stats_t arc_stats = { 764 { "hits", KSTAT_DATA_UINT64 },
765 { "misses", KSTAT_DATA_UINT64 }, 766 { "demand_data_hits", KSTAT_DATA_UINT64 }, 767 { "demand_data_misses", KSTAT_DATA_UINT64 }, 768 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 769 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 770 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 771 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 772 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 773 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 774 { "mru_hits", KSTAT_DATA_UINT64 }, 775 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 776 { "mfu_hits", KSTAT_DATA_UINT64 }, 777 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 778 { "allocated", KSTAT_DATA_UINT64 }, 779 { "deleted", KSTAT_DATA_UINT64 }, 780 { "mutex_miss", KSTAT_DATA_UINT64 }, 781 { "evict_skip", KSTAT_DATA_UINT64 }, 782 { "evict_not_enough", KSTAT_DATA_UINT64 }, 783 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 784 { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 785 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 786 { "evict_l2_skip", KSTAT_DATA_UINT64 }, 787 { "hash_elements", KSTAT_DATA_UINT64 }, 788 { "hash_elements_max", KSTAT_DATA_UINT64 }, 789 { "hash_collisions", KSTAT_DATA_UINT64 }, 790 { "hash_chains", KSTAT_DATA_UINT64 }, 791 { "hash_chain_max", KSTAT_DATA_UINT64 }, 792 { "p", KSTAT_DATA_UINT64 }, 793 { "c", KSTAT_DATA_UINT64 }, 794 { "c_min", KSTAT_DATA_UINT64 }, 795 { "c_max", KSTAT_DATA_UINT64 }, 796 { "size", KSTAT_DATA_UINT64 }, 797 { "compressed_size", KSTAT_DATA_UINT64 }, 798 { "uncompressed_size", KSTAT_DATA_UINT64 }, 799 { "overhead_size", KSTAT_DATA_UINT64 }, 800 { "hdr_size", KSTAT_DATA_UINT64 }, 801 { "data_size", KSTAT_DATA_UINT64 }, 802 { "metadata_size", KSTAT_DATA_UINT64 }, 803 { "other_size", KSTAT_DATA_UINT64 }, 804 { "anon_size", KSTAT_DATA_UINT64 }, 805 { "anon_evictable_data", KSTAT_DATA_UINT64 }, 806 { "anon_evictable_metadata", KSTAT_DATA_UINT64 }, 807 { "mru_size", KSTAT_DATA_UINT64 }, 808 { "mru_evictable_data", KSTAT_DATA_UINT64 }, 809 { "mru_evictable_metadata", KSTAT_DATA_UINT64 }, 810 { "mru_ghost_size", KSTAT_DATA_UINT64 }, 811 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 }, 812 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 813 { "mfu_size", KSTAT_DATA_UINT64 }, 814 { "mfu_evictable_data", KSTAT_DATA_UINT64 }, 815 { "mfu_evictable_metadata", KSTAT_DATA_UINT64 }, 816 { "mfu_ghost_size", KSTAT_DATA_UINT64 }, 817 { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 }, 818 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 819 { "l2_hits", KSTAT_DATA_UINT64 }, 820 { "l2_misses", KSTAT_DATA_UINT64 }, 821 { "l2_feeds", KSTAT_DATA_UINT64 }, 822 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 823 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 824 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 825 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 826 { "l2_writes_done", KSTAT_DATA_UINT64 }, 827 { "l2_writes_error", KSTAT_DATA_UINT64 }, 828 { "l2_writes_lock_retry", KSTAT_DATA_UINT64 }, 829 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 830 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 831 { "l2_evict_l1cached", KSTAT_DATA_UINT64 }, 832 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 833 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 834 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 835 { "l2_io_error", KSTAT_DATA_UINT64 }, 836 { "l2_size", KSTAT_DATA_UINT64 }, 837 { "l2_asize", KSTAT_DATA_UINT64 }, 838 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 839 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 }, 840 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 }, 841 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 }, 842 { "l2_write_in_l2", KSTAT_DATA_UINT64 }, 843 { 
"l2_write_io_in_progress", KSTAT_DATA_UINT64 }, 844 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 }, 845 { "l2_write_full", KSTAT_DATA_UINT64 }, 846 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 }, 847 { "l2_write_pios", KSTAT_DATA_UINT64 }, 848 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 }, 849 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 }, 850 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 }, 851 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 852 { "arc_meta_used", KSTAT_DATA_UINT64 }, 853 { "arc_meta_limit", KSTAT_DATA_UINT64 }, 854 { "arc_meta_max", KSTAT_DATA_UINT64 }, 855 { "arc_meta_min", KSTAT_DATA_UINT64 }, 856 { "sync_wait_for_async", KSTAT_DATA_UINT64 }, 857 { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 }, 858 }; 859 860 #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 861 862 #define ARCSTAT_INCR(stat, val) \ 863 atomic_add_64(&arc_stats.stat.value.ui64, (val)) 864 865 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 866 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 867 868 #define ARCSTAT_MAX(stat, val) { \ 869 uint64_t m; \ 870 while ((val) > (m = arc_stats.stat.value.ui64) && \ 871 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 872 continue; \ 873 } 874 875 #define ARCSTAT_MAXSTAT(stat) \ 876 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 877 878 /* 879 * We define a macro to allow ARC hits/misses to be easily broken down by 880 * two separate conditions, giving a total of four different subtypes for 881 * each of hits and misses (so eight statistics total). 882 */ 883 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 884 if (cond1) { \ 885 if (cond2) { \ 886 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 887 } else { \ 888 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 889 } \ 890 } else { \ 891 if (cond2) { \ 892 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 893 } else { \ 894 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 895 } \ 896 } 897 898 kstat_t *arc_ksp; 899 static arc_state_t *arc_anon; 900 static arc_state_t *arc_mru; 901 static arc_state_t *arc_mru_ghost; 902 static arc_state_t *arc_mfu; 903 static arc_state_t *arc_mfu_ghost; 904 static arc_state_t *arc_l2c_only; 905 906 /* 907 * There are several ARC variables that are critical to export as kstats -- 908 * but we don't want to have to grovel around in the kstat whenever we wish to 909 * manipulate them. For these variables, we therefore define them to be in 910 * terms of the statistic variable. This assures that we are not introducing 911 * the possibility of inconsistency by having shadow copies of the variables, 912 * while still allowing the code to be readable. 
913 */ 914 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 915 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 916 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 917 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 918 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 919 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */ 920 #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */ 921 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */ 922 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */ 923 924 /* compressed size of entire arc */ 925 #define arc_compressed_size ARCSTAT(arcstat_compressed_size) 926 /* uncompressed size of entire arc */ 927 #define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size) 928 /* number of bytes in the arc from arc_buf_t's */ 929 #define arc_overhead_size ARCSTAT(arcstat_overhead_size) 930 931 static int arc_no_grow; /* Don't try to grow cache size */ 932 static uint64_t arc_tempreserve; 933 static uint64_t arc_loaned_bytes; 934 935 typedef struct arc_callback arc_callback_t; 936 937 struct arc_callback { 938 void *acb_private; 939 arc_done_func_t *acb_done; 940 arc_buf_t *acb_buf; 941 zio_t *acb_zio_dummy; 942 arc_callback_t *acb_next; 943 }; 944 945 typedef struct arc_write_callback arc_write_callback_t; 946 947 struct arc_write_callback { 948 void *awcb_private; 949 arc_done_func_t *awcb_ready; 950 arc_done_func_t *awcb_children_ready; 951 arc_done_func_t *awcb_physdone; 952 arc_done_func_t *awcb_done; 953 arc_buf_t *awcb_buf; 954 }; 955 956 /* 957 * ARC buffers are separated into multiple structs as a memory saving measure: 958 * - Common fields struct, always defined, and embedded within it: 959 * - L2-only fields, always allocated but undefined when not in L2ARC 960 * - L1-only fields, only allocated when in L1ARC 961 * 962 * Buffer in L1 Buffer only in L2 963 * +------------------------+ +------------------------+ 964 * | arc_buf_hdr_t | | arc_buf_hdr_t | 965 * | | | | 966 * | | | | 967 * | | | | 968 * +------------------------+ +------------------------+ 969 * | l2arc_buf_hdr_t | | l2arc_buf_hdr_t | 970 * | (undefined if L1-only) | | | 971 * +------------------------+ +------------------------+ 972 * | l1arc_buf_hdr_t | 973 * | | 974 * | | 975 * | | 976 * | | 977 * +------------------------+ 978 * 979 * Because it's possible for the L2ARC to become extremely large, we can wind 980 * up eating a lot of memory in L2ARC buffer headers, so the size of a header 981 * is minimized by only allocating the fields necessary for an L1-cached buffer 982 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and 983 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple 984 * words in pointers. arc_hdr_realloc() is used to switch a header between 985 * these two allocation states. 986 */ 987 typedef struct l1arc_buf_hdr { 988 kmutex_t b_freeze_lock; 989 zio_cksum_t *b_freeze_cksum; 990 #ifdef ZFS_DEBUG 991 /* 992 * used for debugging with kmem_flags - by allocating and freeing 993 * b_thawed when the buffer is thawed, we get a record of the stack 994 * trace that thawed it.
995 */ 996 void *b_thawed; 997 #endif 998 999 arc_buf_t *b_buf; 1000 uint32_t b_bufcnt; 1001 /* for waiting on writes to complete */ 1002 kcondvar_t b_cv; 1003 uint8_t b_byteswap; 1004 1005 /* protected by arc state mutex */ 1006 arc_state_t *b_state; 1007 multilist_node_t b_arc_node; 1008 1009 /* updated atomically */ 1010 clock_t b_arc_access; 1011 1012 /* self protecting */ 1013 refcount_t b_refcnt; 1014 1015 arc_callback_t *b_acb; 1016 void *b_pdata; 1017 } l1arc_buf_hdr_t; 1018 1019 typedef struct l2arc_dev l2arc_dev_t; 1020 1021 typedef struct l2arc_buf_hdr { 1022 /* protected by arc_buf_hdr mutex */ 1023 l2arc_dev_t *b_dev; /* L2ARC device */ 1024 uint64_t b_daddr; /* disk address, offset byte */ 1025 1026 list_node_t b_l2node; 1027 } l2arc_buf_hdr_t; 1028 1029 struct arc_buf_hdr { 1030 /* protected by hash lock */ 1031 dva_t b_dva; 1032 uint64_t b_birth; 1033 1034 arc_buf_contents_t b_type; 1035 arc_buf_hdr_t *b_hash_next; 1036 arc_flags_t b_flags; 1037 1038 /* 1039 * This field stores the size of the data buffer after 1040 * compression, and is set in the arc's zio completion handlers. 1041 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes). 1042 * 1043 * While the block pointers can store up to 32MB in their psize 1044 * field, we can only store up to 32MB minus 512B. This is due 1045 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e. 1046 * a field of zeros represents 512B in the bp). We can't use a 1047 * bias of 1 since we need to reserve a psize of zero, here, to 1048 * represent holes and embedded blocks. 1049 * 1050 * This isn't a problem in practice, since the maximum size of a 1051 * buffer is limited to 16MB, so we never need to store 32MB in 1052 * this field. Even in the upstream illumos code base, the 1053 * maximum size of a buffer is limited to 16MB. 1054 */ 1055 uint16_t b_psize; 1056 1057 /* 1058 * This field stores the size of the data buffer before 1059 * compression, and cannot change once set. It is in units 1060 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes) 1061 */ 1062 uint16_t b_lsize; /* immutable */ 1063 uint64_t b_spa; /* immutable */ 1064 1065 /* L2ARC fields. Undefined when not in L2ARC. */ 1066 l2arc_buf_hdr_t b_l2hdr; 1067 /* L1ARC fields. 
Undefined when in l2arc_only state */ 1068 l1arc_buf_hdr_t b_l1hdr; 1069 }; 1070 1071 #if defined(__FreeBSD__) && defined(_KERNEL) 1072 static int 1073 sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS) 1074 { 1075 uint64_t val; 1076 int err; 1077 1078 val = arc_meta_limit; 1079 err = sysctl_handle_64(oidp, &val, 0, req); 1080 if (err != 0 || req->newptr == NULL) 1081 return (err); 1082 1083 if (val <= 0 || val > arc_c_max) 1084 return (EINVAL); 1085 1086 arc_meta_limit = val; 1087 return (0); 1088 } 1089 1090 static int 1091 sysctl_vfs_zfs_arc_max(SYSCTL_HANDLER_ARGS) 1092 { 1093 uint64_t val; 1094 int err; 1095 1096 val = zfs_arc_max; 1097 err = sysctl_handle_64(oidp, &val, 0, req); 1098 if (err != 0 || req->newptr == NULL) 1099 return (err); 1100 1101 if (zfs_arc_max == 0) { 1102 /* Loader tunable so blindly set */ 1103 zfs_arc_max = val; 1104 return (0); 1105 } 1106 1107 if (val < arc_abs_min || val > kmem_size()) 1108 return (EINVAL); 1109 if (val < arc_c_min) 1110 return (EINVAL); 1111 if (zfs_arc_meta_limit > 0 && val < zfs_arc_meta_limit) 1112 return (EINVAL); 1113 1114 arc_c_max = val; 1115 1116 arc_c = arc_c_max; 1117 arc_p = (arc_c >> 1); 1118 1119 if (zfs_arc_meta_limit == 0) { 1120 /* limit meta-data to 1/4 of the arc capacity */ 1121 arc_meta_limit = arc_c_max / 4; 1122 } 1123 1124 /* if kmem_flags are set, lets try to use less memory */ 1125 if (kmem_debugging()) 1126 arc_c = arc_c / 2; 1127 1128 zfs_arc_max = arc_c; 1129 1130 return (0); 1131 } 1132 1133 static int 1134 sysctl_vfs_zfs_arc_min(SYSCTL_HANDLER_ARGS) 1135 { 1136 uint64_t val; 1137 int err; 1138 1139 val = zfs_arc_min; 1140 err = sysctl_handle_64(oidp, &val, 0, req); 1141 if (err != 0 || req->newptr == NULL) 1142 return (err); 1143 1144 if (zfs_arc_min == 0) { 1145 /* Loader tunable so blindly set */ 1146 zfs_arc_min = val; 1147 return (0); 1148 } 1149 1150 if (val < arc_abs_min || val > arc_c_max) 1151 return (EINVAL); 1152 1153 arc_c_min = val; 1154 1155 if (zfs_arc_meta_min == 0) 1156 arc_meta_min = arc_c_min / 2; 1157 1158 if (arc_c < arc_c_min) 1159 arc_c = arc_c_min; 1160 1161 zfs_arc_min = arc_c_min; 1162 1163 return (0); 1164 } 1165 #endif 1166 1167 #define GHOST_STATE(state) \ 1168 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 1169 (state) == arc_l2c_only) 1170 1171 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE) 1172 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) 1173 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR) 1174 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH) 1175 #define HDR_COMPRESSION_ENABLED(hdr) \ 1176 ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC) 1177 1178 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE) 1179 #define HDR_L2_READING(hdr) \ 1180 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \ 1181 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)) 1182 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING) 1183 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED) 1184 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD) 1185 #define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA) 1186 1187 #define HDR_ISTYPE_METADATA(hdr) \ 1188 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA) 1189 #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr)) 1190 1191 #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR) 1192 #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR) 1193 1194 /* For storing compression mode in b_flags */ 1195 #define 
HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1) 1196 1197 #define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \ 1198 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS)) 1199 #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \ 1200 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp)); 1201 1202 #define ARC_BUF_LAST(buf) ((buf)->b_next == NULL) 1203 1204 /* 1205 * Other sizes 1206 */ 1207 1208 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 1209 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr)) 1210 1211 /* 1212 * Hash table routines 1213 */ 1214 1215 #define HT_LOCK_PAD CACHE_LINE_SIZE 1216 1217 struct ht_lock { 1218 kmutex_t ht_lock; 1219 #ifdef _KERNEL 1220 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 1221 #endif 1222 }; 1223 1224 #define BUF_LOCKS 256 1225 typedef struct buf_hash_table { 1226 uint64_t ht_mask; 1227 arc_buf_hdr_t **ht_table; 1228 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE); 1229 } buf_hash_table_t; 1230 1231 static buf_hash_table_t buf_hash_table; 1232 1233 #define BUF_HASH_INDEX(spa, dva, birth) \ 1234 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 1235 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 1236 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 1237 #define HDR_LOCK(hdr) \ 1238 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 1239 1240 uint64_t zfs_crc64_table[256]; 1241 1242 /* 1243 * Level 2 ARC 1244 */ 1245 1246 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 1247 #define L2ARC_HEADROOM 2 /* num of writes */ 1248 /* 1249 * If we discover during ARC scan any buffers to be compressed, we boost 1250 * our headroom for the next scanning cycle by this percentage multiple. 
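 *
 * As a rough worked example (a sketch of the arithmetic only): with the
 * default l2arc_headroom of 2 and a write target of 8MB, a feed cycle
 * scans about 2 * 8MB = 16MB worth of buffers; a 200% boost stretches
 * that to (16MB * 200) / 100 = 32MB on the next cycle.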
1251 */ 1252 #define L2ARC_HEADROOM_BOOST 200 1253 #define L2ARC_FEED_SECS 1 /* caching interval secs */ 1254 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 1255 1256 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 1257 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 1258 1259 /* L2ARC Performance Tunables */ 1260 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 1261 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 1262 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 1263 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; 1264 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 1265 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 1266 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 1267 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 1268 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 1269 1270 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW, 1271 &l2arc_write_max, 0, "max write size"); 1272 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW, 1273 &l2arc_write_boost, 0, "extra write during warmup"); 1274 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW, 1275 &l2arc_headroom, 0, "number of dev writes"); 1276 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW, 1277 &l2arc_feed_secs, 0, "interval seconds"); 1278 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW, 1279 &l2arc_feed_min_ms, 0, "min interval milliseconds"); 1280 1281 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW, 1282 &l2arc_noprefetch, 0, "don't cache prefetch bufs"); 1283 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW, 1284 &l2arc_feed_again, 0, "turbo warmup"); 1285 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW, 1286 &l2arc_norw, 0, "no reads during writes"); 1287 1288 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD, 1289 &ARC_anon.arcs_size.rc_count, 0, "size of anonymous state"); 1290 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD, 1291 &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1292 "size of anonymous state"); 1293 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD, 1294 &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1295 "size of anonymous state"); 1296 1297 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD, 1298 &ARC_mru.arcs_size.rc_count, 0, "size of mru state"); 1299 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD, 1300 &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1301 "size of metadata in mru state"); 1302 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD, 1303 &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1304 "size of data in mru state"); 1305 1306 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD, 1307 &ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state"); 1308 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD, 1309 &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1310 "size of metadata in mru ghost state"); 1311 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD, 1312 &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1313 "size of data in mru ghost state"); 1314 1315 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD, 1316 &ARC_mfu.arcs_size.rc_count, 0, "size of mfu state"); 1317 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD, 1318 
&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1319 "size of metadata in mfu state"); 1320 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD, 1321 &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1322 "size of data in mfu state"); 1323 1324 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD, 1325 &ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state"); 1326 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD, 1327 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1328 "size of metadata in mfu ghost state"); 1329 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD, 1330 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1331 "size of data in mfu ghost state"); 1332 1333 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD, 1334 &ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state"); 1335 1336 /* 1337 * L2ARC Internals 1338 */ 1339 struct l2arc_dev { 1340 vdev_t *l2ad_vdev; /* vdev */ 1341 spa_t *l2ad_spa; /* spa */ 1342 uint64_t l2ad_hand; /* next write location */ 1343 uint64_t l2ad_start; /* first addr on device */ 1344 uint64_t l2ad_end; /* last addr on device */ 1345 boolean_t l2ad_first; /* first sweep through */ 1346 boolean_t l2ad_writing; /* currently writing */ 1347 kmutex_t l2ad_mtx; /* lock for buffer list */ 1348 list_t l2ad_buflist; /* buffer list */ 1349 list_node_t l2ad_node; /* device list node */ 1350 refcount_t l2ad_alloc; /* allocated bytes */ 1351 }; 1352 1353 static list_t L2ARC_dev_list; /* device list */ 1354 static list_t *l2arc_dev_list; /* device list pointer */ 1355 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 1356 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 1357 static list_t L2ARC_free_on_write; /* free after write buf list */ 1358 static list_t *l2arc_free_on_write; /* free after write list ptr */ 1359 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 1360 static uint64_t l2arc_ndev; /* number of devices */ 1361 1362 typedef struct l2arc_read_callback { 1363 arc_buf_hdr_t *l2rcb_hdr; /* read buffer */ 1364 blkptr_t l2rcb_bp; /* original blkptr */ 1365 zbookmark_phys_t l2rcb_zb; /* original bookmark */ 1366 int l2rcb_flags; /* original flags */ 1367 void *l2rcb_data; /* temporary buffer */ 1368 } l2arc_read_callback_t; 1369 1370 typedef struct l2arc_write_callback { 1371 l2arc_dev_t *l2wcb_dev; /* device info */ 1372 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 1373 } l2arc_write_callback_t; 1374 1375 typedef struct l2arc_data_free { 1376 /* protected by l2arc_free_on_write_mtx */ 1377 void *l2df_data; 1378 size_t l2df_size; 1379 arc_buf_contents_t l2df_type; 1380 list_node_t l2df_list_node; 1381 } l2arc_data_free_t; 1382 1383 static kmutex_t l2arc_feed_thr_lock; 1384 static kcondvar_t l2arc_feed_thr_cv; 1385 static uint8_t l2arc_thread_exit; 1386 1387 static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *); 1388 static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *); 1389 static void arc_hdr_free_pdata(arc_buf_hdr_t *hdr); 1390 static void arc_hdr_alloc_pdata(arc_buf_hdr_t *); 1391 static void arc_access(arc_buf_hdr_t *, kmutex_t *); 1392 static boolean_t arc_is_overflowing(); 1393 static void arc_buf_watch(arc_buf_t *); 1394 1395 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *); 1396 static uint32_t arc_bufc_to_flags(arc_buf_contents_t); 1397 static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); 1398 static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t 
flags); 1399 1400 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *); 1401 static void l2arc_read_done(zio_t *); 1402 1403 static void 1404 l2arc_trim(const arc_buf_hdr_t *hdr) 1405 { 1406 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 1407 1408 ASSERT(HDR_HAS_L2HDR(hdr)); 1409 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 1410 1411 if (HDR_GET_PSIZE(hdr) != 0) { 1412 trim_map_free(dev->l2ad_vdev, hdr->b_l2hdr.b_daddr, 1413 HDR_GET_PSIZE(hdr), 0); 1414 } 1415 } 1416 1417 static uint64_t 1418 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 1419 { 1420 uint8_t *vdva = (uint8_t *)dva; 1421 uint64_t crc = -1ULL; 1422 int i; 1423 1424 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 1425 1426 for (i = 0; i < sizeof (dva_t); i++) 1427 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 1428 1429 crc ^= (spa>>8) ^ birth; 1430 1431 return (crc); 1432 } 1433 1434 #define HDR_EMPTY(hdr) \ 1435 ((hdr)->b_dva.dva_word[0] == 0 && \ 1436 (hdr)->b_dva.dva_word[1] == 0) 1437 1438 #define HDR_EQUAL(spa, dva, birth, hdr) \ 1439 ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 1440 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 1441 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa) 1442 1443 static void 1444 buf_discard_identity(arc_buf_hdr_t *hdr) 1445 { 1446 hdr->b_dva.dva_word[0] = 0; 1447 hdr->b_dva.dva_word[1] = 0; 1448 hdr->b_birth = 0; 1449 } 1450 1451 static arc_buf_hdr_t * 1452 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) 1453 { 1454 const dva_t *dva = BP_IDENTITY(bp); 1455 uint64_t birth = BP_PHYSICAL_BIRTH(bp); 1456 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 1457 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1458 arc_buf_hdr_t *hdr; 1459 1460 mutex_enter(hash_lock); 1461 for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL; 1462 hdr = hdr->b_hash_next) { 1463 if (HDR_EQUAL(spa, dva, birth, hdr)) { 1464 *lockp = hash_lock; 1465 return (hdr); 1466 } 1467 } 1468 mutex_exit(hash_lock); 1469 *lockp = NULL; 1470 return (NULL); 1471 } 1472 1473 /* 1474 * Insert an entry into the hash table. If there is already an element 1475 * equal to elem in the hash table, then the already existing element 1476 * will be returned and the new element will not be inserted. 1477 * Otherwise returns NULL. 1478 * If lockp == NULL, the caller is assumed to already hold the hash lock. 
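 *
 * A minimal caller sketch (illustrative only -- the read and write
 * completion paths in this file add more handling around the collision
 * case):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		(collision: an equal header was inserted first; callers
 *		 typically discard hdr and retry with the existing one)
 *	}
 *	...
 *	mutex_exit(hash_lock);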
1479 */ 1480 static arc_buf_hdr_t * 1481 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) 1482 { 1483 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1484 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1485 arc_buf_hdr_t *fhdr; 1486 uint32_t i; 1487 1488 ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); 1489 ASSERT(hdr->b_birth != 0); 1490 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1491 1492 if (lockp != NULL) { 1493 *lockp = hash_lock; 1494 mutex_enter(hash_lock); 1495 } else { 1496 ASSERT(MUTEX_HELD(hash_lock)); 1497 } 1498 1499 for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; 1500 fhdr = fhdr->b_hash_next, i++) { 1501 if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) 1502 return (fhdr); 1503 } 1504 1505 hdr->b_hash_next = buf_hash_table.ht_table[idx]; 1506 buf_hash_table.ht_table[idx] = hdr; 1507 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 1508 1509 /* collect some hash table performance data */ 1510 if (i > 0) { 1511 ARCSTAT_BUMP(arcstat_hash_collisions); 1512 if (i == 1) 1513 ARCSTAT_BUMP(arcstat_hash_chains); 1514 1515 ARCSTAT_MAX(arcstat_hash_chain_max, i); 1516 } 1517 1518 ARCSTAT_BUMP(arcstat_hash_elements); 1519 ARCSTAT_MAXSTAT(arcstat_hash_elements); 1520 1521 return (NULL); 1522 } 1523 1524 static void 1525 buf_hash_remove(arc_buf_hdr_t *hdr) 1526 { 1527 arc_buf_hdr_t *fhdr, **hdrp; 1528 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1529 1530 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 1531 ASSERT(HDR_IN_HASH_TABLE(hdr)); 1532 1533 hdrp = &buf_hash_table.ht_table[idx]; 1534 while ((fhdr = *hdrp) != hdr) { 1535 ASSERT3P(fhdr, !=, NULL); 1536 hdrp = &fhdr->b_hash_next; 1537 } 1538 *hdrp = hdr->b_hash_next; 1539 hdr->b_hash_next = NULL; 1540 arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 1541 1542 /* collect some hash table performance data */ 1543 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 1544 1545 if (buf_hash_table.ht_table[idx] && 1546 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 1547 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 1548 } 1549 1550 /* 1551 * Global data structures and functions for the buf kmem cache. 1552 */ 1553 static kmem_cache_t *hdr_full_cache; 1554 static kmem_cache_t *hdr_l2only_cache; 1555 static kmem_cache_t *buf_cache; 1556 1557 static void 1558 buf_fini(void) 1559 { 1560 int i; 1561 1562 kmem_free(buf_hash_table.ht_table, 1563 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 1564 for (i = 0; i < BUF_LOCKS; i++) 1565 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 1566 kmem_cache_destroy(hdr_full_cache); 1567 kmem_cache_destroy(hdr_l2only_cache); 1568 kmem_cache_destroy(buf_cache); 1569 } 1570 1571 /* 1572 * Constructor callback - called when the cache is empty 1573 * and a new buf is requested. 
1574 */ 1575 /* ARGSUSED */ 1576 static int 1577 hdr_full_cons(void *vbuf, void *unused, int kmflag) 1578 { 1579 arc_buf_hdr_t *hdr = vbuf; 1580 1581 #ifdef __NetBSD__ 1582 hdr = unused; 1583 #endif 1584 bzero(hdr, HDR_FULL_SIZE); 1585 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL); 1586 refcount_create(&hdr->b_l1hdr.b_refcnt); 1587 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 1588 multilist_link_init(&hdr->b_l1hdr.b_arc_node); 1589 arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1590 1591 return (0); 1592 } 1593 1594 /* ARGSUSED */ 1595 static int 1596 hdr_l2only_cons(void *vbuf, void *unused, int kmflag) 1597 { 1598 arc_buf_hdr_t *hdr = vbuf; 1599 1600 #ifdef __NetBSD__ 1601 hdr = unused; 1602 #endif 1603 bzero(hdr, HDR_L2ONLY_SIZE); 1604 arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1605 1606 return (0); 1607 } 1608 1609 /* ARGSUSED */ 1610 static int 1611 buf_cons(void *vbuf, void *unused, int kmflag) 1612 { 1613 arc_buf_t *buf = vbuf; 1614 1615 #ifdef __NetBSD__ 1616 buf = unused; 1617 #endif 1618 bzero(buf, sizeof (arc_buf_t)); 1619 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 1620 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1621 1622 return (0); 1623 } 1624 1625 /* 1626 * Destructor callback - called when a cached buf is 1627 * no longer required. 1628 */ 1629 /* ARGSUSED */ 1630 static void 1631 hdr_full_dest(void *vbuf, void *unused) 1632 { 1633 arc_buf_hdr_t *hdr = vbuf; 1634 1635 #ifdef __NetBSD__ 1636 hdr = unused; 1637 #endif 1638 ASSERT(HDR_EMPTY(hdr)); 1639 cv_destroy(&hdr->b_l1hdr.b_cv); 1640 refcount_destroy(&hdr->b_l1hdr.b_refcnt); 1641 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); 1642 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 1643 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1644 } 1645 1646 /* ARGSUSED */ 1647 static void 1648 hdr_l2only_dest(void *vbuf, void *unused) 1649 { 1650 arc_buf_hdr_t *hdr = vbuf; 1651 1652 #ifdef __NetBSD__ 1653 hdr = unused; 1654 #endif 1655 ASSERT(HDR_EMPTY(hdr)); 1656 arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1657 } 1658 1659 /* ARGSUSED */ 1660 static void 1661 buf_dest(void *vbuf, void *unused) 1662 { 1663 arc_buf_t *buf = vbuf; 1664 1665 #ifdef __NetBSD__ 1666 buf = unused; 1667 #endif 1668 mutex_destroy(&buf->b_evict_lock); 1669 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1670 } 1671 1672 /* 1673 * Reclaim callback -- invoked when memory is low. 1674 */ 1675 /* ARGSUSED */ 1676 static void 1677 hdr_recl(void *unused) 1678 { 1679 dprintf("hdr_recl called\n"); 1680 /* 1681 * umem calls the reclaim func when we destroy the buf cache, 1682 * which is after we do arc_fini(). 1683 */ 1684 if (!arc_dead) 1685 cv_signal(&arc_reclaim_thread_cv); 1686 } 1687 1688 static void 1689 buf_init(void) 1690 { 1691 uint64_t *ct; 1692 uint64_t hsize = 1ULL << 12; 1693 int i, j; 1694 1695 /* 1696 * The hash table is big enough to fill all of physical memory 1697 * with an average block size of zfs_arc_average_blocksize (default 8K). 1698 * By default, the table will take up 1699 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 
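 *
 * For example, with the 8K default and 8-byte pointers, a machine with
 * 16GB of physical memory needs 16GB / 8K = 2M buckets (already a power
 * of two), so the table allocated below ends up at 2M * 8 bytes = 16MB,
 * matching the 1MB-per-GB figure above.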
1700 */ 1701 while (hsize * zfs_arc_average_blocksize < (uint64_t)physmem * PAGESIZE) 1702 hsize <<= 1; 1703 retry: 1704 buf_hash_table.ht_mask = hsize - 1; 1705 buf_hash_table.ht_table = 1706 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1707 if (buf_hash_table.ht_table == NULL) { 1708 ASSERT(hsize > (1ULL << 8)); 1709 hsize >>= 1; 1710 goto retry; 1711 } 1712 1713 hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, 1714 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0); 1715 hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", 1716 HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl, 1717 NULL, NULL, 0); 1718 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1719 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1720 1721 for (i = 0; i < 256; i++) 1722 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1723 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1724 1725 for (i = 0; i < BUF_LOCKS; i++) { 1726 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1727 NULL, MUTEX_DEFAULT, NULL); 1728 } 1729 } 1730 1731 #define ARC_MINTIME (hz>>4) /* 62 ms */ 1732 1733 static inline boolean_t 1734 arc_buf_is_shared(arc_buf_t *buf) 1735 { 1736 boolean_t shared = (buf->b_data != NULL && 1737 buf->b_data == buf->b_hdr->b_l1hdr.b_pdata); 1738 IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr)); 1739 return (shared); 1740 } 1741 1742 static inline void 1743 arc_cksum_free(arc_buf_hdr_t *hdr) 1744 { 1745 ASSERT(HDR_HAS_L1HDR(hdr)); 1746 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1747 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1748 kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t)); 1749 hdr->b_l1hdr.b_freeze_cksum = NULL; 1750 } 1751 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1752 } 1753 1754 static void 1755 arc_cksum_verify(arc_buf_t *buf) 1756 { 1757 arc_buf_hdr_t *hdr = buf->b_hdr; 1758 zio_cksum_t zc; 1759 1760 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1761 return; 1762 1763 ASSERT(HDR_HAS_L1HDR(hdr)); 1764 1765 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1766 if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) { 1767 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1768 return; 1769 } 1770 fletcher_2_native(buf->b_data, HDR_GET_LSIZE(hdr), NULL, &zc); 1771 if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc)) 1772 panic("buffer modified while frozen!"); 1773 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1774 } 1775 1776 static boolean_t 1777 arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio) 1778 { 1779 enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp); 1780 boolean_t valid_cksum; 1781 1782 ASSERT(!BP_IS_EMBEDDED(zio->io_bp)); 1783 VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr)); 1784 1785 /* 1786 * We rely on the blkptr's checksum to determine if the block 1787 * is valid or not. When compressed arc is enabled, the l2arc 1788 * writes the block to the l2arc just as it appears in the pool. 1789 * This allows us to use the blkptr's checksum to validate the 1790 * data that we just read off of the l2arc without having to store 1791 * a separate checksum in the arc_buf_hdr_t. However, if compressed 1792 * arc is disabled, then the data written to the l2arc is always 1793 * uncompressed and won't match the block as it exists in the main 1794 * pool. When this is the case, we must first compress it if it is 1795 * compressed on the main pool before we can validate the checksum. 
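 *
 * For example (sizes and algorithm purely illustrative): a 128K block
 * that is lz4-compressed to 16K in the main pool, but cached
 * uncompressed in the l2arc because compressed arc is disabled, must be
 * re-compressed, padded out to the blkptr's 16K psize, and only then
 * compared against the blkptr's checksum.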
1796 */ 1797 if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) { 1798 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 1799 uint64_t lsize = HDR_GET_LSIZE(hdr); 1800 uint64_t csize; 1801 1802 void *cbuf = zio_buf_alloc(HDR_GET_PSIZE(hdr)); 1803 csize = zio_compress_data(compress, zio->io_data, cbuf, lsize); 1804 ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr)); 1805 if (csize < HDR_GET_PSIZE(hdr)) { 1806 /* 1807 * Compressed blocks are always a multiple of the 1808 * smallest ashift in the pool. Ideally, we would 1809 * like to round up the csize to the next 1810 * spa_min_ashift but that value may have changed 1811 * since the block was last written. Instead, 1812 * we rely on the fact that the hdr's psize 1813 * was set to the psize of the block when it was 1814 * last written. We set the csize to that value 1815 * and zero out any part that should not contain 1816 * data. 1817 */ 1818 bzero((char *)cbuf + csize, HDR_GET_PSIZE(hdr) - csize); 1819 csize = HDR_GET_PSIZE(hdr); 1820 } 1821 zio_push_transform(zio, cbuf, csize, HDR_GET_PSIZE(hdr), NULL); 1822 } 1823 1824 /* 1825 * Block pointers always store the checksum for the logical data. 1826 * If the block pointer has the gang bit set, then the checksum 1827 * it represents is for the reconstituted data and not for an 1828 * individual gang member. The zio pipeline, however, must be able to 1829 * determine the checksum of each of the gang constituents so it 1830 * treats the checksum comparison differently than what we need 1831 * for l2arc blocks. This prevents us from using the 1832 * zio_checksum_error() interface directly. Instead we must call the 1833 * zio_checksum_error_impl() so that we can ensure the checksum is 1834 * generated using the correct checksum algorithm and accounts for the 1835 * logical I/O size and not just a gang fragment. 
1836 */ 1837 valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp, 1838 BP_GET_CHECKSUM(zio->io_bp), zio->io_data, zio->io_size, 1839 zio->io_offset, NULL) == 0); 1840 zio_pop_transforms(zio); 1841 return (valid_cksum); 1842 } 1843 1844 static void 1845 arc_cksum_compute(arc_buf_t *buf) 1846 { 1847 arc_buf_hdr_t *hdr = buf->b_hdr; 1848 1849 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1850 return; 1851 1852 ASSERT(HDR_HAS_L1HDR(hdr)); 1853 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1854 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1855 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1856 return; 1857 } 1858 hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), 1859 KM_SLEEP); 1860 fletcher_2_native(buf->b_data, HDR_GET_LSIZE(hdr), NULL, 1861 hdr->b_l1hdr.b_freeze_cksum); 1862 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1863 #ifdef illumos 1864 arc_buf_watch(buf); 1865 #endif 1866 } 1867 1868 #ifdef illumos 1869 #ifndef _KERNEL 1870 typedef struct procctl { 1871 long cmd; 1872 prwatch_t prwatch; 1873 } procctl_t; 1874 #endif 1875 1876 /* ARGSUSED */ 1877 static void 1878 arc_buf_unwatch(arc_buf_t *buf) 1879 { 1880 #ifndef _KERNEL 1881 if (arc_watch) { 1882 int result; 1883 procctl_t ctl; 1884 ctl.cmd = PCWATCH; 1885 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1886 ctl.prwatch.pr_size = 0; 1887 ctl.prwatch.pr_wflags = 0; 1888 result = write(arc_procfd, &ctl, sizeof (ctl)); 1889 ASSERT3U(result, ==, sizeof (ctl)); 1890 } 1891 #endif 1892 } 1893 1894 /* ARGSUSED */ 1895 static void 1896 arc_buf_watch(arc_buf_t *buf) 1897 { 1898 #ifndef _KERNEL 1899 if (arc_watch) { 1900 int result; 1901 procctl_t ctl; 1902 ctl.cmd = PCWATCH; 1903 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1904 ctl.prwatch.pr_size = HDR_GET_LSIZE(buf->b_hdr); 1905 ctl.prwatch.pr_wflags = WA_WRITE; 1906 result = write(arc_procfd, &ctl, sizeof (ctl)); 1907 ASSERT3U(result, ==, sizeof (ctl)); 1908 } 1909 #endif 1910 } 1911 #endif /* illumos */ 1912 1913 static arc_buf_contents_t 1914 arc_buf_type(arc_buf_hdr_t *hdr) 1915 { 1916 arc_buf_contents_t type; 1917 if (HDR_ISTYPE_METADATA(hdr)) { 1918 type = ARC_BUFC_METADATA; 1919 } else { 1920 type = ARC_BUFC_DATA; 1921 } 1922 VERIFY3U(hdr->b_type, ==, type); 1923 return (type); 1924 } 1925 1926 static uint32_t 1927 arc_bufc_to_flags(arc_buf_contents_t type) 1928 { 1929 switch (type) { 1930 case ARC_BUFC_DATA: 1931 /* metadata field is 0 if buffer contains normal data */ 1932 return (0); 1933 case ARC_BUFC_METADATA: 1934 return (ARC_FLAG_BUFC_METADATA); 1935 default: 1936 break; 1937 } 1938 panic("undefined ARC buffer type!"); 1939 return ((uint32_t)-1); 1940 } 1941 1942 void 1943 arc_buf_thaw(arc_buf_t *buf) 1944 { 1945 arc_buf_hdr_t *hdr = buf->b_hdr; 1946 1947 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1948 if (hdr->b_l1hdr.b_state != arc_anon) 1949 panic("modifying non-anon buffer!"); 1950 if (HDR_IO_IN_PROGRESS(hdr)) 1951 panic("modifying buffer while i/o in progress!"); 1952 arc_cksum_verify(buf); 1953 } 1954 1955 ASSERT(HDR_HAS_L1HDR(hdr)); 1956 arc_cksum_free(hdr); 1957 1958 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1959 #ifdef ZFS_DEBUG 1960 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1961 if (hdr->b_l1hdr.b_thawed != NULL) 1962 kmem_free(hdr->b_l1hdr.b_thawed, 1); 1963 hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP); 1964 } 1965 #endif 1966 1967 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1968 1969 #ifdef illumos 1970 arc_buf_unwatch(buf); 1971 #endif 1972 } 1973 1974 void 1975 arc_buf_freeze(arc_buf_t *buf) 1976 { 1977 arc_buf_hdr_t *hdr = buf->b_hdr; 1978 kmutex_t *hash_lock; 1979 
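	/*
	 * The freeze checksum is only maintained when ZFS_DEBUG_MODIFY is
	 * set; without it there is nothing to record here.
	 */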
1980 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1981 return; 1982 1983 hash_lock = HDR_LOCK(hdr); 1984 mutex_enter(hash_lock); 1985 1986 ASSERT(HDR_HAS_L1HDR(hdr)); 1987 ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL || 1988 hdr->b_l1hdr.b_state == arc_anon); 1989 arc_cksum_compute(buf); 1990 mutex_exit(hash_lock); 1991 1992 } 1993 1994 /* 1995 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead, 1996 * the following functions should be used to ensure that the flags are 1997 * updated in a thread-safe way. When manipulating the flags either 1998 * the hash_lock must be held or the hdr must be undiscoverable. This 1999 * ensures that we're not racing with any other threads when updating 2000 * the flags. 2001 */ 2002 static inline void 2003 arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 2004 { 2005 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2006 hdr->b_flags |= flags; 2007 } 2008 2009 static inline void 2010 arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 2011 { 2012 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2013 hdr->b_flags &= ~flags; 2014 } 2015 2016 /* 2017 * Setting the compression bits in the arc_buf_hdr_t's b_flags is 2018 * done in a special way since we have to clear and set bits 2019 * at the same time. Consumers that wish to set the compression bits 2020 * must use this function to ensure that the flags are updated in 2021 * thread-safe manner. 2022 */ 2023 static void 2024 arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp) 2025 { 2026 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2027 2028 /* 2029 * Holes and embedded blocks will always have a psize = 0 so 2030 * we ignore the compression of the blkptr and set the 2031 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF. 2032 * Holes and embedded blocks remain anonymous so we don't 2033 * want to uncompress them. Mark them as uncompressed. 2034 */ 2035 if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) { 2036 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 2037 HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF); 2038 ASSERT(!HDR_COMPRESSION_ENABLED(hdr)); 2039 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 2040 } else { 2041 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 2042 HDR_SET_COMPRESS(hdr, cmp); 2043 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp); 2044 ASSERT(HDR_COMPRESSION_ENABLED(hdr)); 2045 } 2046 } 2047 2048 static int 2049 arc_decompress(arc_buf_t *buf) 2050 { 2051 arc_buf_hdr_t *hdr = buf->b_hdr; 2052 dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap; 2053 int error; 2054 2055 if (arc_buf_is_shared(buf)) { 2056 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 2057 } else if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) { 2058 /* 2059 * The arc_buf_hdr_t is either not compressed or is 2060 * associated with an embedded block or a hole in which 2061 * case they remain anonymous. 
2062 */ 2063 IMPLY(HDR_COMPRESSION_ENABLED(hdr), HDR_GET_PSIZE(hdr) == 0 || 2064 HDR_GET_PSIZE(hdr) == HDR_GET_LSIZE(hdr)); 2065 ASSERT(!HDR_SHARED_DATA(hdr)); 2066 bcopy(hdr->b_l1hdr.b_pdata, buf->b_data, HDR_GET_LSIZE(hdr)); 2067 } else { 2068 ASSERT(!HDR_SHARED_DATA(hdr)); 2069 ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr)); 2070 error = zio_decompress_data(HDR_GET_COMPRESS(hdr), 2071 hdr->b_l1hdr.b_pdata, buf->b_data, HDR_GET_PSIZE(hdr), 2072 HDR_GET_LSIZE(hdr)); 2073 if (error != 0) { 2074 zfs_dbgmsg("hdr %p, compress %d, psize %d, lsize %d", 2075 hdr, HDR_GET_COMPRESS(hdr), HDR_GET_PSIZE(hdr), 2076 HDR_GET_LSIZE(hdr)); 2077 return (SET_ERROR(EIO)); 2078 } 2079 } 2080 if (bswap != DMU_BSWAP_NUMFUNCS) { 2081 ASSERT(!HDR_SHARED_DATA(hdr)); 2082 ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS); 2083 dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr)); 2084 } 2085 arc_cksum_compute(buf); 2086 return (0); 2087 } 2088 2089 /* 2090 * Return the size of the block, b_pdata, that is stored in the arc_buf_hdr_t. 2091 */ 2092 static uint64_t 2093 arc_hdr_size(arc_buf_hdr_t *hdr) 2094 { 2095 uint64_t size; 2096 2097 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 2098 HDR_GET_PSIZE(hdr) > 0) { 2099 size = HDR_GET_PSIZE(hdr); 2100 } else { 2101 ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0); 2102 size = HDR_GET_LSIZE(hdr); 2103 } 2104 return (size); 2105 } 2106 2107 /* 2108 * Increment the amount of evictable space in the arc_state_t's refcount. 2109 * We account for the space used by the hdr and the arc buf individually 2110 * so that we can add and remove them from the refcount individually. 2111 */ 2112 static void 2113 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state) 2114 { 2115 arc_buf_contents_t type = arc_buf_type(hdr); 2116 uint64_t lsize = HDR_GET_LSIZE(hdr); 2117 2118 ASSERT(HDR_HAS_L1HDR(hdr)); 2119 2120 if (GHOST_STATE(state)) { 2121 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2122 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2123 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2124 (void) refcount_add_many(&state->arcs_esize[type], lsize, hdr); 2125 return; 2126 } 2127 2128 ASSERT(!GHOST_STATE(state)); 2129 if (hdr->b_l1hdr.b_pdata != NULL) { 2130 (void) refcount_add_many(&state->arcs_esize[type], 2131 arc_hdr_size(hdr), hdr); 2132 } 2133 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2134 buf = buf->b_next) { 2135 if (arc_buf_is_shared(buf)) { 2136 ASSERT(ARC_BUF_LAST(buf)); 2137 continue; 2138 } 2139 (void) refcount_add_many(&state->arcs_esize[type], lsize, buf); 2140 } 2141 } 2142 2143 /* 2144 * Decrement the amount of evictable space in the arc_state_t's refcount. 2145 * We account for the space used by the hdr and the arc buf individually 2146 * so that we can add and remove them from the refcount individually. 
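 *
 * For example, a hdr with a b_pdata and two private (non-shared) bufs
 * holds three entries in arcs_esize: one of arc_hdr_size(hdr) for the
 * b_pdata plus one of lsize for each buf; a buf that shares the hdr's
 * b_pdata contributes nothing of its own.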
2147 */ 2148 static void 2149 arc_evitable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state) 2150 { 2151 arc_buf_contents_t type = arc_buf_type(hdr); 2152 uint64_t lsize = HDR_GET_LSIZE(hdr); 2153 2154 ASSERT(HDR_HAS_L1HDR(hdr)); 2155 2156 if (GHOST_STATE(state)) { 2157 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2158 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2159 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2160 (void) refcount_remove_many(&state->arcs_esize[type], 2161 lsize, hdr); 2162 return; 2163 } 2164 2165 ASSERT(!GHOST_STATE(state)); 2166 if (hdr->b_l1hdr.b_pdata != NULL) { 2167 (void) refcount_remove_many(&state->arcs_esize[type], 2168 arc_hdr_size(hdr), hdr); 2169 } 2170 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2171 buf = buf->b_next) { 2172 if (arc_buf_is_shared(buf)) { 2173 ASSERT(ARC_BUF_LAST(buf)); 2174 continue; 2175 } 2176 (void) refcount_remove_many(&state->arcs_esize[type], 2177 lsize, buf); 2178 } 2179 } 2180 2181 /* 2182 * Add a reference to this hdr indicating that someone is actively 2183 * referencing that memory. When the refcount transitions from 0 to 1, 2184 * we remove it from the respective arc_state_t list to indicate that 2185 * it is not evictable. 2186 */ 2187 static void 2188 add_reference(arc_buf_hdr_t *hdr, void *tag) 2189 { 2190 ASSERT(HDR_HAS_L1HDR(hdr)); 2191 if (!MUTEX_HELD(HDR_LOCK(hdr))) { 2192 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 2193 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2194 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2195 } 2196 2197 arc_state_t *state = hdr->b_l1hdr.b_state; 2198 2199 if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && 2200 (state != arc_anon)) { 2201 /* We don't use the L2-only state list. */ 2202 if (state != arc_l2c_only) { 2203 multilist_remove(&state->arcs_list[arc_buf_type(hdr)], 2204 hdr); 2205 arc_evitable_space_decrement(hdr, state); 2206 } 2207 /* remove the prefetch flag if we get a reference */ 2208 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 2209 } 2210 } 2211 2212 /* 2213 * Remove a reference from this hdr. When the reference transitions from 2214 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's 2215 * list making it eligible for eviction. 2216 */ 2217 static int 2218 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 2219 { 2220 int cnt; 2221 arc_state_t *state = hdr->b_l1hdr.b_state; 2222 2223 ASSERT(HDR_HAS_L1HDR(hdr)); 2224 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 2225 ASSERT(!GHOST_STATE(state)); 2226 2227 /* 2228 * arc_l2c_only counts as a ghost state so we don't need to explicitly 2229 * check to prevent usage of the arc_l2c_only list. 2230 */ 2231 if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && 2232 (state != arc_anon)) { 2233 multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr); 2234 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 2235 arc_evictable_space_increment(hdr, state); 2236 } 2237 return (cnt); 2238 } 2239 2240 /* 2241 * Move the supplied buffer to the indicated state. The hash lock 2242 * for the buffer must be held by the caller. 2243 */ 2244 static void 2245 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, 2246 kmutex_t *hash_lock) 2247 { 2248 arc_state_t *old_state; 2249 int64_t refcnt; 2250 uint32_t bufcnt; 2251 boolean_t update_old, update_new; 2252 arc_buf_contents_t buftype = arc_buf_type(hdr); 2253 2254 /* 2255 * We almost always have an L1 hdr here, since we call arc_hdr_realloc() 2256 * in arc_read() when bringing a buffer out of the L2ARC. 
However, the 2257 * L1 hdr doesn't always exist when we change state to arc_anon before 2258 * destroying a header, in which case reallocating to add the L1 hdr is 2259 * pointless. 2260 */ 2261 if (HDR_HAS_L1HDR(hdr)) { 2262 old_state = hdr->b_l1hdr.b_state; 2263 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt); 2264 bufcnt = hdr->b_l1hdr.b_bufcnt; 2265 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pdata != NULL); 2266 } else { 2267 old_state = arc_l2c_only; 2268 refcnt = 0; 2269 bufcnt = 0; 2270 update_old = B_FALSE; 2271 } 2272 update_new = update_old; 2273 2274 ASSERT(MUTEX_HELD(hash_lock)); 2275 ASSERT3P(new_state, !=, old_state); 2276 ASSERT(!GHOST_STATE(new_state) || bufcnt == 0); 2277 ASSERT(old_state != arc_anon || bufcnt <= 1); 2278 2279 /* 2280 * If this buffer is evictable, transfer it from the 2281 * old state list to the new state list. 2282 */ 2283 if (refcnt == 0) { 2284 if (old_state != arc_anon && old_state != arc_l2c_only) { 2285 ASSERT(HDR_HAS_L1HDR(hdr)); 2286 multilist_remove(&old_state->arcs_list[buftype], hdr); 2287 2288 if (GHOST_STATE(old_state)) { 2289 ASSERT0(bufcnt); 2290 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2291 update_old = B_TRUE; 2292 } 2293 arc_evitable_space_decrement(hdr, old_state); 2294 } 2295 if (new_state != arc_anon && new_state != arc_l2c_only) { 2296 2297 /* 2298 * An L1 header always exists here, since if we're 2299 * moving to some L1-cached state (i.e. not l2c_only or 2300 * anonymous), we realloc the header to add an L1hdr 2301 * beforehand. 2302 */ 2303 ASSERT(HDR_HAS_L1HDR(hdr)); 2304 multilist_insert(&new_state->arcs_list[buftype], hdr); 2305 2306 if (GHOST_STATE(new_state)) { 2307 ASSERT0(bufcnt); 2308 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2309 update_new = B_TRUE; 2310 } 2311 arc_evictable_space_increment(hdr, new_state); 2312 } 2313 } 2314 2315 ASSERT(!HDR_EMPTY(hdr)); 2316 if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) 2317 buf_hash_remove(hdr); 2318 2319 /* adjust state sizes (ignore arc_l2c_only) */ 2320 2321 if (update_new && new_state != arc_l2c_only) { 2322 ASSERT(HDR_HAS_L1HDR(hdr)); 2323 if (GHOST_STATE(new_state)) { 2324 ASSERT0(bufcnt); 2325 2326 /* 2327 * When moving a header to a ghost state, we first 2328 * remove all arc buffers. Thus, we'll have a 2329 * bufcnt of zero, and no arc buffer to use for 2330 * the reference. As a result, we use the arc 2331 * header pointer for the reference. 2332 */ 2333 (void) refcount_add_many(&new_state->arcs_size, 2334 HDR_GET_LSIZE(hdr), hdr); 2335 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2336 } else { 2337 uint32_t buffers = 0; 2338 2339 /* 2340 * Each individual buffer holds a unique reference, 2341 * thus we must remove each of these references one 2342 * at a time. 2343 */ 2344 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2345 buf = buf->b_next) { 2346 ASSERT3U(bufcnt, !=, 0); 2347 buffers++; 2348 2349 /* 2350 * When the arc_buf_t is sharing the data 2351 * block with the hdr, the owner of the 2352 * reference belongs to the hdr. Only 2353 * add to the refcount if the arc_buf_t is 2354 * not shared. 
2355 */ 2356 if (arc_buf_is_shared(buf)) { 2357 ASSERT(ARC_BUF_LAST(buf)); 2358 continue; 2359 } 2360 2361 (void) refcount_add_many(&new_state->arcs_size, 2362 HDR_GET_LSIZE(hdr), buf); 2363 } 2364 ASSERT3U(bufcnt, ==, buffers); 2365 2366 if (hdr->b_l1hdr.b_pdata != NULL) { 2367 (void) refcount_add_many(&new_state->arcs_size, 2368 arc_hdr_size(hdr), hdr); 2369 } else { 2370 ASSERT(GHOST_STATE(old_state)); 2371 } 2372 } 2373 } 2374 2375 if (update_old && old_state != arc_l2c_only) { 2376 ASSERT(HDR_HAS_L1HDR(hdr)); 2377 if (GHOST_STATE(old_state)) { 2378 ASSERT0(bufcnt); 2379 2380 /* 2381 * When moving a header off of a ghost state, 2382 * the header will not contain any arc buffers. 2383 * We use the arc header pointer for the reference 2384 * which is exactly what we did when we put the 2385 * header on the ghost state. 2386 */ 2387 2388 (void) refcount_remove_many(&old_state->arcs_size, 2389 HDR_GET_LSIZE(hdr), hdr); 2390 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2391 } else { 2392 uint32_t buffers = 0; 2393 2394 /* 2395 * Each individual buffer holds a unique reference, 2396 * thus we must remove each of these references one 2397 * at a time. 2398 */ 2399 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2400 buf = buf->b_next) { 2401 ASSERT3P(bufcnt, !=, 0); 2402 buffers++; 2403 2404 /* 2405 * When the arc_buf_t is sharing the data 2406 * block with the hdr, the owner of the 2407 * reference belongs to the hdr. Only 2408 * add to the refcount if the arc_buf_t is 2409 * not shared. 2410 */ 2411 if (arc_buf_is_shared(buf)) { 2412 ASSERT(ARC_BUF_LAST(buf)); 2413 continue; 2414 } 2415 2416 (void) refcount_remove_many( 2417 &old_state->arcs_size, HDR_GET_LSIZE(hdr), 2418 buf); 2419 } 2420 ASSERT3U(bufcnt, ==, buffers); 2421 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 2422 (void) refcount_remove_many( 2423 &old_state->arcs_size, arc_hdr_size(hdr), hdr); 2424 } 2425 } 2426 2427 if (HDR_HAS_L1HDR(hdr)) 2428 hdr->b_l1hdr.b_state = new_state; 2429 2430 /* 2431 * L2 headers should never be on the L2 state list since they don't 2432 * have L1 headers allocated. 
2433 */ 2434 ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) && 2435 multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA])); 2436 } 2437 2438 void 2439 arc_space_consume(uint64_t space, arc_space_type_t type) 2440 { 2441 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2442 2443 switch (type) { 2444 case ARC_SPACE_DATA: 2445 ARCSTAT_INCR(arcstat_data_size, space); 2446 break; 2447 case ARC_SPACE_META: 2448 ARCSTAT_INCR(arcstat_metadata_size, space); 2449 break; 2450 case ARC_SPACE_OTHER: 2451 ARCSTAT_INCR(arcstat_other_size, space); 2452 break; 2453 case ARC_SPACE_HDRS: 2454 ARCSTAT_INCR(arcstat_hdr_size, space); 2455 break; 2456 case ARC_SPACE_L2HDRS: 2457 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 2458 break; 2459 } 2460 2461 if (type != ARC_SPACE_DATA) 2462 ARCSTAT_INCR(arcstat_meta_used, space); 2463 2464 atomic_add_64(&arc_size, space); 2465 } 2466 2467 void 2468 arc_space_return(uint64_t space, arc_space_type_t type) 2469 { 2470 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2471 2472 switch (type) { 2473 case ARC_SPACE_DATA: 2474 ARCSTAT_INCR(arcstat_data_size, -space); 2475 break; 2476 case ARC_SPACE_META: 2477 ARCSTAT_INCR(arcstat_metadata_size, -space); 2478 break; 2479 case ARC_SPACE_OTHER: 2480 ARCSTAT_INCR(arcstat_other_size, -space); 2481 break; 2482 case ARC_SPACE_HDRS: 2483 ARCSTAT_INCR(arcstat_hdr_size, -space); 2484 break; 2485 case ARC_SPACE_L2HDRS: 2486 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 2487 break; 2488 } 2489 2490 if (type != ARC_SPACE_DATA) { 2491 ASSERT(arc_meta_used >= space); 2492 if (arc_meta_max < arc_meta_used) 2493 arc_meta_max = arc_meta_used; 2494 ARCSTAT_INCR(arcstat_meta_used, -space); 2495 } 2496 2497 ASSERT(arc_size >= space); 2498 atomic_add_64(&arc_size, -space); 2499 } 2500 2501 /* 2502 * Allocate an initial buffer for this hdr; subsequent buffers will 2503 * use arc_buf_clone(). 2504 */ 2505 static arc_buf_t * 2506 arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag) 2507 { 2508 arc_buf_t *buf; 2509 2510 ASSERT(HDR_HAS_L1HDR(hdr)); 2511 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 2512 VERIFY(hdr->b_type == ARC_BUFC_DATA || 2513 hdr->b_type == ARC_BUFC_METADATA); 2514 2515 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2516 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2517 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2518 2519 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2520 buf->b_hdr = hdr; 2521 buf->b_data = NULL; 2522 buf->b_next = NULL; 2523 2524 add_reference(hdr, tag); 2525 2526 /* 2527 * We're about to change the hdr's b_flags. We must either 2528 * hold the hash_lock or be undiscoverable. 2529 */ 2530 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2531 2532 /* 2533 * If the hdr's data can be shared (no byteswapping, hdr is 2534 * uncompressed, hdr's data is not currently being written to the 2535 * L2ARC) then we share the data buffer and set the appropriate 2536 * bit in the hdr's b_flags to indicate the hdr is sharing its 2537 * b_pdata with the arc_buf_t. Otherwise, we allocate a new buffer to 2538 * store the buf's data.
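 *
 * For example, an uncompressed block that needs no byteswap can hand
 * its b_pdata straight to the new buf, while a hdr holding compressed
 * data cannot, since consumers always expect uncompressed data in an
 * arc_buf_t.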
2539 */ 2540 if (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS && 2541 HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF && !HDR_L2_WRITING(hdr)) { 2542 buf->b_data = hdr->b_l1hdr.b_pdata; 2543 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 2544 } else { 2545 buf->b_data = arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 2546 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); 2547 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 2548 } 2549 VERIFY3P(buf->b_data, !=, NULL); 2550 2551 hdr->b_l1hdr.b_buf = buf; 2552 hdr->b_l1hdr.b_bufcnt += 1; 2553 2554 return (buf); 2555 } 2556 2557 /* 2558 * Used when allocating additional buffers. 2559 */ 2560 static arc_buf_t * 2561 arc_buf_clone(arc_buf_t *from) 2562 { 2563 arc_buf_t *buf; 2564 arc_buf_hdr_t *hdr = from->b_hdr; 2565 uint64_t size = HDR_GET_LSIZE(hdr); 2566 2567 ASSERT(HDR_HAS_L1HDR(hdr)); 2568 ASSERT(hdr->b_l1hdr.b_state != arc_anon); 2569 2570 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2571 buf->b_hdr = hdr; 2572 buf->b_data = NULL; 2573 buf->b_next = hdr->b_l1hdr.b_buf; 2574 hdr->b_l1hdr.b_buf = buf; 2575 buf->b_data = arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 2576 bcopy(from->b_data, buf->b_data, size); 2577 hdr->b_l1hdr.b_bufcnt += 1; 2578 2579 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); 2580 return (buf); 2581 } 2582 2583 static char *arc_onloan_tag = "onloan"; 2584 2585 /* 2586 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 2587 * flight data by arc_tempreserve_space() until they are "returned". Loaned 2588 * buffers must be returned to the arc before they can be used by the DMU or 2589 * freed. 2590 */ 2591 arc_buf_t * 2592 arc_loan_buf(spa_t *spa, int size) 2593 { 2594 arc_buf_t *buf; 2595 2596 buf = arc_alloc_buf(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 2597 2598 atomic_add_64(&arc_loaned_bytes, size); 2599 return (buf); 2600 } 2601 2602 /* 2603 * Return a loaned arc buffer to the arc. 
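 *
 * A typical round trip (illustrative; 'abuf', 'db_tag' and the sizes
 * are hypothetical, and error handling is omitted):
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, size);
 *	... fill abuf->b_data ...
 *	arc_return_buf(abuf, db_tag);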
2604 */ 2605 void 2606 arc_return_buf(arc_buf_t *buf, void *tag) 2607 { 2608 arc_buf_hdr_t *hdr = buf->b_hdr; 2609 2610 ASSERT3P(buf->b_data, !=, NULL); 2611 ASSERT(HDR_HAS_L1HDR(hdr)); 2612 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); 2613 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 2614 2615 atomic_add_64(&arc_loaned_bytes, -HDR_GET_LSIZE(hdr)); 2616 } 2617 2618 /* Detach an arc_buf from a dbuf (tag) */ 2619 void 2620 arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 2621 { 2622 arc_buf_hdr_t *hdr = buf->b_hdr; 2623 2624 ASSERT3P(buf->b_data, !=, NULL); 2625 ASSERT(HDR_HAS_L1HDR(hdr)); 2626 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 2627 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag); 2628 2629 atomic_add_64(&arc_loaned_bytes, HDR_GET_LSIZE(hdr)); 2630 } 2631 2632 static void 2633 l2arc_free_data_on_write(void *data, size_t size, arc_buf_contents_t type) 2634 { 2635 l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP); 2636 2637 df->l2df_data = data; 2638 df->l2df_size = size; 2639 df->l2df_type = type; 2640 mutex_enter(&l2arc_free_on_write_mtx); 2641 list_insert_head(l2arc_free_on_write, df); 2642 mutex_exit(&l2arc_free_on_write_mtx); 2643 } 2644 2645 static void 2646 arc_hdr_free_on_write(arc_buf_hdr_t *hdr) 2647 { 2648 arc_state_t *state = hdr->b_l1hdr.b_state; 2649 arc_buf_contents_t type = arc_buf_type(hdr); 2650 uint64_t size = arc_hdr_size(hdr); 2651 2652 /* protected by hash lock, if in the hash table */ 2653 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 2654 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2655 ASSERT(state != arc_anon && state != arc_l2c_only); 2656 2657 (void) refcount_remove_many(&state->arcs_esize[type], 2658 size, hdr); 2659 } 2660 (void) refcount_remove_many(&state->arcs_size, size, hdr); 2661 if (type == ARC_BUFC_METADATA) { 2662 arc_space_return(size, ARC_SPACE_META); 2663 } else { 2664 ASSERT(type == ARC_BUFC_DATA); 2665 arc_space_return(size, ARC_SPACE_DATA); 2666 } 2667 2668 l2arc_free_data_on_write(hdr->b_l1hdr.b_pdata, size, type); 2669 } 2670 2671 /* 2672 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the 2673 * data buffer, we transfer the refcount ownership to the hdr and update 2674 * the appropriate kstats. 2675 */ 2676 static void 2677 arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2678 { 2679 arc_state_t *state = hdr->b_l1hdr.b_state; 2680 2681 ASSERT(!HDR_SHARED_DATA(hdr)); 2682 ASSERT(!arc_buf_is_shared(buf)); 2683 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2684 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2685 2686 /* 2687 * Start sharing the data buffer. We transfer the 2688 * refcount ownership to the hdr since it always owns 2689 * the refcount whenever an arc_buf_t is shared. 2690 */ 2691 refcount_transfer_ownership(&state->arcs_size, buf, hdr); 2692 hdr->b_l1hdr.b_pdata = buf->b_data; 2693 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 2694 2695 /* 2696 * Since we've transferred ownership to the hdr we need 2697 * to increment its compressed and uncompressed kstats and 2698 * decrement the overhead size. 
2699 */ 2700 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 2701 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 2702 ARCSTAT_INCR(arcstat_overhead_size, -HDR_GET_LSIZE(hdr)); 2703 } 2704 2705 static void 2706 arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2707 { 2708 arc_state_t *state = hdr->b_l1hdr.b_state; 2709 2710 ASSERT(HDR_SHARED_DATA(hdr)); 2711 ASSERT(arc_buf_is_shared(buf)); 2712 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 2713 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2714 2715 /* 2716 * We are no longer sharing this buffer so we need 2717 * to transfer its ownership to the rightful owner. 2718 */ 2719 refcount_transfer_ownership(&state->arcs_size, hdr, buf); 2720 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 2721 hdr->b_l1hdr.b_pdata = NULL; 2722 2723 /* 2724 * Since the buffer is no longer shared between 2725 * the arc buf and the hdr, count it as overhead. 2726 */ 2727 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 2728 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 2729 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); 2730 } 2731 2732 /* 2733 * Free up buf->b_data and if 'remove' is set, then pull the 2734 * arc_buf_t off of the the arc_buf_hdr_t's list and free it. 2735 */ 2736 static void 2737 arc_buf_destroy_impl(arc_buf_t *buf, boolean_t remove) 2738 { 2739 arc_buf_t **bufp; 2740 arc_buf_hdr_t *hdr = buf->b_hdr; 2741 uint64_t size = HDR_GET_LSIZE(hdr); 2742 boolean_t destroyed_buf_is_shared = arc_buf_is_shared(buf); 2743 2744 /* 2745 * Free up the data associated with the buf but only 2746 * if we're not sharing this with the hdr. If we are sharing 2747 * it with the hdr, then hdr will have performed the allocation 2748 * so allow it to do the free. 2749 */ 2750 if (buf->b_data != NULL) { 2751 /* 2752 * We're about to change the hdr's b_flags. We must either 2753 * hold the hash_lock or be undiscoverable. 2754 */ 2755 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2756 2757 arc_cksum_verify(buf); 2758 #ifdef illumos 2759 arc_buf_unwatch(buf); 2760 #endif 2761 2762 if (destroyed_buf_is_shared) { 2763 ASSERT(ARC_BUF_LAST(buf)); 2764 ASSERT(HDR_SHARED_DATA(hdr)); 2765 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 2766 } else { 2767 arc_free_data_buf(hdr, buf->b_data, size, buf); 2768 ARCSTAT_INCR(arcstat_overhead_size, -size); 2769 } 2770 buf->b_data = NULL; 2771 2772 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 2773 hdr->b_l1hdr.b_bufcnt -= 1; 2774 } 2775 2776 /* only remove the buf if requested */ 2777 if (!remove) 2778 return; 2779 2780 /* remove the buf from the hdr list */ 2781 arc_buf_t *lastbuf = NULL; 2782 bufp = &hdr->b_l1hdr.b_buf; 2783 while (*bufp != NULL) { 2784 if (*bufp == buf) 2785 *bufp = buf->b_next; 2786 2787 /* 2788 * If we've removed a buffer in the middle of 2789 * the list then update the lastbuf and update 2790 * bufp. 2791 */ 2792 if (*bufp != NULL) { 2793 lastbuf = *bufp; 2794 bufp = &(*bufp)->b_next; 2795 } 2796 } 2797 buf->b_next = NULL; 2798 ASSERT3P(lastbuf, !=, buf); 2799 2800 /* 2801 * If the current arc_buf_t is sharing its data 2802 * buffer with the hdr, then reassign the hdr's 2803 * b_pdata to share it with the new buffer at the end 2804 * of the list. The shared buffer is always the last one 2805 * on the hdr's buffer list. 
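 *
 * For example, with a buf list B2 -> B1 where B1 (the last buf) shares
 * b_pdata: destroying B1 frees the hdr's old b_pdata, and the hdr then
 * shares B2's previously private copy instead.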
2806 */ 2807 if (destroyed_buf_is_shared && lastbuf != NULL) { 2808 ASSERT(ARC_BUF_LAST(buf)); 2809 ASSERT(ARC_BUF_LAST(lastbuf)); 2810 VERIFY(!arc_buf_is_shared(lastbuf)); 2811 2812 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 2813 arc_hdr_free_pdata(hdr); 2814 2815 /* 2816 * We must setup a new shared block between the 2817 * last buffer and the hdr. The data would have 2818 * been allocated by the arc buf so we need to transfer 2819 * ownership to the hdr since it's now being shared. 2820 */ 2821 arc_share_buf(hdr, lastbuf); 2822 } else if (HDR_SHARED_DATA(hdr)) { 2823 ASSERT(arc_buf_is_shared(lastbuf)); 2824 } 2825 2826 if (hdr->b_l1hdr.b_bufcnt == 0) 2827 arc_cksum_free(hdr); 2828 2829 /* clean up the buf */ 2830 buf->b_hdr = NULL; 2831 kmem_cache_free(buf_cache, buf); 2832 } 2833 2834 static void 2835 arc_hdr_alloc_pdata(arc_buf_hdr_t *hdr) 2836 { 2837 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 2838 ASSERT(HDR_HAS_L1HDR(hdr)); 2839 ASSERT(!HDR_SHARED_DATA(hdr)); 2840 2841 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2842 hdr->b_l1hdr.b_pdata = arc_get_data_buf(hdr, arc_hdr_size(hdr), hdr); 2843 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 2844 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 2845 2846 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 2847 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 2848 } 2849 2850 static void 2851 arc_hdr_free_pdata(arc_buf_hdr_t *hdr) 2852 { 2853 ASSERT(HDR_HAS_L1HDR(hdr)); 2854 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 2855 2856 /* 2857 * If the hdr is currently being written to the l2arc then 2858 * we defer freeing the data by adding it to the l2arc_free_on_write 2859 * list. The l2arc will free the data once it's finished 2860 * writing it to the l2arc device. 2861 */ 2862 if (HDR_L2_WRITING(hdr)) { 2863 arc_hdr_free_on_write(hdr); 2864 ARCSTAT_BUMP(arcstat_l2_free_on_write); 2865 } else { 2866 arc_free_data_buf(hdr, hdr->b_l1hdr.b_pdata, 2867 arc_hdr_size(hdr), hdr); 2868 } 2869 hdr->b_l1hdr.b_pdata = NULL; 2870 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 2871 2872 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 2873 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 2874 } 2875 2876 static arc_buf_hdr_t * 2877 arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize, 2878 enum zio_compress compress, arc_buf_contents_t type) 2879 { 2880 arc_buf_hdr_t *hdr; 2881 2882 ASSERT3U(lsize, >, 0); 2883 VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA); 2884 2885 hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); 2886 ASSERT(HDR_EMPTY(hdr)); 2887 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 2888 ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL); 2889 HDR_SET_PSIZE(hdr, psize); 2890 HDR_SET_LSIZE(hdr, lsize); 2891 hdr->b_spa = spa; 2892 hdr->b_type = type; 2893 hdr->b_flags = 0; 2894 arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR); 2895 arc_hdr_set_compress(hdr, compress); 2896 2897 hdr->b_l1hdr.b_state = arc_anon; 2898 hdr->b_l1hdr.b_arc_access = 0; 2899 hdr->b_l1hdr.b_bufcnt = 0; 2900 hdr->b_l1hdr.b_buf = NULL; 2901 2902 /* 2903 * Allocate the hdr's buffer. This will contain either 2904 * the compressed or uncompressed data depending on the block 2905 * it references and compressed arc enablement. 2906 */ 2907 arc_hdr_alloc_pdata(hdr); 2908 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2909 2910 return (hdr); 2911 } 2912 2913 /* 2914 * Transition between the two allocation states for the arc_buf_hdr struct. 
2915 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without 2916 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller 2917 * version is used when a cache buffer is only in the L2ARC in order to reduce 2918 * memory usage. 2919 */ 2920 static arc_buf_hdr_t * 2921 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new) 2922 { 2923 ASSERT(HDR_HAS_L2HDR(hdr)); 2924 2925 arc_buf_hdr_t *nhdr; 2926 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 2927 2928 ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) || 2929 (old == hdr_l2only_cache && new == hdr_full_cache)); 2930 2931 nhdr = kmem_cache_alloc(new, KM_PUSHPAGE); 2932 2933 ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); 2934 buf_hash_remove(hdr); 2935 2936 bcopy(hdr, nhdr, HDR_L2ONLY_SIZE); 2937 2938 if (new == hdr_full_cache) { 2939 arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR); 2940 /* 2941 * arc_access and arc_change_state need to be aware that a 2942 * header has just come out of L2ARC, so we set its state to 2943 * l2c_only even though it's about to change. 2944 */ 2945 nhdr->b_l1hdr.b_state = arc_l2c_only; 2946 2947 /* Verify previous threads set to NULL before freeing */ 2948 ASSERT3P(nhdr->b_l1hdr.b_pdata, ==, NULL); 2949 } else { 2950 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2951 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2952 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 2953 2954 /* 2955 * If we've reached here, We must have been called from 2956 * arc_evict_hdr(), as such we should have already been 2957 * removed from any ghost list we were previously on 2958 * (which protects us from racing with arc_evict_state), 2959 * thus no locking is needed during this check. 2960 */ 2961 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 2962 2963 /* 2964 * A buffer must not be moved into the arc_l2c_only 2965 * state if it's not finished being written out to the 2966 * l2arc device. Otherwise, the b_l1hdr.b_pdata field 2967 * might try to be accessed, even though it was removed. 2968 */ 2969 VERIFY(!HDR_L2_WRITING(hdr)); 2970 VERIFY3P(hdr->b_l1hdr.b_pdata, ==, NULL); 2971 2972 #ifdef ZFS_DEBUG 2973 if (hdr->b_l1hdr.b_thawed != NULL) { 2974 kmem_free(hdr->b_l1hdr.b_thawed, 1); 2975 hdr->b_l1hdr.b_thawed = NULL; 2976 } 2977 #endif 2978 2979 arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR); 2980 } 2981 /* 2982 * The header has been reallocated so we need to re-insert it into any 2983 * lists it was on. 2984 */ 2985 (void) buf_hash_insert(nhdr, NULL); 2986 2987 ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node)); 2988 2989 mutex_enter(&dev->l2ad_mtx); 2990 2991 /* 2992 * We must place the realloc'ed header back into the list at 2993 * the same spot. Otherwise, if it's placed earlier in the list, 2994 * l2arc_write_buffers() could find it during the function's 2995 * write phase, and try to write it out to the l2arc. 2996 */ 2997 list_insert_after(&dev->l2ad_buflist, hdr, nhdr); 2998 list_remove(&dev->l2ad_buflist, hdr); 2999 3000 mutex_exit(&dev->l2ad_mtx); 3001 3002 /* 3003 * Since we're using the pointer address as the tag when 3004 * incrementing and decrementing the l2ad_alloc refcount, we 3005 * must remove the old pointer (that we're about to destroy) and 3006 * add the new pointer to the refcount. Otherwise we'd remove 3007 * the wrong pointer address when calling arc_hdr_destroy() later. 
3008 */ 3009 3010 (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); 3011 (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr); 3012 3013 buf_discard_identity(hdr); 3014 kmem_cache_free(old, hdr); 3015 3016 return (nhdr); 3017 } 3018 3019 /* 3020 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller. 3021 * The buf is returned thawed since we expect the consumer to modify it. 3022 */ 3023 arc_buf_t * 3024 arc_alloc_buf(spa_t *spa, int32_t size, void *tag, arc_buf_contents_t type) 3025 { 3026 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size, 3027 ZIO_COMPRESS_OFF, type); 3028 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3029 arc_buf_t *buf = arc_buf_alloc_impl(hdr, tag); 3030 arc_buf_thaw(buf); 3031 return (buf); 3032 } 3033 3034 static void 3035 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr) 3036 { 3037 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; 3038 l2arc_dev_t *dev = l2hdr->b_dev; 3039 uint64_t asize = arc_hdr_size(hdr); 3040 3041 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 3042 ASSERT(HDR_HAS_L2HDR(hdr)); 3043 3044 list_remove(&dev->l2ad_buflist, hdr); 3045 3046 ARCSTAT_INCR(arcstat_l2_asize, -asize); 3047 ARCSTAT_INCR(arcstat_l2_size, -HDR_GET_LSIZE(hdr)); 3048 3049 vdev_space_update(dev->l2ad_vdev, -asize, 0, 0); 3050 3051 (void) refcount_remove_many(&dev->l2ad_alloc, asize, hdr); 3052 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 3053 } 3054 3055 static void 3056 arc_hdr_destroy(arc_buf_hdr_t *hdr) 3057 { 3058 if (HDR_HAS_L1HDR(hdr)) { 3059 ASSERT(hdr->b_l1hdr.b_buf == NULL || 3060 hdr->b_l1hdr.b_bufcnt > 0); 3061 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3062 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3063 } 3064 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3065 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 3066 3067 if (!HDR_EMPTY(hdr)) 3068 buf_discard_identity(hdr); 3069 3070 if (HDR_HAS_L2HDR(hdr)) { 3071 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 3072 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx); 3073 3074 if (!buflist_held) 3075 mutex_enter(&dev->l2ad_mtx); 3076 3077 /* 3078 * Even though we checked this conditional above, we 3079 * need to check this again now that we have the 3080 * l2ad_mtx. This is because we could be racing with 3081 * another thread calling l2arc_evict() which might have 3082 * destroyed this header's L2 portion as we were waiting 3083 * to acquire the l2ad_mtx. If that happens, we don't 3084 * want to re-destroy the header's L2 portion. 
3085 */ 3086 if (HDR_HAS_L2HDR(hdr)) { 3087 l2arc_trim(hdr); 3088 arc_hdr_l2hdr_destroy(hdr); 3089 } 3090 3091 if (!buflist_held) 3092 mutex_exit(&dev->l2ad_mtx); 3093 } 3094 3095 if (HDR_HAS_L1HDR(hdr)) { 3096 arc_cksum_free(hdr); 3097 3098 while (hdr->b_l1hdr.b_buf != NULL) 3099 arc_buf_destroy_impl(hdr->b_l1hdr.b_buf, B_TRUE); 3100 3101 #ifdef ZFS_DEBUG 3102 if (hdr->b_l1hdr.b_thawed != NULL) { 3103 kmem_free(hdr->b_l1hdr.b_thawed, 1); 3104 hdr->b_l1hdr.b_thawed = NULL; 3105 } 3106 #endif 3107 3108 if (hdr->b_l1hdr.b_pdata != NULL) { 3109 arc_hdr_free_pdata(hdr); 3110 } 3111 } 3112 3113 ASSERT3P(hdr->b_hash_next, ==, NULL); 3114 if (HDR_HAS_L1HDR(hdr)) { 3115 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 3116 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 3117 kmem_cache_free(hdr_full_cache, hdr); 3118 } else { 3119 kmem_cache_free(hdr_l2only_cache, hdr); 3120 } 3121 } 3122 3123 void 3124 arc_buf_destroy(arc_buf_t *buf, void* tag) 3125 { 3126 arc_buf_hdr_t *hdr = buf->b_hdr; 3127 kmutex_t *hash_lock = HDR_LOCK(hdr); 3128 3129 if (hdr->b_l1hdr.b_state == arc_anon) { 3130 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 3131 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3132 VERIFY0(remove_reference(hdr, NULL, tag)); 3133 arc_hdr_destroy(hdr); 3134 return; 3135 } 3136 3137 mutex_enter(hash_lock); 3138 ASSERT3P(hdr, ==, buf->b_hdr); 3139 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 3140 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3141 ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon); 3142 ASSERT3P(buf->b_data, !=, NULL); 3143 3144 (void) remove_reference(hdr, hash_lock, tag); 3145 arc_buf_destroy_impl(buf, B_TRUE); 3146 mutex_exit(hash_lock); 3147 } 3148 3149 int32_t 3150 arc_buf_size(arc_buf_t *buf) 3151 { 3152 return (HDR_GET_LSIZE(buf->b_hdr)); 3153 } 3154 3155 /* 3156 * Evict the arc_buf_hdr that is provided as a parameter. The resultant 3157 * state of the header is dependent on its state prior to entering this 3158 * function. The following transitions are possible: 3159 * 3160 * - arc_mru -> arc_mru_ghost 3161 * - arc_mfu -> arc_mfu_ghost 3162 * - arc_mru_ghost -> arc_l2c_only 3163 * - arc_mru_ghost -> deleted 3164 * - arc_mfu_ghost -> arc_l2c_only 3165 * - arc_mfu_ghost -> deleted 3166 */ 3167 static int64_t 3168 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 3169 { 3170 arc_state_t *evicted_state, *state; 3171 int64_t bytes_evicted = 0; 3172 3173 ASSERT(MUTEX_HELD(hash_lock)); 3174 ASSERT(HDR_HAS_L1HDR(hdr)); 3175 3176 state = hdr->b_l1hdr.b_state; 3177 if (GHOST_STATE(state)) { 3178 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3179 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 3180 3181 /* 3182 * l2arc_write_buffers() relies on a header's L1 portion 3183 * (i.e. its b_pdata field) during its write phase. 3184 * Thus, we cannot push a header onto the arc_l2c_only 3185 * state (removing it's L1 piece) until the header is 3186 * done being written to the l2arc. 3187 */ 3188 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) { 3189 ARCSTAT_BUMP(arcstat_evict_l2_skip); 3190 return (bytes_evicted); 3191 } 3192 3193 ARCSTAT_BUMP(arcstat_deleted); 3194 bytes_evicted += HDR_GET_LSIZE(hdr); 3195 3196 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr); 3197 3198 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 3199 if (HDR_HAS_L2HDR(hdr)) { 3200 ASSERT(hdr->b_l1hdr.b_pdata == NULL); 3201 /* 3202 * This buffer is cached on the 2nd Level ARC; 3203 * don't destroy the header. 3204 */ 3205 arc_change_state(arc_l2c_only, hdr, hash_lock); 3206 /* 3207 * dropping from L1+L2 cached to L2-only, 3208 * realloc to remove the L1 header. 
3209 */ 3210 hdr = arc_hdr_realloc(hdr, hdr_full_cache, 3211 hdr_l2only_cache); 3212 } else { 3213 ASSERT(hdr->b_l1hdr.b_pdata == NULL); 3214 arc_change_state(arc_anon, hdr, hash_lock); 3215 arc_hdr_destroy(hdr); 3216 } 3217 return (bytes_evicted); 3218 } 3219 3220 ASSERT(state == arc_mru || state == arc_mfu); 3221 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3222 3223 /* prefetch buffers have a minimum lifespan */ 3224 if (HDR_IO_IN_PROGRESS(hdr) || 3225 ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) && 3226 ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < 3227 arc_min_prefetch_lifespan)) { 3228 ARCSTAT_BUMP(arcstat_evict_skip); 3229 return (bytes_evicted); 3230 } 3231 3232 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 3233 while (hdr->b_l1hdr.b_buf) { 3234 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 3235 if (!mutex_tryenter(&buf->b_evict_lock)) { 3236 ARCSTAT_BUMP(arcstat_mutex_miss); 3237 break; 3238 } 3239 if (buf->b_data != NULL) 3240 bytes_evicted += HDR_GET_LSIZE(hdr); 3241 mutex_exit(&buf->b_evict_lock); 3242 arc_buf_destroy_impl(buf, B_TRUE); 3243 } 3244 3245 if (HDR_HAS_L2HDR(hdr)) { 3246 ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr)); 3247 } else { 3248 if (l2arc_write_eligible(hdr->b_spa, hdr)) { 3249 ARCSTAT_INCR(arcstat_evict_l2_eligible, 3250 HDR_GET_LSIZE(hdr)); 3251 } else { 3252 ARCSTAT_INCR(arcstat_evict_l2_ineligible, 3253 HDR_GET_LSIZE(hdr)); 3254 } 3255 } 3256 3257 if (hdr->b_l1hdr.b_bufcnt == 0) { 3258 arc_cksum_free(hdr); 3259 3260 bytes_evicted += arc_hdr_size(hdr); 3261 3262 /* 3263 * If this hdr is being evicted and has a compressed 3264 * buffer then we discard it here before we change states. 3265 * This ensures that the accounting is updated correctly 3266 * in arc_free_data_buf(). 3267 */ 3268 arc_hdr_free_pdata(hdr); 3269 3270 arc_change_state(evicted_state, hdr, hash_lock); 3271 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3272 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 3273 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); 3274 } 3275 3276 return (bytes_evicted); 3277 } 3278 3279 static uint64_t 3280 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, 3281 uint64_t spa, int64_t bytes) 3282 { 3283 multilist_sublist_t *mls; 3284 uint64_t bytes_evicted = 0; 3285 arc_buf_hdr_t *hdr; 3286 kmutex_t *hash_lock; 3287 int evict_count = 0; 3288 3289 ASSERT3P(marker, !=, NULL); 3290 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 3291 3292 mls = multilist_sublist_lock(ml, idx); 3293 3294 for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL; 3295 hdr = multilist_sublist_prev(mls, marker)) { 3296 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) || 3297 (evict_count >= zfs_arc_evict_batch_limit)) 3298 break; 3299 3300 /* 3301 * To keep our iteration location, move the marker 3302 * forward. Since we're not holding hdr's hash lock, we 3303 * must be very careful and not remove 'hdr' from the 3304 * sublist. Otherwise, other consumers might mistake the 3305 * 'hdr' as not being on a sublist when they call the 3306 * multilist_link_active() function (they all rely on 3307 * the hash lock protecting concurrent insertions and 3308 * removals). multilist_sublist_move_forward() was 3309 * specifically implemented to ensure this is the case 3310 * (only 'marker' will be removed and re-inserted). 3311 */ 3312 multilist_sublist_move_forward(mls, marker); 3313 3314 /* 3315 * The only case where the b_spa field should ever be 3316 * zero, is the marker headers inserted by 3317 * arc_evict_state(). 
It's possible for multiple threads 3318 * to be calling arc_evict_state() concurrently (e.g. 3319 * dsl_pool_close() and zio_inject_fault()), so we must 3320 * skip any markers we see from these other threads. 3321 */ 3322 if (hdr->b_spa == 0) 3323 continue; 3324 3325 /* we're only interested in evicting buffers of a certain spa */ 3326 if (spa != 0 && hdr->b_spa != spa) { 3327 ARCSTAT_BUMP(arcstat_evict_skip); 3328 continue; 3329 } 3330 3331 hash_lock = HDR_LOCK(hdr); 3332 3333 /* 3334 * We aren't calling this function from any code path 3335 * that would already be holding a hash lock, so we're 3336 * asserting on this assumption to be defensive in case 3337 * this ever changes. Without this check, it would be 3338 * possible to incorrectly increment arcstat_mutex_miss 3339 * below (e.g. if the code changed such that we called 3340 * this function with a hash lock held). 3341 */ 3342 ASSERT(!MUTEX_HELD(hash_lock)); 3343 3344 if (mutex_tryenter(hash_lock)) { 3345 uint64_t evicted = arc_evict_hdr(hdr, hash_lock); 3346 mutex_exit(hash_lock); 3347 3348 bytes_evicted += evicted; 3349 3350 /* 3351 * If evicted is zero, arc_evict_hdr() must have 3352 * decided to skip this header, don't increment 3353 * evict_count in this case. 3354 */ 3355 if (evicted != 0) 3356 evict_count++; 3357 3358 /* 3359 * If arc_size isn't overflowing, signal any 3360 * threads that might happen to be waiting. 3361 * 3362 * For each header evicted, we wake up a single 3363 * thread. If we used cv_broadcast, we could 3364 * wake up "too many" threads causing arc_size 3365 * to significantly overflow arc_c; since 3366 * arc_get_data_buf() doesn't check for overflow 3367 * when it's woken up (it doesn't because it's 3368 * possible for the ARC to be overflowing while 3369 * full of un-evictable buffers, and the 3370 * function should proceed in this case). 3371 * 3372 * If threads are left sleeping, due to not 3373 * using cv_broadcast, they will be woken up 3374 * just before arc_reclaim_thread() sleeps. 3375 */ 3376 mutex_enter(&arc_reclaim_lock); 3377 if (!arc_is_overflowing()) 3378 cv_signal(&arc_reclaim_waiters_cv); 3379 mutex_exit(&arc_reclaim_lock); 3380 } else { 3381 ARCSTAT_BUMP(arcstat_mutex_miss); 3382 } 3383 } 3384 3385 multilist_sublist_unlock(mls); 3386 3387 return (bytes_evicted); 3388 } 3389 3390 /* 3391 * Evict buffers from the given arc state, until we've removed the 3392 * specified number of bytes. Move the removed buffers to the 3393 * appropriate evict state. 3394 * 3395 * This function makes a "best effort". It skips over any buffers 3396 * it can't get a hash_lock on, and so, may not catch all candidates. 3397 * It may also return without evicting as much space as requested. 3398 * 3399 * If bytes is specified using the special value ARC_EVICT_ALL, this 3400 * will evict all available (i.e. unlocked and evictable) buffers from 3401 * the given arc state; which is used by arc_flush(). 3402 */ 3403 static uint64_t 3404 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes, 3405 arc_buf_contents_t type) 3406 { 3407 uint64_t total_evicted = 0; 3408 multilist_t *ml = &state->arcs_list[type]; 3409 int num_sublists; 3410 arc_buf_hdr_t **markers; 3411 3412 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 3413 3414 num_sublists = multilist_get_num_sublists(ml); 3415 3416 /* 3417 * If we've tried to evict from each sublist, made some 3418 * progress, but still have not hit the target number of bytes 3419 * to evict, we want to keep trying. 
The markers allow us to 3420 * pick up where we left off for each individual sublist, rather 3421 * than starting from the tail each time. 3422 */ 3423 markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP); 3424 for (int i = 0; i < num_sublists; i++) { 3425 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP); 3426 3427 /* 3428 * A b_spa of 0 is used to indicate that this header is 3429 * a marker. This fact is used in arc_adjust_type() and 3430 * arc_evict_state_impl(). 3431 */ 3432 markers[i]->b_spa = 0; 3433 3434 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 3435 multilist_sublist_insert_tail(mls, markers[i]); 3436 multilist_sublist_unlock(mls); 3437 } 3438 3439 /* 3440 * While we haven't hit our target number of bytes to evict, or 3441 * we're evicting all available buffers. 3442 */ 3443 while (total_evicted < bytes || bytes == ARC_EVICT_ALL) { 3444 /* 3445 * Start eviction using a randomly selected sublist, 3446 * this is to try and evenly balance eviction across all 3447 * sublists. Always starting at the same sublist 3448 * (e.g. index 0) would cause evictions to favor certain 3449 * sublists over others. 3450 */ 3451 int sublist_idx = multilist_get_random_index(ml); 3452 uint64_t scan_evicted = 0; 3453 3454 for (int i = 0; i < num_sublists; i++) { 3455 uint64_t bytes_remaining; 3456 uint64_t bytes_evicted; 3457 3458 if (bytes == ARC_EVICT_ALL) 3459 bytes_remaining = ARC_EVICT_ALL; 3460 else if (total_evicted < bytes) 3461 bytes_remaining = bytes - total_evicted; 3462 else 3463 break; 3464 3465 bytes_evicted = arc_evict_state_impl(ml, sublist_idx, 3466 markers[sublist_idx], spa, bytes_remaining); 3467 3468 scan_evicted += bytes_evicted; 3469 total_evicted += bytes_evicted; 3470 3471 /* we've reached the end, wrap to the beginning */ 3472 if (++sublist_idx >= num_sublists) 3473 sublist_idx = 0; 3474 } 3475 3476 /* 3477 * If we didn't evict anything during this scan, we have 3478 * no reason to believe we'll evict more during another 3479 * scan, so break the loop. 3480 */ 3481 if (scan_evicted == 0) { 3482 /* This isn't possible, let's make that obvious */ 3483 ASSERT3S(bytes, !=, 0); 3484 3485 /* 3486 * When bytes is ARC_EVICT_ALL, the only way to 3487 * break the loop is when scan_evicted is zero. 3488 * In that case, we actually have evicted enough, 3489 * so we don't want to increment the kstat. 3490 */ 3491 if (bytes != ARC_EVICT_ALL) { 3492 ASSERT3S(total_evicted, <, bytes); 3493 ARCSTAT_BUMP(arcstat_evict_not_enough); 3494 } 3495 3496 break; 3497 } 3498 } 3499 3500 for (int i = 0; i < num_sublists; i++) { 3501 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 3502 multilist_sublist_remove(mls, markers[i]); 3503 multilist_sublist_unlock(mls); 3504 3505 kmem_cache_free(hdr_full_cache, markers[i]); 3506 } 3507 kmem_free(markers, sizeof (*markers) * num_sublists); 3508 3509 return (total_evicted); 3510 } 3511 3512 /* 3513 * Flush all "evictable" data of the given type from the arc state 3514 * specified. This will not evict any "active" buffers (i.e. referenced). 3515 * 3516 * When 'retry' is set to B_FALSE, the function will make a single pass 3517 * over the state and evict any buffers that it can. Since it doesn't 3518 * continually retry the eviction, it might end up leaving some buffers 3519 * in the ARC due to lock misses. 3520 * 3521 * When 'retry' is set to B_TRUE, the function will continually retry the 3522 * eviction until *all* evictable buffers have been removed from the 3523 * state. 
As a result, if concurrent insertions into the state are 3524 * allowed (e.g. if the ARC isn't shutting down), this function might 3525 * wind up in an infinite loop, continually trying to evict buffers. 3526 */ 3527 static uint64_t 3528 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type, 3529 boolean_t retry) 3530 { 3531 uint64_t evicted = 0; 3532 3533 while (refcount_count(&state->arcs_esize[type]) != 0) { 3534 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type); 3535 3536 if (!retry) 3537 break; 3538 } 3539 3540 return (evicted); 3541 } 3542 3543 /* 3544 * Evict the specified number of bytes from the state specified, 3545 * restricting eviction to the spa and type given. This function 3546 * prevents us from trying to evict more from a state's list than 3547 * is "evictable", and to skip evicting altogether when passed a 3548 * negative value for "bytes". In contrast, arc_evict_state() will 3549 * evict everything it can, when passed a negative value for "bytes". 3550 */ 3551 static uint64_t 3552 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes, 3553 arc_buf_contents_t type) 3554 { 3555 int64_t delta; 3556 3557 if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) { 3558 delta = MIN(refcount_count(&state->arcs_esize[type]), bytes); 3559 return (arc_evict_state(state, spa, delta, type)); 3560 } 3561 3562 return (0); 3563 } 3564 3565 /* 3566 * Evict metadata buffers from the cache, such that arc_meta_used is 3567 * capped by the arc_meta_limit tunable. 3568 */ 3569 static uint64_t 3570 arc_adjust_meta(void) 3571 { 3572 uint64_t total_evicted = 0; 3573 int64_t target; 3574 3575 /* 3576 * If we're over the meta limit, we want to evict enough 3577 * metadata to get back under the meta limit. We don't want to 3578 * evict so much that we drop the MRU below arc_p, though. If 3579 * we're over the meta limit more than we're over arc_p, we 3580 * evict some from the MRU here, and some from the MFU below. 3581 */ 3582 target = MIN((int64_t)(arc_meta_used - arc_meta_limit), 3583 (int64_t)(refcount_count(&arc_anon->arcs_size) + 3584 refcount_count(&arc_mru->arcs_size) - arc_p)); 3585 3586 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 3587 3588 /* 3589 * Similar to the above, we want to evict enough bytes to get us 3590 * below the meta limit, but not so much as to drop us below the 3591 * space alloted to the MFU (which is defined as arc_c - arc_p). 3592 */ 3593 target = MIN((int64_t)(arc_meta_used - arc_meta_limit), 3594 (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p))); 3595 3596 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 3597 3598 return (total_evicted); 3599 } 3600 3601 /* 3602 * Return the type of the oldest buffer in the given arc state 3603 * 3604 * This function will select a random sublist of type ARC_BUFC_DATA and 3605 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist 3606 * is compared, and the type which contains the "older" buffer will be 3607 * returned. 
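 *
 * An illustrative example (hypothetical lbolt values): if the tail of
 * the selected data sublist was last accessed at lbolt 1000 and the
 * tail of the metadata sublist at lbolt 1500, the data buffer is the
 * older of the two, so ARC_BUFC_DATA is returned and the caller will
 * try to evict data before metadata.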
3608 */ 3609 static arc_buf_contents_t 3610 arc_adjust_type(arc_state_t *state) 3611 { 3612 multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA]; 3613 multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA]; 3614 int data_idx = multilist_get_random_index(data_ml); 3615 int meta_idx = multilist_get_random_index(meta_ml); 3616 multilist_sublist_t *data_mls; 3617 multilist_sublist_t *meta_mls; 3618 arc_buf_contents_t type; 3619 arc_buf_hdr_t *data_hdr; 3620 arc_buf_hdr_t *meta_hdr; 3621 3622 /* 3623 * We keep the sublist lock until we're finished, to prevent 3624 * the headers from being destroyed via arc_evict_state(). 3625 */ 3626 data_mls = multilist_sublist_lock(data_ml, data_idx); 3627 meta_mls = multilist_sublist_lock(meta_ml, meta_idx); 3628 3629 /* 3630 * These two loops are to ensure we skip any markers that 3631 * might be at the tail of the lists due to arc_evict_state(). 3632 */ 3633 3634 for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; 3635 data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { 3636 if (data_hdr->b_spa != 0) 3637 break; 3638 } 3639 3640 for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; 3641 meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { 3642 if (meta_hdr->b_spa != 0) 3643 break; 3644 } 3645 3646 if (data_hdr == NULL && meta_hdr == NULL) { 3647 type = ARC_BUFC_DATA; 3648 } else if (data_hdr == NULL) { 3649 ASSERT3P(meta_hdr, !=, NULL); 3650 type = ARC_BUFC_METADATA; 3651 } else if (meta_hdr == NULL) { 3652 ASSERT3P(data_hdr, !=, NULL); 3653 type = ARC_BUFC_DATA; 3654 } else { 3655 ASSERT3P(data_hdr, !=, NULL); 3656 ASSERT3P(meta_hdr, !=, NULL); 3657 3658 /* The headers can't be on the sublist without an L1 header */ 3659 ASSERT(HDR_HAS_L1HDR(data_hdr)); 3660 ASSERT(HDR_HAS_L1HDR(meta_hdr)); 3661 3662 if (data_hdr->b_l1hdr.b_arc_access < 3663 meta_hdr->b_l1hdr.b_arc_access) { 3664 type = ARC_BUFC_DATA; 3665 } else { 3666 type = ARC_BUFC_METADATA; 3667 } 3668 } 3669 3670 multilist_sublist_unlock(meta_mls); 3671 multilist_sublist_unlock(data_mls); 3672 3673 return (type); 3674 } 3675 3676 /* 3677 * Evict buffers from the cache, such that arc_size is capped by arc_c. 3678 */ 3679 static uint64_t 3680 arc_adjust(void) 3681 { 3682 uint64_t total_evicted = 0; 3683 uint64_t bytes; 3684 int64_t target; 3685 3686 /* 3687 * If we're over arc_meta_limit, we want to correct that before 3688 * potentially evicting data buffers below. 3689 */ 3690 total_evicted += arc_adjust_meta(); 3691 3692 /* 3693 * Adjust MRU size 3694 * 3695 * If we're over the target cache size, we want to evict enough 3696 * from the list to get back to our target size. We don't want 3697 * to evict too much from the MRU, such that it drops below 3698 * arc_p. So, if we're over our target cache size more than 3699 * the MRU is over arc_p, we'll evict enough to get back to 3700 * arc_p here, and then evict more from the MFU below. 3701 */ 3702 target = MIN((int64_t)(arc_size - arc_c), 3703 (int64_t)(refcount_count(&arc_anon->arcs_size) + 3704 refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p)); 3705 3706 /* 3707 * If we're below arc_meta_min, always prefer to evict data. 3708 * Otherwise, try to satisfy the requested number of bytes to 3709 * evict from the type which contains older buffers; in an 3710 * effort to keep newer buffers in the cache regardless of their 3711 * type. If we cannot satisfy the number of bytes from this 3712 * type, spill over into the next type. 
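	 *
	 * An illustrative sketch (hypothetical sizes): if the target
	 * computed above is 64M and the older type is metadata, but only
	 * 40M of MRU metadata is evictable, the first arc_adjust_impl()
	 * call below returns roughly 40M, target drops to 24M, and the
	 * second call takes the remaining 24M from MRU data.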
3713	 */
3714	if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
3715	    arc_meta_used > arc_meta_min) {
3716		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
3717		total_evicted += bytes;
3718
3719		/*
3720		 * If we couldn't evict our target number of bytes from
3721		 * metadata, we try to get the rest from data.
3722		 */
3723		target -= bytes;
3724
3725		total_evicted +=
3726		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
3727	} else {
3728		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
3729		total_evicted += bytes;
3730
3731		/*
3732		 * If we couldn't evict our target number of bytes from
3733		 * data, we try to get the rest from metadata.
3734		 */
3735		target -= bytes;
3736
3737		total_evicted +=
3738		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
3739	}
3740
3741	/*
3742	 * Adjust MFU size
3743	 *
3744	 * Now that we've tried to evict enough from the MRU to get its
3745	 * size back to arc_p, if we're still above the target cache
3746	 * size, we evict the rest from the MFU.
3747	 */
3748	target = arc_size - arc_c;
3749
3750	if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
3751	    arc_meta_used > arc_meta_min) {
3752		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
3753		total_evicted += bytes;
3754
3755		/*
3756		 * If we couldn't evict our target number of bytes from
3757		 * metadata, we try to get the rest from data.
3758		 */
3759		target -= bytes;
3760
3761		total_evicted +=
3762		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
3763	} else {
3764		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
3765		total_evicted += bytes;
3766
3767		/*
3768		 * If we couldn't evict our target number of bytes from
3769		 * data, we try to get the rest from metadata.
3770		 */
3771		target -= bytes;
3772
3773		total_evicted +=
3774		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
3775	}
3776
3777	/*
3778	 * Adjust ghost lists
3779	 *
3780	 * In addition to the above, the ARC also defines target values
3781	 * for the ghost lists. The sum of the mru list and mru ghost
3782	 * list should never exceed the target size of the cache, and
3783	 * the sum of the mru list, mfu list, mru ghost list, and mfu
3784	 * ghost list should never exceed twice the target size of the
3785	 * cache. The following logic enforces these limits on the ghost
3786	 * caches, and evicts from them as needed.
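	 *
	 * A worked example (hypothetical sizes): with arc_c at 1024M, an
	 * MRU list of 700M and an MRU ghost list of 500M, the first
	 * target below is 700M + 500M - 1024M = 176M, so about 176M
	 * worth of entries are trimmed from the MRU ghost list to
	 * restore mru + mru ghost <= arc_c.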
3787 */ 3788 target = refcount_count(&arc_mru->arcs_size) + 3789 refcount_count(&arc_mru_ghost->arcs_size) - arc_c; 3790 3791 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA); 3792 total_evicted += bytes; 3793 3794 target -= bytes; 3795 3796 total_evicted += 3797 arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA); 3798 3799 /* 3800 * We assume the sum of the mru list and mfu list is less than 3801 * or equal to arc_c (we enforced this above), which means we 3802 * can use the simpler of the two equations below: 3803 * 3804 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c 3805 * mru ghost + mfu ghost <= arc_c 3806 */ 3807 target = refcount_count(&arc_mru_ghost->arcs_size) + 3808 refcount_count(&arc_mfu_ghost->arcs_size) - arc_c; 3809 3810 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA); 3811 total_evicted += bytes; 3812 3813 target -= bytes; 3814 3815 total_evicted += 3816 arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA); 3817 3818 return (total_evicted); 3819 } 3820 3821 void 3822 arc_flush(spa_t *spa, boolean_t retry) 3823 { 3824 uint64_t guid = 0; 3825 3826 /* 3827 * If retry is B_TRUE, a spa must not be specified since we have 3828 * no good way to determine if all of a spa's buffers have been 3829 * evicted from an arc state. 3830 */ 3831 ASSERT(!retry || spa == 0); 3832 3833 if (spa != NULL) 3834 guid = spa_load_guid(spa); 3835 3836 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry); 3837 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry); 3838 3839 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry); 3840 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry); 3841 3842 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry); 3843 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry); 3844 3845 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry); 3846 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry); 3847 } 3848 3849 void 3850 arc_shrink(int64_t to_free) 3851 { 3852 if (arc_c > arc_c_min) { 3853 DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t, 3854 arc_c_min, uint64_t, arc_p, uint64_t, to_free); 3855 if (arc_c > arc_c_min + to_free) 3856 atomic_add_64(&arc_c, -to_free); 3857 else 3858 arc_c = arc_c_min; 3859 3860 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 3861 if (arc_c > arc_size) 3862 arc_c = MAX(arc_size, arc_c_min); 3863 if (arc_p > arc_c) 3864 arc_p = (arc_c >> 1); 3865 3866 DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t, 3867 arc_p); 3868 3869 ASSERT(arc_c >= arc_c_min); 3870 ASSERT((int64_t)arc_p >= 0); 3871 } 3872 3873 if (arc_size > arc_c) { 3874 DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size, 3875 uint64_t, arc_c); 3876 (void) arc_adjust(); 3877 } 3878 } 3879 3880 static long needfree = 0; 3881 3882 typedef enum free_memory_reason_t { 3883 FMR_UNKNOWN, 3884 FMR_NEEDFREE, 3885 FMR_LOTSFREE, 3886 FMR_SWAPFS_MINFREE, 3887 FMR_PAGES_PP_MAXIMUM, 3888 FMR_HEAP_ARENA, 3889 FMR_ZIO_ARENA, 3890 FMR_ZIO_FRAG, 3891 } free_memory_reason_t; 3892 3893 int64_t last_free_memory; 3894 free_memory_reason_t last_free_reason; 3895 3896 /* 3897 * Additional reserve of pages for pp_reserve. 3898 */ 3899 int64_t arc_pages_pp_reserve = 64; 3900 3901 /* 3902 * Additional reserve of pages for swapfs. 3903 */ 3904 int64_t arc_swapfs_reserve = 64; 3905 3906 /* 3907 * Return the amount of memory that can be consumed before reclaim will be 3908 * needed. 
Positive if there is sufficient free memory, negative indicates 3909 * the amount of memory that needs to be freed up. 3910 */ 3911 static int64_t 3912 arc_available_memory(void) 3913 { 3914 int64_t lowest = INT64_MAX; 3915 int64_t n; 3916 free_memory_reason_t r = FMR_UNKNOWN; 3917 3918 #ifdef _KERNEL 3919 if (needfree > 0) { 3920 n = PAGESIZE * (-needfree); 3921 if (n < lowest) { 3922 lowest = n; 3923 r = FMR_NEEDFREE; 3924 } 3925 } 3926 3927 /* 3928 * Cooperate with pagedaemon when it's time for it to scan 3929 * and reclaim some pages. 3930 */ 3931 n = PAGESIZE * ((int64_t)freemem - zfs_arc_free_target); 3932 if (n < lowest) { 3933 lowest = n; 3934 r = FMR_LOTSFREE; 3935 } 3936 3937 #ifdef illumos 3938 /* 3939 * check that we're out of range of the pageout scanner. It starts to 3940 * schedule paging if freemem is less than lotsfree and needfree. 3941 * lotsfree is the high-water mark for pageout, and needfree is the 3942 * number of needed free pages. We add extra pages here to make sure 3943 * the scanner doesn't start up while we're freeing memory. 3944 */ 3945 n = PAGESIZE * (freemem - lotsfree - needfree - desfree); 3946 if (n < lowest) { 3947 lowest = n; 3948 r = FMR_LOTSFREE; 3949 } 3950 3951 /* 3952 * check to make sure that swapfs has enough space so that anon 3953 * reservations can still succeed. anon_resvmem() checks that the 3954 * availrmem is greater than swapfs_minfree, and the number of reserved 3955 * swap pages. We also add a bit of extra here just to prevent 3956 * circumstances from getting really dire. 3957 */ 3958 n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve - 3959 desfree - arc_swapfs_reserve); 3960 if (n < lowest) { 3961 lowest = n; 3962 r = FMR_SWAPFS_MINFREE; 3963 } 3964 3965 3966 /* 3967 * Check that we have enough availrmem that memory locking (e.g., via 3968 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum 3969 * stores the number of pages that cannot be locked; when availrmem 3970 * drops below pages_pp_maximum, page locking mechanisms such as 3971 * page_pp_lock() will fail.) 3972 */ 3973 n = PAGESIZE * (availrmem - pages_pp_maximum - 3974 arc_pages_pp_reserve); 3975 if (n < lowest) { 3976 lowest = n; 3977 r = FMR_PAGES_PP_MAXIMUM; 3978 } 3979 3980 #endif /* illumos */ 3981 #if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC) 3982 /* 3983 * If we're on an i386 platform, it's possible that we'll exhaust the 3984 * kernel heap space before we ever run out of available physical 3985 * memory. Most checks of the size of the heap_area compare against 3986 * tune.t_minarmem, which is the minimum available real memory that we 3987 * can have in the system. However, this is generally fixed at 25 pages 3988 * which is so low that it's useless. In this comparison, we seek to 3989 * calculate the total heap-size, and reclaim if more than 3/4ths of the 3990 * heap is allocated. (Or, in the calculation, if less than 1/4th is 3991 * free) 3992 */ 3993 n = (int64_t)vmem_size(heap_arena, VMEM_FREE) - 3994 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2); 3995 if (n < lowest) { 3996 lowest = n; 3997 r = FMR_HEAP_ARENA; 3998 } 3999 #define zio_arena NULL 4000 #else 4001 #define zio_arena heap_arena 4002 #endif 4003 4004 /* 4005 * If zio data pages are being allocated out of a separate heap segment, 4006 * then enforce that the size of available vmem for this arena remains 4007 * above about 1/16th free. 
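	 * (Illustrative arithmetic: with 960M allocated and 40M free in
	 * the zio arena, the check below computes 40M - (960M >> 4) =
	 * -20M; if nothing else is lower, FMR_ZIO_ARENA becomes the
	 * limiting reason and reclaim is triggered.)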
4008 * 4009 * Note: The 1/16th arena free requirement was put in place 4010 * to aggressively evict memory from the arc in order to avoid 4011 * memory fragmentation issues. 4012 */ 4013 if (zio_arena != NULL) { 4014 n = (int64_t)vmem_size(zio_arena, VMEM_FREE) - 4015 (vmem_size(zio_arena, VMEM_ALLOC) >> 4); 4016 if (n < lowest) { 4017 lowest = n; 4018 r = FMR_ZIO_ARENA; 4019 } 4020 } 4021 4022 #if __FreeBSD__ 4023 /* 4024 * Above limits know nothing about real level of KVA fragmentation. 4025 * Start aggressive reclamation if too little sequential KVA left. 4026 */ 4027 if (lowest > 0) { 4028 n = (vmem_size(heap_arena, VMEM_MAXFREE) < SPA_MAXBLOCKSIZE) ? 4029 -((int64_t)vmem_size(heap_arena, VMEM_ALLOC) >> 4) : 4030 INT64_MAX; 4031 if (n < lowest) { 4032 lowest = n; 4033 r = FMR_ZIO_FRAG; 4034 } 4035 } 4036 #endif 4037 4038 #else /* _KERNEL */ 4039 /* Every 100 calls, free a small amount */ 4040 if (spa_get_random(100) == 0) 4041 lowest = -1024; 4042 #endif /* _KERNEL */ 4043 4044 last_free_memory = lowest; 4045 last_free_reason = r; 4046 DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r); 4047 return (lowest); 4048 } 4049 4050 4051 /* 4052 * Determine if the system is under memory pressure and is asking 4053 * to reclaim memory. A return value of B_TRUE indicates that the system 4054 * is under memory pressure and that the arc should adjust accordingly. 4055 */ 4056 static boolean_t 4057 arc_reclaim_needed(void) 4058 { 4059 return (arc_available_memory() < 0); 4060 } 4061 4062 extern kmem_cache_t *zio_buf_cache[]; 4063 extern kmem_cache_t *zio_data_buf_cache[]; 4064 extern kmem_cache_t *range_seg_cache; 4065 4066 static __noinline void 4067 arc_kmem_reap_now(void) 4068 { 4069 size_t i; 4070 kmem_cache_t *prev_cache = NULL; 4071 kmem_cache_t *prev_data_cache = NULL; 4072 4073 DTRACE_PROBE(arc__kmem_reap_start); 4074 #ifdef _KERNEL 4075 if (arc_meta_used >= arc_meta_limit) { 4076 /* 4077 * We are exceeding our meta-data cache limit. 4078 * Purge some DNLC entries to release holds on meta-data. 4079 */ 4080 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 4081 } 4082 #if defined(__i386) 4083 /* 4084 * Reclaim unused memory from all kmem caches. 4085 */ 4086 kmem_reap(); 4087 #endif 4088 #endif 4089 4090 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 4091 if (zio_buf_cache[i] != prev_cache) { 4092 prev_cache = zio_buf_cache[i]; 4093 kmem_cache_reap_now(zio_buf_cache[i]); 4094 } 4095 if (zio_data_buf_cache[i] != prev_data_cache) { 4096 prev_data_cache = zio_data_buf_cache[i]; 4097 kmem_cache_reap_now(zio_data_buf_cache[i]); 4098 } 4099 } 4100 kmem_cache_reap_now(buf_cache); 4101 kmem_cache_reap_now(hdr_full_cache); 4102 kmem_cache_reap_now(hdr_l2only_cache); 4103 kmem_cache_reap_now(range_seg_cache); 4104 4105 #ifdef illumos 4106 if (zio_arena != NULL) { 4107 /* 4108 * Ask the vmem arena to reclaim unused memory from its 4109 * quantum caches. 4110 */ 4111 vmem_qcache_reap(zio_arena); 4112 } 4113 #endif 4114 DTRACE_PROBE(arc__kmem_reap_end); 4115 } 4116 4117 /* 4118 * Threads can block in arc_get_data_buf() waiting for this thread to evict 4119 * enough data and signal them to proceed. When this happens, the threads in 4120 * arc_get_data_buf() are sleeping while holding the hash lock for their 4121 * particular arc header. Thus, we must be careful to never sleep on a 4122 * hash lock in this thread. 
This is to prevent the following deadlock: 4123 * 4124 * - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L", 4125 * waiting for the reclaim thread to signal it. 4126 * 4127 * - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter, 4128 * fails, and goes to sleep forever. 4129 * 4130 * This possible deadlock is avoided by always acquiring a hash lock 4131 * using mutex_tryenter() from arc_reclaim_thread(). 4132 */ 4133 static void 4134 arc_reclaim_thread(void *dummy __unused) 4135 { 4136 hrtime_t growtime = 0; 4137 callb_cpr_t cpr; 4138 4139 CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG); 4140 4141 mutex_enter(&arc_reclaim_lock); 4142 while (!arc_reclaim_thread_exit) { 4143 uint64_t evicted = 0; 4144 4145 /* 4146 * This is necessary in order for the mdb ::arc dcmd to 4147 * show up to date information. Since the ::arc command 4148 * does not call the kstat's update function, without 4149 * this call, the command may show stale stats for the 4150 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even 4151 * with this change, the data might be up to 1 second 4152 * out of date; but that should suffice. The arc_state_t 4153 * structures can be queried directly if more accurate 4154 * information is needed. 4155 */ 4156 if (arc_ksp != NULL) 4157 arc_ksp->ks_update(arc_ksp, KSTAT_READ); 4158 4159 mutex_exit(&arc_reclaim_lock); 4160 4161 /* 4162 * We call arc_adjust() before (possibly) calling 4163 * arc_kmem_reap_now(), so that we can wake up 4164 * arc_get_data_buf() sooner. 4165 */ 4166 evicted = arc_adjust(); 4167 4168 int64_t free_memory = arc_available_memory(); 4169 if (free_memory < 0) { 4170 4171 arc_no_grow = B_TRUE; 4172 arc_warm = B_TRUE; 4173 4174 /* 4175 * Wait at least zfs_grow_retry (default 60) seconds 4176 * before considering growing. 4177 */ 4178 growtime = gethrtime() + SEC2NSEC(arc_grow_retry); 4179 4180 arc_kmem_reap_now(); 4181 4182 /* 4183 * If we are still low on memory, shrink the ARC 4184 * so that we have arc_shrink_min free space. 4185 */ 4186 free_memory = arc_available_memory(); 4187 4188 int64_t to_free = 4189 (arc_c >> arc_shrink_shift) - free_memory; 4190 if (to_free > 0) { 4191 #ifdef _KERNEL 4192 to_free = MAX(to_free, ptob(needfree)); 4193 #endif 4194 arc_shrink(to_free); 4195 } 4196 } else if (free_memory < arc_c >> arc_no_grow_shift) { 4197 arc_no_grow = B_TRUE; 4198 } else if (gethrtime() >= growtime) { 4199 arc_no_grow = B_FALSE; 4200 } 4201 4202 mutex_enter(&arc_reclaim_lock); 4203 4204 /* 4205 * If evicted is zero, we couldn't evict anything via 4206 * arc_adjust(). This could be due to hash lock 4207 * collisions, but more likely due to the majority of 4208 * arc buffers being unevictable. Therefore, even if 4209 * arc_size is above arc_c, another pass is unlikely to 4210 * be helpful and could potentially cause us to enter an 4211 * infinite loop. 4212 */ 4213 if (arc_size <= arc_c || evicted == 0) { 4214 #ifdef _KERNEL 4215 needfree = 0; 4216 #endif 4217 /* 4218 * We're either no longer overflowing, or we 4219 * can't evict anything more, so we should wake 4220 * up any threads before we go to sleep. 
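			 *
			 * This is the broadcast that the comment in
			 * arc_evict_state_impl() refers to: per-header
			 * eviction uses cv_signal() to avoid waking more
			 * allocating threads than the freed space can
			 * satisfy, and any waiters still sleeping are
			 * caught here, just before the reclaim thread
			 * itself sleeps.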
4221 */ 4222 cv_broadcast(&arc_reclaim_waiters_cv); 4223 4224 /* 4225 * Block until signaled, or after one second (we 4226 * might need to perform arc_kmem_reap_now() 4227 * even if we aren't being signalled) 4228 */ 4229 CALLB_CPR_SAFE_BEGIN(&cpr); 4230 (void) cv_timedwait_hires(&arc_reclaim_thread_cv, 4231 &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0); 4232 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock); 4233 } 4234 } 4235 4236 arc_reclaim_thread_exit = B_FALSE; 4237 cv_broadcast(&arc_reclaim_thread_cv); 4238 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_lock */ 4239 thread_exit(); 4240 } 4241 4242 #ifdef __FreeBSD__ 4243 4244 static u_int arc_dnlc_evicts_arg; 4245 extern struct vfsops zfs_vfsops; 4246 4247 static void 4248 arc_dnlc_evicts_thread(void *dummy __unused) 4249 { 4250 callb_cpr_t cpr; 4251 u_int percent; 4252 4253 CALLB_CPR_INIT(&cpr, &arc_dnlc_evicts_lock, callb_generic_cpr, FTAG); 4254 4255 mutex_enter(&arc_dnlc_evicts_lock); 4256 while (!arc_dnlc_evicts_thread_exit) { 4257 CALLB_CPR_SAFE_BEGIN(&cpr); 4258 (void) cv_wait(&arc_dnlc_evicts_cv, &arc_dnlc_evicts_lock); 4259 CALLB_CPR_SAFE_END(&cpr, &arc_dnlc_evicts_lock); 4260 if (arc_dnlc_evicts_arg != 0) { 4261 percent = arc_dnlc_evicts_arg; 4262 mutex_exit(&arc_dnlc_evicts_lock); 4263 #ifdef _KERNEL 4264 vnlru_free(desiredvnodes * percent / 100, &zfs_vfsops); 4265 #endif 4266 mutex_enter(&arc_dnlc_evicts_lock); 4267 /* 4268 * Clear our token only after vnlru_free() 4269 * pass is done, to avoid false queueing of 4270 * the requests. 4271 */ 4272 arc_dnlc_evicts_arg = 0; 4273 } 4274 } 4275 arc_dnlc_evicts_thread_exit = FALSE; 4276 cv_broadcast(&arc_dnlc_evicts_cv); 4277 CALLB_CPR_EXIT(&cpr); 4278 thread_exit(); 4279 } 4280 4281 void 4282 dnlc_reduce_cache(void *arg) 4283 { 4284 u_int percent; 4285 4286 percent = (u_int)(uintptr_t)arg; 4287 mutex_enter(&arc_dnlc_evicts_lock); 4288 if (arc_dnlc_evicts_arg == 0) { 4289 arc_dnlc_evicts_arg = percent; 4290 cv_broadcast(&arc_dnlc_evicts_cv); 4291 } 4292 mutex_exit(&arc_dnlc_evicts_lock); 4293 } 4294 4295 #endif 4296 4297 /* 4298 * Adapt arc info given the number of bytes we are trying to add and 4299 * the state that we are comming from. This function is only called 4300 * when we are adding new content to the cache. 4301 */ 4302 static void 4303 arc_adapt(int bytes, arc_state_t *state) 4304 { 4305 int mult; 4306 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 4307 int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size); 4308 int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size); 4309 4310 if (state == arc_l2c_only) 4311 return; 4312 4313 ASSERT(bytes > 0); 4314 /* 4315 * Adapt the target size of the MRU list: 4316 * - if we just hit in the MRU ghost list, then increase 4317 * the target size of the MRU list. 4318 * - if we just hit in the MFU ghost list, then increase 4319 * the target size of the MFU list by decreasing the 4320 * target size of the MRU list. 4321 */ 4322 if (state == arc_mru_ghost) { 4323 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); 4324 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 4325 4326 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 4327 } else if (state == arc_mfu_ghost) { 4328 uint64_t delta; 4329 4330 mult = (mfug_size >= mrug_size) ? 
1 : (mrug_size / mfug_size); 4331 mult = MIN(mult, 10); 4332 4333 delta = MIN(bytes * mult, arc_p); 4334 arc_p = MAX(arc_p_min, arc_p - delta); 4335 } 4336 ASSERT((int64_t)arc_p >= 0); 4337 4338 if (arc_reclaim_needed()) { 4339 cv_signal(&arc_reclaim_thread_cv); 4340 return; 4341 } 4342 4343 if (arc_no_grow) 4344 return; 4345 4346 if (arc_c >= arc_c_max) 4347 return; 4348 4349 /* 4350 * If we're within (2 * maxblocksize) bytes of the target 4351 * cache size, increment the target cache size 4352 */ 4353 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 4354 DTRACE_PROBE1(arc__inc_adapt, int, bytes); 4355 atomic_add_64(&arc_c, (int64_t)bytes); 4356 if (arc_c > arc_c_max) 4357 arc_c = arc_c_max; 4358 else if (state == arc_anon) 4359 atomic_add_64(&arc_p, (int64_t)bytes); 4360 if (arc_p > arc_c) 4361 arc_p = arc_c; 4362 } 4363 ASSERT((int64_t)arc_p >= 0); 4364 } 4365 4366 /* 4367 * Check if arc_size has grown past our upper threshold, determined by 4368 * zfs_arc_overflow_shift. 4369 */ 4370 static boolean_t 4371 arc_is_overflowing(void) 4372 { 4373 /* Always allow at least one block of overflow */ 4374 uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, 4375 arc_c >> zfs_arc_overflow_shift); 4376 4377 return (arc_size >= arc_c + overflow); 4378 } 4379 4380 /* 4381 * Allocate a block and return it to the caller. If we are hitting the 4382 * hard limit for the cache size, we must sleep, waiting for the eviction 4383 * thread to catch up. If we're past the target size but below the hard 4384 * limit, we'll only signal the reclaim thread and continue on. 4385 */ 4386 static void * 4387 arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 4388 { 4389 void *datap = NULL; 4390 arc_state_t *state = hdr->b_l1hdr.b_state; 4391 arc_buf_contents_t type = arc_buf_type(hdr); 4392 4393 arc_adapt(size, state); 4394 4395 /* 4396 * If arc_size is currently overflowing, and has grown past our 4397 * upper limit, we must be adding data faster than the evict 4398 * thread can evict. Thus, to ensure we don't compound the 4399 * problem by adding more data and forcing arc_size to grow even 4400 * further past it's target size, we halt and wait for the 4401 * eviction thread to catch up. 4402 * 4403 * It's also possible that the reclaim thread is unable to evict 4404 * enough buffers to get arc_size below the overflow limit (e.g. 4405 * due to buffers being un-evictable, or hash lock collisions). 4406 * In this case, we want to proceed regardless if we're 4407 * overflowing; thus we don't use a while loop here. 4408 */ 4409 if (arc_is_overflowing()) { 4410 mutex_enter(&arc_reclaim_lock); 4411 4412 /* 4413 * Now that we've acquired the lock, we may no longer be 4414 * over the overflow limit, lets check. 4415 * 4416 * We're ignoring the case of spurious wake ups. If that 4417 * were to happen, it'd let this thread consume an ARC 4418 * buffer before it should have (i.e. before we're under 4419 * the overflow limit and were signalled by the reclaim 4420 * thread). As long as that is a rare occurrence, it 4421 * shouldn't cause any harm. 
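		 *
		 * For a sense of scale (illustrative, assuming the default
		 * zfs_arc_overflow_shift of 8): with arc_c at 4G the margin
		 * used by arc_is_overflowing() is
		 * MAX(SPA_MAXBLOCKSIZE, 4G >> 8) = 16M, so a thread only
		 * blocks on the cv_wait() below once arc_size has grown at
		 * least 16M past arc_c.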
4422 */ 4423 if (arc_is_overflowing()) { 4424 cv_signal(&arc_reclaim_thread_cv); 4425 cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock); 4426 } 4427 4428 mutex_exit(&arc_reclaim_lock); 4429 } 4430 4431 VERIFY3U(hdr->b_type, ==, type); 4432 if (type == ARC_BUFC_METADATA) { 4433 datap = zio_buf_alloc(size); 4434 arc_space_consume(size, ARC_SPACE_META); 4435 } else { 4436 ASSERT(type == ARC_BUFC_DATA); 4437 datap = zio_data_buf_alloc(size); 4438 arc_space_consume(size, ARC_SPACE_DATA); 4439 } 4440 4441 /* 4442 * Update the state size. Note that ghost states have a 4443 * "ghost size" and so don't need to be updated. 4444 */ 4445 if (!GHOST_STATE(state)) { 4446 4447 (void) refcount_add_many(&state->arcs_size, size, tag); 4448 4449 /* 4450 * If this is reached via arc_read, the link is 4451 * protected by the hash lock. If reached via 4452 * arc_buf_alloc, the header should not be accessed by 4453 * any other thread. And, if reached via arc_read_done, 4454 * the hash lock will protect it if it's found in the 4455 * hash table; otherwise no other thread should be 4456 * trying to [add|remove]_reference it. 4457 */ 4458 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 4459 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4460 (void) refcount_add_many(&state->arcs_esize[type], 4461 size, tag); 4462 } 4463 4464 /* 4465 * If we are growing the cache, and we are adding anonymous 4466 * data, and we have outgrown arc_p, update arc_p 4467 */ 4468 if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon && 4469 (refcount_count(&arc_anon->arcs_size) + 4470 refcount_count(&arc_mru->arcs_size) > arc_p)) 4471 arc_p = MIN(arc_c, arc_p + size); 4472 } 4473 ARCSTAT_BUMP(arcstat_allocated); 4474 return (datap); 4475 } 4476 4477 /* 4478 * Free the arc data buffer. 4479 */ 4480 static void 4481 arc_free_data_buf(arc_buf_hdr_t *hdr, void *data, uint64_t size, void *tag) 4482 { 4483 arc_state_t *state = hdr->b_l1hdr.b_state; 4484 arc_buf_contents_t type = arc_buf_type(hdr); 4485 4486 /* protected by hash lock, if in the hash table */ 4487 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 4488 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4489 ASSERT(state != arc_anon && state != arc_l2c_only); 4490 4491 (void) refcount_remove_many(&state->arcs_esize[type], 4492 size, tag); 4493 } 4494 (void) refcount_remove_many(&state->arcs_size, size, tag); 4495 4496 VERIFY3U(hdr->b_type, ==, type); 4497 if (type == ARC_BUFC_METADATA) { 4498 zio_buf_free(data, size); 4499 arc_space_return(size, ARC_SPACE_META); 4500 } else { 4501 ASSERT(type == ARC_BUFC_DATA); 4502 zio_data_buf_free(data, size); 4503 arc_space_return(size, ARC_SPACE_DATA); 4504 } 4505 } 4506 4507 /* 4508 * This routine is called whenever a buffer is accessed. 4509 * NOTE: the hash lock is dropped in this function. 4510 */ 4511 static void 4512 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 4513 { 4514 clock_t now; 4515 4516 ASSERT(MUTEX_HELD(hash_lock)); 4517 ASSERT(HDR_HAS_L1HDR(hdr)); 4518 4519 if (hdr->b_l1hdr.b_state == arc_anon) { 4520 /* 4521 * This buffer is not in the cache, and does not 4522 * appear in our "ghost" list. Add the new buffer 4523 * to the MRU state. 
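		 *
		 * As an overview, the branches of this function implement
		 * the following transitions (details in each case below):
		 *
		 *	anon		-> mru	first access
		 *	mru		-> mfu	re-accessed after ARC_MINTIME
		 *	mru ghost	-> mfu	hit after eviction (mru if prefetch)
		 *	mfu		-> mfu	re-accessed while cached
		 *	mfu ghost	-> mfu	hit after eviction (mru if prefetch)
		 *	l2c only	-> mfu	hit on an L2ARC-only header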
4524 */ 4525 4526 ASSERT0(hdr->b_l1hdr.b_arc_access); 4527 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4528 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 4529 arc_change_state(arc_mru, hdr, hash_lock); 4530 4531 } else if (hdr->b_l1hdr.b_state == arc_mru) { 4532 now = ddi_get_lbolt(); 4533 4534 /* 4535 * If this buffer is here because of a prefetch, then either: 4536 * - clear the flag if this is a "referencing" read 4537 * (any subsequent access will bump this into the MFU state). 4538 * or 4539 * - move the buffer to the head of the list if this is 4540 * another prefetch (to make it less likely to be evicted). 4541 */ 4542 if (HDR_PREFETCH(hdr)) { 4543 if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 4544 /* link protected by hash lock */ 4545 ASSERT(multilist_link_active( 4546 &hdr->b_l1hdr.b_arc_node)); 4547 } else { 4548 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 4549 ARCSTAT_BUMP(arcstat_mru_hits); 4550 } 4551 hdr->b_l1hdr.b_arc_access = now; 4552 return; 4553 } 4554 4555 /* 4556 * This buffer has been "accessed" only once so far, 4557 * but it is still in the cache. Move it to the MFU 4558 * state. 4559 */ 4560 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) { 4561 /* 4562 * More than 125ms have passed since we 4563 * instantiated this buffer. Move it to the 4564 * most frequently used state. 4565 */ 4566 hdr->b_l1hdr.b_arc_access = now; 4567 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4568 arc_change_state(arc_mfu, hdr, hash_lock); 4569 } 4570 ARCSTAT_BUMP(arcstat_mru_hits); 4571 } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { 4572 arc_state_t *new_state; 4573 /* 4574 * This buffer has been "accessed" recently, but 4575 * was evicted from the cache. Move it to the 4576 * MFU state. 4577 */ 4578 4579 if (HDR_PREFETCH(hdr)) { 4580 new_state = arc_mru; 4581 if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) 4582 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 4583 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 4584 } else { 4585 new_state = arc_mfu; 4586 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4587 } 4588 4589 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4590 arc_change_state(new_state, hdr, hash_lock); 4591 4592 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 4593 } else if (hdr->b_l1hdr.b_state == arc_mfu) { 4594 /* 4595 * This buffer has been accessed more than once and is 4596 * still in the cache. Keep it in the MFU state. 4597 * 4598 * NOTE: an add_reference() that occurred when we did 4599 * the arc_read() will have kicked this off the list. 4600 * If it was a prefetch, we will explicitly move it to 4601 * the head of the list now. 4602 */ 4603 if ((HDR_PREFETCH(hdr)) != 0) { 4604 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4605 /* link protected by hash_lock */ 4606 ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 4607 } 4608 ARCSTAT_BUMP(arcstat_mfu_hits); 4609 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4610 } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { 4611 arc_state_t *new_state = arc_mfu; 4612 /* 4613 * This buffer has been accessed more than once but has 4614 * been evicted from the cache. Move it back to the 4615 * MFU state. 4616 */ 4617 4618 if (HDR_PREFETCH(hdr)) { 4619 /* 4620 * This is a prefetch access... 4621 * move this block back to the MRU state. 
4622 */ 4623 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 4624 new_state = arc_mru; 4625 } 4626 4627 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4628 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4629 arc_change_state(new_state, hdr, hash_lock); 4630 4631 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 4632 } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { 4633 /* 4634 * This buffer is on the 2nd Level ARC. 4635 */ 4636 4637 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 4638 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 4639 arc_change_state(arc_mfu, hdr, hash_lock); 4640 } else { 4641 ASSERT(!"invalid arc state"); 4642 } 4643 } 4644 4645 /* a generic arc_done_func_t which you can use */ 4646 /* ARGSUSED */ 4647 void 4648 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 4649 { 4650 if (zio == NULL || zio->io_error == 0) 4651 bcopy(buf->b_data, arg, HDR_GET_LSIZE(buf->b_hdr)); 4652 arc_buf_destroy(buf, arg); 4653 } 4654 4655 /* a generic arc_done_func_t */ 4656 void 4657 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 4658 { 4659 arc_buf_t **bufp = arg; 4660 if (zio && zio->io_error) { 4661 arc_buf_destroy(buf, arg); 4662 *bufp = NULL; 4663 } else { 4664 *bufp = buf; 4665 ASSERT(buf->b_data); 4666 } 4667 } 4668 4669 static void 4670 arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp) 4671 { 4672 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { 4673 ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); 4674 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 4675 } else { 4676 if (HDR_COMPRESSION_ENABLED(hdr)) { 4677 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, 4678 BP_GET_COMPRESS(bp)); 4679 } 4680 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); 4681 ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp)); 4682 } 4683 } 4684 4685 static void 4686 arc_read_done(zio_t *zio) 4687 { 4688 arc_buf_hdr_t *hdr = zio->io_private; 4689 arc_buf_t *abuf = NULL; /* buffer we're assigning to callback */ 4690 kmutex_t *hash_lock = NULL; 4691 arc_callback_t *callback_list, *acb; 4692 int freeable = B_FALSE; 4693 4694 /* 4695 * The hdr was inserted into hash-table and removed from lists 4696 * prior to starting I/O. We should find this header, since 4697 * it's in the hash table, and it should be legit since it's 4698 * not possible to evict it during the I/O. The only possible 4699 * reason for it not to be found is if we were freed during the 4700 * read. 
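	 * That freed-during-read case is why the lookup below is guarded
	 * by HDR_IN_HASH_TABLE(): for a freed hdr no hash lock is taken,
	 * so hash_lock stays NULL and the situation is handled by the
	 * hash_lock == NULL path further down.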
4701 */ 4702 if (HDR_IN_HASH_TABLE(hdr)) { 4703 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 4704 ASSERT3U(hdr->b_dva.dva_word[0], ==, 4705 BP_IDENTITY(zio->io_bp)->dva_word[0]); 4706 ASSERT3U(hdr->b_dva.dva_word[1], ==, 4707 BP_IDENTITY(zio->io_bp)->dva_word[1]); 4708 4709 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 4710 &hash_lock); 4711 4712 ASSERT((found == hdr && 4713 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 4714 (found == hdr && HDR_L2_READING(hdr))); 4715 ASSERT3P(hash_lock, !=, NULL); 4716 } 4717 4718 if (zio->io_error == 0) { 4719 /* byteswap if necessary */ 4720 if (BP_SHOULD_BYTESWAP(zio->io_bp)) { 4721 if (BP_GET_LEVEL(zio->io_bp) > 0) { 4722 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; 4723 } else { 4724 hdr->b_l1hdr.b_byteswap = 4725 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 4726 } 4727 } else { 4728 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 4729 } 4730 } 4731 4732 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED); 4733 if (l2arc_noprefetch && HDR_PREFETCH(hdr)) 4734 arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE); 4735 4736 callback_list = hdr->b_l1hdr.b_acb; 4737 ASSERT3P(callback_list, !=, NULL); 4738 4739 if (hash_lock && zio->io_error == 0 && 4740 hdr->b_l1hdr.b_state == arc_anon) { 4741 /* 4742 * Only call arc_access on anonymous buffers. This is because 4743 * if we've issued an I/O for an evicted buffer, we've already 4744 * called arc_access (to prevent any simultaneous readers from 4745 * getting confused). 4746 */ 4747 arc_access(hdr, hash_lock); 4748 } 4749 4750 /* create copies of the data buffer for the callers */ 4751 for (acb = callback_list; acb; acb = acb->acb_next) { 4752 if (acb->acb_done != NULL) { 4753 /* 4754 * If we're here, then this must be a demand read 4755 * since prefetch requests don't have callbacks. 4756 * If a read request has a callback (i.e. acb_done is 4757 * not NULL), then we decompress the data for the 4758 * first request and clone the rest. This avoids 4759 * having to waste cpu resources decompressing data 4760 * that nobody is explicitly waiting to read. 4761 */ 4762 if (abuf == NULL) { 4763 acb->acb_buf = arc_buf_alloc_impl(hdr, 4764 acb->acb_private); 4765 if (zio->io_error == 0) { 4766 zio->io_error = 4767 arc_decompress(acb->acb_buf); 4768 } 4769 abuf = acb->acb_buf; 4770 } else { 4771 add_reference(hdr, acb->acb_private); 4772 acb->acb_buf = arc_buf_clone(abuf); 4773 } 4774 } 4775 } 4776 hdr->b_l1hdr.b_acb = NULL; 4777 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 4778 if (abuf == NULL) { 4779 /* 4780 * This buffer didn't have a callback so it must 4781 * be a prefetch. 4782 */ 4783 ASSERT(HDR_PREFETCH(hdr)); 4784 ASSERT0(hdr->b_l1hdr.b_bufcnt); 4785 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 4786 } 4787 4788 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || 4789 callback_list != NULL); 4790 4791 if (zio->io_error == 0) { 4792 arc_hdr_verify(hdr, zio->io_bp); 4793 } else { 4794 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); 4795 if (hdr->b_l1hdr.b_state != arc_anon) 4796 arc_change_state(arc_anon, hdr, hash_lock); 4797 if (HDR_IN_HASH_TABLE(hdr)) 4798 buf_hash_remove(hdr); 4799 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 4800 } 4801 4802 /* 4803 * Broadcast before we drop the hash_lock to avoid the possibility 4804 * that the hdr (and hence the cv) might be freed before we get to 4805 * the cv_broadcast(). 
4806 */ 4807 cv_broadcast(&hdr->b_l1hdr.b_cv); 4808 4809 if (hash_lock != NULL) { 4810 mutex_exit(hash_lock); 4811 } else { 4812 /* 4813 * This block was freed while we waited for the read to 4814 * complete. It has been removed from the hash table and 4815 * moved to the anonymous state (so that it won't show up 4816 * in the cache). 4817 */ 4818 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 4819 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 4820 } 4821 4822 /* execute each callback and free its structure */ 4823 while ((acb = callback_list) != NULL) { 4824 if (acb->acb_done) 4825 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 4826 4827 if (acb->acb_zio_dummy != NULL) { 4828 acb->acb_zio_dummy->io_error = zio->io_error; 4829 zio_nowait(acb->acb_zio_dummy); 4830 } 4831 4832 callback_list = acb->acb_next; 4833 kmem_free(acb, sizeof (arc_callback_t)); 4834 } 4835 4836 if (freeable) 4837 arc_hdr_destroy(hdr); 4838 } 4839 4840 /* 4841 * "Read" the block at the specified DVA (in bp) via the 4842 * cache. If the block is found in the cache, invoke the provided 4843 * callback immediately and return. Note that the `zio' parameter 4844 * in the callback will be NULL in this case, since no IO was 4845 * required. If the block is not in the cache pass the read request 4846 * on to the spa with a substitute callback function, so that the 4847 * requested block will be added to the cache. 4848 * 4849 * If a read request arrives for a block that has a read in-progress, 4850 * either wait for the in-progress read to complete (and return the 4851 * results); or, if this is a read with a "done" func, add a record 4852 * to the read to invoke the "done" func when the read completes, 4853 * and return; or just return. 4854 * 4855 * arc_read_done() will invoke all the requested "done" functions 4856 * for readers of this block. 4857 */ 4858 int 4859 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 4860 void *private, zio_priority_t priority, int zio_flags, 4861 arc_flags_t *arc_flags, const zbookmark_phys_t *zb) 4862 { 4863 arc_buf_hdr_t *hdr = NULL; 4864 kmutex_t *hash_lock = NULL; 4865 zio_t *rzio; 4866 uint64_t guid = spa_load_guid(spa); 4867 4868 ASSERT(!BP_IS_EMBEDDED(bp) || 4869 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 4870 4871 top: 4872 if (!BP_IS_EMBEDDED(bp)) { 4873 /* 4874 * Embedded BP's have no DVA and require no I/O to "read". 4875 * Create an anonymous arc buf to back it. 4876 */ 4877 hdr = buf_hash_find(guid, bp, &hash_lock); 4878 } 4879 4880 if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pdata != NULL) { 4881 arc_buf_t *buf = NULL; 4882 *arc_flags |= ARC_FLAG_CACHED; 4883 4884 if (HDR_IO_IN_PROGRESS(hdr)) { 4885 4886 if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) && 4887 priority == ZIO_PRIORITY_SYNC_READ) { 4888 /* 4889 * This sync read must wait for an 4890 * in-progress async read (e.g. a predictive 4891 * prefetch). Async reads are queued 4892 * separately at the vdev_queue layer, so 4893 * this is a form of priority inversion. 4894 * Ideally, we would "inherit" the demand 4895 * i/o's priority by moving the i/o from 4896 * the async queue to the synchronous queue, 4897 * but there is currently no mechanism to do 4898 * so. Track this so that we can evaluate 4899 * the magnitude of this potential performance 4900 * problem. 
4901 * 4902 * Note that if the prefetch i/o is already 4903 * active (has been issued to the device), 4904 * the prefetch improved performance, because 4905 * we issued it sooner than we would have 4906 * without the prefetch. 4907 */ 4908 DTRACE_PROBE1(arc__sync__wait__for__async, 4909 arc_buf_hdr_t *, hdr); 4910 ARCSTAT_BUMP(arcstat_sync_wait_for_async); 4911 } 4912 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 4913 arc_hdr_clear_flags(hdr, 4914 ARC_FLAG_PREDICTIVE_PREFETCH); 4915 } 4916 4917 if (*arc_flags & ARC_FLAG_WAIT) { 4918 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); 4919 mutex_exit(hash_lock); 4920 goto top; 4921 } 4922 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 4923 4924 if (done) { 4925 arc_callback_t *acb = NULL; 4926 4927 acb = kmem_zalloc(sizeof (arc_callback_t), 4928 KM_SLEEP); 4929 acb->acb_done = done; 4930 acb->acb_private = private; 4931 if (pio != NULL) 4932 acb->acb_zio_dummy = zio_null(pio, 4933 spa, NULL, NULL, NULL, zio_flags); 4934 4935 ASSERT3P(acb->acb_done, !=, NULL); 4936 acb->acb_next = hdr->b_l1hdr.b_acb; 4937 hdr->b_l1hdr.b_acb = acb; 4938 mutex_exit(hash_lock); 4939 return (0); 4940 } 4941 mutex_exit(hash_lock); 4942 return (0); 4943 } 4944 4945 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 4946 hdr->b_l1hdr.b_state == arc_mfu); 4947 4948 if (done) { 4949 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 4950 /* 4951 * This is a demand read which does not have to 4952 * wait for i/o because we did a predictive 4953 * prefetch i/o for it, which has completed. 4954 */ 4955 DTRACE_PROBE1( 4956 arc__demand__hit__predictive__prefetch, 4957 arc_buf_hdr_t *, hdr); 4958 ARCSTAT_BUMP( 4959 arcstat_demand_hit_predictive_prefetch); 4960 arc_hdr_clear_flags(hdr, 4961 ARC_FLAG_PREDICTIVE_PREFETCH); 4962 } 4963 ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp)); 4964 4965 /* 4966 * If this block is already in use, create a new 4967 * copy of the data so that we will be guaranteed 4968 * that arc_release() will always succeed. 
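			 *
			 * In other words, the first consumer of a cached hdr
			 * gets a freshly decompressed arc_buf_t below, while
			 * each additional consumer gets its own copy via
			 * arc_buf_clone(), so every caller can later
			 * arc_release() its buffer independently.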
4969 */ 4970 buf = hdr->b_l1hdr.b_buf; 4971 if (buf == NULL) { 4972 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 4973 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 4974 buf = arc_buf_alloc_impl(hdr, private); 4975 VERIFY0(arc_decompress(buf)); 4976 } else { 4977 add_reference(hdr, private); 4978 buf = arc_buf_clone(buf); 4979 } 4980 ASSERT3P(buf->b_data, !=, NULL); 4981 4982 } else if (*arc_flags & ARC_FLAG_PREFETCH && 4983 refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 4984 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 4985 } 4986 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 4987 arc_access(hdr, hash_lock); 4988 if (*arc_flags & ARC_FLAG_L2CACHE) 4989 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 4990 mutex_exit(hash_lock); 4991 ARCSTAT_BUMP(arcstat_hits); 4992 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 4993 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 4994 data, metadata, hits); 4995 4996 if (done) 4997 done(NULL, buf, private); 4998 } else { 4999 uint64_t lsize = BP_GET_LSIZE(bp); 5000 uint64_t psize = BP_GET_PSIZE(bp); 5001 arc_callback_t *acb; 5002 vdev_t *vd = NULL; 5003 uint64_t addr = 0; 5004 boolean_t devw = B_FALSE; 5005 uint64_t size; 5006 5007 if (hdr == NULL) { 5008 /* this block is not in the cache */ 5009 arc_buf_hdr_t *exists = NULL; 5010 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 5011 hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 5012 BP_GET_COMPRESS(bp), type); 5013 5014 if (!BP_IS_EMBEDDED(bp)) { 5015 hdr->b_dva = *BP_IDENTITY(bp); 5016 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 5017 exists = buf_hash_insert(hdr, &hash_lock); 5018 } 5019 if (exists != NULL) { 5020 /* somebody beat us to the hash insert */ 5021 mutex_exit(hash_lock); 5022 buf_discard_identity(hdr); 5023 arc_hdr_destroy(hdr); 5024 goto top; /* restart the IO request */ 5025 } 5026 } else { 5027 /* 5028 * This block is in the ghost cache. If it was L2-only 5029 * (and thus didn't have an L1 hdr), we realloc the 5030 * header to add an L1 hdr. 5031 */ 5032 if (!HDR_HAS_L1HDR(hdr)) { 5033 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, 5034 hdr_full_cache); 5035 } 5036 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 5037 ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state)); 5038 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5039 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5040 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 5041 5042 /* 5043 * This is a delicate dance that we play here. 5044 * This hdr is in the ghost list so we access it 5045 * to move it out of the ghost list before we 5046 * initiate the read. If it's a prefetch then 5047 * it won't have a callback so we'll remove the 5048 * reference that arc_buf_alloc_impl() created. We 5049 * do this after we've called arc_access() to 5050 * avoid hitting an assert in remove_reference(). 5051 */ 5052 arc_access(hdr, hash_lock); 5053 arc_hdr_alloc_pdata(hdr); 5054 } 5055 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 5056 size = arc_hdr_size(hdr); 5057 5058 /* 5059 * If compression is enabled on the hdr, then will do 5060 * RAW I/O and will store the compressed data in the hdr's 5061 * data block. Otherwise, the hdr's data block will contain 5062 * the uncompressed data. 
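		 *
		 * (With ZIO_FLAG_RAW set, the read pipeline returns the
		 * physical, still-compressed bytes untouched; decompression
		 * is deferred to arc_decompress(), called from
		 * arc_read_done() above or from the cache-hit path earlier
		 * in this function, when a consumer actually needs an
		 * arc_buf_t.)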
5063 */ 5064 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) { 5065 zio_flags |= ZIO_FLAG_RAW; 5066 } 5067 5068 if (*arc_flags & ARC_FLAG_PREFETCH) 5069 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 5070 if (*arc_flags & ARC_FLAG_L2CACHE) 5071 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 5072 if (BP_GET_LEVEL(bp) > 0) 5073 arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT); 5074 if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH) 5075 arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); 5076 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); 5077 5078 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 5079 acb->acb_done = done; 5080 acb->acb_private = private; 5081 5082 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5083 hdr->b_l1hdr.b_acb = acb; 5084 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5085 5086 if (HDR_HAS_L2HDR(hdr) && 5087 (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { 5088 devw = hdr->b_l2hdr.b_dev->l2ad_writing; 5089 addr = hdr->b_l2hdr.b_daddr; 5090 /* 5091 * Lock out device removal. 5092 */ 5093 if (vdev_is_dead(vd) || 5094 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 5095 vd = NULL; 5096 } 5097 5098 if (priority == ZIO_PRIORITY_ASYNC_READ) 5099 arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 5100 else 5101 arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 5102 5103 if (hash_lock != NULL) 5104 mutex_exit(hash_lock); 5105 5106 /* 5107 * At this point, we have a level 1 cache miss. Try again in 5108 * L2ARC if possible. 5109 */ 5110 ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize); 5111 5112 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 5113 uint64_t, lsize, zbookmark_phys_t *, zb); 5114 ARCSTAT_BUMP(arcstat_misses); 5115 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 5116 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 5117 data, metadata, misses); 5118 #ifdef __FreeBSD__ 5119 #ifdef _KERNEL 5120 #ifdef RACCT 5121 if (racct_enable) { 5122 PROC_LOCK(curproc); 5123 racct_add_force(curproc, RACCT_READBPS, size); 5124 racct_add_force(curproc, RACCT_READIOPS, 1); 5125 PROC_UNLOCK(curproc); 5126 } 5127 #endif /* RACCT */ 5128 curthread->td_ru.ru_inblock++; 5129 #endif 5130 #endif 5131 5132 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 5133 /* 5134 * Read from the L2ARC if the following are true: 5135 * 1. The L2ARC vdev was previously cached. 5136 * 2. This buffer still has L2ARC metadata. 5137 * 3. This buffer isn't currently writing to the L2ARC. 5138 * 4. The L2ARC entry wasn't evicted, which may 5139 * also have invalidated the vdev. 5140 * 5. This isn't prefetch and l2arc_noprefetch is set. 5141 */ 5142 if (HDR_HAS_L2HDR(hdr) && 5143 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 5144 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 5145 l2arc_read_callback_t *cb; 5146 void* b_data; 5147 5148 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 5149 ARCSTAT_BUMP(arcstat_l2_hits); 5150 5151 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 5152 KM_SLEEP); 5153 cb->l2rcb_hdr = hdr; 5154 cb->l2rcb_bp = *bp; 5155 cb->l2rcb_zb = *zb; 5156 cb->l2rcb_flags = zio_flags; 5157 uint64_t asize = vdev_psize_to_asize(vd, size); 5158 if (asize != size) { 5159 b_data = zio_data_buf_alloc(asize); 5160 cb->l2rcb_data = b_data; 5161 } else { 5162 b_data = hdr->b_l1hdr.b_pdata; 5163 } 5164 5165 ASSERT(addr >= VDEV_LABEL_START_SIZE && 5166 addr + asize < vd->vdev_psize - 5167 VDEV_LABEL_END_SIZE); 5168 5169 /* 5170 * l2arc read. The SCL_L2ARC lock will be 5171 * released by l2arc_read_done(). 5172 * Issue a null zio if the underlying buffer 5173 * was squashed to zero size by compression. 
5174 */ 5175 ASSERT3U(HDR_GET_COMPRESS(hdr), !=, 5176 ZIO_COMPRESS_EMPTY); 5177 rzio = zio_read_phys(pio, vd, addr, 5178 asize, b_data, 5179 ZIO_CHECKSUM_OFF, 5180 l2arc_read_done, cb, priority, 5181 zio_flags | ZIO_FLAG_DONT_CACHE | 5182 ZIO_FLAG_CANFAIL | 5183 ZIO_FLAG_DONT_PROPAGATE | 5184 ZIO_FLAG_DONT_RETRY, B_FALSE); 5185 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 5186 zio_t *, rzio); 5187 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 5188 5189 if (*arc_flags & ARC_FLAG_NOWAIT) { 5190 zio_nowait(rzio); 5191 return (0); 5192 } 5193 5194 ASSERT(*arc_flags & ARC_FLAG_WAIT); 5195 if (zio_wait(rzio) == 0) 5196 return (0); 5197 5198 /* l2arc read error; goto zio_read() */ 5199 } else { 5200 DTRACE_PROBE1(l2arc__miss, 5201 arc_buf_hdr_t *, hdr); 5202 ARCSTAT_BUMP(arcstat_l2_misses); 5203 if (HDR_L2_WRITING(hdr)) 5204 ARCSTAT_BUMP(arcstat_l2_rw_clash); 5205 spa_config_exit(spa, SCL_L2ARC, vd); 5206 } 5207 } else { 5208 if (vd != NULL) 5209 spa_config_exit(spa, SCL_L2ARC, vd); 5210 if (l2arc_ndev != 0) { 5211 DTRACE_PROBE1(l2arc__miss, 5212 arc_buf_hdr_t *, hdr); 5213 ARCSTAT_BUMP(arcstat_l2_misses); 5214 } 5215 } 5216 5217 rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pdata, size, 5218 arc_read_done, hdr, priority, zio_flags, zb); 5219 5220 if (*arc_flags & ARC_FLAG_WAIT) 5221 return (zio_wait(rzio)); 5222 5223 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 5224 zio_nowait(rzio); 5225 } 5226 return (0); 5227 } 5228 5229 /* 5230 * Notify the arc that a block was freed, and thus will never be used again. 5231 */ 5232 void 5233 arc_freed(spa_t *spa, const blkptr_t *bp) 5234 { 5235 arc_buf_hdr_t *hdr; 5236 kmutex_t *hash_lock; 5237 uint64_t guid = spa_load_guid(spa); 5238 5239 ASSERT(!BP_IS_EMBEDDED(bp)); 5240 5241 hdr = buf_hash_find(guid, bp, &hash_lock); 5242 if (hdr == NULL) 5243 return; 5244 5245 /* 5246 * We might be trying to free a block that is still doing I/O 5247 * (i.e. prefetch) or has a reference (i.e. a dedup-ed, 5248 * dmu_sync-ed block). If this block is being prefetched, then it 5249 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr 5250 * until the I/O completes. A block may also have a reference if it is 5251 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would 5252 * have written the new block to its final resting place on disk but 5253 * without the dedup flag set. This would have left the hdr in the MRU 5254 * state and discoverable. When the txg finally syncs it detects that 5255 * the block was overridden in open context and issues an override I/O. 5256 * Since this is a dedup block, the override I/O will determine if the 5257 * block is already in the DDT. If so, then it will replace the io_bp 5258 * with the bp from the DDT and allow the I/O to finish. When the I/O 5259 * reaches the done callback, dbuf_write_override_done, it will 5260 * check to see if the io_bp and io_bp_override are identical. 5261 * If they are not, then it indicates that the bp was replaced with 5262 * the bp in the DDT and the override bp is freed. This allows 5263 * us to arrive here with a reference on a block that is being 5264 * freed. So if we have an I/O in progress, or a reference to 5265 * this hdr, then we don't destroy the hdr. 
5266 */ 5267 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) && 5268 refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) { 5269 arc_change_state(arc_anon, hdr, hash_lock); 5270 arc_hdr_destroy(hdr); 5271 mutex_exit(hash_lock); 5272 } else { 5273 mutex_exit(hash_lock); 5274 } 5275 5276 } 5277 5278 /* 5279 * Release this buffer from the cache, making it an anonymous buffer. This 5280 * must be done after a read and prior to modifying the buffer contents. 5281 * If the buffer has more than one reference, we must make 5282 * a new hdr for the buffer. 5283 */ 5284 void 5285 arc_release(arc_buf_t *buf, void *tag) 5286 { 5287 arc_buf_hdr_t *hdr = buf->b_hdr; 5288 5289 /* 5290 * It would be nice to assert that if it's DMU metadata (level > 5291 * 0 || it's the dnode file), then it must be syncing context. 5292 * But we don't know that information at this level. 5293 */ 5294 5295 mutex_enter(&buf->b_evict_lock); 5296 5297 ASSERT(HDR_HAS_L1HDR(hdr)); 5298 5299 /* 5300 * We don't grab the hash lock prior to this check, because if 5301 * the buffer's header is in the arc_anon state, it won't be 5302 * linked into the hash table. 5303 */ 5304 if (hdr->b_l1hdr.b_state == arc_anon) { 5305 mutex_exit(&buf->b_evict_lock); 5306 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5307 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 5308 ASSERT(!HDR_HAS_L2HDR(hdr)); 5309 ASSERT(HDR_EMPTY(hdr)); 5310 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 5311 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); 5312 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); 5313 5314 hdr->b_l1hdr.b_arc_access = 0; 5315 5316 /* 5317 * If the buf is being overridden then it may already 5318 * have a hdr that is not empty. 5319 */ 5320 buf_discard_identity(hdr); 5321 arc_buf_thaw(buf); 5322 5323 return; 5324 } 5325 5326 kmutex_t *hash_lock = HDR_LOCK(hdr); 5327 mutex_enter(hash_lock); 5328 5329 /* 5330 * This assignment is only valid as long as the hash_lock is 5331 * held, we must be careful not to reference state or the 5332 * b_state field after dropping the lock. 5333 */ 5334 arc_state_t *state = hdr->b_l1hdr.b_state; 5335 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 5336 ASSERT3P(state, !=, arc_anon); 5337 5338 /* this buffer is not on any list */ 5339 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0); 5340 5341 if (HDR_HAS_L2HDR(hdr)) { 5342 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); 5343 5344 /* 5345 * We have to recheck this conditional again now that 5346 * we're holding the l2ad_mtx to prevent a race with 5347 * another thread which might be concurrently calling 5348 * l2arc_evict(). In that case, l2arc_evict() might have 5349 * destroyed the header's L2 portion as we were waiting 5350 * to acquire the l2ad_mtx. 5351 */ 5352 if (HDR_HAS_L2HDR(hdr)) { 5353 l2arc_trim(hdr); 5354 arc_hdr_l2hdr_destroy(hdr); 5355 } 5356 5357 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); 5358 } 5359 5360 /* 5361 * Do we have more than one buf? 
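* If so, only this buf is being released, so it must be moved onto a
* new anonymous hdr while the remaining bufs stay with the cached copy.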
5362 */ 5363 if (hdr->b_l1hdr.b_bufcnt > 1) { 5364 arc_buf_hdr_t *nhdr; 5365 arc_buf_t **bufp; 5366 uint64_t spa = hdr->b_spa; 5367 uint64_t psize = HDR_GET_PSIZE(hdr); 5368 uint64_t lsize = HDR_GET_LSIZE(hdr); 5369 enum zio_compress compress = HDR_GET_COMPRESS(hdr); 5370 arc_buf_contents_t type = arc_buf_type(hdr); 5371 VERIFY3U(hdr->b_type, ==, type); 5372 5373 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); 5374 (void) remove_reference(hdr, hash_lock, tag); 5375 5376 if (arc_buf_is_shared(buf)) { 5377 ASSERT(HDR_SHARED_DATA(hdr)); 5378 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); 5379 ASSERT(ARC_BUF_LAST(buf)); 5380 } 5381 5382 /* 5383 * Pull the data off of this hdr and attach it to 5384 * a new anonymous hdr. Also find the last buffer 5385 * in the hdr's buffer list. 5386 */ 5387 arc_buf_t *lastbuf = NULL; 5388 bufp = &hdr->b_l1hdr.b_buf; 5389 while (*bufp != NULL) { 5390 if (*bufp == buf) { 5391 *bufp = buf->b_next; 5392 } 5393 5394 /* 5395 * If we've removed a buffer in the middle of 5396 * the list then update the lastbuf and update 5397 * bufp. 5398 */ 5399 if (*bufp != NULL) { 5400 lastbuf = *bufp; 5401 bufp = &(*bufp)->b_next; 5402 } 5403 } 5404 buf->b_next = NULL; 5405 ASSERT3P(lastbuf, !=, buf); 5406 ASSERT3P(lastbuf, !=, NULL); 5407 5408 /* 5409 * If the current arc_buf_t and the hdr are sharing their data 5410 * buffer, then we must stop sharing that block, transfer 5411 * ownership and setup sharing with a new arc_buf_t at the end 5412 * of the hdr's b_buf list. 5413 */ 5414 if (arc_buf_is_shared(buf)) { 5415 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); 5416 ASSERT(ARC_BUF_LAST(lastbuf)); 5417 VERIFY(!arc_buf_is_shared(lastbuf)); 5418 5419 /* 5420 * First, sever the block sharing relationship between 5421 * buf and the arc_buf_hdr_t. Then, setup a new 5422 * block sharing relationship with the last buffer 5423 * on the arc_buf_t list. 5424 */ 5425 arc_unshare_buf(hdr, buf); 5426 arc_share_buf(hdr, lastbuf); 5427 VERIFY3P(lastbuf->b_data, !=, NULL); 5428 } else if (HDR_SHARED_DATA(hdr)) { 5429 ASSERT(arc_buf_is_shared(lastbuf)); 5430 } 5431 ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL); 5432 ASSERT3P(state, !=, arc_l2c_only); 5433 5434 (void) refcount_remove_many(&state->arcs_size, 5435 HDR_GET_LSIZE(hdr), buf); 5436 5437 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { 5438 ASSERT3P(state, !=, arc_l2c_only); 5439 (void) refcount_remove_many(&state->arcs_esize[type], 5440 HDR_GET_LSIZE(hdr), buf); 5441 } 5442 5443 hdr->b_l1hdr.b_bufcnt -= 1; 5444 arc_cksum_verify(buf); 5445 #ifdef illumos 5446 arc_buf_unwatch(buf); 5447 #endif 5448 5449 mutex_exit(hash_lock); 5450 5451 /* 5452 * Allocate a new hdr. The new hdr will contain a b_pdata 5453 * buffer which will be freed in arc_write(). 
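* The new hdr starts out anonymous, takes the released buf as its only
* buffer, and inherits the caller's reference (tag).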
5454 */ 5455 nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type); 5456 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL); 5457 ASSERT0(nhdr->b_l1hdr.b_bufcnt); 5458 ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt)); 5459 VERIFY3U(nhdr->b_type, ==, type); 5460 ASSERT(!HDR_SHARED_DATA(nhdr)); 5461 5462 nhdr->b_l1hdr.b_buf = buf; 5463 nhdr->b_l1hdr.b_bufcnt = 1; 5464 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); 5465 buf->b_hdr = nhdr; 5466 5467 mutex_exit(&buf->b_evict_lock); 5468 (void) refcount_add_many(&arc_anon->arcs_size, 5469 HDR_GET_LSIZE(nhdr), buf); 5470 } else { 5471 mutex_exit(&buf->b_evict_lock); 5472 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); 5473 /* protected by hash lock, or hdr is on arc_anon */ 5474 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 5475 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5476 arc_change_state(arc_anon, hdr, hash_lock); 5477 hdr->b_l1hdr.b_arc_access = 0; 5478 mutex_exit(hash_lock); 5479 5480 buf_discard_identity(hdr); 5481 arc_buf_thaw(buf); 5482 } 5483 } 5484 5485 int 5486 arc_released(arc_buf_t *buf) 5487 { 5488 int released; 5489 5490 mutex_enter(&buf->b_evict_lock); 5491 released = (buf->b_data != NULL && 5492 buf->b_hdr->b_l1hdr.b_state == arc_anon); 5493 mutex_exit(&buf->b_evict_lock); 5494 return (released); 5495 } 5496 5497 #ifdef ZFS_DEBUG 5498 int 5499 arc_referenced(arc_buf_t *buf) 5500 { 5501 int referenced; 5502 5503 mutex_enter(&buf->b_evict_lock); 5504 referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); 5505 mutex_exit(&buf->b_evict_lock); 5506 return (referenced); 5507 } 5508 #endif 5509 5510 static void 5511 arc_write_ready(zio_t *zio) 5512 { 5513 arc_write_callback_t *callback = zio->io_private; 5514 arc_buf_t *buf = callback->awcb_buf; 5515 arc_buf_hdr_t *hdr = buf->b_hdr; 5516 uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp); 5517 5518 ASSERT(HDR_HAS_L1HDR(hdr)); 5519 ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); 5520 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 5521 5522 /* 5523 * If we're reexecuting this zio because the pool suspended, then 5524 * cleanup any state that was previously set the first time the 5525 * callback as invoked. 5526 */ 5527 if (zio->io_flags & ZIO_FLAG_REEXECUTED) { 5528 arc_cksum_free(hdr); 5529 #ifdef illumos 5530 arc_buf_unwatch(buf); 5531 #endif 5532 if (hdr->b_l1hdr.b_pdata != NULL) { 5533 if (arc_buf_is_shared(buf)) { 5534 ASSERT(HDR_SHARED_DATA(hdr)); 5535 5536 arc_unshare_buf(hdr, buf); 5537 } else { 5538 arc_hdr_free_pdata(hdr); 5539 } 5540 } 5541 } 5542 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 5543 ASSERT(!HDR_SHARED_DATA(hdr)); 5544 ASSERT(!arc_buf_is_shared(buf)); 5545 5546 callback->awcb_ready(zio, buf, callback->awcb_private); 5547 5548 if (HDR_IO_IN_PROGRESS(hdr)) 5549 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED); 5550 5551 arc_cksum_compute(buf); 5552 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5553 5554 enum zio_compress compress; 5555 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 5556 compress = ZIO_COMPRESS_OFF; 5557 } else { 5558 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp)); 5559 compress = BP_GET_COMPRESS(zio->io_bp); 5560 } 5561 HDR_SET_PSIZE(hdr, psize); 5562 arc_hdr_set_compress(hdr, compress); 5563 5564 /* 5565 * If the hdr is compressed, then copy the compressed 5566 * zio contents into arc_buf_hdr_t. Otherwise, copy the original 5567 * data buf into the hdr. 
Ideally, we would like to always copy the 5568 * io_data into b_pdata but the user may have disabled compressed 5569 * arc thus the on-disk block may or may not match what we maintain 5570 * in the hdr's b_pdata field. 5571 */ 5572 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) { 5573 ASSERT(BP_GET_COMPRESS(zio->io_bp) != ZIO_COMPRESS_OFF); 5574 ASSERT3U(psize, >, 0); 5575 arc_hdr_alloc_pdata(hdr); 5576 bcopy(zio->io_data, hdr->b_l1hdr.b_pdata, psize); 5577 } else { 5578 ASSERT3P(buf->b_data, ==, zio->io_orig_data); 5579 ASSERT3U(zio->io_orig_size, ==, HDR_GET_LSIZE(hdr)); 5580 ASSERT3U(hdr->b_l1hdr.b_byteswap, ==, DMU_BSWAP_NUMFUNCS); 5581 ASSERT(!HDR_SHARED_DATA(hdr)); 5582 ASSERT(!arc_buf_is_shared(buf)); 5583 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 5584 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 5585 5586 /* 5587 * This hdr is not compressed so we're able to share 5588 * the arc_buf_t data buffer with the hdr. 5589 */ 5590 arc_share_buf(hdr, buf); 5591 VERIFY0(bcmp(zio->io_orig_data, hdr->b_l1hdr.b_pdata, 5592 HDR_GET_LSIZE(hdr))); 5593 } 5594 arc_hdr_verify(hdr, zio->io_bp); 5595 } 5596 5597 static void 5598 arc_write_children_ready(zio_t *zio) 5599 { 5600 arc_write_callback_t *callback = zio->io_private; 5601 arc_buf_t *buf = callback->awcb_buf; 5602 5603 callback->awcb_children_ready(zio, buf, callback->awcb_private); 5604 } 5605 5606 /* 5607 * The SPA calls this callback for each physical write that happens on behalf 5608 * of a logical write. See the comment in dbuf_write_physdone() for details. 5609 */ 5610 static void 5611 arc_write_physdone(zio_t *zio) 5612 { 5613 arc_write_callback_t *cb = zio->io_private; 5614 if (cb->awcb_physdone != NULL) 5615 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 5616 } 5617 5618 static void 5619 arc_write_done(zio_t *zio) 5620 { 5621 arc_write_callback_t *callback = zio->io_private; 5622 arc_buf_t *buf = callback->awcb_buf; 5623 arc_buf_hdr_t *hdr = buf->b_hdr; 5624 5625 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5626 5627 if (zio->io_error == 0) { 5628 arc_hdr_verify(hdr, zio->io_bp); 5629 5630 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 5631 buf_discard_identity(hdr); 5632 } else { 5633 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 5634 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 5635 } 5636 } else { 5637 ASSERT(HDR_EMPTY(hdr)); 5638 } 5639 5640 /* 5641 * If the block to be written was all-zero or compressed enough to be 5642 * embedded in the BP, no write was performed so there will be no 5643 * dva/birth/checksum. The buffer must therefore remain anonymous 5644 * (and uncached). 5645 */ 5646 if (!HDR_EMPTY(hdr)) { 5647 arc_buf_hdr_t *exists; 5648 kmutex_t *hash_lock; 5649 5650 ASSERT(zio->io_error == 0); 5651 5652 arc_cksum_verify(buf); 5653 5654 exists = buf_hash_insert(hdr, &hash_lock); 5655 if (exists != NULL) { 5656 /* 5657 * This can only happen if we overwrite for 5658 * sync-to-convergence, because we remove 5659 * buffers from the hash table when we arc_free(). 
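* The cases handled below are therefore: a sync-to-convergence rewrite,
* a nopwrite that left the original block in place, or a dedup write
* whose block already existed in the DDT.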
5660 */ 5661 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 5662 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 5663 panic("bad overwrite, hdr=%p exists=%p", 5664 (void *)hdr, (void *)exists); 5665 ASSERT(refcount_is_zero( 5666 &exists->b_l1hdr.b_refcnt)); 5667 arc_change_state(arc_anon, exists, hash_lock); 5668 mutex_exit(hash_lock); 5669 arc_hdr_destroy(exists); 5670 exists = buf_hash_insert(hdr, &hash_lock); 5671 ASSERT3P(exists, ==, NULL); 5672 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 5673 /* nopwrite */ 5674 ASSERT(zio->io_prop.zp_nopwrite); 5675 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 5676 panic("bad nopwrite, hdr=%p exists=%p", 5677 (void *)hdr, (void *)exists); 5678 } else { 5679 /* Dedup */ 5680 ASSERT(hdr->b_l1hdr.b_bufcnt == 1); 5681 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 5682 ASSERT(BP_GET_DEDUP(zio->io_bp)); 5683 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 5684 } 5685 } 5686 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5687 /* if it's not anon, we are doing a scrub */ 5688 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) 5689 arc_access(hdr, hash_lock); 5690 mutex_exit(hash_lock); 5691 } else { 5692 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5693 } 5694 5695 ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5696 callback->awcb_done(zio, buf, callback->awcb_private); 5697 5698 kmem_free(callback, sizeof (arc_write_callback_t)); 5699 } 5700 5701 zio_t * 5702 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 5703 boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready, 5704 arc_done_func_t *children_ready, arc_done_func_t *physdone, 5705 arc_done_func_t *done, void *private, zio_priority_t priority, 5706 int zio_flags, const zbookmark_phys_t *zb) 5707 { 5708 arc_buf_hdr_t *hdr = buf->b_hdr; 5709 arc_write_callback_t *callback; 5710 zio_t *zio; 5711 5712 ASSERT3P(ready, !=, NULL); 5713 ASSERT3P(done, !=, NULL); 5714 ASSERT(!HDR_IO_ERROR(hdr)); 5715 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5716 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5717 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 5718 if (l2arc) 5719 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 5720 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 5721 callback->awcb_ready = ready; 5722 callback->awcb_children_ready = children_ready; 5723 callback->awcb_physdone = physdone; 5724 callback->awcb_done = done; 5725 callback->awcb_private = private; 5726 callback->awcb_buf = buf; 5727 5728 /* 5729 * The hdr's b_pdata is now stale, free it now. A new data block 5730 * will be allocated when the zio pipeline calls arc_write_ready(). 5731 */ 5732 if (hdr->b_l1hdr.b_pdata != NULL) { 5733 /* 5734 * If the buf is currently sharing the data block with 5735 * the hdr then we need to break that relationship here. 5736 * The hdr will remain with a NULL data pointer and the 5737 * buf will take sole ownership of the block. 5738 */ 5739 if (arc_buf_is_shared(buf)) { 5740 ASSERT(ARC_BUF_LAST(buf)); 5741 arc_unshare_buf(hdr, buf); 5742 } else { 5743 arc_hdr_free_pdata(hdr); 5744 } 5745 VERIFY3P(buf->b_data, !=, NULL); 5746 arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF); 5747 } 5748 ASSERT(!arc_buf_is_shared(buf)); 5749 ASSERT3P(hdr->b_l1hdr.b_pdata, ==, NULL); 5750 5751 zio = zio_write(pio, spa, txg, bp, buf->b_data, HDR_GET_LSIZE(hdr), zp, 5752 arc_write_ready, 5753 (children_ready != NULL) ? 
arc_write_children_ready : NULL, 5754 arc_write_physdone, arc_write_done, callback, 5755 priority, zio_flags, zb); 5756 5757 return (zio); 5758 } 5759 5760 static int 5761 arc_memory_throttle(uint64_t reserve, uint64_t txg) 5762 { 5763 #ifdef _KERNEL 5764 uint64_t available_memory = ptob(freemem); 5765 static uint64_t page_load = 0; 5766 static uint64_t last_txg = 0; 5767 5768 #if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC) 5769 available_memory = 5770 MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE))); 5771 #endif 5772 5773 if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100) 5774 return (0); 5775 5776 if (txg > last_txg) { 5777 last_txg = txg; 5778 page_load = 0; 5779 } 5780 /* 5781 * If we are in pageout, we know that memory is already tight, 5782 * the arc is already going to be evicting, so we just want to 5783 * continue to let page writes occur as quickly as possible. 5784 */ 5785 if (curlwp == uvm.pagedaemon_lwp) { 5786 if (page_load > MAX(ptob(minfree), available_memory) / 4) 5787 return (SET_ERROR(ERESTART)); 5788 /* Note: reserve is inflated, so we deflate */ 5789 page_load += reserve / 8; 5790 return (0); 5791 } else if (page_load > 0 && arc_reclaim_needed()) { 5792 /* memory is low, delay before restarting */ 5793 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 5794 return (SET_ERROR(EAGAIN)); 5795 } 5796 page_load = 0; 5797 #endif 5798 return (0); 5799 } 5800 5801 void 5802 arc_tempreserve_clear(uint64_t reserve) 5803 { 5804 atomic_add_64(&arc_tempreserve, -reserve); 5805 ASSERT((int64_t)arc_tempreserve >= 0); 5806 } 5807 5808 int 5809 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 5810 { 5811 int error; 5812 uint64_t anon_size; 5813 5814 if (reserve > arc_c/4 && !arc_no_grow) { 5815 arc_c = MIN(arc_c_max, reserve * 4); 5816 DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c); 5817 } 5818 if (reserve > arc_c) 5819 return (SET_ERROR(ENOMEM)); 5820 5821 /* 5822 * Don't count loaned bufs as in flight dirty data to prevent long 5823 * network delays from blocking transactions that are ready to be 5824 * assigned to a txg. 5825 */ 5826 anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) - 5827 arc_loaned_bytes), 0); 5828 5829 /* 5830 * Writes will, almost always, require additional memory allocations 5831 * in order to compress/encrypt/etc the data. We therefore need to 5832 * make sure that there is sufficient available memory for this. 5833 */ 5834 error = arc_memory_throttle(reserve, txg); 5835 if (error != 0) 5836 return (error); 5837 5838 /* 5839 * Throttle writes when the amount of dirty data in the cache 5840 * gets too large. We try to keep the cache less than half full 5841 * of dirty blocks so that our sync times don't grow too large. 5842 * Note: if two requests come in concurrently, we might let them 5843 * both succeed, when one of them should fail. Not a huge deal. 
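* Concretely, the check below returns ERESTART once outstanding
* reservations plus anonymous (dirty) data exceed half of arc_c while
* the anonymous data alone exceeds a quarter of arc_c; e.g. with
* arc_c = 4GB we throttle at 2GB of combined dirty/reserved data and
* 1GB of anonymous data.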
5844 */ 5845 5846 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 5847 anon_size > arc_c / 4) { 5848 uint64_t meta_esize = 5849 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 5850 uint64_t data_esize = 5851 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 5852 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 5853 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 5854 arc_tempreserve >> 10, meta_esize >> 10, 5855 data_esize >> 10, reserve >> 10, arc_c >> 10); 5856 return (SET_ERROR(ERESTART)); 5857 } 5858 atomic_add_64(&arc_tempreserve, reserve); 5859 return (0); 5860 } 5861 5862 static void 5863 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, 5864 kstat_named_t *evict_data, kstat_named_t *evict_metadata) 5865 { 5866 size->value.ui64 = refcount_count(&state->arcs_size); 5867 evict_data->value.ui64 = 5868 refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); 5869 evict_metadata->value.ui64 = 5870 refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); 5871 } 5872 5873 static int 5874 arc_kstat_update(kstat_t *ksp, int rw) 5875 { 5876 arc_stats_t *as = ksp->ks_data; 5877 5878 if (rw == KSTAT_WRITE) { 5879 return (EACCES); 5880 } else { 5881 arc_kstat_update_state(arc_anon, 5882 &as->arcstat_anon_size, 5883 &as->arcstat_anon_evictable_data, 5884 &as->arcstat_anon_evictable_metadata); 5885 arc_kstat_update_state(arc_mru, 5886 &as->arcstat_mru_size, 5887 &as->arcstat_mru_evictable_data, 5888 &as->arcstat_mru_evictable_metadata); 5889 arc_kstat_update_state(arc_mru_ghost, 5890 &as->arcstat_mru_ghost_size, 5891 &as->arcstat_mru_ghost_evictable_data, 5892 &as->arcstat_mru_ghost_evictable_metadata); 5893 arc_kstat_update_state(arc_mfu, 5894 &as->arcstat_mfu_size, 5895 &as->arcstat_mfu_evictable_data, 5896 &as->arcstat_mfu_evictable_metadata); 5897 arc_kstat_update_state(arc_mfu_ghost, 5898 &as->arcstat_mfu_ghost_size, 5899 &as->arcstat_mfu_ghost_evictable_data, 5900 &as->arcstat_mfu_ghost_evictable_metadata); 5901 } 5902 5903 return (0); 5904 } 5905 5906 /* 5907 * This function *must* return indices evenly distributed between all 5908 * sublists of the multilist. This is needed due to how the ARC eviction 5909 * code is laid out; arc_evict_state() assumes ARC buffers are evenly 5910 * distributed between all sublists and uses this assumption when 5911 * deciding which sublist to evict from and how much to evict from it. 5912 */ 5913 unsigned int 5914 arc_state_multilist_index_func(multilist_t *ml, void *obj) 5915 { 5916 arc_buf_hdr_t *hdr = obj; 5917 5918 /* 5919 * We rely on b_dva to generate evenly distributed index 5920 * numbers using buf_hash below. So, as an added precaution, 5921 * let's make sure we never add empty buffers to the arc lists. 5922 */ 5923 ASSERT(!HDR_EMPTY(hdr)); 5924 5925 /* 5926 * The assumption here is that the hash value for a given 5927 * arc_buf_hdr_t will remain constant throughout its lifetime 5928 * (i.e. its b_spa, b_dva, and b_birth fields don't change). 5929 * Thus, we don't need to store the header's sublist index 5930 * on insertion, as this index can be recalculated on removal. 5931 * 5932 * Also, the low order bits of the hash value are thought to be 5933 * distributed evenly. Otherwise, in the case that the multilist 5934 * has a power of two number of sublists, each sublist's usage 5935 * would not be evenly distributed.
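* Reducing the hash modulo the sublist count below therefore yields an
* approximately uniform sublist index, provided buf_hash() mixes its
* inputs well.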
5936 */ 5937 return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % 5938 multilist_get_num_sublists(ml)); 5939 } 5940 5941 #ifdef _KERNEL 5942 #ifdef __FreeBSD__ 5943 static eventhandler_tag arc_event_lowmem = NULL; 5944 #endif 5945 5946 static void 5947 arc_lowmem(void *arg __unused, int howto __unused) 5948 { 5949 5950 mutex_enter(&arc_reclaim_lock); 5951 /* XXX: Memory deficit should be passed as argument. */ 5952 needfree = btoc(arc_c >> arc_shrink_shift); 5953 DTRACE_PROBE(arc__needfree); 5954 cv_signal(&arc_reclaim_thread_cv); 5955 5956 /* 5957 * It is unsafe to block here in arbitrary threads, because we can come 5958 * here from ARC itself and may hold ARC locks and thus risk a deadlock 5959 * with ARC reclaim thread. 5960 */ 5961 if (curlwp == uvm.pagedaemon_lwp) 5962 (void) cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock); 5963 mutex_exit(&arc_reclaim_lock); 5964 } 5965 #endif 5966 5967 static void 5968 arc_state_init(void) 5969 { 5970 arc_anon = &ARC_anon; 5971 arc_mru = &ARC_mru; 5972 arc_mru_ghost = &ARC_mru_ghost; 5973 arc_mfu = &ARC_mfu; 5974 arc_mfu_ghost = &ARC_mfu_ghost; 5975 arc_l2c_only = &ARC_l2c_only; 5976 5977 multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 5978 sizeof (arc_buf_hdr_t), 5979 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5980 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5981 multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 5982 sizeof (arc_buf_hdr_t), 5983 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5984 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5985 multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 5986 sizeof (arc_buf_hdr_t), 5987 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5988 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5989 multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 5990 sizeof (arc_buf_hdr_t), 5991 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5992 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5993 multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 5994 sizeof (arc_buf_hdr_t), 5995 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5996 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5997 multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 5998 sizeof (arc_buf_hdr_t), 5999 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6000 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 6001 multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 6002 sizeof (arc_buf_hdr_t), 6003 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6004 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 6005 multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 6006 sizeof (arc_buf_hdr_t), 6007 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6008 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 6009 multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 6010 sizeof (arc_buf_hdr_t), 6011 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6012 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 6013 multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 6014 sizeof (arc_buf_hdr_t), 6015 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6016 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 6017 6018 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 6019 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 6020 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 6021 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 6022 
refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 6023 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 6024 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 6025 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 6026 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 6027 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 6028 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 6029 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 6030 6031 refcount_create(&arc_anon->arcs_size); 6032 refcount_create(&arc_mru->arcs_size); 6033 refcount_create(&arc_mru_ghost->arcs_size); 6034 refcount_create(&arc_mfu->arcs_size); 6035 refcount_create(&arc_mfu_ghost->arcs_size); 6036 refcount_create(&arc_l2c_only->arcs_size); 6037 } 6038 6039 static void 6040 arc_state_fini(void) 6041 { 6042 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 6043 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 6044 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 6045 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 6046 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 6047 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 6048 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 6049 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 6050 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 6051 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 6052 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 6053 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 6054 6055 refcount_destroy(&arc_anon->arcs_size); 6056 refcount_destroy(&arc_mru->arcs_size); 6057 refcount_destroy(&arc_mru_ghost->arcs_size); 6058 refcount_destroy(&arc_mfu->arcs_size); 6059 refcount_destroy(&arc_mfu_ghost->arcs_size); 6060 refcount_destroy(&arc_l2c_only->arcs_size); 6061 6062 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 6063 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 6064 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 6065 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 6066 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 6067 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 6068 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 6069 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 6070 } 6071 6072 uint64_t 6073 arc_max_bytes(void) 6074 { 6075 return (arc_c_max); 6076 } 6077 6078 void 6079 arc_init(void) 6080 { 6081 int i, prefetch_tunable_set = 0; 6082 6083 mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL); 6084 cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL); 6085 cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL); 6086 6087 #ifdef __FreeBSD__ 6088 mutex_init(&arc_dnlc_evicts_lock, NULL, MUTEX_DEFAULT, NULL); 6089 cv_init(&arc_dnlc_evicts_cv, NULL, CV_DEFAULT, NULL); 6090 #endif 6091 6092 /* Convert seconds to clock ticks */ 6093 arc_min_prefetch_lifespan = 1 * hz; 6094 6095 /* Start out with 1/8 of all memory */ 6096 arc_c = kmem_size() / 8; 6097 6098 #ifdef illumos 6099 #ifdef _KERNEL 6100 /* 6101 * On architectures where the physical memory can be larger 6102 * than the addressable space (intel in 32-bit mode), we may 6103 * need to limit the cache to 1/8 of VM size. 
6104 */ 6105 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 6106 #endif 6107 #endif /* illumos */ 6108 /* set min cache to 1/32 of all memory, or arc_abs_min, whichever is more */ 6109 arc_c_min = MAX(arc_c / 4, arc_abs_min); 6110 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */ 6111 if (arc_c * 8 >= 1 << 30) 6112 arc_c_max = (arc_c * 8) - (1 << 30); 6113 else 6114 arc_c_max = arc_c_min; 6115 arc_c_max = MAX(arc_c * 5, arc_c_max); 6116 6117 /* 6118 * In userland, there's only the memory pressure that we artificially 6119 * create (see arc_available_memory()). Don't let arc_c get too 6120 * small, because it can cause transactions to be larger than 6121 * arc_c, causing arc_tempreserve_space() to fail. 6122 */ 6123 #ifndef _KERNEL 6124 arc_c_min = arc_c_max / 2; 6125 #endif 6126 6127 #ifdef _KERNEL 6128 /* 6129 * Allow the tunables to override our calculations if they are 6130 * reasonable. 6131 */ 6132 if (zfs_arc_max > arc_abs_min && zfs_arc_max < kmem_size()) { 6133 arc_c_max = zfs_arc_max; 6134 arc_c_min = MIN(arc_c_min, arc_c_max); 6135 } 6136 if (zfs_arc_min > arc_abs_min && zfs_arc_min <= arc_c_max) 6137 arc_c_min = zfs_arc_min; 6138 #endif 6139 6140 arc_c = arc_c_max; 6141 arc_p = (arc_c >> 1); 6142 arc_size = 0; 6143 6144 /* limit meta-data to 1/4 of the arc capacity */ 6145 arc_meta_limit = arc_c_max / 4; 6146 6147 /* Allow the tunable to override if it is reasonable */ 6148 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 6149 arc_meta_limit = zfs_arc_meta_limit; 6150 6151 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 6152 arc_c_min = arc_meta_limit / 2; 6153 6154 if (zfs_arc_meta_min > 0) { 6155 arc_meta_min = zfs_arc_meta_min; 6156 } else { 6157 arc_meta_min = arc_c_min / 2; 6158 } 6159 6160 if (zfs_arc_grow_retry > 0) 6161 arc_grow_retry = zfs_arc_grow_retry; 6162 6163 if (zfs_arc_shrink_shift > 0) 6164 arc_shrink_shift = zfs_arc_shrink_shift; 6165 6166 /* 6167 * Ensure that arc_no_grow_shift is less than arc_shrink_shift. 6168 */ 6169 if (arc_no_grow_shift >= arc_shrink_shift) 6170 arc_no_grow_shift = arc_shrink_shift - 1; 6171 6172 if (zfs_arc_p_min_shift > 0) 6173 arc_p_min_shift = zfs_arc_p_min_shift; 6174 6175 if (zfs_arc_num_sublists_per_state < 1) 6176 zfs_arc_num_sublists_per_state = MAX(max_ncpus, 1); 6177 6178 /* if kmem_flags are set, lets try to use less memory */ 6179 if (kmem_debugging()) 6180 arc_c = arc_c / 2; 6181 if (arc_c < arc_c_min) 6182 arc_c = arc_c_min; 6183 6184 zfs_arc_min = arc_c_min; 6185 zfs_arc_max = arc_c_max; 6186 6187 arc_state_init(); 6188 buf_init(); 6189 6190 arc_reclaim_thread_exit = B_FALSE; 6191 #ifdef __FreeBSD__ 6192 arc_dnlc_evicts_thread_exit = FALSE; 6193 #endif 6194 6195 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 6196 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 6197 6198 if (arc_ksp != NULL) { 6199 arc_ksp->ks_data = &arc_stats; 6200 arc_ksp->ks_update = arc_kstat_update; 6201 kstat_install(arc_ksp); 6202 } 6203 6204 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 6205 TS_RUN, minclsyspri); 6206 6207 #ifdef __FreeBSD__ 6208 #ifdef _KERNEL 6209 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 6210 EVENTHANDLER_PRI_FIRST); 6211 #endif 6212 6213 (void) thread_create(NULL, 0, arc_dnlc_evicts_thread, NULL, 0, &p0, 6214 TS_RUN, minclsyspri); 6215 #endif 6216 6217 arc_dead = B_FALSE; 6218 arc_warm = B_FALSE; 6219 6220 /* 6221 * Calculate maximum amount of dirty data per pool. 
6222 * 6223 * If it has been set by /etc/system, take that. 6224 * Otherwise, use a percentage of physical memory defined by 6225 * zfs_dirty_data_max_percent (default 10%) with a cap at 6226 * zfs_dirty_data_max_max (default 4GB). 6227 */ 6228 if (zfs_dirty_data_max == 0) { 6229 zfs_dirty_data_max = ptob(physmem) * 6230 zfs_dirty_data_max_percent / 100; 6231 zfs_dirty_data_max = MIN(zfs_dirty_data_max, 6232 zfs_dirty_data_max_max); 6233 } 6234 6235 #ifdef _KERNEL 6236 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 6237 prefetch_tunable_set = 1; 6238 6239 #ifdef __i386__ 6240 if (prefetch_tunable_set == 0) { 6241 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 6242 "-- to enable,\n"); 6243 printf(" add \"vfs.zfs.prefetch_disable=0\" " 6244 "to /boot/loader.conf.\n"); 6245 zfs_prefetch_disable = 1; 6246 } 6247 #else 6248 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 6249 prefetch_tunable_set == 0) { 6250 printf("ZFS NOTICE: Prefetch is disabled by default if less " 6251 "than 4GB of RAM is present;\n" 6252 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 6253 "to /boot/loader.conf.\n"); 6254 zfs_prefetch_disable = 1; 6255 } 6256 #endif 6257 /* Warn about ZFS memory and address space requirements. */ 6258 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 6259 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 6260 "expect unstable behavior.\n"); 6261 } 6262 if (kmem_size() < 512 * (1 << 20)) { 6263 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 6264 "expect unstable behavior.\n"); 6265 printf(" Consider tuning vm.kmem_size and " 6266 "vm.kmem_size_max\n"); 6267 printf(" in /boot/loader.conf.\n"); 6268 } 6269 #endif 6270 } 6271 6272 void 6273 arc_fini(void) 6274 { 6275 mutex_enter(&arc_reclaim_lock); 6276 arc_reclaim_thread_exit = B_TRUE; 6277 /* 6278 * The reclaim thread will set arc_reclaim_thread_exit back to 6279 * B_FALSE when it is finished exiting; we're waiting for that. 6280 */ 6281 while (arc_reclaim_thread_exit) { 6282 cv_signal(&arc_reclaim_thread_cv); 6283 cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock); 6284 } 6285 mutex_exit(&arc_reclaim_lock); 6286 6287 /* Use B_TRUE to ensure *all* buffers are evicted */ 6288 arc_flush(NULL, B_TRUE); 6289 6290 #ifdef __FreeBSD__ 6291 mutex_enter(&arc_dnlc_evicts_lock); 6292 arc_dnlc_evicts_thread_exit = TRUE; 6293 6294 /* 6295 * The user evicts thread will set arc_user_evicts_thread_exit 6296 * to FALSE when it is finished exiting; we're waiting for that. 6297 */ 6298 while (arc_dnlc_evicts_thread_exit) { 6299 cv_signal(&arc_dnlc_evicts_cv); 6300 cv_wait(&arc_dnlc_evicts_cv, &arc_dnlc_evicts_lock); 6301 } 6302 mutex_exit(&arc_dnlc_evicts_lock); 6303 6304 mutex_destroy(&arc_dnlc_evicts_lock); 6305 cv_destroy(&arc_dnlc_evicts_cv); 6306 #endif 6307 6308 arc_dead = B_TRUE; 6309 6310 if (arc_ksp != NULL) { 6311 kstat_delete(arc_ksp); 6312 arc_ksp = NULL; 6313 } 6314 6315 mutex_destroy(&arc_reclaim_lock); 6316 cv_destroy(&arc_reclaim_thread_cv); 6317 cv_destroy(&arc_reclaim_waiters_cv); 6318 6319 arc_state_fini(); 6320 buf_fini(); 6321 6322 ASSERT0(arc_loaned_bytes); 6323 6324 #ifdef __FreeBSD__ 6325 #ifdef _KERNEL 6326 if (arc_event_lowmem != NULL) 6327 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 6328 #endif 6329 #endif 6330 } 6331 6332 /* 6333 * Level 2 ARC 6334 * 6335 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 
6336 * It uses dedicated storage devices to hold cached data, which are populated 6337 * using large infrequent writes. The main role of this cache is to boost 6338 * the performance of random read workloads. The intended L2ARC devices 6339 * include short-stroked disks, solid state disks, and other media with 6340 * substantially faster read latency than disk. 6341 * 6342 * +-----------------------+ 6343 * | ARC | 6344 * +-----------------------+ 6345 * | ^ ^ 6346 * | | | 6347 * l2arc_feed_thread() arc_read() 6348 * | | | 6349 * | l2arc read | 6350 * V | | 6351 * +---------------+ | 6352 * | L2ARC | | 6353 * +---------------+ | 6354 * | ^ | 6355 * l2arc_write() | | 6356 * | | | 6357 * V | | 6358 * +-------+ +-------+ 6359 * | vdev | | vdev | 6360 * | cache | | cache | 6361 * +-------+ +-------+ 6362 * +=========+ .-----. 6363 * : L2ARC : |-_____-| 6364 * : devices : | Disks | 6365 * +=========+ `-_____-' 6366 * 6367 * Read requests are satisfied from the following sources, in order: 6368 * 6369 * 1) ARC 6370 * 2) vdev cache of L2ARC devices 6371 * 3) L2ARC devices 6372 * 4) vdev cache of disks 6373 * 5) disks 6374 * 6375 * Some L2ARC device types exhibit extremely slow write performance. 6376 * To accommodate for this there are some significant differences between 6377 * the L2ARC and traditional cache design: 6378 * 6379 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 6380 * the ARC behave as usual, freeing buffers and placing headers on ghost 6381 * lists. The ARC does not send buffers to the L2ARC during eviction as 6382 * this would add inflated write latencies for all ARC memory pressure. 6383 * 6384 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 6385 * It does this by periodically scanning buffers from the eviction-end of 6386 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 6387 * not already there. It scans until a headroom of buffers is satisfied, 6388 * which itself is a buffer for ARC eviction. If a compressible buffer is 6389 * found during scanning and selected for writing to an L2ARC device, we 6390 * temporarily boost scanning headroom during the next scan cycle to make 6391 * sure we adapt to compression effects (which might significantly reduce 6392 * the data volume we write to L2ARC). The thread that does this is 6393 * l2arc_feed_thread(), illustrated below; example sizes are included to 6394 * provide a better sense of ratio than this diagram: 6395 * 6396 * head --> tail 6397 * +---------------------+----------+ 6398 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 6399 * +---------------------+----------+ | o L2ARC eligible 6400 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 6401 * +---------------------+----------+ | 6402 * 15.9 Gbytes ^ 32 Mbytes | 6403 * headroom | 6404 * l2arc_feed_thread() 6405 * | 6406 * l2arc write hand <--[oooo]--' 6407 * | 8 Mbyte 6408 * | write max 6409 * V 6410 * +==============================+ 6411 * L2ARC dev |####|#|###|###| |####| ... | 6412 * +==============================+ 6413 * 32 Gbytes 6414 * 6415 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 6416 * evicted, then the L2ARC has cached a buffer much sooner than it probably 6417 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 6418 * safe to say that this is an uncommon case, since buffers at the end of 6419 * the ARC lists have moved there due to inactivity. 6420 * 6421 * 4. 
If the ARC evicts faster than the L2ARC can maintain a headroom, 6422 * then the L2ARC simply misses copying some buffers. This serves as a 6423 * pressure valve to prevent heavy read workloads from both stalling the ARC 6424 * with waits and clogging the L2ARC with writes. This also helps prevent 6425 * the potential for the L2ARC to churn if it attempts to cache content too 6426 * quickly, such as during backups of the entire pool. 6427 * 6428 * 5. After system boot and before the ARC has filled main memory, there are 6429 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 6430 * lists can remain mostly static. Instead of searching from tail of these 6431 * lists as pictured, the l2arc_feed_thread() will search from the list heads 6432 * for eligible buffers, greatly increasing its chance of finding them. 6433 * 6434 * The L2ARC device write speed is also boosted during this time so that 6435 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 6436 * there are no L2ARC reads, and no fear of degrading read performance 6437 * through increased writes. 6438 * 6439 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 6440 * the vdev queue can aggregate them into larger and fewer writes. Each 6441 * device is written to in a rotor fashion, sweeping writes through 6442 * available space then repeating. 6443 * 6444 * 7. The L2ARC does not store dirty content. It never needs to flush 6445 * write buffers back to disk based storage. 6446 * 6447 * 8. If an ARC buffer is written (and dirtied) which also exists in the 6448 * L2ARC, the now stale L2ARC buffer is immediately dropped. 6449 * 6450 * The performance of the L2ARC can be tweaked by a number of tunables, which 6451 * may be necessary for different workloads: 6452 * 6453 * l2arc_write_max max write bytes per interval 6454 * l2arc_write_boost extra write bytes during device warmup 6455 * l2arc_noprefetch skip caching prefetched buffers 6456 * l2arc_headroom number of max device writes to precache 6457 * l2arc_headroom_boost when we find compressed buffers during ARC 6458 * scanning, we multiply headroom by this 6459 * percentage factor for the next scan cycle, 6460 * since more compressed buffers are likely to 6461 * be present 6462 * l2arc_feed_secs seconds between L2ARC writing 6463 * 6464 * Tunables may be removed or added as future performance improvements are 6465 * integrated, and also may become zpool properties. 6466 * 6467 * There are three key functions that control how the L2ARC warms up: 6468 * 6469 * l2arc_write_eligible() check if a buffer is eligible to cache 6470 * l2arc_write_size() calculate how much to write 6471 * l2arc_write_interval() calculate sleep delay between writes 6472 * 6473 * These three functions determine what to write, how much, and how quickly 6474 * to send writes. 6475 */ 6476 6477 static boolean_t 6478 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr) 6479 { 6480 /* 6481 * A buffer is *not* eligible for the L2ARC if it: 6482 * 1. belongs to a different spa. 6483 * 2. is already cached on the L2ARC. 6484 * 3. has an I/O in progress (it may be an incomplete read). 6485 * 4. is flagged not eligible (zfs property). 
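* Each rejection reason below is counted in its own arcstat, so the
* cause of a poorly feeding L2ARC can be read from the statistics.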
6486 */ 6487 if (hdr->b_spa != spa_guid) { 6488 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 6489 return (B_FALSE); 6490 } 6491 if (HDR_HAS_L2HDR(hdr)) { 6492 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 6493 return (B_FALSE); 6494 } 6495 if (HDR_IO_IN_PROGRESS(hdr)) { 6496 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 6497 return (B_FALSE); 6498 } 6499 if (!HDR_L2CACHE(hdr)) { 6500 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 6501 return (B_FALSE); 6502 } 6503 6504 return (B_TRUE); 6505 } 6506 6507 static uint64_t 6508 l2arc_write_size(void) 6509 { 6510 uint64_t size; 6511 6512 /* 6513 * Make sure our globals have meaningful values in case the user 6514 * altered them. 6515 */ 6516 size = l2arc_write_max; 6517 if (size == 0) { 6518 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 6519 "be greater than zero, resetting it to the default (%d)", 6520 L2ARC_WRITE_SIZE); 6521 size = l2arc_write_max = L2ARC_WRITE_SIZE; 6522 } 6523 6524 if (arc_warm == B_FALSE) 6525 size += l2arc_write_boost; 6526 6527 return (size); 6528 6529 } 6530 6531 static clock_t 6532 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 6533 { 6534 clock_t interval, next, now; 6535 6536 /* 6537 * If the ARC lists are busy, increase our write rate; if the 6538 * lists are stale, idle back. This is achieved by checking 6539 * how much we previously wrote - if it was more than half of 6540 * what we wanted, schedule the next write much sooner. 6541 */ 6542 if (l2arc_feed_again && wrote > (wanted / 2)) 6543 interval = (hz * l2arc_feed_min_ms) / 1000; 6544 else 6545 interval = hz * l2arc_feed_secs; 6546 6547 now = ddi_get_lbolt(); 6548 next = MAX(now, MIN(now + interval, began + interval)); 6549 6550 return (next); 6551 } 6552 6553 /* 6554 * Cycle through L2ARC devices. This is how L2ARC load balances. 6555 * If a device is returned, this also returns holding the spa config lock. 6556 */ 6557 static l2arc_dev_t * 6558 l2arc_dev_get_next(void) 6559 { 6560 l2arc_dev_t *first, *next = NULL; 6561 6562 /* 6563 * Lock out the removal of spas (spa_namespace_lock), then removal 6564 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 6565 * both locks will be dropped and a spa config lock held instead. 6566 */ 6567 mutex_enter(&spa_namespace_lock); 6568 mutex_enter(&l2arc_dev_mtx); 6569 6570 /* if there are no vdevs, there is nothing to do */ 6571 if (l2arc_ndev == 0) 6572 goto out; 6573 6574 first = NULL; 6575 next = l2arc_dev_last; 6576 do { 6577 /* loop around the list looking for a non-faulted vdev */ 6578 if (next == NULL) { 6579 next = list_head(l2arc_dev_list); 6580 } else { 6581 next = list_next(l2arc_dev_list, next); 6582 if (next == NULL) 6583 next = list_head(l2arc_dev_list); 6584 } 6585 6586 /* if we have come back to the start, bail out */ 6587 if (first == NULL) 6588 first = next; 6589 else if (next == first) 6590 break; 6591 6592 } while (vdev_is_dead(next->l2ad_vdev)); 6593 6594 /* if we were unable to find any usable vdevs, return NULL */ 6595 if (vdev_is_dead(next->l2ad_vdev)) 6596 next = NULL; 6597 6598 l2arc_dev_last = next; 6599 6600 out: 6601 mutex_exit(&l2arc_dev_mtx); 6602 6603 /* 6604 * Grab the config lock to prevent the 'next' device from being 6605 * removed while we are writing to it. 6606 */ 6607 if (next != NULL) 6608 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 6609 mutex_exit(&spa_namespace_lock); 6610 6611 return (next); 6612 } 6613 6614 /* 6615 * Free buffers that were tagged for destruction. 
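* These are the private data copies queued by l2arc_write_buffers()
* via l2arc_free_data_on_write(); they are freed here once the write
* zio that referenced them has completed.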
6616 */ 6617 static void 6618 l2arc_do_free_on_write() 6619 { 6620 list_t *buflist; 6621 l2arc_data_free_t *df, *df_prev; 6622 6623 mutex_enter(&l2arc_free_on_write_mtx); 6624 buflist = l2arc_free_on_write; 6625 6626 for (df = list_tail(buflist); df; df = df_prev) { 6627 df_prev = list_prev(buflist, df); 6628 ASSERT3P(df->l2df_data, !=, NULL); 6629 if (df->l2df_type == ARC_BUFC_METADATA) { 6630 zio_buf_free(df->l2df_data, df->l2df_size); 6631 } else { 6632 ASSERT(df->l2df_type == ARC_BUFC_DATA); 6633 zio_data_buf_free(df->l2df_data, df->l2df_size); 6634 } 6635 list_remove(buflist, df); 6636 kmem_free(df, sizeof (l2arc_data_free_t)); 6637 } 6638 6639 mutex_exit(&l2arc_free_on_write_mtx); 6640 } 6641 6642 /* 6643 * A write to a cache device has completed. Update all headers to allow 6644 * reads from these buffers to begin. 6645 */ 6646 static void 6647 l2arc_write_done(zio_t *zio) 6648 { 6649 l2arc_write_callback_t *cb; 6650 l2arc_dev_t *dev; 6651 list_t *buflist; 6652 arc_buf_hdr_t *head, *hdr, *hdr_prev; 6653 kmutex_t *hash_lock; 6654 int64_t bytes_dropped = 0; 6655 6656 cb = zio->io_private; 6657 ASSERT3P(cb, !=, NULL); 6658 dev = cb->l2wcb_dev; 6659 ASSERT3P(dev, !=, NULL); 6660 head = cb->l2wcb_head; 6661 ASSERT3P(head, !=, NULL); 6662 buflist = &dev->l2ad_buflist; 6663 ASSERT3P(buflist, !=, NULL); 6664 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 6665 l2arc_write_callback_t *, cb); 6666 6667 if (zio->io_error != 0) 6668 ARCSTAT_BUMP(arcstat_l2_writes_error); 6669 6670 /* 6671 * All writes completed, or an error was hit. 6672 */ 6673 top: 6674 mutex_enter(&dev->l2ad_mtx); 6675 for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { 6676 hdr_prev = list_prev(buflist, hdr); 6677 6678 hash_lock = HDR_LOCK(hdr); 6679 6680 /* 6681 * We cannot use mutex_enter or else we can deadlock 6682 * with l2arc_write_buffers (due to swapping the order 6683 * the hash lock and l2ad_mtx are taken). 6684 */ 6685 if (!mutex_tryenter(hash_lock)) { 6686 /* 6687 * Missed the hash lock. We must retry so we 6688 * don't leave the ARC_FLAG_L2_WRITING bit set. 6689 */ 6690 ARCSTAT_BUMP(arcstat_l2_writes_lock_retry); 6691 6692 /* 6693 * We don't want to rescan the headers we've 6694 * already marked as having been written out, so 6695 * we reinsert the head node so we can pick up 6696 * where we left off. 6697 */ 6698 list_remove(buflist, head); 6699 list_insert_after(buflist, hdr, head); 6700 6701 mutex_exit(&dev->l2ad_mtx); 6702 6703 /* 6704 * We wait for the hash lock to become available 6705 * to try and prevent busy waiting, and increase 6706 * the chance we'll be able to acquire the lock 6707 * the next time around. 6708 */ 6709 mutex_enter(hash_lock); 6710 mutex_exit(hash_lock); 6711 goto top; 6712 } 6713 6714 /* 6715 * We could not have been moved into the arc_l2c_only 6716 * state while in-flight due to our ARC_FLAG_L2_WRITING 6717 * bit being set. Let's just ensure that's being enforced. 6718 */ 6719 ASSERT(HDR_HAS_L1HDR(hdr)); 6720 6721 if (zio->io_error != 0) { 6722 /* 6723 * Error - drop L2ARC entry. 6724 */ 6725 list_remove(buflist, hdr); 6726 l2arc_trim(hdr); 6727 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 6728 6729 ARCSTAT_INCR(arcstat_l2_asize, -arc_hdr_size(hdr)); 6730 ARCSTAT_INCR(arcstat_l2_size, -HDR_GET_LSIZE(hdr)); 6731 6732 bytes_dropped += arc_hdr_size(hdr); 6733 (void) refcount_remove_many(&dev->l2ad_alloc, 6734 arc_hdr_size(hdr), hdr); 6735 } 6736 6737 /* 6738 * Allow ARC to begin reads and ghost list evictions to 6739 * this L2ARC entry. 
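* Clearing ARC_FLAG_L2_WRITING is what allows this: the L2ARC read path
* in arc_read() skips headers that are still being written, and
* l2arc_evict() expects the flag to already be clear by the time it
* processes an entry.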
6740 */ 6741 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING); 6742 6743 mutex_exit(hash_lock); 6744 } 6745 6746 atomic_inc_64(&l2arc_writes_done); 6747 list_remove(buflist, head); 6748 ASSERT(!HDR_HAS_L1HDR(head)); 6749 kmem_cache_free(hdr_l2only_cache, head); 6750 mutex_exit(&dev->l2ad_mtx); 6751 6752 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); 6753 6754 l2arc_do_free_on_write(); 6755 6756 kmem_free(cb, sizeof (l2arc_write_callback_t)); 6757 } 6758 6759 /* 6760 * A read to a cache device completed. Validate buffer contents before 6761 * handing over to the regular ARC routines. 6762 */ 6763 static void 6764 l2arc_read_done(zio_t *zio) 6765 { 6766 l2arc_read_callback_t *cb; 6767 arc_buf_hdr_t *hdr; 6768 kmutex_t *hash_lock; 6769 boolean_t valid_cksum; 6770 6771 ASSERT3P(zio->io_vd, !=, NULL); 6772 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 6773 6774 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 6775 6776 cb = zio->io_private; 6777 ASSERT3P(cb, !=, NULL); 6778 hdr = cb->l2rcb_hdr; 6779 ASSERT3P(hdr, !=, NULL); 6780 6781 hash_lock = HDR_LOCK(hdr); 6782 mutex_enter(hash_lock); 6783 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 6784 6785 /* 6786 * If the data was read into a temporary buffer, 6787 * move it and free the buffer. 6788 */ 6789 if (cb->l2rcb_data != NULL) { 6790 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); 6791 if (zio->io_error == 0) { 6792 bcopy(cb->l2rcb_data, hdr->b_l1hdr.b_pdata, 6793 arc_hdr_size(hdr)); 6794 } 6795 6796 /* 6797 * The following must be done regardless of whether 6798 * there was an error: 6799 * - free the temporary buffer 6800 * - point zio to the real ARC buffer 6801 * - set zio size accordingly 6802 * These are required because zio is either re-used for 6803 * an I/O of the block in the case of the error 6804 * or the zio is passed to arc_read_done() and it 6805 * needs real data. 6806 */ 6807 zio_data_buf_free(cb->l2rcb_data, zio->io_size); 6808 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); 6809 zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_pdata; 6810 } 6811 6812 ASSERT3P(zio->io_data, !=, NULL); 6813 6814 /* 6815 * Check this survived the L2ARC journey. 6816 */ 6817 ASSERT3P(zio->io_data, ==, hdr->b_l1hdr.b_pdata); 6818 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 6819 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 6820 6821 valid_cksum = arc_cksum_is_equal(hdr, zio); 6822 if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 6823 mutex_exit(hash_lock); 6824 zio->io_private = hdr; 6825 arc_read_done(zio); 6826 } else { 6827 mutex_exit(hash_lock); 6828 /* 6829 * Buffer didn't survive caching. Increment stats and 6830 * reissue to the original storage device. 6831 */ 6832 if (zio->io_error != 0) { 6833 ARCSTAT_BUMP(arcstat_l2_io_error); 6834 } else { 6835 zio->io_error = SET_ERROR(EIO); 6836 } 6837 if (!valid_cksum) 6838 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 6839 6840 /* 6841 * If there's no waiter, issue an async i/o to the primary 6842 * storage now. If there *is* a waiter, the caller must 6843 * issue the i/o in a context where it's OK to block. 
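* (In the waiter case, zio_wait() in arc_read() sees the error and the
* caller falls through to a normal zio_read() of the original block
* pointer.)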
6844 */ 6845 if (zio->io_waiter == NULL) { 6846 zio_t *pio = zio_unique_parent(zio); 6847 6848 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 6849 6850 zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp, 6851 hdr->b_l1hdr.b_pdata, zio->io_size, arc_read_done, 6852 hdr, zio->io_priority, cb->l2rcb_flags, 6853 &cb->l2rcb_zb)); 6854 } 6855 } 6856 6857 kmem_free(cb, sizeof (l2arc_read_callback_t)); 6858 } 6859 6860 /* 6861 * This is the list priority from which the L2ARC will search for pages to 6862 * cache. This is used within loops (0..3) to cycle through lists in the 6863 * desired order. This order can have a significant effect on cache 6864 * performance. 6865 * 6866 * Currently the metadata lists are hit first, MFU then MRU, followed by 6867 * the data lists. This function returns a locked list, and also returns 6868 * the lock pointer. 6869 */ 6870 static multilist_sublist_t * 6871 l2arc_sublist_lock(int list_num) 6872 { 6873 multilist_t *ml = NULL; 6874 unsigned int idx; 6875 6876 ASSERT(list_num >= 0 && list_num <= 3); 6877 6878 switch (list_num) { 6879 case 0: 6880 ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 6881 break; 6882 case 1: 6883 ml = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 6884 break; 6885 case 2: 6886 ml = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 6887 break; 6888 case 3: 6889 ml = &arc_mru->arcs_list[ARC_BUFC_DATA]; 6890 break; 6891 } 6892 6893 /* 6894 * Return a randomly-selected sublist. This is acceptable 6895 * because the caller feeds only a little bit of data for each 6896 * call (8MB). Subsequent calls will result in different 6897 * sublists being selected. 6898 */ 6899 idx = multilist_get_random_index(ml); 6900 return (multilist_sublist_lock(ml, idx)); 6901 } 6902 6903 /* 6904 * Evict buffers from the device write hand to the distance specified in 6905 * bytes. This distance may span populated buffers, it may span nothing. 6906 * This is clearing a region on the L2ARC device ready for writing. 6907 * If the 'all' boolean is set, every buffer is evicted. 6908 */ 6909 static void 6910 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 6911 { 6912 list_t *buflist; 6913 arc_buf_hdr_t *hdr, *hdr_prev; 6914 kmutex_t *hash_lock; 6915 uint64_t taddr; 6916 6917 buflist = &dev->l2ad_buflist; 6918 6919 if (!all && dev->l2ad_first) { 6920 /* 6921 * This is the first sweep through the device. There is 6922 * nothing to evict. 6923 */ 6924 return; 6925 } 6926 6927 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 6928 /* 6929 * When nearing the end of the device, evict to the end 6930 * before the device write hand jumps to the start. 6931 */ 6932 taddr = dev->l2ad_end; 6933 } else { 6934 taddr = dev->l2ad_hand + distance; 6935 } 6936 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 6937 uint64_t, taddr, boolean_t, all); 6938 6939 top: 6940 mutex_enter(&dev->l2ad_mtx); 6941 for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) { 6942 hdr_prev = list_prev(buflist, hdr); 6943 6944 hash_lock = HDR_LOCK(hdr); 6945 6946 /* 6947 * We cannot use mutex_enter or else we can deadlock 6948 * with l2arc_write_buffers (due to swapping the order 6949 * the hash lock and l2ad_mtx are taken). 6950 */ 6951 if (!mutex_tryenter(hash_lock)) { 6952 /* 6953 * Missed the hash lock. Retry. 6954 */ 6955 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 6956 mutex_exit(&dev->l2ad_mtx); 6957 mutex_enter(hash_lock); 6958 mutex_exit(hash_lock); 6959 goto top; 6960 } 6961 6962 if (HDR_L2_WRITE_HEAD(hdr)) { 6963 /* 6964 * We hit a write head node. 
Leave it for 6965 * l2arc_write_done(). 6966 */ 6967 list_remove(buflist, hdr); 6968 mutex_exit(hash_lock); 6969 continue; 6970 } 6971 6972 if (!all && HDR_HAS_L2HDR(hdr) && 6973 (hdr->b_l2hdr.b_daddr >= taddr || 6974 hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { 6975 /* 6976 * We've evicted to the target address, 6977 * or the end of the device. 6978 */ 6979 mutex_exit(hash_lock); 6980 break; 6981 } 6982 6983 ASSERT(HDR_HAS_L2HDR(hdr)); 6984 if (!HDR_HAS_L1HDR(hdr)) { 6985 ASSERT(!HDR_L2_READING(hdr)); 6986 /* 6987 * This doesn't exist in the ARC. Destroy. 6988 * arc_hdr_destroy() will call list_remove() 6989 * and decrement arcstat_l2_size. 6990 */ 6991 arc_change_state(arc_anon, hdr, hash_lock); 6992 arc_hdr_destroy(hdr); 6993 } else { 6994 ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); 6995 ARCSTAT_BUMP(arcstat_l2_evict_l1cached); 6996 /* 6997 * Invalidate issued or about to be issued 6998 * reads, since we may be about to write 6999 * over this location. 7000 */ 7001 if (HDR_L2_READING(hdr)) { 7002 ARCSTAT_BUMP(arcstat_l2_evict_reading); 7003 arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED); 7004 } 7005 7006 /* Ensure this header has finished being written */ 7007 ASSERT(!HDR_L2_WRITING(hdr)); 7008 7009 arc_hdr_l2hdr_destroy(hdr); 7010 } 7011 mutex_exit(hash_lock); 7012 } 7013 mutex_exit(&dev->l2ad_mtx); 7014 } 7015 7016 /* 7017 * Find and write ARC buffers to the L2ARC device. 7018 * 7019 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid 7020 * for reading until they have completed writing. 7021 * The headroom_boost is an in-out parameter used to maintain headroom boost 7022 * state between calls to this function. 7023 * 7024 * Returns the number of bytes actually written (which may be smaller than 7025 * the delta by which the device hand has changed due to alignment). 7026 */ 7027 static uint64_t 7028 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 7029 { 7030 arc_buf_hdr_t *hdr, *hdr_prev, *head; 7031 uint64_t write_asize, write_psize, write_sz, headroom; 7032 boolean_t full; 7033 l2arc_write_callback_t *cb; 7034 zio_t *pio, *wzio; 7035 uint64_t guid = spa_load_guid(spa); 7036 int try; 7037 7038 ASSERT3P(dev->l2ad_vdev, !=, NULL); 7039 7040 pio = NULL; 7041 write_sz = write_asize = write_psize = 0; 7042 full = B_FALSE; 7043 head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); 7044 arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR); 7045 7046 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 7047 /* 7048 * Copy buffers for L2ARC writing. 7049 */ 7050 for (try = 0; try <= 3; try++) { 7051 multilist_sublist_t *mls = l2arc_sublist_lock(try); 7052 uint64_t passed_sz = 0; 7053 7054 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 7055 7056 /* 7057 * L2ARC fast warmup. 7058 * 7059 * Until the ARC is warm and starts to evict, read from the 7060 * head of the ARC lists rather than the tail. 
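* See item 5 of the L2ARC design notes above: before the ARC fills up,
* the list tails stay mostly static, so scanning from the head finds
* eligible buffers much sooner.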

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom (l2arc_headroom, boosted by l2arc_headroom_boost when
 * compressed ARC is enabled) bounds how far down each ARC list we are
 * willing to search for eligible buffers.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	uint64_t write_asize, write_psize, write_sz, headroom;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);
	int try;

	ASSERT3P(dev->l2ad_vdev, !=, NULL);

	pio = NULL;
	write_sz = write_asize = write_psize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);

	ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
	/*
	 * Copy buffers for L2ARC writing.
	 */
	for (try = 0; try <= 3; try++) {
		multilist_sublist_t *mls = l2arc_sublist_lock(try);
		uint64_t passed_sz = 0;

		ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = multilist_sublist_head(mls);
		else
			hdr = multilist_sublist_tail(mls);
		if (hdr == NULL)
			ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);

		headroom = target_sz * l2arc_headroom;
		if (zfs_compressed_arc_enabled)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;

			if (arc_warm == B_FALSE)
				hdr_prev = multilist_sublist_next(mls, hdr);
			else
				hdr_prev = multilist_sublist_prev(mls, hdr);
			ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned,
			    HDR_GET_LSIZE(hdr));

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += HDR_GET_LSIZE(hdr);
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			/*
			 * We rely on the L1 portion of the header below, so
			 * it's invalid for this header to have been evicted
			 * out of the ghost cache prior to being written out.
			 * The ARC_FLAG_L2_WRITING bit ensures this won't
			 * happen.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));

			ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
			ASSERT3P(hdr->b_l1hdr.b_pdata, !=, NULL);
			ASSERT3U(arc_hdr_size(hdr), >, 0);
			uint64_t size = arc_hdr_size(hdr);
			uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
			    size);

			if ((write_psize + asize) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				ARCSTAT_BUMP(arcstat_l2_write_full);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				mutex_enter(&dev->l2ad_mtx);
				list_insert_head(&dev->l2ad_buflist, head);
				mutex_exit(&dev->l2ad_mtx);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
				ARCSTAT_BUMP(arcstat_l2_write_pios);
			}

			hdr->b_l2hdr.b_dev = dev;
			hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
			arc_hdr_set_flags(hdr,
			    ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);

			mutex_enter(&dev->l2ad_mtx);
			list_insert_head(&dev->l2ad_buflist, hdr);
			mutex_exit(&dev->l2ad_mtx);

			(void) refcount_add_many(&dev->l2ad_alloc, size, hdr);

			/*
			 * Normally the L2ARC can use the hdr's data, but if
			 * we're sharing data between the hdr and one of its
			 * bufs, L2ARC needs its own copy of the data so that
			 * the ZIO below can't race with the buf consumer. To
			 * ensure that this copy will be available for the
			 * lifetime of the ZIO and be cleaned up afterwards, we
			 * add it to the l2arc_free_on_write queue.
			 */
			void *to_write;
			if (!HDR_SHARED_DATA(hdr) && size == asize) {
				to_write = hdr->b_l1hdr.b_pdata;
			} else {
				arc_buf_contents_t type = arc_buf_type(hdr);
				if (type == ARC_BUFC_METADATA) {
					to_write = zio_buf_alloc(asize);
				} else {
					ASSERT3U(type, ==, ARC_BUFC_DATA);
					to_write = zio_data_buf_alloc(asize);
				}

				bcopy(hdr->b_l1hdr.b_pdata, to_write, size);
				if (asize != size)
					bzero(to_write + size, asize - size);
				l2arc_free_data_on_write(to_write, asize, type);
			}
			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    hdr->b_l2hdr.b_daddr, asize, to_write,
			    ZIO_CHECKSUM_OFF, NULL, hdr,
			    ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			write_sz += HDR_GET_LSIZE(hdr);
			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);

			write_asize += size;
			write_psize += asize;
			dev->l2ad_hand += asize;

			mutex_exit(hash_lock);

			(void) zio_nowait(wzio);
		}

		multilist_sublist_unlock(mls);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_sz);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);
		return (0);
	}

	ASSERT3U(write_psize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	ARCSTAT_INCR(arcstat_l2_asize, write_asize);
	vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}
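
/*
 * Worked example of the headroom bound above, assuming the default tunables
 * l2arc_headroom = 2 and l2arc_headroom_boost = 200 (both defined elsewhere
 * in this file): with target_sz = 8MB, each list pass may scan up to 16MB
 * of logical data, or 32MB when zfs_compressed_arc_enabled is set, before
 * arcstat_l2_write_passed_headroom is bumped and the pass ends.
 */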

/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *dummy __unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt() + hz;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next - ddi_get_lbolt());
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.  This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT3P(spa, !=, NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
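
/*
 * Returns B_TRUE if the given vdev is currently registered as an L2ARC
 * cache device, i.e. it has an entry on l2arc_dev_list.
 */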
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	vdev_ashift_optimize(vd);

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
	refcount_create(&adddev->l2ad_alloc);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
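
/*
 * Layout sketch for a freshly added L2ARC device, derived from the fields
 * initialized above (addresses are byte offsets into the vdev):
 *
 *	l2ad_start = VDEV_LABEL_START_SIZE
 *	l2ad_end   = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd)
 *	l2ad_hand  = l2ad_start, advancing toward l2ad_end as
 *		     l2arc_write_buffers() issues writes, then wrapping
 *		     back to l2ad_start (with l2ad_first cleared).
 */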

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	mutex_destroy(&remdev->l2ad_mtx);
	refcount_destroy(&remdev->l2ad_alloc);
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini().
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}
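
/*
 * Taken together, the routines above make up the L2ARC lifecycle:
 * l2arc_init() creates the global locks and lists, l2arc_add_vdev()
 * registers each cache device, and l2arc_start() launches
 * l2arc_feed_thread() when pools are opened for writing.  Shutdown runs
 * roughly in reverse: l2arc_stop() signals the feed thread and waits for
 * it to clear l2arc_thread_exit, l2arc_remove_vdev() flushes and frees
 * each device, and l2arc_fini() tears down the global state.
 */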