1 /* 2 * Copyright (c) 2020 iXsystems, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/types.h> 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/conf.h> 35 #include <sys/kernel.h> 36 #include <sys/lock.h> 37 #include <sys/malloc.h> 38 #include <sys/mutex.h> 39 #include <sys/proc.h> 40 #include <sys/errno.h> 41 #include <sys/uio.h> 42 #include <sys/buf.h> 43 #include <sys/file.h> 44 #include <sys/kmem.h> 45 #include <sys/conf.h> 46 #include <sys/cmn_err.h> 47 #include <sys/stat.h> 48 #include <sys/zfs_ioctl.h> 49 #include <sys/zfs_vfsops.h> 50 #include <sys/zfs_znode.h> 51 #include <sys/zap.h> 52 #include <sys/spa.h> 53 #include <sys/spa_impl.h> 54 #include <sys/vdev.h> 55 #include <sys/vdev_impl.h> 56 #include <sys/arc_os.h> 57 #include <sys/dmu.h> 58 #include <sys/dsl_dir.h> 59 #include <sys/dsl_dataset.h> 60 #include <sys/dsl_prop.h> 61 #include <sys/dsl_deleg.h> 62 #include <sys/dmu_objset.h> 63 #include <sys/dmu_impl.h> 64 #include <sys/dmu_tx.h> 65 #include <sys/sunddi.h> 66 #include <sys/policy.h> 67 #include <sys/zone.h> 68 #include <sys/nvpair.h> 69 #include <sys/mount.h> 70 #include <sys/taskqueue.h> 71 #include <sys/sdt.h> 72 #include <sys/fs/zfs.h> 73 #include <sys/zfs_ctldir.h> 74 #include <sys/zfs_dir.h> 75 #include <sys/zfs_onexit.h> 76 #include <sys/zvol.h> 77 #include <sys/dsl_scan.h> 78 #include <sys/dmu_objset.h> 79 #include <sys/dmu_send.h> 80 #include <sys/dsl_destroy.h> 81 #include <sys/dsl_bookmark.h> 82 #include <sys/dsl_userhold.h> 83 #include <sys/zfeature.h> 84 #include <sys/zcp.h> 85 #include <sys/zio_checksum.h> 86 #include <sys/vdev_removal.h> 87 #include <sys/dsl_crypt.h> 88 89 #include <sys/zfs_ioctl_compat.h> 90 #include <sys/zfs_context.h> 91 92 #include <sys/arc_impl.h> 93 #include <sys/dsl_pool.h> 94 95 #include <sys/vmmeter.h> 96 97 SYSCTL_DECL(_vfs_zfs); 98 SYSCTL_NODE(_vfs_zfs, OID_AUTO, arc, CTLFLAG_RW, 0, 99 "ZFS adaptive replacement cache"); 100 SYSCTL_NODE(_vfs_zfs, OID_AUTO, brt, CTLFLAG_RW, 0, 101 "ZFS Block Reference Table"); 102 SYSCTL_NODE(_vfs_zfs, OID_AUTO, condense, CTLFLAG_RW, 0, "ZFS condense"); 103 SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf, CTLFLAG_RW, 0, "ZFS disk buf cache"); 104 SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf_cache, CTLFLAG_RW, 0, 105 "ZFS disk buf cache"); 106 SYSCTL_NODE(_vfs_zfs, OID_AUTO, deadman, CTLFLAG_RW, 0, "ZFS deadman"); 107 SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS dedup"); 108 SYSCTL_NODE(_vfs_zfs, OID_AUTO, l2arc, CTLFLAG_RW, 0, "ZFS l2arc"); 109 SYSCTL_NODE(_vfs_zfs, OID_AUTO, livelist, CTLFLAG_RW, 0, "ZFS livelist"); 110 SYSCTL_NODE(_vfs_zfs, OID_AUTO, lua, CTLFLAG_RW, 0, "ZFS lua"); 111 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab"); 112 SYSCTL_NODE(_vfs_zfs, OID_AUTO, mg, CTLFLAG_RW, 0, "ZFS metaslab group"); 113 SYSCTL_NODE(_vfs_zfs, OID_AUTO, multihost, CTLFLAG_RW, 0, 114 "ZFS multihost protection"); 115 SYSCTL_NODE(_vfs_zfs, OID_AUTO, prefetch, CTLFLAG_RW, 0, "ZFS prefetch"); 116 SYSCTL_NODE(_vfs_zfs, OID_AUTO, reconstruct, CTLFLAG_RW, 0, "ZFS reconstruct"); 117 SYSCTL_NODE(_vfs_zfs, OID_AUTO, recv, CTLFLAG_RW, 0, "ZFS receive"); 118 SYSCTL_NODE(_vfs_zfs, OID_AUTO, send, CTLFLAG_RW, 0, "ZFS send"); 119 SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation"); 120 SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM"); 121 SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group"); 122 SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV"); 123 SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS"); 124 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event"); 125 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL"); 126 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO"); 127 128 SYSCTL_NODE(_vfs_zfs_livelist, OID_AUTO, condense, CTLFLAG_RW, 0, 129 "ZFS livelist condense"); 130 SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache"); 131 SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, file, CTLFLAG_RW, 0, "ZFS VDEV file"); 132 SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0, 133 "ZFS VDEV mirror"); 134 135 SYSCTL_DECL(_vfs_zfs_version); 136 SYSCTL_CONST_STRING(_vfs_zfs_version, OID_AUTO, module, CTLFLAG_RD, 137 (ZFS_META_VERSION "-" ZFS_META_RELEASE), "OpenZFS module version"); 138 139 /* arc.c */ 140 141 int 142 param_set_arc_u64(SYSCTL_HANDLER_ARGS) 143 { 144 int err; 145 146 err = sysctl_handle_64(oidp, arg1, 0, req); 147 if (err != 0 || req->newptr == NULL) 148 return (err); 149 150 arc_tuning_update(B_TRUE); 151 152 return (0); 153 } 154 155 int 156 param_set_arc_int(SYSCTL_HANDLER_ARGS) 157 { 158 int err; 159 160 err = sysctl_handle_int(oidp, arg1, 0, req); 161 if (err != 0 || req->newptr == NULL) 162 return (err); 163 164 arc_tuning_update(B_TRUE); 165 166 return (0); 167 } 168 169 int 170 param_set_arc_max(SYSCTL_HANDLER_ARGS) 171 { 172 unsigned long val; 173 int err; 174 175 val = zfs_arc_max; 176 err = sysctl_handle_64(oidp, &val, 0, req); 177 if (err != 0 || req->newptr == NULL) 178 return (SET_ERROR(err)); 179 180 if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min || 181 val >= arc_all_memory())) 182 return (SET_ERROR(EINVAL)); 183 184 zfs_arc_max = val; 185 arc_tuning_update(B_TRUE); 186 187 /* Update the sysctl to the tuned value */ 188 if (val != 0) 189 zfs_arc_max = arc_c_max; 190 191 return (0); 192 } 193 194 /* BEGIN CSTYLED */ 195 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max, 196 CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 197 NULL, 0, param_set_arc_max, "LU", 198 "Maximum ARC size in bytes (LEGACY)"); 199 /* END CSTYLED */ 200 201 int 202 param_set_arc_min(SYSCTL_HANDLER_ARGS) 203 { 204 unsigned long val; 205 int err; 206 207 val = zfs_arc_min; 208 err = sysctl_handle_64(oidp, &val, 0, req); 209 if (err != 0 || req->newptr == NULL) 210 return (SET_ERROR(err)); 211 212 if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max)) 213 return (SET_ERROR(EINVAL)); 214 215 zfs_arc_min = val; 216 arc_tuning_update(B_TRUE); 217 218 /* Update the sysctl to the tuned value */ 219 if (val != 0) 220 zfs_arc_min = arc_c_min; 221 222 return (0); 223 } 224 225 /* BEGIN CSTYLED */ 226 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min, 227 CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 228 NULL, 0, param_set_arc_min, "LU", 229 "Minimum ARC size in bytes (LEGACY)"); 230 /* END CSTYLED */ 231 232 extern uint_t zfs_arc_free_target; 233 234 int 235 param_set_arc_free_target(SYSCTL_HANDLER_ARGS) 236 { 237 uint_t val; 238 int err; 239 240 val = zfs_arc_free_target; 241 err = sysctl_handle_int(oidp, &val, 0, req); 242 if (err != 0 || req->newptr == NULL) 243 return (err); 244 245 if (val < minfree) 246 return (EINVAL); 247 if (val > vm_cnt.v_page_count) 248 return (EINVAL); 249 250 zfs_arc_free_target = val; 251 252 return (0); 253 } 254 255 /* 256 * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on 257 * pagedaemon initialization. 258 */ 259 /* BEGIN CSTYLED */ 260 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target, 261 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 262 NULL, 0, param_set_arc_free_target, "IU", 263 "Desired number of free pages below which ARC triggers reclaim" 264 " (LEGACY)"); 265 /* END CSTYLED */ 266 267 int 268 param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS) 269 { 270 int err, val; 271 272 val = arc_no_grow_shift; 273 err = sysctl_handle_int(oidp, &val, 0, req); 274 if (err != 0 || req->newptr == NULL) 275 return (err); 276 277 if (val < 0 || val >= arc_shrink_shift) 278 return (EINVAL); 279 280 arc_no_grow_shift = val; 281 282 return (0); 283 } 284 285 /* BEGIN CSTYLED */ 286 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift, 287 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 288 NULL, 0, param_set_arc_no_grow_shift, "I", 289 "log2(fraction of ARC which must be free to allow growing) (LEGACY)"); 290 /* END CSTYLED */ 291 292 extern uint64_t l2arc_write_max; 293 294 /* BEGIN CSTYLED */ 295 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, 296 CTLFLAG_RWTUN, &l2arc_write_max, 0, 297 "Max write bytes per interval (LEGACY)"); 298 /* END CSTYLED */ 299 300 extern uint64_t l2arc_write_boost; 301 302 /* BEGIN CSTYLED */ 303 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, 304 CTLFLAG_RWTUN, &l2arc_write_boost, 0, 305 "Extra write bytes during device warmup (LEGACY)"); 306 /* END CSTYLED */ 307 308 extern uint64_t l2arc_headroom; 309 310 /* BEGIN CSTYLED */ 311 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, 312 CTLFLAG_RWTUN, &l2arc_headroom, 0, 313 "Number of max device writes to precache (LEGACY)"); 314 /* END CSTYLED */ 315 316 extern uint64_t l2arc_headroom_boost; 317 318 /* BEGIN CSTYLED */ 319 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost, 320 CTLFLAG_RWTUN, &l2arc_headroom_boost, 0, 321 "Compressed l2arc_headroom multiplier (LEGACY)"); 322 /* END CSTYLED */ 323 324 extern uint64_t l2arc_feed_secs; 325 326 /* BEGIN CSTYLED */ 327 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, 328 CTLFLAG_RWTUN, &l2arc_feed_secs, 0, 329 "Seconds between L2ARC writing (LEGACY)"); 330 /* END CSTYLED */ 331 332 extern uint64_t l2arc_feed_min_ms; 333 334 /* BEGIN CSTYLED */ 335 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, 336 CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0, 337 "Min feed interval in milliseconds (LEGACY)"); 338 /* END CSTYLED */ 339 340 extern int l2arc_noprefetch; 341 342 /* BEGIN CSTYLED */ 343 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, 344 CTLFLAG_RWTUN, &l2arc_noprefetch, 0, 345 "Skip caching prefetched buffers (LEGACY)"); 346 /* END CSTYLED */ 347 348 extern int l2arc_feed_again; 349 350 /* BEGIN CSTYLED */ 351 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, 352 CTLFLAG_RWTUN, &l2arc_feed_again, 0, 353 "Turbo L2ARC warmup (LEGACY)"); 354 /* END CSTYLED */ 355 356 extern int l2arc_norw; 357 358 /* BEGIN CSTYLED */ 359 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, 360 CTLFLAG_RWTUN, &l2arc_norw, 0, 361 "No reads during writes (LEGACY)"); 362 /* END CSTYLED */ 363 364 static int 365 param_get_arc_state_size(SYSCTL_HANDLER_ARGS) 366 { 367 arc_state_t *state = (arc_state_t *)arg1; 368 int64_t val; 369 370 val = zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]) + 371 zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]); 372 return (sysctl_handle_64(oidp, &val, 0, req)); 373 } 374 375 extern arc_state_t ARC_anon; 376 377 /* BEGIN CSTYLED */ 378 SYSCTL_PROC(_vfs_zfs, OID_AUTO, anon_size, 379 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 380 &ARC_anon, 0, param_get_arc_state_size, "Q", 381 "size of anonymous state"); 382 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD, 383 &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 384 "size of evictable metadata in anonymous state"); 385 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD, 386 &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 387 "size of evictable data in anonymous state"); 388 /* END CSTYLED */ 389 390 extern arc_state_t ARC_mru; 391 392 /* BEGIN CSTYLED */ 393 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_size, 394 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 395 &ARC_mru, 0, param_get_arc_state_size, "Q", 396 "size of mru state"); 397 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD, 398 &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 399 "size of evictable metadata in mru state"); 400 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD, 401 &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 402 "size of evictable data in mru state"); 403 /* END CSTYLED */ 404 405 extern arc_state_t ARC_mru_ghost; 406 407 /* BEGIN CSTYLED */ 408 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_ghost_size, 409 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 410 &ARC_mru_ghost, 0, param_get_arc_state_size, "Q", 411 "size of mru ghost state"); 412 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD, 413 &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 414 "size of evictable metadata in mru ghost state"); 415 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD, 416 &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 417 "size of evictable data in mru ghost state"); 418 /* END CSTYLED */ 419 420 extern arc_state_t ARC_mfu; 421 422 /* BEGIN CSTYLED */ 423 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_size, 424 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 425 &ARC_mfu, 0, param_get_arc_state_size, "Q", 426 "size of mfu state"); 427 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD, 428 &ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 429 "size of evictable metadata in mfu state"); 430 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD, 431 &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 432 "size of evictable data in mfu state"); 433 /* END CSTYLED */ 434 435 extern arc_state_t ARC_mfu_ghost; 436 437 /* BEGIN CSTYLED */ 438 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_ghost_size, 439 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 440 &ARC_mfu_ghost, 0, param_get_arc_state_size, "Q", 441 "size of mfu ghost state"); 442 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD, 443 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 444 "size of evictable metadata in mfu ghost state"); 445 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD, 446 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 447 "size of evictable data in mfu ghost state"); 448 /* END CSTYLED */ 449 450 extern arc_state_t ARC_uncached; 451 452 /* BEGIN CSTYLED */ 453 SYSCTL_PROC(_vfs_zfs, OID_AUTO, uncached_size, 454 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 455 &ARC_uncached, 0, param_get_arc_state_size, "Q", 456 "size of uncached state"); 457 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_metadata_esize, CTLFLAG_RD, 458 &ARC_uncached.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 459 "size of evictable metadata in uncached state"); 460 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_data_esize, CTLFLAG_RD, 461 &ARC_uncached.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 462 "size of evictable data in uncached state"); 463 /* END CSTYLED */ 464 465 extern arc_state_t ARC_l2c_only; 466 467 /* BEGIN CSTYLED */ 468 SYSCTL_PROC(_vfs_zfs, OID_AUTO, l2c_only_size, 469 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, 470 &ARC_l2c_only, 0, param_get_arc_state_size, "Q", 471 "size of l2c_only state"); 472 /* END CSTYLED */ 473 474 /* dbuf.c */ 475 476 /* dmu.c */ 477 478 /* dmu_zfetch.c */ 479 480 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)"); 481 482 extern uint32_t zfetch_max_distance; 483 484 /* BEGIN CSTYLED */ 485 SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance, 486 CTLFLAG_RWTUN, &zfetch_max_distance, 0, 487 "Max bytes to prefetch per stream (LEGACY)"); 488 /* END CSTYLED */ 489 490 extern uint32_t zfetch_max_idistance; 491 492 /* BEGIN CSTYLED */ 493 SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance, 494 CTLFLAG_RWTUN, &zfetch_max_idistance, 0, 495 "Max bytes to prefetch indirects for per stream (LEGACY)"); 496 /* END CSTYLED */ 497 498 /* dsl_pool.c */ 499 500 /* dnode.c */ 501 502 /* dsl_scan.c */ 503 504 /* metaslab.c */ 505 506 int 507 param_set_active_allocator(SYSCTL_HANDLER_ARGS) 508 { 509 char buf[16]; 510 int rc; 511 512 if (req->newptr == NULL) 513 strlcpy(buf, zfs_active_allocator, sizeof (buf)); 514 515 rc = sysctl_handle_string(oidp, buf, sizeof (buf), req); 516 if (rc || req->newptr == NULL) 517 return (rc); 518 if (strcmp(buf, zfs_active_allocator) == 0) 519 return (0); 520 521 return (param_set_active_allocator_common(buf)); 522 } 523 524 /* 525 * In pools where the log space map feature is not enabled we touch 526 * multiple metaslabs (and their respective space maps) with each 527 * transaction group. Thus, we benefit from having a small space map 528 * block size since it allows us to issue more I/O operations scattered 529 * around the disk. So a sane default for the space map block size 530 * is 8~16K. 531 */ 532 extern int zfs_metaslab_sm_blksz_no_log; 533 534 /* BEGIN CSTYLED */ 535 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log, 536 CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0, 537 "Block size for space map in pools with log space map disabled. " 538 "Power of 2 greater than 4096."); 539 /* END CSTYLED */ 540 541 /* 542 * When the log space map feature is enabled, we accumulate a lot of 543 * changes per metaslab that are flushed once in a while so we benefit 544 * from a bigger block size like 128K for the metaslab space maps. 545 */ 546 extern int zfs_metaslab_sm_blksz_with_log; 547 548 /* BEGIN CSTYLED */ 549 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log, 550 CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0, 551 "Block size for space map in pools with log space map enabled. " 552 "Power of 2 greater than 4096."); 553 /* END CSTYLED */ 554 555 /* 556 * The in-core space map representation is more compact than its on-disk form. 557 * The zfs_condense_pct determines how much more compact the in-core 558 * space map representation must be before we compact it on-disk. 559 * Values should be greater than or equal to 100. 560 */ 561 extern uint_t zfs_condense_pct; 562 563 /* BEGIN CSTYLED */ 564 SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct, 565 CTLFLAG_RWTUN, &zfs_condense_pct, 0, 566 "Condense on-disk spacemap when it is more than this many percents" 567 " of in-memory counterpart"); 568 /* END CSTYLED */ 569 570 extern uint_t zfs_remove_max_segment; 571 572 /* BEGIN CSTYLED */ 573 SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment, 574 CTLFLAG_RWTUN, &zfs_remove_max_segment, 0, 575 "Largest contiguous segment ZFS will attempt to allocate when removing" 576 " a device"); 577 /* END CSTYLED */ 578 579 extern int zfs_removal_suspend_progress; 580 581 /* BEGIN CSTYLED */ 582 SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress, 583 CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0, 584 "Ensures certain actions can happen while in the middle of a removal"); 585 /* END CSTYLED */ 586 587 /* 588 * Minimum size which forces the dynamic allocator to change 589 * it's allocation strategy. Once the space map cannot satisfy 590 * an allocation of this size then it switches to using more 591 * aggressive strategy (i.e search by size rather than offset). 592 */ 593 extern uint64_t metaslab_df_alloc_threshold; 594 595 /* BEGIN CSTYLED */ 596 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, 597 CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0, 598 "Minimum size which forces the dynamic allocator to change its" 599 " allocation strategy"); 600 /* END CSTYLED */ 601 602 /* 603 * The minimum free space, in percent, which must be available 604 * in a space map to continue allocations in a first-fit fashion. 605 * Once the space map's free space drops below this level we dynamically 606 * switch to using best-fit allocations. 607 */ 608 extern uint_t metaslab_df_free_pct; 609 610 /* BEGIN CSTYLED */ 611 SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, 612 CTLFLAG_RWTUN, &metaslab_df_free_pct, 0, 613 "The minimum free space, in percent, which must be available in a" 614 " space map to continue allocations in a first-fit fashion"); 615 /* END CSTYLED */ 616 617 /* mmp.c */ 618 619 int 620 param_set_multihost_interval(SYSCTL_HANDLER_ARGS) 621 { 622 int err; 623 624 err = sysctl_handle_64(oidp, &zfs_multihost_interval, 0, req); 625 if (err != 0 || req->newptr == NULL) 626 return (err); 627 628 if (spa_mode_global != SPA_MODE_UNINIT) 629 mmp_signal_all_threads(); 630 631 return (0); 632 } 633 634 /* spa.c */ 635 636 extern int zfs_ccw_retry_interval; 637 638 /* BEGIN CSTYLED */ 639 SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, 640 CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0, 641 "Configuration cache file write, retry after failure, interval" 642 " (seconds)"); 643 /* END CSTYLED */ 644 645 extern uint64_t zfs_max_missing_tvds_cachefile; 646 647 /* BEGIN CSTYLED */ 648 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile, 649 CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0, 650 "Allow importing pools with missing top-level vdevs in cache file"); 651 /* END CSTYLED */ 652 653 extern uint64_t zfs_max_missing_tvds_scan; 654 655 /* BEGIN CSTYLED */ 656 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan, 657 CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0, 658 "Allow importing pools with missing top-level vdevs during scan"); 659 /* END CSTYLED */ 660 661 /* spa_misc.c */ 662 663 extern int zfs_flags; 664 665 static int 666 sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS) 667 { 668 int err, val; 669 670 val = zfs_flags; 671 err = sysctl_handle_int(oidp, &val, 0, req); 672 if (err != 0 || req->newptr == NULL) 673 return (err); 674 675 /* 676 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all 677 * arc buffers in the system have the necessary additional 678 * checksum data. However, it is safe to disable at any 679 * time. 680 */ 681 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 682 val &= ~ZFS_DEBUG_MODIFY; 683 zfs_flags = val; 684 685 return (0); 686 } 687 688 /* BEGIN CSTYLED */ 689 SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags, 690 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, NULL, 0, 691 sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing."); 692 /* END CSTYLED */ 693 694 int 695 param_set_deadman_synctime(SYSCTL_HANDLER_ARGS) 696 { 697 unsigned long val; 698 int err; 699 700 val = zfs_deadman_synctime_ms; 701 err = sysctl_handle_64(oidp, &val, 0, req); 702 if (err != 0 || req->newptr == NULL) 703 return (err); 704 zfs_deadman_synctime_ms = val; 705 706 spa_set_deadman_synctime(MSEC2NSEC(zfs_deadman_synctime_ms)); 707 708 return (0); 709 } 710 711 int 712 param_set_deadman_ziotime(SYSCTL_HANDLER_ARGS) 713 { 714 unsigned long val; 715 int err; 716 717 val = zfs_deadman_ziotime_ms; 718 err = sysctl_handle_64(oidp, &val, 0, req); 719 if (err != 0 || req->newptr == NULL) 720 return (err); 721 zfs_deadman_ziotime_ms = val; 722 723 spa_set_deadman_ziotime(MSEC2NSEC(zfs_deadman_synctime_ms)); 724 725 return (0); 726 } 727 728 int 729 param_set_deadman_failmode(SYSCTL_HANDLER_ARGS) 730 { 731 char buf[16]; 732 int rc; 733 734 if (req->newptr == NULL) 735 strlcpy(buf, zfs_deadman_failmode, sizeof (buf)); 736 737 rc = sysctl_handle_string(oidp, buf, sizeof (buf), req); 738 if (rc || req->newptr == NULL) 739 return (rc); 740 if (strcmp(buf, zfs_deadman_failmode) == 0) 741 return (0); 742 if (strcmp(buf, "wait") == 0) 743 zfs_deadman_failmode = "wait"; 744 if (strcmp(buf, "continue") == 0) 745 zfs_deadman_failmode = "continue"; 746 if (strcmp(buf, "panic") == 0) 747 zfs_deadman_failmode = "panic"; 748 749 return (-param_set_deadman_failmode_common(buf)); 750 } 751 752 int 753 param_set_slop_shift(SYSCTL_HANDLER_ARGS) 754 { 755 int val; 756 int err; 757 758 val = spa_slop_shift; 759 err = sysctl_handle_int(oidp, &val, 0, req); 760 if (err != 0 || req->newptr == NULL) 761 return (err); 762 763 if (val < 1 || val > 31) 764 return (EINVAL); 765 766 spa_slop_shift = val; 767 768 return (0); 769 } 770 771 /* spacemap.c */ 772 773 extern int space_map_ibs; 774 775 /* BEGIN CSTYLED */ 776 SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN, 777 &space_map_ibs, 0, "Space map indirect block shift"); 778 /* END CSTYLED */ 779 780 781 /* vdev.c */ 782 783 int 784 param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS) 785 { 786 int val; 787 int err; 788 789 val = zfs_vdev_min_auto_ashift; 790 err = sysctl_handle_int(oidp, &val, 0, req); 791 if (err != 0 || req->newptr == NULL) 792 return (SET_ERROR(err)); 793 794 if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift) 795 return (SET_ERROR(EINVAL)); 796 797 zfs_vdev_min_auto_ashift = val; 798 799 return (0); 800 } 801 802 /* BEGIN CSTYLED */ 803 SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift, 804 CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 805 &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift), 806 param_set_min_auto_ashift, "IU", 807 "Min ashift used when creating new top-level vdev. (LEGACY)"); 808 /* END CSTYLED */ 809 810 int 811 param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS) 812 { 813 int val; 814 int err; 815 816 val = zfs_vdev_max_auto_ashift; 817 err = sysctl_handle_int(oidp, &val, 0, req); 818 if (err != 0 || req->newptr == NULL) 819 return (SET_ERROR(err)); 820 821 if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift) 822 return (SET_ERROR(EINVAL)); 823 824 zfs_vdev_max_auto_ashift = val; 825 826 return (0); 827 } 828 829 /* BEGIN CSTYLED */ 830 SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift, 831 CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 832 &zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift), 833 param_set_max_auto_ashift, "IU", 834 "Max ashift used when optimizing for logical -> physical sector size on" 835 " new top-level vdevs. (LEGACY)"); 836 /* END CSTYLED */ 837 838 /* 839 * Since the DTL space map of a vdev is not expected to have a lot of 840 * entries, we default its block size to 4K. 841 */ 842 extern int zfs_vdev_dtl_sm_blksz; 843 844 /* BEGIN CSTYLED */ 845 SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, 846 CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0, 847 "Block size for DTL space map. Power of 2 greater than 4096."); 848 /* END CSTYLED */ 849 850 /* 851 * vdev-wide space maps that have lots of entries written to them at 852 * the end of each transaction can benefit from a higher I/O bandwidth 853 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K. 854 */ 855 extern int zfs_vdev_standard_sm_blksz; 856 857 /* BEGIN CSTYLED */ 858 SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz, 859 CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0, 860 "Block size for standard space map. Power of 2 greater than 4096."); 861 /* END CSTYLED */ 862 863 extern int vdev_validate_skip; 864 865 /* BEGIN CSTYLED */ 866 SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip, 867 CTLFLAG_RDTUN, &vdev_validate_skip, 0, 868 "Enable to bypass vdev_validate()."); 869 /* END CSTYLED */ 870 871 /* vdev_mirror.c */ 872 873 /* vdev_queue.c */ 874 875 extern uint_t zfs_vdev_max_active; 876 877 /* BEGIN CSTYLED */ 878 SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, 879 CTLFLAG_RWTUN, &zfs_vdev_max_active, 0, 880 "The maximum number of I/Os of all types active for each device." 881 " (LEGACY)"); 882 /* END CSTYLED */ 883 884 /* zio.c */ 885 886 /* BEGIN CSTYLED */ 887 SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, 888 CTLFLAG_RDTUN, &zio_exclude_metadata, 0, 889 "Exclude metadata buffers from dumps as well"); 890 /* END CSTYLED */ 891