/*
 * (MPSAFE)
 *
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
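
/*
 * Illustrative usage sketch: the tunables defined below live under the
 * vm.swapcache sysctl node.  For example, caching of regular-file data
 * and swapcache-backed reads could be enabled at runtime with something
 * like:
 *
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.read_enable=1
 *
 * Whether a particular vnode is eligible also depends on the other knobs
 * below (e.g. vm.swapcache.use_chflags and vm.swapcache.maxfilesize).
 */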

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel process "swapcached" */
static int vm_swapcached_flush (vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
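
/*
 * Illustrative example: with vm_swapcache_maxswappct at its default of 75,
 * SWAPMAX(0) evaluates to 75% of vm_swap_max and SWAPMAX(-5) to 70%.  The
 * state machine below writes to the swapcache until swap cache use exceeds
 * SWAPMAX(0) and then cleans out assignments until use drops back below
 * SWAPMAX(-5).
 */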

/*
 * vm_swapcached is the high level pageout daemon.
 *
 * No requirements.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	struct vm_page page_marker;
	struct vm_object object_marker;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;

	lwkt_gettoken(&vm_token);
	crit_enter();

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	page_marker.queue = PQ_INACTIVE;
	page_marker.wire_count = 1;
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);

	for (;;) {
		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 70%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-5))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					vm_swapcache_writing(&page_marker);
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					vm_swapcache_writing(&page_marker);
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}

	/*
	 * Cleanup (NOT REACHED)
	 */
	TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
	crit_exit();
	lwkt_reltoken(&vm_token);

	lwkt_gettoken(&vmobj_token);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);
}

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)
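
/*
 * Illustrative bandwidth arithmetic: at the ~10 hz polling rate above,
 * curburst is credited vm_swapcache_accrate / 10 bytes per tick while it
 * remains below maxburst, i.e. roughly accrate bytes per second (100K/s
 * by default), while vm_swapcached_flush() debits count * PAGE_SIZE per
 * write.  Long-term write bandwidth is therefore bounded by roughly
 * accrate, with individual bursts limited by the accumulated curburst
 * credit (up to maxburst).
 */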

/*
 * The caller must hold vm_token.
 */
static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;
	int isblkdev;

	/*
	 * Deal with an overflow of the heuristic counter or if the user
	 * manually changes the hysteresis.
	 *
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
	if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high datarate of VM pages
	 *	 cycling from it.
	 */
	m = marker;
	count = vm_swapcache_maxlaunder;

	while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_swapcache_curburst < 0)
			break;
		if (vm_swapcache_test(m))
			continue;
		object = m->object;
		vp = object->handle;
		if (vp == NULL)
			continue;

		switch(vp->v_type) {
		case VREG:
			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * The PG_NOTMETA flag only applies to pages
			 * associated with block devices.
			 */
			if (m->flags & PG_NOTMETA)
				continue;
			if (vm_swapcache_meta_enable == 0)
				continue;
			isblkdev = 1;
			break;
		default:
			continue;
		}

		/*
		 * Ok, move the marker and soft-busy the page.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
		TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		m = marker;
	}

	/*
	 * Cleanup marker position.  If we hit the end of the
	 * list the marker is placed at the tail.  Newly deactivated
	 * pages will be placed after it.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
	if (m) {
		TAILQ_INSERT_BEFORE(m, marker, pageq);
	} else {
		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	}
}
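
/*
 * Illustrative example of the heuristic above: vm_swapcache_hysteresis is
 * set to vmstats.v_inactive_target / 2 at thread startup, so with an
 * inactive target of, say, 3000 pages the heuristic is reset to -1500
 * whenever the scan reaches the end of the queue.  Roughly 1500 pages
 * must then be newly deactivated (the VM system bumps the counter) before
 * the next burst of swapcache writes is attempted, which keeps the writes
 * clustered rather than trickling out a few pages at a time.
 */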

/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 *
 * The caller must hold vm_token.
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup(object, basei - x + i);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup(object, basei - x + j);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_io_finish(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	return(count);
}

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 *
 * The caller must hold vm_token.
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & (PG_BUSY | PG_UNMANAGED))
		return(1);
	if (m->busy || m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * Cleaning pass
 *
 * The caller must hold vm_token.
 */
static
void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	object = marker;
	count = vm_swapcache_maxlaunder;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vm_token);
	lwkt_gettoken(&vmobj_token);

	while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
		if (object->type != OBJT_VNODE)
			continue;
		if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
			continue;
		if ((vp = object->handle) == NULL)
			continue;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			continue;

		/*
		 * Adjust iterator.
		 */
		if (marker->backing_object != object)
			marker->size = 0;

		/*
		 * Move the marker so we can work on the VM object
		 */
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   marker, object_list);

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 */
		n = swap_pager_condfree(object, &marker->size, count);
		count -= n;
		if (count < 0)
			break;

		/*
		 * Setup for loop.
		 */
		marker->size = 0;
		object = marker;
	}

	/*
	 * Adjust marker so we continue the scan from where we left off.
	 * When we reach the end we start back at the beginning.
	 */
	TAILQ_REMOVE(&vm_object_list, marker, object_list);
	if (object)
		TAILQ_INSERT_BEFORE(object, marker, object_list);
	else
		TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
	marker->backing_object = object;

	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&vm_token);
}