1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1998-2010 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * Copyright (c) 1994 John S. Dyson 37 * Copyright (c) 1990 University of Utah. 38 * Copyright (c) 1991, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This code is derived from software contributed to Berkeley by 42 * the Systems Programming Group of the University of Utah Computer 43 * Science Department. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. Neither the name of the University nor the names of its contributors 54 * may be used to endorse or promote products derived from this software 55 * without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 67 * SUCH DAMAGE. 68 * 69 * New Swap System 70 * Matthew Dillon 71 * 72 * Radix Bitmap 'blists'. 73 * 74 * - The new swapper uses the new radix bitmap code. This should scale 75 * to arbitrarily small or arbitrarily large swap spaces and an almost 76 * arbitrary degree of fragmentation. 77 * 78 * Features: 79 * 80 * - on the fly reallocation of swap during putpages. The new system 81 * does not try to keep previously allocated swap blocks for dirty 82 * pages. 83 * 84 * - on the fly deallocation of swap 85 * 86 * - No more garbage collection required. Unnecessarily allocated swap 87 * blocks only exist for dirty vm_page_t's now and these are already 88 * cycled (in a high-load system) by the pager. We also do on-the-fly 89 * removal of invalidated swap blocks when a page is destroyed 90 * or renamed. 91 * 92 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 93 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 94 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $ 95 */ 96 97 #include <sys/param.h> 98 #include <sys/systm.h> 99 #include <sys/conf.h> 100 #include <sys/kernel.h> 101 #include <sys/proc.h> 102 #include <sys/buf.h> 103 #include <sys/vnode.h> 104 #include <sys/malloc.h> 105 #include <sys/vmmeter.h> 106 #include <sys/sysctl.h> 107 #include <sys/blist.h> 108 #include <sys/lock.h> 109 #include <sys/thread2.h> 110 111 #include "opt_swap.h" 112 #include <vm/vm.h> 113 #include <vm/vm_object.h> 114 #include <vm/vm_page.h> 115 #include <vm/vm_pager.h> 116 #include <vm/vm_pageout.h> 117 #include <vm/swap_pager.h> 118 #include <vm/vm_extern.h> 119 #include <vm/vm_zone.h> 120 #include <vm/vnode_pager.h> 121 122 #include <sys/buf2.h> 123 #include <vm/vm_page2.h> 124 125 #ifndef MAX_PAGEOUT_CLUSTER 126 #define MAX_PAGEOUT_CLUSTER SWB_NPAGES 127 #endif 128 129 #define SWM_FREE 0x02 /* free, period */ 130 #define SWM_POP 0x04 /* pop out */ 131 132 #define SWBIO_READ 0x01 133 #define SWBIO_WRITE 0x02 134 #define SWBIO_SYNC 0x04 135 136 struct swfreeinfo { 137 vm_object_t object; 138 vm_pindex_t basei; 139 vm_pindex_t begi; 140 vm_pindex_t endi; /* inclusive */ 141 }; 142 143 struct swswapoffinfo { 144 vm_object_t object; 145 int devidx; 146 int shared; 147 }; 148 149 /* 150 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 151 * in the old system. 152 */ 153 154 int swap_pager_full; /* swap space exhaustion (task killing) */ 155 int vm_swap_cache_use; 156 int vm_swap_anon_use; 157 static int vm_report_swap_allocs; 158 159 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 160 static int nsw_rcount; /* free read buffers */ 161 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 162 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 163 static int nsw_wcount_async_max;/* assigned maximum */ 164 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 165 166 struct blist *swapblist; 167 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 168 static int swap_burst_read = 0; /* allow burst reading */ 169 static swblk_t swapiterator; /* linearize allocations */ 170 171 /* from vm_swap.c */ 172 extern struct vnode *swapdev_vp; 173 extern struct swdevt *swdevt; 174 extern int nswdev; 175 176 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0) 177 178 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 179 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 180 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read, 181 CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins"); 182 183 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use, 184 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 185 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use, 186 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 187 SYSCTL_INT(_vm, OID_AUTO, swap_size, 188 CTLFLAG_RD, &vm_swap_size, 0, ""); 189 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs, 190 CTLFLAG_RW, &vm_report_swap_allocs, 0, ""); 191 192 vm_zone_t swap_zone; 193 194 /* 195 * Red-Black tree for swblock entries 196 * 197 * The caller must hold vm_token 198 */ 199 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare, 200 vm_pindex_t, swb_index); 201 202 int 203 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2) 204 { 205 if (swb1->swb_index < swb2->swb_index) 206 return(-1); 207 if (swb1->swb_index > swb2->swb_index) 208 return(1); 209 return(0); 210 } 211 212 static 213 int 214 rb_swblock_scancmp(struct swblock *swb, void *data) 215 { 216 struct swfreeinfo *info = data; 217 218 if (swb->swb_index < info->basei) 219 return(-1); 220 if (swb->swb_index > info->endi) 221 return(1); 222 return(0); 223 } 224 225 static 226 int 227 rb_swblock_condcmp(struct swblock *swb, void *data) 228 { 229 struct swfreeinfo *info = data; 230 231 if (swb->swb_index < info->basei) 232 return(-1); 233 return(0); 234 } 235 236 /* 237 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 238 * calls hooked from other parts of the VM system and do not appear here. 239 * (see vm/swap_pager.h). 240 */ 241 242 static void swap_pager_dealloc (vm_object_t object); 243 static int swap_pager_getpage (vm_object_t, vm_page_t *, int); 244 static void swap_chain_iodone(struct bio *biox); 245 246 struct pagerops swappagerops = { 247 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 248 swap_pager_getpage, /* pagein */ 249 swap_pager_putpages, /* pageout */ 250 swap_pager_haspage /* get backing store status for page */ 251 }; 252 253 /* 254 * dmmax is in page-sized chunks with the new swap system. It was 255 * dev-bsized chunks in the old. dmmax is always a power of 2. 256 * 257 * swap_*() routines are externally accessible. swp_*() routines are 258 * internal. 259 */ 260 261 int dmmax; 262 static int dmmax_mask; 263 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 264 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 265 266 static __inline void swp_sizecheck (void); 267 static void swp_pager_async_iodone (struct bio *bio); 268 269 /* 270 * Swap bitmap functions 271 */ 272 273 static __inline void swp_pager_freeswapspace(vm_object_t object, 274 swblk_t blk, int npages); 275 static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages); 276 277 /* 278 * Metadata functions 279 */ 280 281 static void swp_pager_meta_convert(vm_object_t); 282 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t); 283 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t); 284 static void swp_pager_meta_free_all(vm_object_t); 285 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); 286 287 /* 288 * SWP_SIZECHECK() - update swap_pager_full indication 289 * 290 * update the swap_pager_almost_full indication and warn when we are 291 * about to run out of swap space, using lowat/hiwat hysteresis. 292 * 293 * Clear swap_pager_full ( task killing ) indication when lowat is met. 294 * 295 * No restrictions on call 296 * This routine may not block. 297 * SMP races are ok. 298 */ 299 static __inline void 300 swp_sizecheck(void) 301 { 302 if (vm_swap_size < nswap_lowat) { 303 if (swap_pager_almost_full == 0) { 304 kprintf("swap_pager: out of swap space\n"); 305 swap_pager_almost_full = 1; 306 } 307 } else { 308 swap_pager_full = 0; 309 if (vm_swap_size > nswap_hiwat) 310 swap_pager_almost_full = 0; 311 } 312 } 313 314 /* 315 * SWAP_PAGER_INIT() - initialize the swap pager! 316 * 317 * Expected to be started from system init. NOTE: This code is run 318 * before much else so be careful what you depend on. Most of the VM 319 * system has yet to be initialized at this point. 320 * 321 * Called from the low level boot code only. 322 */ 323 static void 324 swap_pager_init(void *arg __unused) 325 { 326 /* 327 * Device Stripe, in PAGE_SIZE'd blocks 328 */ 329 dmmax = SWB_NPAGES * 2; 330 dmmax_mask = ~(dmmax - 1); 331 } 332 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL) 333 334 /* 335 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 336 * 337 * Expected to be started from pageout process once, prior to entering 338 * its main loop. 339 * 340 * Called from the low level boot code only. 341 */ 342 void 343 swap_pager_swap_init(void) 344 { 345 int n, n2; 346 347 /* 348 * Number of in-transit swap bp operations. Don't 349 * exhaust the pbufs completely. Make sure we 350 * initialize workable values (0 will work for hysteresis 351 * but it isn't very efficient). 352 * 353 * The nsw_cluster_max is constrained by the number of pages an XIO 354 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined 355 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 356 * constrained by the swap device interleave stripe size. 357 * 358 * Currently we hardwire nsw_wcount_async to 4. This limit is 359 * designed to prevent other I/O from having high latencies due to 360 * our pageout I/O. The value 4 works well for one or two active swap 361 * devices but is probably a little low if you have more. Even so, 362 * a higher value would probably generate only a limited improvement 363 * with three or four active swap devices since the system does not 364 * typically have to pageout at extreme bandwidths. We will want 365 * at least 2 per swap devices, and 4 is a pretty good value if you 366 * have one NFS swap device due to the command/ack latency over NFS. 367 * So it all works out pretty well. 368 */ 369 370 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 371 372 nsw_rcount = (nswbuf + 1) / 2; 373 nsw_wcount_sync = (nswbuf + 3) / 4; 374 nsw_wcount_async = 4; 375 nsw_wcount_async_max = nsw_wcount_async; 376 377 /* 378 * The zone is dynamically allocated so generally size it to 379 * maxswzone (32MB to 512MB of KVM). Set a minimum size based 380 * on physical memory of around 8x (each swblock can hold 16 pages). 381 * 382 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio 383 * has increased dramatically. 384 */ 385 n = vmstats.v_page_count / 2; 386 if (maxswzone && n < maxswzone / sizeof(struct swblock)) 387 n = maxswzone / sizeof(struct swblock); 388 n2 = n; 389 390 do { 391 swap_zone = zinit( 392 "SWAPMETA", 393 sizeof(struct swblock), 394 n, 395 ZONE_INTERRUPT, 396 1); 397 if (swap_zone != NULL) 398 break; 399 /* 400 * if the allocation failed, try a zone two thirds the 401 * size of the previous attempt. 402 */ 403 n -= ((n + 2) / 3); 404 } while (n > 0); 405 406 if (swap_zone == NULL) 407 panic("swap_pager_swap_init: swap_zone == NULL"); 408 if (n2 != n) 409 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n); 410 } 411 412 /* 413 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 414 * its metadata structures. 415 * 416 * This routine is called from the mmap and fork code to create a new 417 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 418 * and then converting it with swp_pager_meta_convert(). 419 * 420 * We only support unnamed objects. 421 * 422 * No restrictions. 423 */ 424 vm_object_t 425 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset) 426 { 427 vm_object_t object; 428 429 KKASSERT(handle == NULL); 430 object = vm_object_allocate_hold(OBJT_DEFAULT, 431 OFF_TO_IDX(offset + PAGE_MASK + size)); 432 swp_pager_meta_convert(object); 433 vm_object_drop(object); 434 435 return (object); 436 } 437 438 /* 439 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 440 * 441 * The swap backing for the object is destroyed. The code is 442 * designed such that we can reinstantiate it later, but this 443 * routine is typically called only when the entire object is 444 * about to be destroyed. 445 * 446 * The object must be locked or unreferenceable. 447 * No other requirements. 448 */ 449 static void 450 swap_pager_dealloc(vm_object_t object) 451 { 452 vm_object_hold(object); 453 vm_object_pip_wait(object, "swpdea"); 454 455 /* 456 * Free all remaining metadata. We only bother to free it from 457 * the swap meta data. We do not attempt to free swapblk's still 458 * associated with vm_page_t's for this object. We do not care 459 * if paging is still in progress on some objects. 460 */ 461 swp_pager_meta_free_all(object); 462 vm_object_drop(object); 463 } 464 465 /************************************************************************ 466 * SWAP PAGER BITMAP ROUTINES * 467 ************************************************************************/ 468 469 /* 470 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 471 * 472 * Allocate swap for the requested number of pages. The starting 473 * swap block number (a page index) is returned or SWAPBLK_NONE 474 * if the allocation failed. 475 * 476 * Also has the side effect of advising that somebody made a mistake 477 * when they configured swap and didn't configure enough. 478 * 479 * The caller must hold the object. 480 * This routine may not block. 481 */ 482 static __inline swblk_t 483 swp_pager_getswapspace(vm_object_t object, int npages) 484 { 485 swblk_t blk; 486 487 lwkt_gettoken(&vm_token); 488 blk = blist_allocat(swapblist, npages, swapiterator); 489 if (blk == SWAPBLK_NONE) 490 blk = blist_allocat(swapblist, npages, 0); 491 if (blk == SWAPBLK_NONE) { 492 if (swap_pager_full != 2) { 493 kprintf("swap_pager_getswapspace: failed alloc=%d\n", 494 npages); 495 swap_pager_full = 2; 496 swap_pager_almost_full = 1; 497 } 498 } else { 499 /* swapiterator = blk; disable for now, doesn't work well */ 500 swapacctspace(blk, -npages); 501 if (object->type == OBJT_SWAP) 502 vm_swap_anon_use += npages; 503 else 504 vm_swap_cache_use += npages; 505 swp_sizecheck(); 506 } 507 lwkt_reltoken(&vm_token); 508 return(blk); 509 } 510 511 /* 512 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 513 * 514 * This routine returns the specified swap blocks back to the bitmap. 515 * 516 * Note: This routine may not block (it could in the old swap code), 517 * and through the use of the new blist routines it does not block. 518 * 519 * We must be called at splvm() to avoid races with bitmap frees from 520 * vm_page_remove() aka swap_pager_page_removed(). 521 * 522 * This routine may not block. 523 */ 524 525 static __inline void 526 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages) 527 { 528 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)]; 529 530 lwkt_gettoken(&vm_token); 531 sp->sw_nused -= npages; 532 if (object->type == OBJT_SWAP) 533 vm_swap_anon_use -= npages; 534 else 535 vm_swap_cache_use -= npages; 536 537 if (sp->sw_flags & SW_CLOSING) { 538 lwkt_reltoken(&vm_token); 539 return; 540 } 541 542 blist_free(swapblist, blk, npages); 543 vm_swap_size += npages; 544 swp_sizecheck(); 545 lwkt_reltoken(&vm_token); 546 } 547 548 /* 549 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 550 * range within an object. 551 * 552 * This is a globally accessible routine. 553 * 554 * This routine removes swapblk assignments from swap metadata. 555 * 556 * The external callers of this routine typically have already destroyed 557 * or renamed vm_page_t's associated with this range in the object so 558 * we should be ok. 559 * 560 * No requirements. 561 */ 562 void 563 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size) 564 { 565 vm_object_hold(object); 566 swp_pager_meta_free(object, start, size); 567 vm_object_drop(object); 568 } 569 570 /* 571 * No requirements. 572 */ 573 void 574 swap_pager_freespace_all(vm_object_t object) 575 { 576 vm_object_hold(object); 577 swp_pager_meta_free_all(object); 578 vm_object_drop(object); 579 } 580 581 /* 582 * This function conditionally frees swap cache swap starting at 583 * (*basei) in the object. (count) swap blocks will be nominally freed. 584 * The actual number of blocks freed can be more or less than the 585 * requested number. 586 * 587 * This function nominally returns the number of blocks freed. However, 588 * the actual number of blocks freed may be less then the returned value. 589 * If the function is unable to exhaust the object or if it is able to 590 * free (approximately) the requested number of blocks it returns 591 * a value n > count. 592 * 593 * If we exhaust the object we will return a value n <= count. 594 * 595 * The caller must hold the object. 596 * 597 * WARNING! If count == 0 then -1 can be returned as a degenerate case, 598 * callers should always pass a count value > 0. 599 */ 600 static int swap_pager_condfree_callback(struct swblock *swap, void *data); 601 602 int 603 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count) 604 { 605 struct swfreeinfo info; 606 int n; 607 int t; 608 609 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 610 611 info.object = object; 612 info.basei = *basei; /* skip up to this page index */ 613 info.begi = count; /* max swap pages to destroy */ 614 info.endi = count * 8; /* max swblocks to scan */ 615 616 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp, 617 swap_pager_condfree_callback, &info); 618 *basei = info.basei; 619 620 /* 621 * Take the higher difference swblocks vs pages 622 */ 623 n = count - (int)info.begi; 624 t = count * 8 - (int)info.endi; 625 if (n < t) 626 n = t; 627 if (n < 1) 628 n = 1; 629 return(n); 630 } 631 632 /* 633 * The idea is to free whole meta-block to avoid fragmenting 634 * the swap space or disk I/O. We only do this if NO VM pages 635 * are present. 636 * 637 * We do not have to deal with clearing PG_SWAPPED in related VM 638 * pages because there are no related VM pages. 639 * 640 * The caller must hold the object. 641 */ 642 static int 643 swap_pager_condfree_callback(struct swblock *swap, void *data) 644 { 645 struct swfreeinfo *info = data; 646 vm_object_t object = info->object; 647 int i; 648 649 for (i = 0; i < SWAP_META_PAGES; ++i) { 650 if (vm_page_lookup(object, swap->swb_index + i)) 651 break; 652 } 653 info->basei = swap->swb_index + SWAP_META_PAGES; 654 if (i == SWAP_META_PAGES) { 655 info->begi -= swap->swb_count; 656 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES); 657 } 658 --info->endi; 659 if ((int)info->begi < 0 || (int)info->endi < 0) 660 return(-1); 661 lwkt_yield(); 662 return(0); 663 } 664 665 /* 666 * Called by vm_page_alloc() when a new VM page is inserted 667 * into a VM object. Checks whether swap has been assigned to 668 * the page and sets PG_SWAPPED as necessary. 669 * 670 * No requirements. 671 */ 672 void 673 swap_pager_page_inserted(vm_page_t m) 674 { 675 if (m->object->swblock_count) { 676 vm_object_hold(m->object); 677 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE) 678 vm_page_flag_set(m, PG_SWAPPED); 679 vm_object_drop(m->object); 680 } 681 } 682 683 /* 684 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 685 * 686 * Assigns swap blocks to the specified range within the object. The 687 * swap blocks are not zerod. Any previous swap assignment is destroyed. 688 * 689 * Returns 0 on success, -1 on failure. 690 * 691 * The caller is responsible for avoiding races in the specified range. 692 * No other requirements. 693 */ 694 int 695 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 696 { 697 int n = 0; 698 swblk_t blk = SWAPBLK_NONE; 699 vm_pindex_t beg = start; /* save start index */ 700 701 vm_object_hold(object); 702 703 while (size) { 704 if (n == 0) { 705 n = BLIST_MAX_ALLOC; 706 while ((blk = swp_pager_getswapspace(object, n)) == 707 SWAPBLK_NONE) 708 { 709 n >>= 1; 710 if (n == 0) { 711 swp_pager_meta_free(object, beg, 712 start - beg); 713 vm_object_drop(object); 714 return(-1); 715 } 716 } 717 } 718 swp_pager_meta_build(object, start, blk); 719 --size; 720 ++start; 721 ++blk; 722 --n; 723 } 724 swp_pager_meta_free(object, start, n); 725 vm_object_drop(object); 726 return(0); 727 } 728 729 /* 730 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 731 * and destroy the source. 732 * 733 * Copy any valid swapblks from the source to the destination. In 734 * cases where both the source and destination have a valid swapblk, 735 * we keep the destination's. 736 * 737 * This routine is allowed to block. It may block allocating metadata 738 * indirectly through swp_pager_meta_build() or if paging is still in 739 * progress on the source. 740 * 741 * XXX vm_page_collapse() kinda expects us not to block because we 742 * supposedly do not need to allocate memory, but for the moment we 743 * *may* have to get a little memory from the zone allocator, but 744 * it is taken from the interrupt memory. We should be ok. 745 * 746 * The source object contains no vm_page_t's (which is just as well) 747 * The source object is of type OBJT_SWAP. 748 * 749 * The source and destination objects must be held by the caller. 750 */ 751 void 752 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject, 753 vm_pindex_t base_index, int destroysource) 754 { 755 vm_pindex_t i; 756 757 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject)); 758 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject)); 759 760 /* 761 * transfer source to destination. 762 */ 763 for (i = 0; i < dstobject->size; ++i) { 764 swblk_t dstaddr; 765 766 /* 767 * Locate (without changing) the swapblk on the destination, 768 * unless it is invalid in which case free it silently, or 769 * if the destination is a resident page, in which case the 770 * source is thrown away. 771 */ 772 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 773 774 if (dstaddr == SWAPBLK_NONE) { 775 /* 776 * Destination has no swapblk and is not resident, 777 * copy source. 778 */ 779 swblk_t srcaddr; 780 781 srcaddr = swp_pager_meta_ctl(srcobject, 782 base_index + i, SWM_POP); 783 784 if (srcaddr != SWAPBLK_NONE) 785 swp_pager_meta_build(dstobject, i, srcaddr); 786 } else { 787 /* 788 * Destination has valid swapblk or it is represented 789 * by a resident page. We destroy the sourceblock. 790 */ 791 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE); 792 } 793 } 794 795 /* 796 * Free left over swap blocks in source. 797 * 798 * We have to revert the type to OBJT_DEFAULT so we do not accidently 799 * double-remove the object from the swap queues. 800 */ 801 if (destroysource) { 802 /* 803 * Reverting the type is not necessary, the caller is going 804 * to destroy srcobject directly, but I'm doing it here 805 * for consistency since we've removed the object from its 806 * queues. 807 */ 808 swp_pager_meta_free_all(srcobject); 809 if (srcobject->type == OBJT_SWAP) 810 srcobject->type = OBJT_DEFAULT; 811 } 812 } 813 814 /* 815 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 816 * the requested page. 817 * 818 * We determine whether good backing store exists for the requested 819 * page and return TRUE if it does, FALSE if it doesn't. 820 * 821 * If TRUE, we also try to determine how much valid, contiguous backing 822 * store exists before and after the requested page within a reasonable 823 * distance. We do not try to restrict it to the swap device stripe 824 * (that is handled in getpages/putpages). It probably isn't worth 825 * doing here. 826 * 827 * No requirements. 828 */ 829 boolean_t 830 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex) 831 { 832 swblk_t blk0; 833 834 /* 835 * do we have good backing store at the requested index ? 836 */ 837 vm_object_hold(object); 838 blk0 = swp_pager_meta_ctl(object, pindex, 0); 839 840 if (blk0 == SWAPBLK_NONE) { 841 vm_object_drop(object); 842 return (FALSE); 843 } 844 vm_object_drop(object); 845 return (TRUE); 846 } 847 848 /* 849 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 850 * 851 * This removes any associated swap backing store, whether valid or 852 * not, from the page. This operates on any VM object, not just OBJT_SWAP 853 * objects. 854 * 855 * This routine is typically called when a page is made dirty, at 856 * which point any associated swap can be freed. MADV_FREE also 857 * calls us in a special-case situation 858 * 859 * NOTE!!! If the page is clean and the swap was valid, the caller 860 * should make the page dirty before calling this routine. This routine 861 * does NOT change the m->dirty status of the page. Also: MADV_FREE 862 * depends on it. 863 * 864 * The page must be busied or soft-busied. 865 * The caller can hold the object to avoid blocking, else we might block. 866 * No other requirements. 867 */ 868 void 869 swap_pager_unswapped(vm_page_t m) 870 { 871 if (m->flags & PG_SWAPPED) { 872 vm_object_hold(m->object); 873 KKASSERT(m->flags & PG_SWAPPED); 874 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 875 vm_page_flag_clear(m, PG_SWAPPED); 876 vm_object_drop(m->object); 877 } 878 } 879 880 /* 881 * SWAP_PAGER_STRATEGY() - read, write, free blocks 882 * 883 * This implements a VM OBJECT strategy function using swap backing store. 884 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP 885 * types. 886 * 887 * This is intended to be a cacheless interface (i.e. caching occurs at 888 * higher levels), and is also used as a swap-based SSD cache for vnode 889 * and device objects. 890 * 891 * All I/O goes directly to and from the swap device. 892 * 893 * We currently attempt to run I/O synchronously or asynchronously as 894 * the caller requests. This isn't perfect because we loose error 895 * sequencing when we run multiple ops in parallel to satisfy a request. 896 * But this is swap, so we let it all hang out. 897 * 898 * No requirements. 899 */ 900 void 901 swap_pager_strategy(vm_object_t object, struct bio *bio) 902 { 903 struct buf *bp = bio->bio_buf; 904 struct bio *nbio; 905 vm_pindex_t start; 906 vm_pindex_t biox_blkno = 0; 907 int count; 908 char *data; 909 struct bio *biox; 910 struct buf *bufx; 911 #if 0 912 struct bio_track *track; 913 #endif 914 915 #if 0 916 /* 917 * tracking for swapdev vnode I/Os 918 */ 919 if (bp->b_cmd == BUF_CMD_READ) 920 track = &swapdev_vp->v_track_read; 921 else 922 track = &swapdev_vp->v_track_write; 923 #endif 924 925 if (bp->b_bcount & PAGE_MASK) { 926 bp->b_error = EINVAL; 927 bp->b_flags |= B_ERROR | B_INVAL; 928 biodone(bio); 929 kprintf("swap_pager_strategy: bp %p offset %lld size %d, " 930 "not page bounded\n", 931 bp, (long long)bio->bio_offset, (int)bp->b_bcount); 932 return; 933 } 934 935 /* 936 * Clear error indication, initialize page index, count, data pointer. 937 */ 938 bp->b_error = 0; 939 bp->b_flags &= ~B_ERROR; 940 bp->b_resid = bp->b_bcount; 941 942 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT); 943 count = howmany(bp->b_bcount, PAGE_SIZE); 944 data = bp->b_data; 945 946 /* 947 * Deal with BUF_CMD_FREEBLKS 948 */ 949 if (bp->b_cmd == BUF_CMD_FREEBLKS) { 950 /* 951 * FREE PAGE(s) - destroy underlying swap that is no longer 952 * needed. 953 */ 954 vm_object_hold(object); 955 swp_pager_meta_free(object, start, count); 956 vm_object_drop(object); 957 bp->b_resid = 0; 958 biodone(bio); 959 return; 960 } 961 962 /* 963 * We need to be able to create a new cluster of I/O's. We cannot 964 * use the caller fields of the passed bio so push a new one. 965 * 966 * Because nbio is just a placeholder for the cluster links, 967 * we can biodone() the original bio instead of nbio to make 968 * things a bit more efficient. 969 */ 970 nbio = push_bio(bio); 971 nbio->bio_offset = bio->bio_offset; 972 nbio->bio_caller_info1.cluster_head = NULL; 973 nbio->bio_caller_info2.cluster_tail = NULL; 974 975 biox = NULL; 976 bufx = NULL; 977 978 /* 979 * Execute read or write 980 */ 981 vm_object_hold(object); 982 983 while (count > 0) { 984 swblk_t blk; 985 986 /* 987 * Obtain block. If block not found and writing, allocate a 988 * new block and build it into the object. 989 */ 990 blk = swp_pager_meta_ctl(object, start, 0); 991 if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) { 992 blk = swp_pager_getswapspace(object, 1); 993 if (blk == SWAPBLK_NONE) { 994 bp->b_error = ENOMEM; 995 bp->b_flags |= B_ERROR; 996 break; 997 } 998 swp_pager_meta_build(object, start, blk); 999 } 1000 1001 /* 1002 * Do we have to flush our current collection? Yes if: 1003 * 1004 * - no swap block at this index 1005 * - swap block is not contiguous 1006 * - we cross a physical disk boundry in the 1007 * stripe. 1008 */ 1009 if ( 1010 biox && (biox_blkno + btoc(bufx->b_bcount) != blk || 1011 ((biox_blkno ^ blk) & dmmax_mask) 1012 ) 1013 ) { 1014 if (bp->b_cmd == BUF_CMD_READ) { 1015 ++mycpu->gd_cnt.v_swapin; 1016 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1017 } else { 1018 ++mycpu->gd_cnt.v_swapout; 1019 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1020 bufx->b_dirtyend = bufx->b_bcount; 1021 } 1022 1023 /* 1024 * Finished with this buf. 1025 */ 1026 KKASSERT(bufx->b_bcount != 0); 1027 if (bufx->b_cmd != BUF_CMD_READ) 1028 bufx->b_dirtyend = bufx->b_bcount; 1029 biox = NULL; 1030 bufx = NULL; 1031 } 1032 1033 /* 1034 * Add new swapblk to biox, instantiating biox if necessary. 1035 * Zero-fill reads are able to take a shortcut. 1036 */ 1037 if (blk == SWAPBLK_NONE) { 1038 /* 1039 * We can only get here if we are reading. Since 1040 * we are at splvm() we can safely modify b_resid, 1041 * even if chain ops are in progress. 1042 */ 1043 bzero(data, PAGE_SIZE); 1044 bp->b_resid -= PAGE_SIZE; 1045 } else { 1046 if (biox == NULL) { 1047 /* XXX chain count > 4, wait to <= 4 */ 1048 1049 bufx = getpbuf(NULL); 1050 biox = &bufx->b_bio1; 1051 cluster_append(nbio, bufx); 1052 bufx->b_flags |= (bp->b_flags & B_ORDERED); 1053 bufx->b_cmd = bp->b_cmd; 1054 biox->bio_done = swap_chain_iodone; 1055 biox->bio_offset = (off_t)blk << PAGE_SHIFT; 1056 biox->bio_caller_info1.cluster_parent = nbio; 1057 biox_blkno = blk; 1058 bufx->b_bcount = 0; 1059 bufx->b_data = data; 1060 } 1061 bufx->b_bcount += PAGE_SIZE; 1062 } 1063 --count; 1064 ++start; 1065 data += PAGE_SIZE; 1066 } 1067 1068 vm_object_drop(object); 1069 1070 /* 1071 * Flush out last buffer 1072 */ 1073 if (biox) { 1074 if (bufx->b_cmd == BUF_CMD_READ) { 1075 ++mycpu->gd_cnt.v_swapin; 1076 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1077 } else { 1078 ++mycpu->gd_cnt.v_swapout; 1079 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1080 bufx->b_dirtyend = bufx->b_bcount; 1081 } 1082 KKASSERT(bufx->b_bcount); 1083 if (bufx->b_cmd != BUF_CMD_READ) 1084 bufx->b_dirtyend = bufx->b_bcount; 1085 /* biox, bufx = NULL */ 1086 } 1087 1088 /* 1089 * Now initiate all the I/O. Be careful looping on our chain as 1090 * I/O's may complete while we are still initiating them. 1091 * 1092 * If the request is a 100% sparse read no bios will be present 1093 * and we just biodone() the buffer. 1094 */ 1095 nbio->bio_caller_info2.cluster_tail = NULL; 1096 bufx = nbio->bio_caller_info1.cluster_head; 1097 1098 if (bufx) { 1099 while (bufx) { 1100 biox = &bufx->b_bio1; 1101 BUF_KERNPROC(bufx); 1102 bufx = bufx->b_cluster_next; 1103 vn_strategy(swapdev_vp, biox); 1104 } 1105 } else { 1106 biodone(bio); 1107 } 1108 1109 /* 1110 * Completion of the cluster will also call biodone_chain(nbio). 1111 * We never call biodone(nbio) so we don't have to worry about 1112 * setting up a bio_done callback. It's handled in the sub-IO. 1113 */ 1114 /**/ 1115 } 1116 1117 /* 1118 * biodone callback 1119 * 1120 * No requirements. 1121 */ 1122 static void 1123 swap_chain_iodone(struct bio *biox) 1124 { 1125 struct buf **nextp; 1126 struct buf *bufx; /* chained sub-buffer */ 1127 struct bio *nbio; /* parent nbio with chain glue */ 1128 struct buf *bp; /* original bp associated with nbio */ 1129 int chain_empty; 1130 1131 bufx = biox->bio_buf; 1132 nbio = biox->bio_caller_info1.cluster_parent; 1133 bp = nbio->bio_buf; 1134 1135 /* 1136 * Update the original buffer 1137 */ 1138 KKASSERT(bp != NULL); 1139 if (bufx->b_flags & B_ERROR) { 1140 atomic_set_int(&bufx->b_flags, B_ERROR); 1141 bp->b_error = bufx->b_error; /* race ok */ 1142 } else if (bufx->b_resid != 0) { 1143 atomic_set_int(&bufx->b_flags, B_ERROR); 1144 bp->b_error = EINVAL; /* race ok */ 1145 } else { 1146 atomic_subtract_int(&bp->b_resid, bufx->b_bcount); 1147 } 1148 1149 /* 1150 * Remove us from the chain. 1151 */ 1152 spin_lock(&bp->b_lock.lk_spinlock); 1153 nextp = &nbio->bio_caller_info1.cluster_head; 1154 while (*nextp != bufx) { 1155 KKASSERT(*nextp != NULL); 1156 nextp = &(*nextp)->b_cluster_next; 1157 } 1158 *nextp = bufx->b_cluster_next; 1159 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL); 1160 spin_unlock(&bp->b_lock.lk_spinlock); 1161 1162 /* 1163 * Clean up bufx. If the chain is now empty we finish out 1164 * the parent. Note that we may be racing other completions 1165 * so we must use the chain_empty status from above. 1166 */ 1167 if (chain_empty) { 1168 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 1169 atomic_set_int(&bp->b_flags, B_ERROR); 1170 bp->b_error = EINVAL; 1171 } 1172 biodone_chain(nbio); 1173 } 1174 relpbuf(bufx, NULL); 1175 } 1176 1177 /* 1178 * SWAP_PAGER_GETPAGES() - bring page in from swap 1179 * 1180 * The requested page may have to be brought in from swap. Calculate the 1181 * swap block and bring in additional pages if possible. All pages must 1182 * have contiguous swap block assignments and reside in the same object. 1183 * 1184 * The caller has a single vm_object_pip_add() reference prior to 1185 * calling us and we should return with the same. 1186 * 1187 * The caller has BUSY'd the page. We should return with (*mpp) left busy, 1188 * and any additinal pages unbusied. 1189 * 1190 * If the caller encounters a PG_RAM page it will pass it to us even though 1191 * it may be valid and dirty. We cannot overwrite the page in this case! 1192 * The case is used to allow us to issue pure read-aheads. 1193 * 1194 * NOTE! XXX This code does not entirely pipeline yet due to the fact that 1195 * the PG_RAM page is validated at the same time as mreq. What we 1196 * really need to do is issue a separate read-ahead pbuf. 1197 * 1198 * No requirements. 1199 */ 1200 static int 1201 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess) 1202 { 1203 struct buf *bp; 1204 struct bio *bio; 1205 vm_page_t mreq; 1206 vm_page_t m; 1207 vm_offset_t kva; 1208 swblk_t blk; 1209 int i; 1210 int j; 1211 int raonly; 1212 int error; 1213 u_int32_t flags; 1214 vm_page_t marray[XIO_INTERNAL_PAGES]; 1215 1216 mreq = *mpp; 1217 1218 vm_object_hold(object); 1219 if (mreq->object != object) { 1220 panic("swap_pager_getpages: object mismatch %p/%p", 1221 object, 1222 mreq->object 1223 ); 1224 } 1225 1226 /* 1227 * We don't want to overwrite a fully valid page as it might be 1228 * dirty. This case can occur when e.g. vm_fault hits a perfectly 1229 * valid page with PG_RAM set. 1230 * 1231 * In this case we see if the next page is a suitable page-in 1232 * candidate and if it is we issue read-ahead. PG_RAM will be 1233 * set on the last page of the read-ahead to continue the pipeline. 1234 */ 1235 if (mreq->valid == VM_PAGE_BITS_ALL) { 1236 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) { 1237 vm_object_drop(object); 1238 return(VM_PAGER_OK); 1239 } 1240 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0); 1241 if (blk == SWAPBLK_NONE) { 1242 vm_object_drop(object); 1243 return(VM_PAGER_OK); 1244 } 1245 m = vm_page_lookup_busy_try(object, mreq->pindex + 1, 1246 TRUE, &error); 1247 if (error) { 1248 vm_object_drop(object); 1249 return(VM_PAGER_OK); 1250 } else if (m == NULL) { 1251 /* 1252 * Use VM_ALLOC_QUICK to avoid blocking on cache 1253 * page reuse. 1254 */ 1255 m = vm_page_alloc(object, mreq->pindex + 1, 1256 VM_ALLOC_QUICK); 1257 if (m == NULL) { 1258 vm_object_drop(object); 1259 return(VM_PAGER_OK); 1260 } 1261 } else { 1262 if (m->valid) { 1263 vm_page_wakeup(m); 1264 vm_object_drop(object); 1265 return(VM_PAGER_OK); 1266 } 1267 vm_page_unqueue_nowakeup(m); 1268 } 1269 /* page is busy */ 1270 mreq = m; 1271 raonly = 1; 1272 } else { 1273 raonly = 0; 1274 } 1275 1276 /* 1277 * Try to block-read contiguous pages from swap if sequential, 1278 * otherwise just read one page. Contiguous pages from swap must 1279 * reside within a single device stripe because the I/O cannot be 1280 * broken up across multiple stripes. 1281 * 1282 * Note that blk and iblk can be SWAPBLK_NONE but the loop is 1283 * set up such that the case(s) are handled implicitly. 1284 */ 1285 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 1286 marray[0] = mreq; 1287 1288 for (i = 1; swap_burst_read && 1289 i < XIO_INTERNAL_PAGES && 1290 mreq->pindex + i < object->size; ++i) { 1291 swblk_t iblk; 1292 1293 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0); 1294 if (iblk != blk + i) 1295 break; 1296 if ((blk ^ iblk) & dmmax_mask) 1297 break; 1298 m = vm_page_lookup_busy_try(object, mreq->pindex + i, 1299 TRUE, &error); 1300 if (error) { 1301 break; 1302 } else if (m == NULL) { 1303 /* 1304 * Use VM_ALLOC_QUICK to avoid blocking on cache 1305 * page reuse. 1306 */ 1307 m = vm_page_alloc(object, mreq->pindex + i, 1308 VM_ALLOC_QUICK); 1309 if (m == NULL) 1310 break; 1311 } else { 1312 if (m->valid) { 1313 vm_page_wakeup(m); 1314 break; 1315 } 1316 vm_page_unqueue_nowakeup(m); 1317 } 1318 /* page is busy */ 1319 marray[i] = m; 1320 } 1321 if (i > 1) 1322 vm_page_flag_set(marray[i - 1], PG_RAM); 1323 1324 /* 1325 * If mreq is the requested page and we have nothing to do return 1326 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead 1327 * page and must be cleaned up. 1328 */ 1329 if (blk == SWAPBLK_NONE) { 1330 KKASSERT(i == 1); 1331 if (raonly) { 1332 vnode_pager_freepage(mreq); 1333 vm_object_drop(object); 1334 return(VM_PAGER_OK); 1335 } else { 1336 vm_object_drop(object); 1337 return(VM_PAGER_FAIL); 1338 } 1339 } 1340 1341 /* 1342 * map our page(s) into kva for input 1343 */ 1344 bp = getpbuf_kva(&nsw_rcount); 1345 bio = &bp->b_bio1; 1346 kva = (vm_offset_t) bp->b_kvabase; 1347 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t)); 1348 pmap_qenter(kva, bp->b_xio.xio_pages, i); 1349 1350 bp->b_data = (caddr_t)kva; 1351 bp->b_bcount = PAGE_SIZE * i; 1352 bp->b_xio.xio_npages = i; 1353 bio->bio_done = swp_pager_async_iodone; 1354 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1355 bio->bio_caller_info1.index = SWBIO_READ; 1356 1357 /* 1358 * Set index. If raonly set the index beyond the array so all 1359 * the pages are treated the same, otherwise the original mreq is 1360 * at index 0. 1361 */ 1362 if (raonly) 1363 bio->bio_driver_info = (void *)(intptr_t)i; 1364 else 1365 bio->bio_driver_info = (void *)(intptr_t)0; 1366 1367 for (j = 0; j < i; ++j) 1368 vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG); 1369 1370 mycpu->gd_cnt.v_swapin++; 1371 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages; 1372 1373 /* 1374 * We still hold the lock on mreq, and our automatic completion routine 1375 * does not remove it. 1376 */ 1377 vm_object_pip_add(object, bp->b_xio.xio_npages); 1378 1379 /* 1380 * perform the I/O. NOTE!!! bp cannot be considered valid after 1381 * this point because we automatically release it on completion. 1382 * Instead, we look at the one page we are interested in which we 1383 * still hold a lock on even through the I/O completion. 1384 * 1385 * The other pages in our m[] array are also released on completion, 1386 * so we cannot assume they are valid anymore either. 1387 */ 1388 bp->b_cmd = BUF_CMD_READ; 1389 BUF_KERNPROC(bp); 1390 vn_strategy(swapdev_vp, bio); 1391 1392 /* 1393 * Wait for the page we want to complete. PG_SWAPINPROG is always 1394 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1395 * is set in the meta-data. 1396 * 1397 * If this is a read-ahead only we return immediately without 1398 * waiting for I/O. 1399 */ 1400 if (raonly) { 1401 vm_object_drop(object); 1402 return(VM_PAGER_OK); 1403 } 1404 1405 /* 1406 * Read-ahead includes originally requested page case. 1407 */ 1408 for (;;) { 1409 flags = mreq->flags; 1410 cpu_ccfence(); 1411 if ((flags & PG_SWAPINPROG) == 0) 1412 break; 1413 tsleep_interlock(mreq, 0); 1414 if (!atomic_cmpset_int(&mreq->flags, flags, 1415 flags | PG_WANTED | PG_REFERENCED)) { 1416 continue; 1417 } 1418 mycpu->gd_cnt.v_intrans++; 1419 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) { 1420 kprintf( 1421 "swap_pager: indefinite wait buffer: " 1422 " offset: %lld, size: %ld\n", 1423 (long long)bio->bio_offset, 1424 (long)bp->b_bcount 1425 ); 1426 } 1427 } 1428 1429 /* 1430 * mreq is left bussied after completion, but all the other pages 1431 * are freed. If we had an unrecoverable read error the page will 1432 * not be valid. 1433 */ 1434 vm_object_drop(object); 1435 if (mreq->valid != VM_PAGE_BITS_ALL) 1436 return(VM_PAGER_ERROR); 1437 else 1438 return(VM_PAGER_OK); 1439 1440 /* 1441 * A final note: in a low swap situation, we cannot deallocate swap 1442 * and mark a page dirty here because the caller is likely to mark 1443 * the page clean when we return, causing the page to possibly revert 1444 * to all-zero's later. 1445 */ 1446 } 1447 1448 /* 1449 * swap_pager_putpages: 1450 * 1451 * Assign swap (if necessary) and initiate I/O on the specified pages. 1452 * 1453 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1454 * are automatically converted to SWAP objects. 1455 * 1456 * In a low memory situation we may block in vn_strategy(), but the new 1457 * vm_page reservation system coupled with properly written VFS devices 1458 * should ensure that no low-memory deadlock occurs. This is an area 1459 * which needs work. 1460 * 1461 * The parent has N vm_object_pip_add() references prior to 1462 * calling us and will remove references for rtvals[] that are 1463 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1464 * completion. 1465 * 1466 * The parent has soft-busy'd the pages it passes us and will unbusy 1467 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1468 * We need to unbusy the rest on I/O completion. 1469 * 1470 * No requirements. 1471 */ 1472 void 1473 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, 1474 boolean_t sync, int *rtvals) 1475 { 1476 int i; 1477 int n = 0; 1478 1479 vm_object_hold(object); 1480 1481 if (count && m[0]->object != object) { 1482 panic("swap_pager_getpages: object mismatch %p/%p", 1483 object, 1484 m[0]->object 1485 ); 1486 } 1487 1488 /* 1489 * Step 1 1490 * 1491 * Turn object into OBJT_SWAP 1492 * check for bogus sysops 1493 * force sync if not pageout process 1494 */ 1495 if (object->type == OBJT_DEFAULT) { 1496 if (object->type == OBJT_DEFAULT) 1497 swp_pager_meta_convert(object); 1498 } 1499 1500 if (curthread != pagethread) 1501 sync = TRUE; 1502 1503 /* 1504 * Step 2 1505 * 1506 * Update nsw parameters from swap_async_max sysctl values. 1507 * Do not let the sysop crash the machine with bogus numbers. 1508 */ 1509 if (swap_async_max != nsw_wcount_async_max) { 1510 int n; 1511 1512 /* 1513 * limit range 1514 */ 1515 if ((n = swap_async_max) > nswbuf / 2) 1516 n = nswbuf / 2; 1517 if (n < 1) 1518 n = 1; 1519 swap_async_max = n; 1520 1521 /* 1522 * Adjust difference ( if possible ). If the current async 1523 * count is too low, we may not be able to make the adjustment 1524 * at this time. 1525 * 1526 * vm_token needed for nsw_wcount sleep interlock 1527 */ 1528 lwkt_gettoken(&vm_token); 1529 n -= nsw_wcount_async_max; 1530 if (nsw_wcount_async + n >= 0) { 1531 nsw_wcount_async_max += n; 1532 pbuf_adjcount(&nsw_wcount_async, n); 1533 } 1534 lwkt_reltoken(&vm_token); 1535 } 1536 1537 /* 1538 * Step 3 1539 * 1540 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1541 * The page is left dirty until the pageout operation completes 1542 * successfully. 1543 */ 1544 1545 for (i = 0; i < count; i += n) { 1546 struct buf *bp; 1547 struct bio *bio; 1548 swblk_t blk; 1549 int j; 1550 1551 /* 1552 * Maximum I/O size is limited by a number of factors. 1553 */ 1554 1555 n = min(BLIST_MAX_ALLOC, count - i); 1556 n = min(n, nsw_cluster_max); 1557 1558 lwkt_gettoken(&vm_token); 1559 1560 /* 1561 * Get biggest block of swap we can. If we fail, fall 1562 * back and try to allocate a smaller block. Don't go 1563 * overboard trying to allocate space if it would overly 1564 * fragment swap. 1565 */ 1566 while ( 1567 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE && 1568 n > 4 1569 ) { 1570 n >>= 1; 1571 } 1572 if (blk == SWAPBLK_NONE) { 1573 for (j = 0; j < n; ++j) 1574 rtvals[i+j] = VM_PAGER_FAIL; 1575 lwkt_reltoken(&vm_token); 1576 continue; 1577 } 1578 if (vm_report_swap_allocs > 0) { 1579 kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n); 1580 --vm_report_swap_allocs; 1581 } 1582 1583 /* 1584 * The I/O we are constructing cannot cross a physical 1585 * disk boundry in the swap stripe. Note: we are still 1586 * at splvm(). 1587 */ 1588 if ((blk ^ (blk + n)) & dmmax_mask) { 1589 j = ((blk + dmmax) & dmmax_mask) - blk; 1590 swp_pager_freeswapspace(object, blk + j, n - j); 1591 n = j; 1592 } 1593 1594 /* 1595 * All I/O parameters have been satisfied, build the I/O 1596 * request and assign the swap space. 1597 */ 1598 if (sync == TRUE) 1599 bp = getpbuf_kva(&nsw_wcount_sync); 1600 else 1601 bp = getpbuf_kva(&nsw_wcount_async); 1602 bio = &bp->b_bio1; 1603 1604 lwkt_reltoken(&vm_token); 1605 1606 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 1607 1608 bp->b_bcount = PAGE_SIZE * n; 1609 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1610 1611 for (j = 0; j < n; ++j) { 1612 vm_page_t mreq = m[i+j]; 1613 1614 swp_pager_meta_build(mreq->object, mreq->pindex, 1615 blk + j); 1616 if (object->type == OBJT_SWAP) 1617 vm_page_dirty(mreq); 1618 rtvals[i+j] = VM_PAGER_OK; 1619 1620 vm_page_flag_set(mreq, PG_SWAPINPROG); 1621 bp->b_xio.xio_pages[j] = mreq; 1622 } 1623 bp->b_xio.xio_npages = n; 1624 1625 mycpu->gd_cnt.v_swapout++; 1626 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages; 1627 1628 bp->b_dirtyoff = 0; /* req'd for NFS */ 1629 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */ 1630 bp->b_cmd = BUF_CMD_WRITE; 1631 bio->bio_caller_info1.index = SWBIO_WRITE; 1632 1633 /* 1634 * asynchronous 1635 */ 1636 if (sync == FALSE) { 1637 bio->bio_done = swp_pager_async_iodone; 1638 BUF_KERNPROC(bp); 1639 vn_strategy(swapdev_vp, bio); 1640 1641 for (j = 0; j < n; ++j) 1642 rtvals[i+j] = VM_PAGER_PEND; 1643 continue; 1644 } 1645 1646 /* 1647 * Issue synchrnously. 1648 * 1649 * Wait for the sync I/O to complete, then update rtvals. 1650 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1651 * our async completion routine at the end, thus avoiding a 1652 * double-free. 1653 */ 1654 bio->bio_caller_info1.index |= SWBIO_SYNC; 1655 bio->bio_done = biodone_sync; 1656 bio->bio_flags |= BIO_SYNC; 1657 vn_strategy(swapdev_vp, bio); 1658 biowait(bio, "swwrt"); 1659 1660 for (j = 0; j < n; ++j) 1661 rtvals[i+j] = VM_PAGER_PEND; 1662 1663 /* 1664 * Now that we are through with the bp, we can call the 1665 * normal async completion, which frees everything up. 1666 */ 1667 swp_pager_async_iodone(bio); 1668 } 1669 vm_object_drop(object); 1670 } 1671 1672 /* 1673 * No requirements. 1674 */ 1675 void 1676 swap_pager_newswap(void) 1677 { 1678 swp_sizecheck(); 1679 } 1680 1681 /* 1682 * swp_pager_async_iodone: 1683 * 1684 * Completion routine for asynchronous reads and writes from/to swap. 1685 * Also called manually by synchronous code to finish up a bp. 1686 * 1687 * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 1688 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 1689 * unbusy all pages except the 'main' request page. For WRITE 1690 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1691 * because we marked them all VM_PAGER_PEND on return from putpages ). 1692 * 1693 * This routine may not block. 1694 * 1695 * No requirements. 1696 */ 1697 static void 1698 swp_pager_async_iodone(struct bio *bio) 1699 { 1700 struct buf *bp = bio->bio_buf; 1701 vm_object_t object = NULL; 1702 int i; 1703 int *nswptr; 1704 1705 /* 1706 * report error 1707 */ 1708 if (bp->b_flags & B_ERROR) { 1709 kprintf( 1710 "swap_pager: I/O error - %s failed; offset %lld," 1711 "size %ld, error %d\n", 1712 ((bio->bio_caller_info1.index & SWBIO_READ) ? 1713 "pagein" : "pageout"), 1714 (long long)bio->bio_offset, 1715 (long)bp->b_bcount, 1716 bp->b_error 1717 ); 1718 } 1719 1720 /* 1721 * set object, raise to splvm(). 1722 */ 1723 if (bp->b_xio.xio_npages) 1724 object = bp->b_xio.xio_pages[0]->object; 1725 1726 /* 1727 * remove the mapping for kernel virtual 1728 */ 1729 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages); 1730 1731 /* 1732 * cleanup pages. If an error occurs writing to swap, we are in 1733 * very serious trouble. If it happens to be a disk error, though, 1734 * we may be able to recover by reassigning the swap later on. So 1735 * in this case we remove the m->swapblk assignment for the page 1736 * but do not free it in the rlist. The errornous block(s) are thus 1737 * never reallocated as swap. Redirty the page and continue. 1738 */ 1739 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1740 vm_page_t m = bp->b_xio.xio_pages[i]; 1741 1742 if (bp->b_flags & B_ERROR) { 1743 /* 1744 * If an error occurs I'd love to throw the swapblk 1745 * away without freeing it back to swapspace, so it 1746 * can never be used again. But I can't from an 1747 * interrupt. 1748 */ 1749 1750 if (bio->bio_caller_info1.index & SWBIO_READ) { 1751 /* 1752 * When reading, reqpage needs to stay 1753 * locked for the parent, but all other 1754 * pages can be freed. We still want to 1755 * wakeup the parent waiting on the page, 1756 * though. ( also: pg_reqpage can be -1 and 1757 * not match anything ). 1758 * 1759 * We have to wake specifically requested pages 1760 * up too because we cleared PG_SWAPINPROG and 1761 * someone may be waiting for that. 1762 * 1763 * NOTE: for reads, m->dirty will probably 1764 * be overridden by the original caller of 1765 * getpages so don't play cute tricks here. 1766 * 1767 * NOTE: We can't actually free the page from 1768 * here, because this is an interrupt. It 1769 * is not legal to mess with object->memq 1770 * from an interrupt. Deactivate the page 1771 * instead. 1772 */ 1773 1774 m->valid = 0; 1775 vm_page_flag_clear(m, PG_ZERO); 1776 vm_page_flag_clear(m, PG_SWAPINPROG); 1777 1778 /* 1779 * bio_driver_info holds the requested page 1780 * index. 1781 */ 1782 if (i != (int)(intptr_t)bio->bio_driver_info) { 1783 vm_page_deactivate(m); 1784 vm_page_wakeup(m); 1785 } else { 1786 vm_page_flash(m); 1787 } 1788 /* 1789 * If i == bp->b_pager.pg_reqpage, do not wake 1790 * the page up. The caller needs to. 1791 */ 1792 } else { 1793 /* 1794 * If a write error occurs remove the swap 1795 * assignment (note that PG_SWAPPED may or 1796 * may not be set depending on prior activity). 1797 * 1798 * Re-dirty OBJT_SWAP pages as there is no 1799 * other backing store, we can't throw the 1800 * page away. 1801 * 1802 * Non-OBJT_SWAP pages (aka swapcache) must 1803 * not be dirtied since they may not have 1804 * been dirty in the first place, and they 1805 * do have backing store (the vnode). 1806 */ 1807 vm_page_busy_wait(m, FALSE, "swadpg"); 1808 swp_pager_meta_ctl(m->object, m->pindex, 1809 SWM_FREE); 1810 vm_page_flag_clear(m, PG_SWAPPED); 1811 if (m->object->type == OBJT_SWAP) { 1812 vm_page_dirty(m); 1813 vm_page_activate(m); 1814 } 1815 vm_page_flag_clear(m, PG_SWAPINPROG); 1816 vm_page_io_finish(m); 1817 vm_page_wakeup(m); 1818 } 1819 } else if (bio->bio_caller_info1.index & SWBIO_READ) { 1820 /* 1821 * NOTE: for reads, m->dirty will probably be 1822 * overridden by the original caller of getpages so 1823 * we cannot set them in order to free the underlying 1824 * swap in a low-swap situation. I don't think we'd 1825 * want to do that anyway, but it was an optimization 1826 * that existed in the old swapper for a time before 1827 * it got ripped out due to precisely this problem. 1828 * 1829 * clear PG_ZERO in page. 1830 * 1831 * If not the requested page then deactivate it. 1832 * 1833 * Note that the requested page, reqpage, is left 1834 * busied, but we still have to wake it up. The 1835 * other pages are released (unbusied) by 1836 * vm_page_wakeup(). We do not set reqpage's 1837 * valid bits here, it is up to the caller. 1838 */ 1839 1840 /* 1841 * NOTE: can't call pmap_clear_modify(m) from an 1842 * interrupt thread, the pmap code may have to map 1843 * non-kernel pmaps and currently asserts the case. 1844 */ 1845 /*pmap_clear_modify(m);*/ 1846 m->valid = VM_PAGE_BITS_ALL; 1847 vm_page_undirty(m); 1848 vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG); 1849 vm_page_flag_set(m, PG_SWAPPED); 1850 1851 /* 1852 * We have to wake specifically requested pages 1853 * up too because we cleared PG_SWAPINPROG and 1854 * could be waiting for it in getpages. However, 1855 * be sure to not unbusy getpages specifically 1856 * requested page - getpages expects it to be 1857 * left busy. 1858 * 1859 * bio_driver_info holds the requested page 1860 */ 1861 if (i != (int)(intptr_t)bio->bio_driver_info) { 1862 vm_page_deactivate(m); 1863 vm_page_wakeup(m); 1864 } else { 1865 vm_page_flash(m); 1866 } 1867 } else { 1868 /* 1869 * Mark the page clean but do not mess with the 1870 * pmap-layer's modified state. That state should 1871 * also be clear since the caller protected the 1872 * page VM_PROT_READ, but allow the case. 1873 * 1874 * We are in an interrupt, avoid pmap operations. 1875 * 1876 * If we have a severe page deficit, deactivate the 1877 * page. Do not try to cache it (which would also 1878 * involve a pmap op), because the page might still 1879 * be read-heavy. 1880 * 1881 * When using the swap to cache clean vnode pages 1882 * we do not mess with the page dirty bits. 1883 */ 1884 vm_page_busy_wait(m, FALSE, "swadpg"); 1885 if (m->object->type == OBJT_SWAP) 1886 vm_page_undirty(m); 1887 vm_page_flag_clear(m, PG_SWAPINPROG); 1888 vm_page_flag_set(m, PG_SWAPPED); 1889 if (vm_page_count_severe()) 1890 vm_page_deactivate(m); 1891 #if 0 1892 if (!vm_page_count_severe() || !vm_page_try_to_cache(m)) 1893 vm_page_protect(m, VM_PROT_READ); 1894 #endif 1895 vm_page_io_finish(m); 1896 vm_page_wakeup(m); 1897 } 1898 } 1899 1900 /* 1901 * adjust pip. NOTE: the original parent may still have its own 1902 * pip refs on the object. 1903 */ 1904 1905 if (object) 1906 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages); 1907 1908 /* 1909 * Release the physical I/O buffer. 1910 * 1911 * NOTE: Due to synchronous operations in the write case b_cmd may 1912 * already be set to BUF_CMD_DONE and BIO_SYNC may have already 1913 * been cleared. 1914 * 1915 * Use vm_token to interlock nsw_rcount/wcount wakeup? 1916 */ 1917 lwkt_gettoken(&vm_token); 1918 if (bio->bio_caller_info1.index & SWBIO_READ) 1919 nswptr = &nsw_rcount; 1920 else if (bio->bio_caller_info1.index & SWBIO_SYNC) 1921 nswptr = &nsw_wcount_sync; 1922 else 1923 nswptr = &nsw_wcount_async; 1924 bp->b_cmd = BUF_CMD_DONE; 1925 relpbuf(bp, nswptr); 1926 lwkt_reltoken(&vm_token); 1927 } 1928 1929 /* 1930 * Fault-in a potentially swapped page and remove the swap reference. 1931 * (used by swapoff code) 1932 * 1933 * object must be held. 1934 */ 1935 static __inline void 1936 swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex) 1937 { 1938 struct vnode *vp; 1939 vm_page_t m; 1940 int error; 1941 1942 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1943 1944 if (object->type == OBJT_VNODE) { 1945 /* 1946 * Any swap related to a vnode is due to swapcache. We must 1947 * vget() the vnode in case it is not active (otherwise 1948 * vref() will panic). Calling vm_object_page_remove() will 1949 * ensure that any swap ref is removed interlocked with the 1950 * page. clean_only is set to TRUE so we don't throw away 1951 * dirty pages. 1952 */ 1953 vp = object->handle; 1954 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE); 1955 if (error == 0) { 1956 vm_object_page_remove(object, pindex, pindex + 1, TRUE); 1957 vput(vp); 1958 } 1959 } else { 1960 /* 1961 * Otherwise it is a normal OBJT_SWAP object and we can 1962 * fault the page in and remove the swap. 1963 */ 1964 m = vm_fault_object_page(object, IDX_TO_OFF(pindex), 1965 VM_PROT_NONE, 1966 VM_FAULT_DIRTY | VM_FAULT_UNSWAP, 1967 sharedp, &error); 1968 if (m) 1969 vm_page_unhold(m); 1970 } 1971 } 1972 1973 /* 1974 * This removes all swap blocks related to a particular device. We have 1975 * to be careful of ripups during the scan. 1976 */ 1977 static int swp_pager_swapoff_callback(struct swblock *swap, void *data); 1978 1979 int 1980 swap_pager_swapoff(int devidx) 1981 { 1982 struct vm_object marker; 1983 vm_object_t object; 1984 struct swswapoffinfo info; 1985 1986 bzero(&marker, sizeof(marker)); 1987 marker.type = OBJT_MARKER; 1988 1989 lwkt_gettoken(&vmobj_token); 1990 TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list); 1991 1992 while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) { 1993 if (object->type == OBJT_MARKER) 1994 goto skip; 1995 if (object->type != OBJT_SWAP && object->type != OBJT_VNODE) 1996 goto skip; 1997 vm_object_hold(object); 1998 if (object->type != OBJT_SWAP && object->type != OBJT_VNODE) { 1999 vm_object_drop(object); 2000 goto skip; 2001 } 2002 info.object = object; 2003 info.shared = 0; 2004 info.devidx = devidx; 2005 swblock_rb_tree_RB_SCAN(&object->swblock_root, 2006 NULL, 2007 swp_pager_swapoff_callback, 2008 &info); 2009 vm_object_drop(object); 2010 skip: 2011 if (object == TAILQ_NEXT(&marker, object_list)) { 2012 TAILQ_REMOVE(&vm_object_list, &marker, object_list); 2013 TAILQ_INSERT_AFTER(&vm_object_list, object, 2014 &marker, object_list); 2015 } 2016 } 2017 TAILQ_REMOVE(&vm_object_list, &marker, object_list); 2018 lwkt_reltoken(&vmobj_token); 2019 2020 /* 2021 * If we fail to locate all swblocks we just fail gracefully and 2022 * do not bother to restore paging on the swap device. If the 2023 * user wants to retry the user can retry. 2024 */ 2025 if (swdevt[devidx].sw_nused) 2026 return (1); 2027 else 2028 return (0); 2029 } 2030 2031 static 2032 int 2033 swp_pager_swapoff_callback(struct swblock *swap, void *data) 2034 { 2035 struct swswapoffinfo *info = data; 2036 vm_object_t object = info->object; 2037 vm_pindex_t index; 2038 swblk_t v; 2039 int i; 2040 2041 index = swap->swb_index; 2042 for (i = 0; i < SWAP_META_PAGES; ++i) { 2043 /* 2044 * Make sure we don't race a dying object. This will 2045 * kill the scan of the object's swap blocks entirely. 2046 */ 2047 if (object->flags & OBJ_DEAD) 2048 return(-1); 2049 2050 /* 2051 * Fault the page, which can obviously block. If the swap 2052 * structure disappears break out. 2053 */ 2054 v = swap->swb_pages[i]; 2055 if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) { 2056 swp_pager_fault_page(object, &info->shared, 2057 swap->swb_index + i); 2058 /* swap ptr might go away */ 2059 if (RB_LOOKUP(swblock_rb_tree, 2060 &object->swblock_root, index) != swap) { 2061 break; 2062 } 2063 } 2064 } 2065 return(0); 2066 } 2067 2068 /************************************************************************ 2069 * SWAP META DATA * 2070 ************************************************************************ 2071 * 2072 * These routines manipulate the swap metadata stored in the 2073 * OBJT_SWAP object. All swp_*() routines must be called at 2074 * splvm() because swap can be freed up by the low level vm_page 2075 * code which might be called from interrupts beyond what splbio() covers. 2076 * 2077 * Swap metadata is implemented with a global hash and not directly 2078 * linked into the object. Instead the object simply contains 2079 * appropriate tracking counters. 2080 */ 2081 2082 /* 2083 * Lookup the swblock containing the specified swap block index. 2084 * 2085 * The caller must hold the object. 2086 */ 2087 static __inline 2088 struct swblock * 2089 swp_pager_lookup(vm_object_t object, vm_pindex_t index) 2090 { 2091 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2092 index &= ~(vm_pindex_t)SWAP_META_MASK; 2093 return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index)); 2094 } 2095 2096 /* 2097 * Remove a swblock from the RB tree. 2098 * 2099 * The caller must hold the object. 2100 */ 2101 static __inline 2102 void 2103 swp_pager_remove(vm_object_t object, struct swblock *swap) 2104 { 2105 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2106 RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap); 2107 } 2108 2109 /* 2110 * Convert default object to swap object if necessary 2111 * 2112 * The caller must hold the object. 2113 */ 2114 static void 2115 swp_pager_meta_convert(vm_object_t object) 2116 { 2117 if (object->type == OBJT_DEFAULT) { 2118 object->type = OBJT_SWAP; 2119 KKASSERT(object->swblock_count == 0); 2120 } 2121 } 2122 2123 /* 2124 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 2125 * 2126 * We first convert the object to a swap object if it is a default 2127 * object. Vnode objects do not need to be converted. 2128 * 2129 * The specified swapblk is added to the object's swap metadata. If 2130 * the swapblk is not valid, it is freed instead. Any previously 2131 * assigned swapblk is freed. 2132 * 2133 * The caller must hold the object. 2134 */ 2135 static void 2136 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk) 2137 { 2138 struct swblock *swap; 2139 struct swblock *oswap; 2140 vm_pindex_t v; 2141 2142 KKASSERT(swapblk != SWAPBLK_NONE); 2143 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2144 2145 /* 2146 * Convert object if necessary 2147 */ 2148 if (object->type == OBJT_DEFAULT) 2149 swp_pager_meta_convert(object); 2150 2151 /* 2152 * Locate swblock. If not found create, but if we aren't adding 2153 * anything just return. If we run out of space in the map we wait 2154 * and, since the hash table may have changed, retry. 2155 */ 2156 retry: 2157 swap = swp_pager_lookup(object, index); 2158 2159 if (swap == NULL) { 2160 int i; 2161 2162 swap = zalloc(swap_zone); 2163 if (swap == NULL) { 2164 vm_wait(0); 2165 goto retry; 2166 } 2167 swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK; 2168 swap->swb_count = 0; 2169 2170 ++object->swblock_count; 2171 2172 for (i = 0; i < SWAP_META_PAGES; ++i) 2173 swap->swb_pages[i] = SWAPBLK_NONE; 2174 oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap); 2175 KKASSERT(oswap == NULL); 2176 } 2177 2178 /* 2179 * Delete prior contents of metadata. 2180 * 2181 * NOTE: Decrement swb_count after the freeing operation (which 2182 * might block) to prevent racing destruction of the swblock. 2183 */ 2184 index &= SWAP_META_MASK; 2185 2186 while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) { 2187 swap->swb_pages[index] = SWAPBLK_NONE; 2188 /* can block */ 2189 swp_pager_freeswapspace(object, v, 1); 2190 --swap->swb_count; 2191 --mycpu->gd_vmtotal.t_vm; 2192 } 2193 2194 /* 2195 * Enter block into metadata 2196 */ 2197 swap->swb_pages[index] = swapblk; 2198 if (swapblk != SWAPBLK_NONE) { 2199 ++swap->swb_count; 2200 ++mycpu->gd_vmtotal.t_vm; 2201 } 2202 } 2203 2204 /* 2205 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 2206 * 2207 * The requested range of blocks is freed, with any associated swap 2208 * returned to the swap bitmap. 2209 * 2210 * This routine will free swap metadata structures as they are cleaned 2211 * out. This routine does *NOT* operate on swap metadata associated 2212 * with resident pages. 2213 * 2214 * The caller must hold the object. 2215 */ 2216 static int swp_pager_meta_free_callback(struct swblock *swb, void *data); 2217 2218 static void 2219 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count) 2220 { 2221 struct swfreeinfo info; 2222 2223 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2224 2225 /* 2226 * Nothing to do 2227 */ 2228 if (object->swblock_count == 0) { 2229 KKASSERT(RB_EMPTY(&object->swblock_root)); 2230 return; 2231 } 2232 if (count == 0) 2233 return; 2234 2235 /* 2236 * Setup for RB tree scan. Note that the pindex range can be huge 2237 * due to the 64 bit page index space so we cannot safely iterate. 2238 */ 2239 info.object = object; 2240 info.basei = index & ~(vm_pindex_t)SWAP_META_MASK; 2241 info.begi = index; 2242 info.endi = index + count - 1; 2243 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp, 2244 swp_pager_meta_free_callback, &info); 2245 } 2246 2247 /* 2248 * The caller must hold the object. 2249 */ 2250 static 2251 int 2252 swp_pager_meta_free_callback(struct swblock *swap, void *data) 2253 { 2254 struct swfreeinfo *info = data; 2255 vm_object_t object = info->object; 2256 int index; 2257 int eindex; 2258 2259 /* 2260 * Figure out the range within the swblock. The wider scan may 2261 * return edge-case swap blocks when the start and/or end points 2262 * are in the middle of a block. 2263 */ 2264 if (swap->swb_index < info->begi) 2265 index = (int)info->begi & SWAP_META_MASK; 2266 else 2267 index = 0; 2268 2269 if (swap->swb_index + SWAP_META_PAGES > info->endi) 2270 eindex = (int)info->endi & SWAP_META_MASK; 2271 else 2272 eindex = SWAP_META_MASK; 2273 2274 /* 2275 * Scan and free the blocks. The loop terminates early 2276 * if (swap) runs out of blocks and could be freed. 2277 * 2278 * NOTE: Decrement swb_count after swp_pager_freeswapspace() 2279 * to deal with a zfree race. 2280 */ 2281 while (index <= eindex) { 2282 swblk_t v = swap->swb_pages[index]; 2283 2284 if (v != SWAPBLK_NONE) { 2285 swap->swb_pages[index] = SWAPBLK_NONE; 2286 /* can block */ 2287 swp_pager_freeswapspace(object, v, 1); 2288 --mycpu->gd_vmtotal.t_vm; 2289 if (--swap->swb_count == 0) { 2290 swp_pager_remove(object, swap); 2291 zfree(swap_zone, swap); 2292 --object->swblock_count; 2293 break; 2294 } 2295 } 2296 ++index; 2297 } 2298 2299 /* swap may be invalid here due to zfree above */ 2300 lwkt_yield(); 2301 2302 return(0); 2303 } 2304 2305 /* 2306 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 2307 * 2308 * This routine locates and destroys all swap metadata associated with 2309 * an object. 2310 * 2311 * NOTE: Decrement swb_count after the freeing operation (which 2312 * might block) to prevent racing destruction of the swblock. 2313 * 2314 * The caller must hold the object. 2315 */ 2316 static void 2317 swp_pager_meta_free_all(vm_object_t object) 2318 { 2319 struct swblock *swap; 2320 int i; 2321 2322 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2323 2324 while ((swap = RB_ROOT(&object->swblock_root)) != NULL) { 2325 swp_pager_remove(object, swap); 2326 for (i = 0; i < SWAP_META_PAGES; ++i) { 2327 swblk_t v = swap->swb_pages[i]; 2328 if (v != SWAPBLK_NONE) { 2329 /* can block */ 2330 swp_pager_freeswapspace(object, v, 1); 2331 --swap->swb_count; 2332 --mycpu->gd_vmtotal.t_vm; 2333 } 2334 } 2335 if (swap->swb_count != 0) 2336 panic("swap_pager_meta_free_all: swb_count != 0"); 2337 zfree(swap_zone, swap); 2338 --object->swblock_count; 2339 lwkt_yield(); 2340 } 2341 KKASSERT(object->swblock_count == 0); 2342 } 2343 2344 /* 2345 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 2346 * 2347 * This routine is capable of looking up, popping, or freeing 2348 * swapblk assignments in the swap meta data or in the vm_page_t. 2349 * The routine typically returns the swapblk being looked-up, or popped, 2350 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 2351 * was invalid. This routine will automatically free any invalid 2352 * meta-data swapblks. 2353 * 2354 * It is not possible to store invalid swapblks in the swap meta data 2355 * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 2356 * 2357 * When acting on a busy resident page and paging is in progress, we 2358 * have to wait until paging is complete but otherwise can act on the 2359 * busy page. 2360 * 2361 * SWM_FREE remove and free swap block from metadata 2362 * SWM_POP remove from meta data but do not free.. pop it out 2363 * 2364 * The caller must hold the object. 2365 */ 2366 static swblk_t 2367 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags) 2368 { 2369 struct swblock *swap; 2370 swblk_t r1; 2371 2372 if (object->swblock_count == 0) 2373 return(SWAPBLK_NONE); 2374 2375 r1 = SWAPBLK_NONE; 2376 swap = swp_pager_lookup(object, index); 2377 2378 if (swap != NULL) { 2379 index &= SWAP_META_MASK; 2380 r1 = swap->swb_pages[index]; 2381 2382 if (r1 != SWAPBLK_NONE) { 2383 if (flags & (SWM_FREE|SWM_POP)) { 2384 swap->swb_pages[index] = SWAPBLK_NONE; 2385 --mycpu->gd_vmtotal.t_vm; 2386 if (--swap->swb_count == 0) { 2387 swp_pager_remove(object, swap); 2388 zfree(swap_zone, swap); 2389 --object->swblock_count; 2390 } 2391 } 2392 /* swap ptr may be invalid */ 2393 if (flags & SWM_FREE) { 2394 swp_pager_freeswapspace(object, r1, 1); 2395 r1 = SWAPBLK_NONE; 2396 } 2397 } 2398 /* swap ptr may be invalid */ 2399 } 2400 return(r1); 2401 } 2402