/*
 * Copyright (c) 1991, 1993, 2013
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 */

/*
 * Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <vm/vm_page2.h>

#include <machine/specialreg.h>

#define EASY_SCAN_FACTOR	8

static void	vm_object_qcollapse(vm_object_t object,
				    vm_object_t backing_object);
static void	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					     int pagerflags);
static void	vm_object_lock_init(vm_object_t);


/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 *
 */

struct vm_object kernel_object;

static long object_collapses;
static long object_bypasses;

struct vm_object_hash vm_object_hash[VMOBJ_HSIZE];

MALLOC_DEFINE(M_VM_OBJECT, "vm_object", "vm_object structures");

#if defined(DEBUG_LOCKS)

#define vm_object_vndeallocate(obj, vpp)	\
		debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)

/*
 * Debug helper to track hold/drop/ref/deallocate calls.
 */
static void
debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
{
	int i;

	i = atomic_fetchadd_int(&obj->debug_index, 1);
	i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
	ksnprintf(obj->debug_hold_thrs[i],
		  sizeof(obj->debug_hold_thrs[i]),
		  "%c%d:(%d):%s",
		  (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
		  (curthread->td_proc ? curthread->td_proc->p_pid : -1),
		  obj->ref_count,
		  curthread->td_comm);
	obj->debug_hold_file[i] = file;
	obj->debug_hold_line[i] = line;
#if 0
	/* Uncomment for debugging obj refs/derefs in reproducible cases */
	if (strcmp(curthread->td_comm, "sshd") == 0) {
		kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
			(curthread->td_proc ? curthread->td_proc->p_pid : -1),
			obj, obj->ref_count, addrem, file, line);
	}
#endif
}

#endif

/*
 * Misc low level routines
 */
static void
vm_object_lock_init(vm_object_t obj)
{
#if defined(DEBUG_LOCKS)
	int i;

	obj->debug_index = 0;
	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
		obj->debug_hold_thrs[i][0] = 0;
		obj->debug_hold_file[i] = NULL;
		obj->debug_hold_line[i] = 0;
	}
#endif
}

void
vm_object_lock_swap(void)
{
	lwkt_token_swap();
}

void
vm_object_lock(vm_object_t obj)
{
	lwkt_gettoken(&obj->token);
}

/*
 * Returns TRUE on success
 */
static int
vm_object_lock_try(vm_object_t obj)
{
	return(lwkt_trytoken(&obj->token));
}

void
vm_object_lock_shared(vm_object_t obj)
{
	lwkt_gettoken_shared(&obj->token);
}

void
vm_object_unlock(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
}

void
vm_object_upgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken(&obj->token);
}

void
vm_object_downgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken_shared(&obj->token);
}

static __inline void
vm_object_assert_held(vm_object_t obj)
{
	ASSERT_LWKT_TOKEN_HELD(&obj->token);
}

static __inline int
vm_quickcolor(void)
{
	globaldata_t gd = mycpu;
	int pg_color;

	pg_color = (int)(intptr_t)gd->gd_curthread >> 10;
	pg_color += gd->gd_quick_color;
	gd->gd_quick_color += PQ_PRIME2;

	return pg_color;
}

void
VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to callers
	 * context, typically already holding the token on a parent object)
	 * prior to potentially blocking on the lock, otherwise the object
	 * can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}

int
VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to callers
	 * context, typically already holding the token on a parent object)
	 * prior to potentially blocking on the lock, otherwise the object
	 * can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	if (vm_object_lock_try(obj) == 0) {
		if (refcount_release(&obj->hold_count)) {
			if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
				kfree(obj, M_VM_OBJECT);
		}
		return(0);
	}

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
	return(1);
}

void
VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to callers
	 * context, typically already holding the token on a parent object)
	 * prior to potentially blocking on the lock, otherwise the object
	 * can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock_shared(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}

/*
 * Drop the token and hold_count on the object.
 *
 * WARNING! Token might be shared.
 */
void
VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS)
{
	if (obj == NULL)
		return;

	/*
	 * No new holders should be possible once we drop hold_count 1->0 as
	 * there is no longer any way to reference the object.
	 */
	KKASSERT(obj->hold_count > 0);
	if (refcount_release(&obj->hold_count)) {
#if defined(DEBUG_LOCKS)
		debugvm_object_add(obj, file, line, -1);
#endif

		if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
			vm_object_unlock(obj);
			kfree(obj, M_VM_OBJECT);
		} else {
			vm_object_unlock(obj);
		}
	} else {
#if defined(DEBUG_LOCKS)
		debugvm_object_add(obj, file, line, -1);
#endif
		vm_object_unlock(obj);
	}
}

/*
 * Initialize a freshly allocated object, returning a held object.
 *
 * Used only by vm_object_allocate(), zinitna() and vm_object_init().
 *
 * No requirements.
 */
void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	struct vm_object_hash *hash;

	RB_INIT(&object->rb_memq);
	LIST_INIT(&object->shadow_head);
	lwkt_token_init(&object->token, "vmobj");

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->hold_count = 0;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->agg_pv_list_count = 0;
	object->shadow_count = 0;
	/* cpu localization twist */
	object->pg_color = vm_quickcolor();
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t)0;

	object->generation++;
	object->swblock_count = 0;
	RB_INIT(&object->swblock_root);
	vm_object_lock_init(object);
	pmap_object_init(object);

	vm_object_hold(object);

	hash = VMOBJ_HASH(object);
	lwkt_gettoken(&hash->token);
	TAILQ_INSERT_TAIL(&hash->list, object, object_list);
	lwkt_reltoken(&hash->token);
}

/*
 * Initialize a VM object.
 */
void
vm_object_init(vm_object_t object, vm_pindex_t size)
{
	_vm_object_allocate(OBJT_DEFAULT, size, object);
	vm_object_drop(object);
}

/*
 * Initialize the VM objects module.
 *
 * Called from the low level boot code only.  Note that this occurs before
 * kmalloc is initialized so we cannot allocate any VM objects.
 */
void
vm_object_init1(void)
{
	int i;

	for (i = 0; i < VMOBJ_HSIZE; ++i) {
		TAILQ_INIT(&vm_object_hash[i].list);
		lwkt_token_init(&vm_object_hash[i].token, "vmobjlst");
	}

	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
			    &kernel_object);
	vm_object_drop(&kernel_object);
}

void
vm_object_init2(void)
{
	kmalloc_set_unlimited(M_VM_OBJECT);
}

/*
 * Allocate and return a new object of the specified type and size.
 *
 * No requirements.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t obj;

	obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
	_vm_object_allocate(type, size, obj);
	vm_object_drop(obj);

	return (obj);
}

/*
 * This version returns a held object, allowing further atomic initialization
 * of the object.
 */
vm_object_t
vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
{
	vm_object_t obj;

	obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
	_vm_object_allocate(type, size, obj);

	return (obj);
}

/*
 * Add an additional reference to a vm_object.  The object must already be
 * held.  The original non-lock version is no longer supported.  The object
 * must NOT be chain locked by anyone at the time the reference is added.
 *
 * Referencing a chain-locked object can blow up the fairly sensitive
 * ref_count and shadow_count tests in the deallocator.  Most callers
 * will call vm_object_chain_wait() prior to calling
 * vm_object_reference_locked() to avoid the case.
 *
 * The object must be held, but may be held shared if desired (hence why
 * we use an atomic op).
 */
void
VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS)
{
	KKASSERT(object != NULL);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	KKASSERT((object->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) == 0);
	atomic_add_int(&object->ref_count, 1);
	if (object->type == OBJT_VNODE) {
		vref(object->handle);
		/* XXX what if the vnode is being destroyed? */
	}
#if defined(DEBUG_LOCKS)
	debugvm_object_add(object, file, line, 1);
#endif
}

/*
 * This version is only allowed for vnode objects.
 */
void
VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS)
{
	KKASSERT(object->type == OBJT_VNODE);
	atomic_add_int(&object->ref_count, 1);
	vref(object->handle);
#if defined(DEBUG_LOCKS)
	debugvm_object_add(object, file, line, 1);
#endif
}

/*
 * Object OBJ_CHAINLOCK lock handling.
 *
 * The caller can chain-lock backing objects recursively and then
 * use vm_object_chain_release_all() to undo the whole chain.
 *
 * Chain locks are used to prevent collapses and are only applicable
 * to OBJT_DEFAULT and OBJT_SWAP objects.  Chain locking operations
 * on other object types are ignored.  This is also important because
 * it allows e.g. the vnode underlying a memory mapping to take concurrent
 * faults.
 *
 * The object must usually be held on entry, though intermediate
 * objects need not be held on release.  The object must be held exclusively,
 * NOT shared.  Note that the prefault path checks the shared state and
 * avoids using the chain functions.
 */
void
vm_object_chain_wait(vm_object_t object, int shared)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	for (;;) {
		uint32_t chainlk = object->chainlk;

		cpu_ccfence();
		if (shared) {
			if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) {
				tsleep_interlock(object, 0);
				if (atomic_cmpset_int(&object->chainlk,
						      chainlk,
						      chainlk | CHAINLK_WAIT)) {
					tsleep(object, PINTERLOCKED,
					       "objchns", 0);
				}
				/* retry */
			} else {
				break;
			}
			/* retry */
		} else {
			if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) {
				tsleep_interlock(object, 0);
				if (atomic_cmpset_int(&object->chainlk,
						      chainlk,
						      chainlk | CHAINLK_WAIT))
				{
					tsleep(object, PINTERLOCKED,
					       "objchnx", 0);
				}
				/* retry */
			} else {
				if (atomic_cmpset_int(&object->chainlk,
						      chainlk,
						      chainlk & ~CHAINLK_WAIT))
				{
					if (chainlk & CHAINLK_WAIT)
						wakeup(object);
					break;
				}
				/* retry */
			}
		}
		/* retry */
	}
}

void
vm_object_chain_acquire(vm_object_t object, int shared)
{
	if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP)
		return;
	if (vm_shared_fault == 0)
		shared = 0;

	for (;;) {
		uint32_t chainlk = object->chainlk;

		cpu_ccfence();
		if (shared) {
			if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) {
				tsleep_interlock(object, 0);
				if (atomic_cmpset_int(&object->chainlk,
						      chainlk,
						      chainlk | CHAINLK_WAIT)) {
					tsleep(object, PINTERLOCKED,
					       "objchns", 0);
				}
				/* retry */
			} else if (atomic_cmpset_int(&object->chainlk,
						     chainlk, chainlk + 1)) {
				break;
			}
			/* retry */
		} else {
			if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) {
				tsleep_interlock(object, 0);
				if (atomic_cmpset_int(&object->chainlk,
						      chainlk,
						      chainlk |
						       CHAINLK_WAIT |
						       CHAINLK_EXCLREQ)) {
					tsleep(object, PINTERLOCKED,
					       "objchnx", 0);
				}
				/* retry */
			} else {
				if (atomic_cmpset_int(&object->chainlk,
						      chainlk,
						      (chainlk | CHAINLK_EXCL) &
						      ~(CHAINLK_EXCLREQ |
							CHAINLK_WAIT))) {
					if (chainlk & CHAINLK_WAIT)
						wakeup(object);
					break;
				}
				/* retry */
			}
		}
		/* retry */
	}
}

void
vm_object_chain_release(vm_object_t object)
{
	/*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/
	if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP)
		return;
	KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL));
	for (;;) {
		uint32_t chainlk = object->chainlk;

		cpu_ccfence();
		if (chainlk & CHAINLK_MASK) {
			if ((chainlk & CHAINLK_MASK) == 1 &&
			    atomic_cmpset_int(&object->chainlk,
					      chainlk,
					      (chainlk - 1) & ~CHAINLK_WAIT)) {
				if (chainlk & CHAINLK_WAIT)
					wakeup(object);
				break;
			}
			if ((chainlk & CHAINLK_MASK) > 1 &&
			    atomic_cmpset_int(&object->chainlk,
					      chainlk, chainlk - 1)) {
				break;
			}
			/* retry */
		} else {
			KKASSERT(chainlk & CHAINLK_EXCL);
			if (atomic_cmpset_int(&object->chainlk,
					      chainlk,
					      chainlk & ~(CHAINLK_EXCL |
							  CHAINLK_WAIT))) {
				if (chainlk & CHAINLK_WAIT)
					wakeup(object);
				break;
			}
		}
	}
}

/*
 * Release the chain from first_object through and including stopobj.
 * The caller is typically holding the first and last object locked
 * (shared or exclusive) to prevent destruction races.
 *
 * We release stopobj first as an optimization as this object is most
 * likely to be shared across multiple processes.
 */
void
vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
{
	vm_object_t backing_object;
	vm_object_t object;

	vm_object_chain_release(stopobj);
	object = first_object;

	while (object != stopobj) {
		KKASSERT(object);
		backing_object = object->backing_object;
		vm_object_chain_release(object);
		object = backing_object;
	}
}

/*
 * Dereference an object and its underlying vnode.  The object may be
 * held shared.  On return the object will remain held.
 *
 * This function may return a vnode in *vpp which the caller must release
 * after the caller drops its own lock.  If vpp is NULL, we assume that
 * the caller was holding an exclusive lock on the object and we vrele()
 * the vp ourselves.
 */
static void
VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
				   VMOBJDBARGS)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
		("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif
	for (;;) {
		int count = object->ref_count;
		cpu_ccfence();
		if (count == 1) {
			vm_object_upgrade(object);
			if (atomic_cmpset_int(&object->ref_count, count, 0)) {
				vclrflags(vp, VTEXT);
				break;
			}
		} else {
			if (atomic_cmpset_int(&object->ref_count,
					      count, count - 1)) {
				break;
			}
		}
		/* retry */
	}
#if defined(DEBUG_LOCKS)
	debugvm_object_add(object, file, line, -1);
#endif

	/*
	 * vrele or return the vp to vrele.  We can only safely vrele(vp)
	 * if the object was locked exclusively.  But there are two races
	 * here.
	 *
	 * We had to upgrade the object above to safely clear VTEXT
	 * but the alternative path where the shared lock is retained
	 * can STILL race to 0 in other paths and cause our own vrele()
	 * to terminate the vnode.  We can't allow that if the VM object
	 * is still locked shared.
	 */
	if (vpp)
		*vpp = vp;
	else
		vrele(vp);
}

/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 *
 * The caller does not have to hold the object locked but must have control
 * over the reference in question in order to guarantee that the object
 * does not get ripped out from under us.
 *
 * XXX Currently all deallocations require an exclusive lock.
 */
void
VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
{
	struct vnode *vp;
	int count;

	if (object == NULL)
		return;

	for (;;) {
		count = object->ref_count;
		cpu_ccfence();

		/*
		 * If decrementing the count enters into special handling
		 * territory (0, 1, or 2) we have to do it the hard way.
		 * Fortunately, objects with only a few refs like this
		 * are not likely to be heavily contended anyway.
		 *
		 * For vnode objects we only care about 1->0 transitions.
		 */
		if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) {
#if defined(DEBUG_LOCKS)
			debugvm_object_add(object, file, line, 0);
#endif
			vm_object_hold(object);
			vm_object_deallocate_locked(object);
			vm_object_drop(object);
			break;
		}

		/*
		 * Try to decrement ref_count without acquiring a hold on
		 * the object.  This is particularly important for the exec*()
		 * and exit*() code paths because the program binary may
		 * have a great deal of sharing and an exclusive lock will
		 * crowbar performance in those circumstances.
		 */
		if (object->type == OBJT_VNODE) {
			vp = (struct vnode *)object->handle;
			if (atomic_cmpset_int(&object->ref_count,
					      count, count - 1)) {
#if defined(DEBUG_LOCKS)
				debugvm_object_add(object, file, line, -1);
#endif

				vrele(vp);
				break;
			}
			/* retry */
		} else {
			if (atomic_cmpset_int(&object->ref_count,
					      count, count - 1)) {
#if defined(DEBUG_LOCKS)
				debugvm_object_add(object, file, line, -1);
#endif
				break;
			}
			/* retry */
		}
		/* retry */
	}
}

void
VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS)
{
	struct vm_object_dealloc_list *dlist = NULL;
	struct vm_object_dealloc_list *dtmp;
	vm_object_t temp;
	int must_drop = 0;

	/*
	 * We may chain deallocate object, but additional objects may
	 * collect on the dlist which also have to be deallocated.  We
	 * must avoid a recursion, vm_object chains can get deep.
	 */

again:
	while (object != NULL) {
		/*
		 * vnode case, caller either locked the object exclusively
		 * or this is a recursion with must_drop != 0 and the vnode
		 * object will be locked shared.
		 *
		 * If locked shared we have to drop the object before we can
		 * call vrele() or risk a shared/exclusive livelock.
		 */
		if (object->type == OBJT_VNODE) {
			ASSERT_LWKT_TOKEN_HELD(&object->token);
			if (must_drop) {
				struct vnode *tmp_vp;

				vm_object_vndeallocate(object, &tmp_vp);
				vm_object_drop(object);
				must_drop = 0;
				object = NULL;
				vrele(tmp_vp);
			} else {
				vm_object_vndeallocate(object, NULL);
			}
			break;
		}
		ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token);

		/*
		 * Normal case (object is locked exclusively)
		 */
		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated "
			      "too many times: %d", object->type);
		}
		if (object->ref_count > 2) {
			atomic_add_int(&object->ref_count, -1);
#if defined(DEBUG_LOCKS)
			debugvm_object_add(object, file, line, -1);
#endif
			break;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 *
		 * Nominal ref_count > 1 case if the second ref is not from
		 * a shadow.
		 *
		 * (ONEMAPPING only applies to DEFAULT AND SWAP objects)
		 */
		if (object->ref_count == 2 && object->shadow_count == 0) {
			if (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			}
			atomic_add_int(&object->ref_count, -1);
#if defined(DEBUG_LOCKS)
			debugvm_object_add(object, file, line, -1);
#endif
			break;
		}

		/*
		 * If the second ref is from a shadow we chain along it
		 * upwards if object's handle is exhausted.
		 *
		 * We have to decrement object->ref_count before potentially
		 * collapsing the first shadow object or the collapse code
		 * will not be able to handle the degenerate case to remove
		 * object.  However, if we do it too early the object can
		 * get ripped out from under us.
		 */
		if (object->ref_count == 2 && object->shadow_count == 1 &&
		    object->handle == NULL && (object->type == OBJT_DEFAULT ||
					       object->type == OBJT_SWAP)) {
			temp = LIST_FIRST(&object->shadow_head);
			KKASSERT(temp != NULL);
			vm_object_hold(temp);

			/*
			 * Wait for any paging to complete so the collapse
			 * doesn't (or isn't likely to) qcollapse.  pip
			 * waiting must occur before we acquire the
			 * chainlock.
			 */
			while (
				temp->paging_in_progress ||
				object->paging_in_progress
			) {
				vm_object_pip_wait(temp, "objde1");
				vm_object_pip_wait(object, "objde2");
			}

			/*
			 * If the parent is locked we have to give up, as
			 * otherwise we would be acquiring locks in the
			 * wrong order and potentially deadlock.
			 */
			if (temp->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) {
				vm_object_drop(temp);
				goto skip;
			}
			vm_object_chain_acquire(temp, 0);

			/*
			 * Recheck/retry after the hold and the paging
			 * wait, both of which can block us.
			 */
			if (object->ref_count != 2 ||
			    object->shadow_count != 1 ||
			    object->handle ||
			    LIST_FIRST(&object->shadow_head) != temp ||
			    (object->type != OBJT_DEFAULT &&
			     object->type != OBJT_SWAP)) {
				vm_object_chain_release(temp);
				vm_object_drop(temp);
				continue;
			}

			/*
			 * We can safely drop object's ref_count now.
			 */
			KKASSERT(object->ref_count == 2);
			atomic_add_int(&object->ref_count, -1);
#if defined(DEBUG_LOCKS)
			debugvm_object_add(object, file, line, -1);
#endif

			/*
			 * If our single parent is not collapsible just
			 * decrement ref_count (2->1) and stop.
			 */
			if (temp->handle || (temp->type != OBJT_DEFAULT &&
					     temp->type != OBJT_SWAP)) {
				vm_object_chain_release(temp);
				vm_object_drop(temp);
				break;
			}

			/*
			 * At this point we have already dropped object's
			 * ref_count so it is possible for a race to
			 * deallocate obj out from under us.  Any collapse
			 * will re-check the situation.  We must not block
			 * until we are able to collapse.
			 *
			 * Bump temp's ref_count to avoid an unwanted
			 * degenerate recursion (can't call
			 * vm_object_reference_locked() because it asserts
			 * that CHAINLOCK is not set).
			 */
			atomic_add_int(&temp->ref_count, 1);
			KKASSERT(temp->ref_count > 1);

			/*
			 * Collapse temp, then deallocate the extra ref
			 * formally.
			 */
			vm_object_collapse(temp, &dlist);
			vm_object_chain_release(temp);
			if (must_drop) {
				vm_object_lock_swap();
				vm_object_drop(object);
			}
			object = temp;
			must_drop = 1;
			continue;
		}

		/*
		 * Drop the ref and handle termination on the 1->0 transition.
		 * We may have blocked above so we have to recheck.
		 */
skip:
		KKASSERT(object->ref_count != 0);
		if (object->ref_count >= 2) {
			atomic_add_int(&object->ref_count, -1);
#if defined(DEBUG_LOCKS)
			debugvm_object_add(object, file, line, -1);
#endif
			break;
		}
		KKASSERT(object->ref_count == 1);

		/*
		 * 1->0 transition.  Chain through the backing_object.
		 * Maintain the ref until we've located the backing object,
		 * then re-check.
		 */
		while ((temp = object->backing_object) != NULL) {
			if (temp->type == OBJT_VNODE)
				vm_object_hold_shared(temp);
			else
				vm_object_hold(temp);
			if (temp == object->backing_object)
				break;
			vm_object_drop(temp);
		}

		/*
		 * 1->0 transition verified, retry if ref_count is no longer
		 * 1.  Otherwise disconnect the backing_object (temp) and
		 * clean up.
		 */
		if (object->ref_count != 1) {
			vm_object_drop(temp);
			continue;
		}

		/*
		 * It shouldn't be possible for the object to be chain locked
		 * if we're removing the last ref on it.
		 *
		 * Removing object from temp's shadow list requires dropping
		 * temp, which we will do on loop.
		 *
		 * NOTE! vnodes do not use the shadow list, but still have
		 *	 the backing_object reference.
		 */
		KKASSERT((object->chainlk & (CHAINLK_EXCL|CHAINLK_MASK)) == 0);

		if (temp) {
			if (object->flags & OBJ_ONSHADOW) {
				LIST_REMOVE(object, shadow_list);
				temp->shadow_count--;
				temp->generation++;
				vm_object_clear_flag(object, OBJ_ONSHADOW);
			}
			object->backing_object = NULL;
		}

		atomic_add_int(&object->ref_count, -1);
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		if (must_drop && temp)
			vm_object_lock_swap();
		if (must_drop)
			vm_object_drop(object);
		object = temp;
		must_drop = 1;
	}

	if (must_drop && object)
		vm_object_drop(object);

	/*
	 * Additional tail recursion on dlist.  Avoid a recursion.  Objects
	 * on the dlist have a hold count but are not locked.
	 */
	if ((dtmp = dlist) != NULL) {
		dlist = dtmp->next;
		object = dtmp->object;
		kfree(dtmp, M_TEMP);

		vm_object_lock(object);	/* already held, add lock */
		must_drop = 1;		/* and we're responsible for it */
		goto again;
	}
}

/*
 * Destroy the specified object, freeing up related resources.
 *
 * The object must have zero references.
 *
 * The object must be held.  The caller is responsible for dropping the
 * object after terminate returns.  Terminate does NOT drop the object.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
	struct rb_vm_page_scan_info info;
	struct vm_object_hash *hash;

	/*
	 * Make sure no one uses us.  Once we set OBJ_DEAD we should be
	 * able to safely block.
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	KKASSERT((object->flags & OBJ_DEAD) == 0);
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * Wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm1");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Clean pages and flush buffers.
		 *
		 * NOTE!  TMPFS buffer flushes do not typically flush the
		 *	  actual page to swap as this would be highly
		 *	  inefficient, and normal filesystems usually wrap
		 *	  page flushes with buffer cache buffers.
		 *
		 *	  To deal with this we have to call vinvalbuf() both
		 *	  before and after the vm_object_page_clean().
		 */
		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, 0, 0);
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * Wait for any I/O to complete, after which there had better not
	 * be any references left on the object.
	 */
	vm_object_pip_wait(object, "objtrm2");

	if (object->ref_count != 0) {
		panic("vm_object_terminate: object with references, "
		      "ref_count=%d", object->ref_count);
	}

	/*
	 * Cleanup any shared pmaps associated with this object.
	 */
	pmap_object_free(object);

	/*
	 * Now free any remaining pages.  For internal objects, this also
	 * removes them from paging queues.  Don't free wired pages, just
	 * remove them from the object.
	 */
	info.count = 0;
	info.object = object;
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_object_terminate_callback, &info);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Wait for the object hold count to hit 1, clean out pages as
	 * we go.  vmobj_token interlocks any race conditions that might
	 * pick the object up from the vm_object_list after we have cleared
	 * rb_memq.
	 */
	for (;;) {
		if (RB_ROOT(&object->rb_memq) == NULL)
			break;
		kprintf("vm_object_terminate: Warning, object %p "
			"still has %ld pages\n",
			object, object->resident_page_count);
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
					vm_object_terminate_callback, &info);
	}

	/*
	 * There had better not be any pages left
	 */
	KKASSERT(object->resident_page_count == 0);

	/*
	 * Remove the object from the global object list.
	 */
	hash = VMOBJ_HASH(object);
	lwkt_gettoken(&hash->token);
	TAILQ_REMOVE(&hash->list, object, object_list);
	lwkt_reltoken(&hash->token);

	if (object->ref_count != 0) {
		panic("vm_object_terminate2: object with references, "
		      "ref_count=%d", object->ref_count);
	}

	/*
	 * NOTE: The object hold_count is at least 1, so we cannot kfree()
	 *	 the object here.  See vm_object_drop().
	 */
}

/*
 * The caller must hold the object.
 */
static int
vm_object_terminate_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_object_t object;

	if ((++info->count & 63) == 0)
		lwkt_user_yield();
	object = p->object;
	if (object != info->object) {
		kprintf("vm_object_terminate_callback: obj/pg race %p/%p\n",
			info->object, p);
		return(0);
	}
	vm_page_busy_wait(p, TRUE, "vmpgtrm");
	if (object != p->object) {
		kprintf("vm_object_terminate: Warning: Encountered "
			"busied page %p on queue %d\n", p, p->queue);
		vm_page_wakeup(p);
	} else if (p->wire_count == 0) {
		/*
		 * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
		 */
		vm_page_free(p);
		mycpu->gd_cnt.v_pfree++;
	} else {
		if (p->queue != PQ_NONE)
			kprintf("vm_object_terminate: Warning: Encountered "
				"wired page %p on queue %d\n", p, p->queue);
		vm_page_remove(p);
		vm_page_wakeup(p);
	}
	return(0);
}

/*
 * Clean all dirty pages in the specified range of object.  Leaves page
 * on whatever queue it is currently on.  If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked? XXX
 */
static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
static int vm_object_page_clean_pass2(struct vm_page *p, void *data);

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		     int flags)
{
	struct rb_vm_page_scan_info info;
	struct vnode *vp;
	int wholescan;
	int pagerflags;
	int generation;

	vm_object_hold(object);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
		vm_object_drop(object);
		return;
	}

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	/*
	 * Interlock other major object operations.  This allows us to
	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
	 */
	vm_object_set_flag(object, OBJ_CLEANING);

	/*
	 * Handle 'entire object' case
	 */
	info.start_pindex = start;
	if (end == 0) {
		info.end_pindex = object->size - 1;
	} else {
		info.end_pindex = end - 1;
	}
	wholescan = (start == 0 && info.end_pindex == object->size - 1);
	info.limit = flags;
	info.pagerflags = pagerflags;
	info.object = object;

	/*
	 * If cleaning the entire object do a pass to mark the pages read-only.
	 * If everything worked out ok, clear OBJ_WRITEABLE and
	 * OBJ_MIGHTBEDIRTY.
	 */
	if (wholescan) {
		info.error = 0;
		info.count = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass1, &info);
		if (info.error == 0) {
			vm_object_clear_flag(object,
					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
			if (object->type == OBJT_VNODE &&
			    (vp = (struct vnode *)object->handle) != NULL) {
				/*
				 * Use new-style interface to clear VISDIRTY
				 * because the vnode is not necessarily removed
				 * from the syncer list(s) as often as it was
				 * under the old interface, which can leave
				 * the vnode on the syncer list after reclaim.
				 */
				vclrobjdirty(vp);
			}
		}
	}

	/*
	 * Do a pass to clean all the dirty pages we find.
	 */
	do {
		info.error = 0;
		info.count = 0;
		generation = object->generation;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass2, &info);
	} while (info.error || generation != object->generation);

	vm_object_clear_flag(object, OBJ_CLEANING);
	vm_object_drop(object);
}

/*
 * The caller must hold the object.
 */
static
int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if ((++info->count & 63) == 0)
		lwkt_user_yield();
	if (p->object != info->object ||
	    p->pindex < info->start_pindex ||
	    p->pindex > info->end_pindex) {
		kprintf("vm_object_page_clean_pass1: obj/pg race %p/%p\n",
			info->object, p);
		return(0);
	}
	vm_page_flag_set(p, PG_CLEANCHK);
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		info->error = 1;
	} else if (vm_page_busy_try(p, FALSE) == 0) {
		if (p->object == info->object)
			vm_page_protect(p, VM_PROT_READ);
		vm_page_wakeup(p);
	} else {
		info->error = 1;
	}
	return(0);
}

/*
 * The caller must hold the object
 */
static
int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int generation;

	if (p->object != info->object ||
	    p->pindex < info->start_pindex ||
	    p->pindex > info->end_pindex) {
		kprintf("vm_object_page_clean_pass2: obj/pg race %p/%p\n",
			info->object, p);
		return(0);
	}

	/*
	 * Do not mess with pages that were inserted after we started
	 * the cleaning pass.
	 */
	if ((p->flags & PG_CLEANCHK) == 0)
		goto done;

	generation = info->object->generation;
	vm_page_busy_wait(p, TRUE, "vpcwai");

	if (p->object != info->object ||
	    p->pindex < info->start_pindex ||
	    p->pindex > info->end_pindex ||
	    info->object->generation != generation) {
		info->error = 1;
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * Before wasting time traversing the pmaps, check for trivial
	 * cases where the page cannot be dirty.
	 */
	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
		KKASSERT((p->dirty & p->valid) == 0 &&
			 (p->flags & PG_NEED_COMMIT) == 0);
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * Check whether the page is dirty or not.  The page has been set
	 * to be read-only so the check will not race a user dirtying the
	 * page.
	 */
	vm_page_test_dirty(p);
	if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were
	 * not cleared in this case (because pass1 will have returned an
	 * error), so we do not have to set them.
	 */
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
	 * the pages that get successfully flushed.  Set info->error if
	 * we raced an object modification.
	 */
	vm_object_page_collect_flush(info->object, p, info->pagerflags);
	/* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
done:
	if ((++info->count & 63) == 0)
		lwkt_user_yield();

	return(0);
}

/*
 * Collect the specified page and nearby pages and flush them out.
 * The number of pages flushed is returned.  The passed page is busied
 * by the caller and we are responsible for its disposition.
 *
 * The caller must hold the object.
 */
static void
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
{
	int error;
	int is;
	int ib;
	int i;
	int page_base;
	vm_pindex_t pi;
	vm_page_t ma[BLIST_MAX_ALLOC];

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	pi = p->pindex;
	page_base = pi % BLIST_MAX_ALLOC;
	ma[page_base] = p;
	ib = page_base - 1;
	is = page_base + 1;

	while (ib >= 0) {
		vm_page_t tp;

		tp = vm_page_lookup_busy_try(object, pi - page_base + ib,
					     TRUE, &error);
		if (error)
			break;
		if (tp == NULL)
			break;
		if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
		    (tp->flags & PG_CLEANCHK) == 0) {
			vm_page_wakeup(tp);
			break;
		}
		if ((tp->queue - tp->pc) == PQ_CACHE) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		vm_page_test_dirty(tp);
		if ((tp->dirty & tp->valid) == 0 &&
		    (tp->flags & PG_NEED_COMMIT) == 0) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		ma[ib] = tp;
		--ib;
	}
	++ib;	/* fixup */

	while (is < BLIST_MAX_ALLOC &&
	       pi - page_base + is < object->size) {
		vm_page_t tp;

		tp = vm_page_lookup_busy_try(object, pi - page_base + is,
					     TRUE, &error);
		if (error)
			break;
		if (tp == NULL)
			break;
		if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
		    (tp->flags & PG_CLEANCHK) == 0) {
			vm_page_wakeup(tp);
			break;
		}
		if ((tp->queue - tp->pc) == PQ_CACHE) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		vm_page_test_dirty(tp);
		if ((tp->dirty & tp->valid) == 0 &&
		    (tp->flags & PG_NEED_COMMIT) == 0) {
			vm_page_flag_clear(tp, PG_CLEANCHK);
			vm_page_wakeup(tp);
			break;
		}
		ma[is] = tp;
		++is;
	}

	/*
	 * All pages in the ma[] array are busied now
	 */
	for (i = ib; i < is; ++i) {
		vm_page_flag_clear(ma[i], PG_CLEANCHK);
		vm_page_hold(ma[i]);	/* XXX need this any more? */
	}
	vm_pageout_flush(&ma[ib], is - ib, pagerflags);
	for (i = ib; i < is; ++i)	/* XXX need this any more? */
		vm_page_unhold(ma[i]);
}

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 *	 vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	vm_object_hold(object);
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
	vm_object_drop(object);
}

/*
 * Removes all physical pages in the specified object range from all
 * physical maps.
 *
 * The object must *not* be locked.
 */

static int vm_object_pmap_remove_callback(vm_page_t p, void *data);

void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	struct rb_vm_page_scan_info info;

	if (object == NULL)
		return;
	info.start_pindex = start;
	info.end_pindex = end - 1;
	info.count = 0;
	info.object = object;

	vm_object_hold(object);
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				vm_object_pmap_remove_callback, &info);
	if (start == 0 && end == object->size)
		vm_object_clear_flag(object, OBJ_WRITEABLE);
	vm_object_drop(object);
}

/*
 * The caller must hold the object
 */
static int
vm_object_pmap_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if ((++info->count & 63) == 0)
		lwkt_user_yield();

	if (info->object != p->object ||
	    p->pindex < info->start_pindex ||
	    p->pindex > info->end_pindex) {
		kprintf("vm_object_pmap_remove_callback: obj/pg race %p/%p\n",
			info->object, p);
		return(0);
	}

	vm_page_protect(p, VM_PROT_NONE);

	return(0);
}

/*
 * Implements the madvise function at the object/page level.
 *
 * MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 * MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
 *
 * No requirements.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_object_t xobj;
	vm_page_t m;
	int error;

	if (object == NULL)
		return;

	end = pindex + count;

	vm_object_hold(object);
	tobject = object;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		if (tobject != object)
			vm_object_drop(tobject);
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error);

		if (error) {
			vm_page_sleep_busy(m, TRUE, "madvpo");
			goto relookup;
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			while ((xobj = tobject->backing_object) != NULL) {
				KKASSERT(xobj != object);
				vm_object_hold(xobj);
				if (xobj == tobject->backing_object)
					break;
				vm_object_drop(xobj);
			}
			if (xobj == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object) {
				vm_object_lock_swap();
				vm_object_drop(tobject);
			}
			tobject = xobj;
			goto shadowlookup;
		}

		/*
		 * If the page is not in a normal active state, we skip it.
		 * If the page is not managed there are no page queues to
		 * mess with.  Things can break if we mess with pages in
		 * any of the below states.
		 */
		if (m->wire_count ||
		    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			vm_page_wakeup(m);
			continue;
		}

		/*
		 * Theoretically once a page is known not to be busy, an
		 * interrupt cannot come along and rip it out from under us.
		 */

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
		vm_page_wakeup(m);
	}
	if (tobject != object)
		vm_object_drop(tobject);
	vm_object_drop(object);
}

/*
 * Create a new object which is backed by the specified existing object
 * range.  Replace the pointer and offset that was pointing at the existing
 * object with the pointer/offset for the new object.
 *
 * If addref is non-zero the returned object is given an additional reference.
 * This mechanic exists to avoid the situation where refs might be 1 and
 * race against a collapse when the caller intends to bump it.  So the
 * caller cannot add the ref after the fact.  Used when the caller is
 * duplicating a vm_map_entry.
 *
 * No other requirements.
 */
void
vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
		 int addref)
{
	vm_object_t source;
	vm_object_t result;
	int useshadowlist;

	source = *objectp;

	/*
	 * Don't create the new object if the old object isn't shared.
	 * We have to chain wait before adding the reference to avoid
	 * racing a collapse or deallocation.
	 *
	 * Clear OBJ_ONEMAPPING flag when shadowing.
	 *
	 * The caller owns a ref on source via *objectp which we are going
	 * to replace.  This ref is inherited by the backing_object assignment
	 * and does not need to be incremented here.
	 *
	 * However, we add a temporary extra reference to the original source
	 * prior to holding nobject in case we block, to avoid races where
	 * someone else might believe that the source can be collapsed.
	 */
	useshadowlist = 0;
	if (source) {
		if (source->type != OBJT_VNODE) {
			useshadowlist = 1;
			vm_object_hold(source);
			vm_object_chain_wait(source, 0);
			if (source->ref_count == 1 &&
			    source->handle == NULL &&
			    (source->type == OBJT_DEFAULT ||
			     source->type == OBJT_SWAP)) {
				if (addref) {
					vm_object_reference_locked(source);
					vm_object_clear_flag(source,
							     OBJ_ONEMAPPING);
				}
				vm_object_drop(source);
				return;
			}
			vm_object_reference_locked(source);
			vm_object_clear_flag(source, OBJ_ONEMAPPING);
		} else {
			vm_object_reference_quick(source);
			vm_object_clear_flag(source, OBJ_ONEMAPPING);
		}
	}

	/*
	 * Allocate a new object with the given length.  The new object
	 * is returned referenced but we may have to add another one.
	 * If we are adding a second reference we must clear OBJ_ONEMAPPING.
	 * (typically because the caller is about to clone a vm_map_entry).
	 *
	 * The source object currently has an extra reference to prevent
	 * collapses into it while we mess with its shadow list, which
	 * we will remove later in this routine.
	 *
	 * The target object may require a second reference if asked for one
	 * by the caller.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);
	if (result == NULL)
		panic("vm_object_shadow: no object for shadowing");
	vm_object_hold(result);
	if (addref) {
		vm_object_reference_locked(result);
		vm_object_clear_flag(result, OBJ_ONEMAPPING);
	}

	/*
	 * The new object shadows the source object.  Chain wait before
	 * adjusting shadow_count or the shadow list to avoid races.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 *
	 * The backing_object reference to source requires adding a ref to
	 * source.  We simply inherit the ref from the original *objectp
	 * (which we are replacing) so no additional refs need to be added.
	 * (we must still clean up the extra ref we had to prevent collapse
	 * races).
	 *
	 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
	 */
	KKASSERT(result->backing_object == NULL);
	result->backing_object = source;
	if (source) {
		if (useshadowlist) {
			vm_object_chain_wait(source, 0);
			LIST_INSERT_HEAD(&source->shadow_head,
					 result, shadow_list);
			source->shadow_count++;
			source->generation++;
			vm_object_set_flag(result, OBJ_ONSHADOW);
		}
		/* cpu localization twist */
		result->pg_color = vm_quickcolor();
	}

	/*
	 * Adjust the return storage.  Drop the ref on source before
	 * returning.
	 */
	result->backing_object_offset = *offset;
	vm_object_drop(result);
	*offset = 0;
	if (source) {
		if (useshadowlist) {
			vm_object_deallocate_locked(source);
			vm_object_drop(source);
		} else {
			vm_object_deallocate(source);
		}
	}

	/*
	 * Return the new things
	 */
	*objectp = result;
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int vm_object_backing_scan_callback(vm_page_t p, void *data);

/*
 * The caller must hold the object.
 */
static __inline int
vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op)
{
	struct rb_vm_page_scan_info info;
	struct vm_object_hash *hash;

	vm_object_assert_held(object);
	vm_object_assert_held(backing_object);

	KKASSERT(backing_object == object->backing_object);
	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT)
			return(0);
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
		vm_object_set_flag(backing_object, OBJ_DEAD);

		hash = VMOBJ_HASH(backing_object);
		lwkt_gettoken(&hash->token);
		TAILQ_REMOVE(&hash->list, backing_object, object_list);
		lwkt_reltoken(&hash->token);
	}

	/*
	 * Our scan.  We have to retry if a negative error code is returned,
	 * otherwise 0 or 1 will be returned in info.error.  0 Indicates that
	 * the scan had to be stopped because the parent does not completely
	 * shadow the child.
	 */
	info.object = object;
	info.backing_object = backing_object;
	info.limit = op;
	info.count = 0;
	do {
		info.error = 1;
		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
					vm_object_backing_scan_callback,
					&info);
	} while (info.error < 0);

	return(info.error);
}

/*
 * The caller must hold the object.
2039 */ 2040 static int 2041 vm_object_backing_scan_callback(vm_page_t p, void *data) 2042 { 2043 struct rb_vm_page_scan_info *info = data; 2044 vm_object_t backing_object; 2045 vm_object_t object; 2046 vm_pindex_t pindex; 2047 vm_pindex_t new_pindex; 2048 vm_pindex_t backing_offset_index; 2049 int op; 2050 2051 pindex = p->pindex; 2052 new_pindex = pindex - info->backing_offset_index; 2053 op = info->limit; 2054 object = info->object; 2055 backing_object = info->backing_object; 2056 backing_offset_index = info->backing_offset_index; 2057 2058 if (op & OBSC_TEST_ALL_SHADOWED) { 2059 vm_page_t pp; 2060 2061 /* 2062 * Ignore pages outside the parent object's range 2063 * and outside the parent object's mapping of the 2064 * backing object. 2065 * 2066 * note that we do not busy the backing object's 2067 * page. 2068 */ 2069 if (pindex < backing_offset_index || 2070 new_pindex >= object->size 2071 ) { 2072 return(0); 2073 } 2074 2075 /* 2076 * See if the parent has the page or if the parent's 2077 * object pager has the page. If the parent has the 2078 * page but the page is not valid, the parent's 2079 * object pager must have the page. 2080 * 2081 * If this fails, the parent does not completely shadow 2082 * the object and we might as well give up now. 2083 */ 2084 pp = vm_page_lookup(object, new_pindex); 2085 if ((pp == NULL || pp->valid == 0) && 2086 !vm_pager_has_page(object, new_pindex) 2087 ) { 2088 info->error = 0; /* problemo */ 2089 return(-1); /* stop the scan */ 2090 } 2091 } 2092 2093 /* 2094 * Check for busy page. Note that we may have lost (p) when we 2095 * possibly blocked above. 2096 */ 2097 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { 2098 vm_page_t pp; 2099 2100 if (vm_page_busy_try(p, TRUE)) { 2101 if (op & OBSC_COLLAPSE_NOWAIT) { 2102 return(0); 2103 } else { 2104 /* 2105 * If we slept, anything could have 2106 * happened. Ask that the scan be restarted. 2107 * 2108 * Since the object is marked dead, the 2109 * backing offset should not have changed. 2110 */ 2111 vm_page_sleep_busy(p, TRUE, "vmocol"); 2112 info->error = -1; 2113 return(-1); 2114 } 2115 } 2116 2117 /* 2118 * If (p) is no longer valid restart the scan. 2119 */ 2120 if (p->object != backing_object || p->pindex != pindex) { 2121 kprintf("vm_object_backing_scan: Warning: page " 2122 "%p ripped out from under us\n", p); 2123 vm_page_wakeup(p); 2124 info->error = -1; 2125 return(-1); 2126 } 2127 2128 if (op & OBSC_COLLAPSE_NOWAIT) { 2129 if (p->valid == 0 || 2130 p->wire_count || 2131 (p->flags & PG_NEED_COMMIT)) { 2132 vm_page_wakeup(p); 2133 return(0); 2134 } 2135 } else { 2136 /* XXX what if p->valid == 0 , hold_count, etc? */ 2137 } 2138 2139 KASSERT( 2140 p->object == backing_object, 2141 ("vm_object_qcollapse(): object mismatch") 2142 ); 2143 2144 /* 2145 * Destroy any associated swap 2146 */ 2147 if (backing_object->type == OBJT_SWAP) 2148 swap_pager_freespace(backing_object, p->pindex, 1); 2149 2150 if ( 2151 p->pindex < backing_offset_index || 2152 new_pindex >= object->size 2153 ) { 2154 /* 2155 * Page is out of the parent object's range, we 2156 * can simply destroy it. 2157 */ 2158 vm_page_protect(p, VM_PROT_NONE); 2159 vm_page_free(p); 2160 return(0); 2161 } 2162 2163 pp = vm_page_lookup(object, new_pindex); 2164 if (pp != NULL || vm_pager_has_page(object, new_pindex)) { 2165 /* 2166 * page already exists in parent OR swap exists 2167 * for this location in the parent. Destroy 2168 * the original page from the backing object. 
2169 * 2170 * Leave the parent's page alone 2171 */ 2172 vm_page_protect(p, VM_PROT_NONE); 2173 vm_page_free(p); 2174 return(0); 2175 } 2176 2177 /* 2178 * Page does not exist in parent, rename the 2179 * page from the backing object to the main object. 2180 * 2181 * If the page was mapped to a process, it can remain 2182 * mapped through the rename. 2183 */ 2184 if ((p->queue - p->pc) == PQ_CACHE) 2185 vm_page_deactivate(p); 2186 2187 vm_page_rename(p, object, new_pindex); 2188 vm_page_wakeup(p); 2189 /* page automatically made dirty by rename */ 2190 } 2191 return(0); 2192 } 2193 2194 /* 2195 * This version of collapse allows the operation to occur earlier and 2196 * when paging_in_progress is true for an object... This is not a complete 2197 * operation, but should plug 99.9% of the rest of the leaks. 2198 * 2199 * The caller must hold the object and backing_object and both must be 2200 * chainlocked. 2201 * 2202 * (only called from vm_object_collapse) 2203 */ 2204 static void 2205 vm_object_qcollapse(vm_object_t object, vm_object_t backing_object) 2206 { 2207 if (backing_object->ref_count == 1) { 2208 atomic_add_int(&backing_object->ref_count, 2); 2209 #if defined(DEBUG_LOCKS) 2210 debugvm_object_add(backing_object, "qcollapse", 1, 2); 2211 #endif 2212 vm_object_backing_scan(object, backing_object, 2213 OBSC_COLLAPSE_NOWAIT); 2214 atomic_add_int(&backing_object->ref_count, -2); 2215 #if defined(DEBUG_LOCKS) 2216 debugvm_object_add(backing_object, "qcollapse", 2, -2); 2217 #endif 2218 } 2219 } 2220 2221 /* 2222 * Collapse an object with the object backing it. Pages in the backing 2223 * object are moved into the parent, and the backing object is deallocated. 2224 * Any conflict is resolved in favor of the parent's existing pages. 2225 * 2226 * object must be held and chain-locked on call. 2227 * 2228 * The caller must have an extra ref on object to prevent a race from 2229 * destroying it during the collapse. 2230 */ 2231 void 2232 vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) 2233 { 2234 struct vm_object_dealloc_list *dlist = NULL; 2235 vm_object_t backing_object; 2236 2237 /* 2238 * Only one thread is attempting a collapse at any given moment. 2239 * There are few restrictions for (object) that callers of this 2240 * function check so reentrancy is likely. 2241 */ 2242 KKASSERT(object != NULL); 2243 vm_object_assert_held(object); 2244 KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL)); 2245 2246 for (;;) { 2247 vm_object_t bbobj; 2248 int dodealloc; 2249 2250 /* 2251 * We can only collapse a DEFAULT/SWAP object with a 2252 * DEFAULT/SWAP object. 2253 */ 2254 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) { 2255 backing_object = NULL; 2256 break; 2257 } 2258 2259 backing_object = object->backing_object; 2260 if (backing_object == NULL) 2261 break; 2262 if (backing_object->type != OBJT_DEFAULT && 2263 backing_object->type != OBJT_SWAP) { 2264 backing_object = NULL; 2265 break; 2266 } 2267 2268 /* 2269 * Hold the backing_object and check for races 2270 */ 2271 vm_object_hold(backing_object); 2272 if (backing_object != object->backing_object || 2273 (backing_object->type != OBJT_DEFAULT && 2274 backing_object->type != OBJT_SWAP)) { 2275 vm_object_drop(backing_object); 2276 continue; 2277 } 2278 2279 /* 2280 * Chain-lock the backing object too because if we 2281 * successfully merge its pages into the top object we 2282 * will collapse backing_object->backing_object as the 2283 * new backing_object. 
Re-check that it is still our 2284 * backing object. 2285 */ 2286 vm_object_chain_acquire(backing_object, 0); 2287 if (backing_object != object->backing_object) { 2288 vm_object_chain_release(backing_object); 2289 vm_object_drop(backing_object); 2290 continue; 2291 } 2292 2293 /* 2294 * we check the backing object first, because it is most likely 2295 * not collapsable. 2296 */ 2297 if (backing_object->handle != NULL || 2298 (backing_object->type != OBJT_DEFAULT && 2299 backing_object->type != OBJT_SWAP) || 2300 (backing_object->flags & OBJ_DEAD) || 2301 object->handle != NULL || 2302 (object->type != OBJT_DEFAULT && 2303 object->type != OBJT_SWAP) || 2304 (object->flags & OBJ_DEAD)) { 2305 break; 2306 } 2307 2308 /* 2309 * If paging is in progress we can't do a normal collapse. 2310 */ 2311 if ( 2312 object->paging_in_progress != 0 || 2313 backing_object->paging_in_progress != 0 2314 ) { 2315 vm_object_qcollapse(object, backing_object); 2316 break; 2317 } 2318 2319 /* 2320 * We know that we can either collapse the backing object (if 2321 * the parent is the only reference to it) or (perhaps) have 2322 * the parent bypass the object if the parent happens to shadow 2323 * all the resident pages in the entire backing object. 2324 * 2325 * This is ignoring pager-backed pages such as swap pages. 2326 * vm_object_backing_scan fails the shadowing test in this 2327 * case. 2328 */ 2329 if (backing_object->ref_count == 1) { 2330 /* 2331 * If there is exactly one reference to the backing 2332 * object, we can collapse it into the parent. 2333 */ 2334 KKASSERT(object->backing_object == backing_object); 2335 vm_object_backing_scan(object, backing_object, 2336 OBSC_COLLAPSE_WAIT); 2337 2338 /* 2339 * Move the pager from backing_object to object. 2340 */ 2341 if (backing_object->type == OBJT_SWAP) { 2342 vm_object_pip_add(backing_object, 1); 2343 2344 /* 2345 * scrap the paging_offset junk and do a 2346 * discrete copy. This also removes major 2347 * assumptions about how the swap-pager 2348 * works from where it doesn't belong. The 2349 * new swapper is able to optimize the 2350 * destroy-source case. 2351 */ 2352 vm_object_pip_add(object, 1); 2353 swap_pager_copy(backing_object, object, 2354 OFF_TO_IDX(object->backing_object_offset), 2355 TRUE); 2356 vm_object_pip_wakeup(object); 2357 vm_object_pip_wakeup(backing_object); 2358 } 2359 2360 /* 2361 * Object now shadows whatever backing_object did. 2362 * Remove object from backing_object's shadow_list. 2363 * 2364 * Removing object from backing_objects shadow list 2365 * requires releasing object, which we will do below. 2366 */ 2367 KKASSERT(object->backing_object == backing_object); 2368 if (object->flags & OBJ_ONSHADOW) { 2369 LIST_REMOVE(object, shadow_list); 2370 backing_object->shadow_count--; 2371 backing_object->generation++; 2372 vm_object_clear_flag(object, OBJ_ONSHADOW); 2373 } 2374 2375 /* 2376 * backing_object->backing_object moves from within 2377 * backing_object to within object. 2378 * 2379 * OBJT_VNODE bbobj's should have empty shadow lists. 2380 */ 2381 while ((bbobj = backing_object->backing_object) != NULL) { 2382 if (bbobj->type == OBJT_VNODE) 2383 vm_object_hold_shared(bbobj); 2384 else 2385 vm_object_hold(bbobj); 2386 if (bbobj == backing_object->backing_object) 2387 break; 2388 vm_object_drop(bbobj); 2389 } 2390 2391 /* 2392 * We are removing backing_object from bbobj's 2393 * shadow list and adding object to bbobj's shadow 2394 * list, so the ref_count on bbobj is unchanged. 
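			 *
			 * Chain illustration (sketch of the collapse case,
			 * assuming the usual three-deep chain):
			 *
			 *	before:	object -> backing_object -> bbobj
			 *	after:	object -> bbobj
			 *
			 * backing_object's pages and pager were merged into
			 * object above, and backing_object itself is
			 * terminated below.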
2395 */ 2396 if (bbobj) { 2397 if (backing_object->flags & OBJ_ONSHADOW) { 2398 /* not locked exclusively if vnode */ 2399 KKASSERT(bbobj->type != OBJT_VNODE); 2400 LIST_REMOVE(backing_object, 2401 shadow_list); 2402 bbobj->shadow_count--; 2403 bbobj->generation++; 2404 vm_object_clear_flag(backing_object, 2405 OBJ_ONSHADOW); 2406 } 2407 backing_object->backing_object = NULL; 2408 } 2409 object->backing_object = bbobj; 2410 if (bbobj) { 2411 if (bbobj->type != OBJT_VNODE) { 2412 LIST_INSERT_HEAD(&bbobj->shadow_head, 2413 object, shadow_list); 2414 bbobj->shadow_count++; 2415 bbobj->generation++; 2416 vm_object_set_flag(object, 2417 OBJ_ONSHADOW); 2418 } 2419 } 2420 2421 object->backing_object_offset += 2422 backing_object->backing_object_offset; 2423 2424 vm_object_drop(bbobj); 2425 2426 /* 2427 * Discard the old backing_object. Nothing should be 2428 * able to ref it, other than a vm_map_split(), 2429 * and vm_map_split() will stall on our chain lock. 2430 * And we control the parent so it shouldn't be 2431 * possible for it to go away either. 2432 * 2433 * Since the backing object has no pages, no pager 2434 * left, and no object references within it, all 2435 * that is necessary is to dispose of it. 2436 */ 2437 KASSERT(backing_object->ref_count == 1, 2438 ("backing_object %p was somehow " 2439 "re-referenced during collapse!", 2440 backing_object)); 2441 KASSERT(RB_EMPTY(&backing_object->rb_memq), 2442 ("backing_object %p somehow has left " 2443 "over pages during collapse!", 2444 backing_object)); 2445 2446 /* 2447 * The object can be destroyed. 2448 * 2449 * XXX just fall through and dodealloc instead 2450 * of forcing destruction? 2451 */ 2452 atomic_add_int(&backing_object->ref_count, -1); 2453 #if defined(DEBUG_LOCKS) 2454 debugvm_object_add(backing_object, "collapse", 1, -1); 2455 #endif 2456 if ((backing_object->flags & OBJ_DEAD) == 0) 2457 vm_object_terminate(backing_object); 2458 object_collapses++; 2459 dodealloc = 0; 2460 } else { 2461 /* 2462 * If we do not entirely shadow the backing object, 2463 * there is nothing we can do so we give up. 2464 */ 2465 if (vm_object_backing_scan(object, backing_object, 2466 OBSC_TEST_ALL_SHADOWED) == 0) { 2467 break; 2468 } 2469 2470 /* 2471 * bbobj is backing_object->backing_object. Since 2472 * object completely shadows backing_object we can 2473 * bypass it and become backed by bbobj instead. 2474 * 2475 * The shadow list for vnode backing objects is not 2476 * used and a shared hold is allowed. 2477 */ 2478 while ((bbobj = backing_object->backing_object) != NULL) { 2479 if (bbobj->type == OBJT_VNODE) 2480 vm_object_hold_shared(bbobj); 2481 else 2482 vm_object_hold(bbobj); 2483 if (bbobj == backing_object->backing_object) 2484 break; 2485 vm_object_drop(bbobj); 2486 } 2487 2488 /* 2489 * Make object shadow bbobj instead of backing_object. 2490 * Remove object from backing_object's shadow list. 2491 * 2492 * Deallocating backing_object will not remove 2493 * it, since its reference count is at least 2. 2494 * 2495 * Removing object from backing_object's shadow 2496 * list requires releasing a ref, which we do 2497 * below by setting dodealloc to 1. 2498 */ 2499 KKASSERT(object->backing_object == backing_object); 2500 if (object->flags & OBJ_ONSHADOW) { 2501 LIST_REMOVE(object, shadow_list); 2502 backing_object->shadow_count--; 2503 backing_object->generation++; 2504 vm_object_clear_flag(object, OBJ_ONSHADOW); 2505 } 2506 2507 /* 2508 * Add a ref to bbobj, bbobj now shadows object. 
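			 *
			 * Chain illustration (sketch of the bypass case):
			 *
			 *	before:	object -> backing_object -> bbobj
			 *	after:	object -> bbobj, while
			 *		backing_object -> bbobj remains for
			 *		backing_object's other referencers.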
2509 * 2510 * NOTE: backing_object->backing_object still points 2511 * to bbobj. That relationship remains intact 2512 * because backing_object has > 1 ref, so 2513 * someone else is pointing to it (hence why 2514 * we can't collapse it into object and can 2515 * only handle the all-shadowed bypass case). 2516 */ 2517 if (bbobj) { 2518 if (bbobj->type != OBJT_VNODE) { 2519 vm_object_chain_wait(bbobj, 0); 2520 vm_object_reference_locked(bbobj); 2521 LIST_INSERT_HEAD(&bbobj->shadow_head, 2522 object, shadow_list); 2523 bbobj->shadow_count++; 2524 bbobj->generation++; 2525 vm_object_set_flag(object, 2526 OBJ_ONSHADOW); 2527 } else { 2528 vm_object_reference_quick(bbobj); 2529 } 2530 object->backing_object_offset += 2531 backing_object->backing_object_offset; 2532 object->backing_object = bbobj; 2533 vm_object_drop(bbobj); 2534 } else { 2535 object->backing_object = NULL; 2536 } 2537 2538 /* 2539 * Drop the reference count on backing_object. To 2540 * handle ref_count races properly we can't assume 2541 * that the ref_count is still at least 2 so we 2542 * have to actually call vm_object_deallocate() 2543 * (after clearing the chainlock). 2544 */ 2545 object_bypasses++; 2546 dodealloc = 1; 2547 } 2548 2549 /* 2550 * Ok, we want to loop on the new object->bbobj association, 2551 * possibly collapsing it further. However if dodealloc is 2552 * non-zero we have to deallocate the backing_object which 2553 * itself can potentially undergo a collapse, creating a 2554 * recursion depth issue with the LWKT token subsystem. 2555 * 2556 * In the case where we must deallocate the backing_object 2557 * it is possible now that the backing_object has a single 2558 * shadow count on some other object (not represented here 2559 * as yet), since it no longer shadows us. Thus when we 2560 * call vm_object_deallocate() it may attempt to collapse 2561 * itself into its remaining parent. 2562 */ 2563 if (dodealloc) { 2564 struct vm_object_dealloc_list *dtmp; 2565 2566 vm_object_chain_release(backing_object); 2567 vm_object_unlock(backing_object); 2568 /* backing_object remains held */ 2569 2570 /* 2571 * Auto-deallocation list for caller convenience. 2572 */ 2573 if (dlistp == NULL) 2574 dlistp = &dlist; 2575 2576 dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK); 2577 dtmp->object = backing_object; 2578 dtmp->next = *dlistp; 2579 *dlistp = dtmp; 2580 } else { 2581 vm_object_chain_release(backing_object); 2582 vm_object_drop(backing_object); 2583 } 2584 /* backing_object = NULL; not needed */ 2585 /* loop */ 2586 } 2587 2588 /* 2589 * Clean up any left over backing_object 2590 */ 2591 if (backing_object) { 2592 vm_object_chain_release(backing_object); 2593 vm_object_drop(backing_object); 2594 } 2595 2596 /* 2597 * Clean up any auto-deallocation list. This is a convenience 2598 * for top-level callers so they don't have to pass &dlist. 2599 * Do not clean up any caller-passed dlistp, the caller will 2600 * do that. 2601 */ 2602 if (dlist) 2603 vm_object_deallocate_list(&dlist); 2604 2605 } 2606 2607 /* 2608 * vm_object_collapse() may collect additional objects in need of 2609 * deallocation. This routine deallocates these objects. The 2610 * deallocation itself can trigger additional collapses (which the 2611 * deallocate function takes care of). This procedure is used to 2612 * reduce procedural recursion since these vm_object shadow chains 2613 * can become quite long. 
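 *
 * Illustrative caller sketch (hypothetical, for exposition only; assumes
 * the caller already owns a reference on object as vm_object_collapse()
 * requires):
 *
 *	struct vm_object_dealloc_list *dlist = NULL;
 *
 *	vm_object_hold(object);
 *	vm_object_chain_acquire(object, 0);
 *	vm_object_collapse(object, &dlist);
 *	vm_object_chain_release(object);
 *	vm_object_drop(object);
 *	vm_object_deallocate_list(&dlist);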
 */
void
vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
{
	struct vm_object_dealloc_list *dlist;

	while ((dlist = *dlistp) != NULL) {
		*dlistp = dlist->next;
		vm_object_lock(dlist->object);
		vm_object_deallocate_locked(dlist->object);
		vm_object_drop(dlist->object);
		kfree(dlist, M_TEMP);
	}
}

/*
 * Removes all physical pages in the specified object range from the
 * object's list of pages.
 *
 * No requirements.
 */
static int vm_object_page_remove_callback(vm_page_t p, void *data);

void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		      boolean_t clean_only)
{
	struct rb_vm_page_scan_info info;
	int all;

	/*
	 * Degenerate cases and assertions
	 */
	vm_object_hold(object);
	if (object == NULL ||
	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
		vm_object_drop(object);
		return;
	}
	KASSERT(object->type != OBJT_PHYS,
		("attempt to remove pages from a physical object"));

	/*
	 * Indicate that paging is occurring on the object
	 */
	vm_object_pip_add(object, 1);

	/*
	 * Figure out the actual removal range and whether we are removing
	 * the entire contents of the object or not.  If removing the entire
	 * contents, be sure to get all pages, even those that might be
	 * beyond the end of the object.
	 */
	info.object = object;
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = (vm_pindex_t)-1;
	else
		info.end_pindex = end - 1;
	info.limit = clean_only;
	info.count = 0;
	all = (start == 0 && info.end_pindex >= object->size - 1);

	/*
	 * Loop until we are sure we have gotten them all.
	 */
	do {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_remove_callback, &info);
	} while (info.error);

	/*
	 * Remove any related swap if throwing away pages, or for
	 * non-swap objects (the swap is a clean copy in that case).
	 */
	if (object->type != OBJT_SWAP || clean_only == FALSE) {
		if (all)
			swap_pager_freespace_all(object);
		else
			swap_pager_freespace(object, info.start_pindex,
					     info.end_pindex -
					     info.start_pindex + 1);
	}

	/*
	 * Cleanup
	 */
	vm_object_pip_wakeup(object);
	vm_object_drop(object);
}

/*
 * The caller must hold the object.
 *
 * NOTE: User yields are allowed when removing more than one page, but not
 *	 allowed if only removing one page (the path for single page removals
 *	 might hold a spinlock).
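 *
 *	 The callback only yields on every 64th page it visits, so a
 *	 single-page removal never reaches the yield.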
 */
static int
vm_object_page_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if ((++info->count & 63) == 0)
		lwkt_user_yield();

	if (info->object != p->object ||
	    p->pindex < info->start_pindex ||
	    p->pindex > info->end_pindex) {
		kprintf("vm_object_page_remove_callbackA: obj/pg race %p/%p\n",
			info->object, p);
		return(0);
	}
	if (vm_page_busy_try(p, TRUE)) {
		vm_page_sleep_busy(p, TRUE, "vmopar");
		info->error = 1;
		return(0);
	}
	if (info->object != p->object) {
		/* this should never happen */
		kprintf("vm_object_page_remove_callbackB: obj/pg race %p/%p\n",
			info->object, p);
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * Wired pages cannot be destroyed, but they can be invalidated
	 * and we do so if clean_only (limit) is not set.
	 *
	 * WARNING!  The page may be wired due to being part of a buffer
	 *	     cache buffer, and the buffer might be marked B_CACHE.
	 *	     This is fine as part of a truncation but VFSs must be
	 *	     sure to fix the buffer up when re-extending the file.
	 *
	 * NOTE!     PG_NEED_COMMIT is ignored.
	 */
	if (p->wire_count != 0) {
		vm_page_protect(p, VM_PROT_NONE);
		if (info->limit == 0)
			p->valid = 0;
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * limit is our clean_only flag.  If set and the page is dirty or
	 * requires a commit, do not free it.  If set and the page is being
	 * held by someone, do not free it.
	 */
	if (info->limit && p->valid) {
		vm_page_test_dirty(p);
		if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
			vm_page_wakeup(p);
			return(0);
		}
	}

	/*
	 * Destroy the page
	 */
	vm_page_protect(p, VM_PROT_NONE);
	vm_page_free(p);

	return(0);
}

/*
 * Coalesces two objects backing up adjoining regions of memory into a
 * single object.
 *
 * Returns TRUE if the objects were combined.
 *
 * NOTE: Only works at the moment if the second object is NULL -
 *	 if it's not, which object do we lock first?
 *
 * Parameters:
 *	prev_object	First object to coalesce
 *	prev_pindex	Page index within prev_object at which the
 *			coalesced region starts
 *	prev_size	Size of reference to prev_object
 *	next_size	Size of reference to the second (next) object,
 *			which is currently always NULL
 *
 * The caller does not need to hold (prev_object) but must have a stable
 * pointer to it (typically by holding the vm_map locked).
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
		   vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);

	vm_object_hold(prev_object);

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_chain_acquire(prev_object, 0);
	vm_object_collapse(prev_object, NULL);

	/*
	 * Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	vm_object_chain_release(prev_object);
	vm_object_drop(prev_object);
	return (TRUE);
}

/*
 * Make the object writable and flag it as possibly being dirty.
 *
 * The object might not be held (or might be held but held shared),
 * the related vnode is probably not held either.  Object and vnode are
 * stable by virtue of the vm_page busied by the caller preventing
 * destruction.
 *
 * If the related mount is flagged MNTK_THR_SYNC we need to call
 * vsetobjdirty().  Filesystems using this option usually shortcut
 * synchronization by only scanning the syncer list.
 */
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	/*vm_object_assert_held(object);*/
	/*
	 * Avoid contention in vm fault path by checking the state before
	 * issuing an atomic op on it.
	 */
	if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
	    (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			if (vp->v_mount &&
			    (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
				/*
				 * New style THR_SYNC places vnodes on the
				 * syncer list more deterministically.
				 */
				vsetobjdirty(vp);
			} else {
				/*
				 * Old style scan would not necessarily place
				 * a vnode on the syncer list when possibly
				 * modified via mmap.
				 */
				vsetflags(vp, VOBJDIRTY);
			}
		}
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
				   vm_map_entry_t entry);
static int	vm_object_in_map (vm_object_t object);

/*
 * The caller must hold the object.
2934 */ 2935 static int 2936 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2937 { 2938 vm_map_t tmpm; 2939 vm_map_entry_t tmpe; 2940 vm_object_t obj, nobj; 2941 int entcount; 2942 2943 if (map == 0) 2944 return 0; 2945 if (entry == 0) { 2946 tmpe = map->header.next; 2947 entcount = map->nentries; 2948 while (entcount-- && (tmpe != &map->header)) { 2949 if( _vm_object_in_map(map, object, tmpe)) { 2950 return 1; 2951 } 2952 tmpe = tmpe->next; 2953 } 2954 return (0); 2955 } 2956 switch(entry->maptype) { 2957 case VM_MAPTYPE_SUBMAP: 2958 tmpm = entry->object.sub_map; 2959 tmpe = tmpm->header.next; 2960 entcount = tmpm->nentries; 2961 while (entcount-- && tmpe != &tmpm->header) { 2962 if( _vm_object_in_map(tmpm, object, tmpe)) { 2963 return 1; 2964 } 2965 tmpe = tmpe->next; 2966 } 2967 break; 2968 case VM_MAPTYPE_NORMAL: 2969 case VM_MAPTYPE_VPAGETABLE: 2970 obj = entry->object.vm_object; 2971 while (obj) { 2972 if (obj == object) { 2973 if (obj != entry->object.vm_object) 2974 vm_object_drop(obj); 2975 return 1; 2976 } 2977 while ((nobj = obj->backing_object) != NULL) { 2978 vm_object_hold(nobj); 2979 if (nobj == obj->backing_object) 2980 break; 2981 vm_object_drop(nobj); 2982 } 2983 if (obj != entry->object.vm_object) { 2984 if (nobj) 2985 vm_object_lock_swap(); 2986 vm_object_drop(obj); 2987 } 2988 obj = nobj; 2989 } 2990 break; 2991 default: 2992 break; 2993 } 2994 return 0; 2995 } 2996 2997 static int vm_object_in_map_callback(struct proc *p, void *data); 2998 2999 struct vm_object_in_map_info { 3000 vm_object_t object; 3001 int rv; 3002 }; 3003 3004 /* 3005 * Debugging only 3006 */ 3007 static int 3008 vm_object_in_map(vm_object_t object) 3009 { 3010 struct vm_object_in_map_info info; 3011 3012 info.rv = 0; 3013 info.object = object; 3014 3015 allproc_scan(vm_object_in_map_callback, &info); 3016 if (info.rv) 3017 return 1; 3018 if( _vm_object_in_map(&kernel_map, object, 0)) 3019 return 1; 3020 if( _vm_object_in_map(&pager_map, object, 0)) 3021 return 1; 3022 if( _vm_object_in_map(&buffer_map, object, 0)) 3023 return 1; 3024 return 0; 3025 } 3026 3027 /* 3028 * Debugging only 3029 */ 3030 static int 3031 vm_object_in_map_callback(struct proc *p, void *data) 3032 { 3033 struct vm_object_in_map_info *info = data; 3034 3035 if (p->p_vmspace) { 3036 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) { 3037 info->rv = 1; 3038 return -1; 3039 } 3040 } 3041 return (0); 3042 } 3043 3044 DB_SHOW_COMMAND(vmochk, vm_object_check) 3045 { 3046 struct vm_object_hash *hash; 3047 vm_object_t object; 3048 int n; 3049 3050 /* 3051 * make sure that internal objs are in a map somewhere 3052 * and none have zero ref counts. 
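	 *
	 * (Typically run from the ddb prompt as "show vmochk"; debugging
	 * only.)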
3053 */ 3054 for (n = 0; n < VMOBJ_HSIZE; ++n) { 3055 hash = &vm_object_hash[n]; 3056 for (object = TAILQ_FIRST(&hash->list); 3057 object != NULL; 3058 object = TAILQ_NEXT(object, object_list)) { 3059 if (object->type == OBJT_MARKER) 3060 continue; 3061 if (object->handle != NULL || 3062 (object->type != OBJT_DEFAULT && 3063 object->type != OBJT_SWAP)) { 3064 continue; 3065 } 3066 if (object->ref_count == 0) { 3067 db_printf("vmochk: internal obj has " 3068 "zero ref count: %ld\n", 3069 (long)object->size); 3070 } 3071 if (vm_object_in_map(object)) 3072 continue; 3073 db_printf("vmochk: internal obj is not in a map: " 3074 "ref: %d, size: %lu: 0x%lx, " 3075 "backing_object: %p\n", 3076 object->ref_count, (u_long)object->size, 3077 (u_long)object->size, 3078 (void *)object->backing_object); 3079 } 3080 } 3081 } 3082 3083 /* 3084 * Debugging only 3085 */ 3086 DB_SHOW_COMMAND(object, vm_object_print_static) 3087 { 3088 /* XXX convert args. */ 3089 vm_object_t object = (vm_object_t)addr; 3090 boolean_t full = have_addr; 3091 3092 vm_page_t p; 3093 3094 /* XXX count is an (unused) arg. Avoid shadowing it. */ 3095 #define count was_count 3096 3097 int count; 3098 3099 if (object == NULL) 3100 return; 3101 3102 db_iprintf( 3103 "Object %p: type=%d, size=0x%lx, res=%ld, ref=%d, flags=0x%x\n", 3104 object, (int)object->type, (u_long)object->size, 3105 object->resident_page_count, object->ref_count, object->flags); 3106 /* 3107 * XXX no %qd in kernel. Truncate object->backing_object_offset. 3108 */ 3109 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n", 3110 object->shadow_count, 3111 object->backing_object ? object->backing_object->ref_count : 0, 3112 object->backing_object, (long)object->backing_object_offset); 3113 3114 if (!full) 3115 return; 3116 3117 db_indent += 2; 3118 count = 0; 3119 RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) { 3120 if (count == 0) 3121 db_iprintf("memory:="); 3122 else if (count == 6) { 3123 db_printf("\n"); 3124 db_iprintf(" ..."); 3125 count = 0; 3126 } else 3127 db_printf(","); 3128 count++; 3129 3130 db_printf("(off=0x%lx,page=0x%lx)", 3131 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p)); 3132 } 3133 if (count != 0) 3134 db_printf("\n"); 3135 db_indent -= 2; 3136 } 3137 3138 /* XXX. */ 3139 #undef count 3140 3141 /* 3142 * XXX need this non-static entry for calling from vm_map_print. 
3143 * 3144 * Debugging only 3145 */ 3146 void 3147 vm_object_print(/* db_expr_t */ long addr, 3148 boolean_t have_addr, 3149 /* db_expr_t */ long count, 3150 char *modif) 3151 { 3152 vm_object_print_static(addr, have_addr, count, modif); 3153 } 3154 3155 /* 3156 * Debugging only 3157 */ 3158 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 3159 { 3160 struct vm_object_hash *hash; 3161 vm_object_t object; 3162 int nl = 0; 3163 int c; 3164 int n; 3165 3166 for (n = 0; n < VMOBJ_HSIZE; ++n) { 3167 hash = &vm_object_hash[n]; 3168 for (object = TAILQ_FIRST(&hash->list); 3169 object != NULL; 3170 object = TAILQ_NEXT(object, object_list)) { 3171 vm_pindex_t idx, fidx; 3172 vm_pindex_t osize; 3173 vm_paddr_t pa = -1, padiff; 3174 int rcount; 3175 vm_page_t m; 3176 3177 if (object->type == OBJT_MARKER) 3178 continue; 3179 db_printf("new object: %p\n", (void *)object); 3180 if ( nl > 18) { 3181 c = cngetc(); 3182 if (c != ' ') 3183 return; 3184 nl = 0; 3185 } 3186 nl++; 3187 rcount = 0; 3188 fidx = 0; 3189 osize = object->size; 3190 if (osize > 128) 3191 osize = 128; 3192 for (idx = 0; idx < osize; idx++) { 3193 m = vm_page_lookup(object, idx); 3194 if (m == NULL) { 3195 if (rcount) { 3196 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 3197 (long)fidx, rcount, (long)pa); 3198 if ( nl > 18) { 3199 c = cngetc(); 3200 if (c != ' ') 3201 return; 3202 nl = 0; 3203 } 3204 nl++; 3205 rcount = 0; 3206 } 3207 continue; 3208 } 3209 3210 if (rcount && 3211 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 3212 ++rcount; 3213 continue; 3214 } 3215 if (rcount) { 3216 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 3217 padiff >>= PAGE_SHIFT; 3218 padiff &= PQ_L2_MASK; 3219 if (padiff == 0) { 3220 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 3221 ++rcount; 3222 continue; 3223 } 3224 db_printf(" index(%ld)run(%d)pa(0x%lx)", 3225 (long)fidx, rcount, (long)pa); 3226 db_printf("pd(%ld)\n", (long)padiff); 3227 if ( nl > 18) { 3228 c = cngetc(); 3229 if (c != ' ') 3230 return; 3231 nl = 0; 3232 } 3233 nl++; 3234 } 3235 fidx = idx; 3236 pa = VM_PAGE_TO_PHYS(m); 3237 rcount = 1; 3238 } 3239 if (rcount) { 3240 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 3241 (long)fidx, rcount, (long)pa); 3242 if ( nl > 18) { 3243 c = cngetc(); 3244 if (c != ' ') 3245 return; 3246 nl = 0; 3247 } 3248 nl++; 3249 } 3250 } 3251 } 3252 } 3253 #endif /* DDB */ 3254