/*
 * Copyright (c) 1991, 1993, 2013
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 */

/*
 * Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <vm/vm_page2.h>

#include <machine/specialreg.h>

#define EASY_SCAN_FACTOR	8

static void	vm_object_qcollapse(vm_object_t object,
				    vm_object_t backing_object);
static void	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					     int pagerflags);
static void	vm_object_lock_init(vm_object_t);

/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 */

struct vm_object kernel_object;

static long vm_object_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

struct object_q vm_object_lists[VMOBJ_HSIZE];
struct lwkt_token vmobj_tokens[VMOBJ_HSIZE];

#if defined(DEBUG_LOCKS)

#define vm_object_vndeallocate(obj, vpp)	\
	debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)

/*
 * Debug helper to track hold/drop/ref/deallocate calls.
 */
static void
debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
{
	int i;

	i = atomic_fetchadd_int(&obj->debug_index, 1);
	i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
	ksnprintf(obj->debug_hold_thrs[i],
		  sizeof(obj->debug_hold_thrs[i]),
		  "%c%d:(%d):%s",
		  (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
		  (curthread->td_proc ? curthread->td_proc->p_pid : -1),
		  obj->ref_count,
		  curthread->td_comm);
	obj->debug_hold_file[i] = file;
	obj->debug_hold_line[i] = line;
#if 0
	/* Uncomment for debugging obj refs/derefs in reproducible cases */
	if (strcmp(curthread->td_comm, "sshd") == 0) {
		kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
			(curthread->td_proc ?
			curthread->td_proc->p_pid : -1),
			obj, obj->ref_count, addrem, file, line);
	}
#endif
}

#endif

/*
 * Misc low level routines
 */
static void
vm_object_lock_init(vm_object_t obj)
{
#if defined(DEBUG_LOCKS)
	int i;

	obj->debug_index = 0;
	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
		obj->debug_hold_thrs[i][0] = 0;
		obj->debug_hold_file[i] = NULL;
		obj->debug_hold_line[i] = 0;
	}
#endif
}

void
vm_object_lock_swap(void)
{
	lwkt_token_swap();
}

void
vm_object_lock(vm_object_t obj)
{
	lwkt_gettoken(&obj->token);
}

/*
 * Returns TRUE on success
 */
static int
vm_object_lock_try(vm_object_t obj)
{
	return(lwkt_trytoken(&obj->token));
}

void
vm_object_lock_shared(vm_object_t obj)
{
	lwkt_gettoken_shared(&obj->token);
}

void
vm_object_unlock(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
}

void
vm_object_upgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken(&obj->token);
}

void
vm_object_downgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken_shared(&obj->token);
}

static __inline void
vm_object_assert_held(vm_object_t obj)
{
	ASSERT_LWKT_TOKEN_HELD(&obj->token);
}

void
VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}

int
VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	if (vm_object_lock_try(obj) == 0) {
		if (refcount_release(&obj->hold_count)) {
			if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
				zfree(obj_zone, obj);
		}
		return(0);
	}

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
	return(1);
}

void
VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock_shared(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}

/*
 * Drop the token and hold_count on the object.
 *
 * WARNING! Token might be shared.
 */
void
VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS)
{
	if (obj == NULL)
		return;

	/*
	 * No new holders should be possible once we drop hold_count 1->0 as
	 * there is no longer any way to reference the object.
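	 *
	 * A minimal usage sketch of the hold/drop pairing (illustrative
	 * only; it assumes the caller already has a stable path to the
	 * object, e.g. via a held parent object or a map entry):
	 *
	 *	vm_object_hold(obj);		(or vm_object_hold_shared)
	 *	... examine or modify obj ...
	 *	vm_object_drop(obj);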
331 */ 332 KKASSERT(obj->hold_count > 0); 333 if (refcount_release(&obj->hold_count)) { 334 #if defined(DEBUG_LOCKS) 335 debugvm_object_add(obj, file, line, -1); 336 #endif 337 338 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) { 339 vm_object_unlock(obj); 340 zfree(obj_zone, obj); 341 } else { 342 vm_object_unlock(obj); 343 } 344 } else { 345 #if defined(DEBUG_LOCKS) 346 debugvm_object_add(obj, file, line, -1); 347 #endif 348 vm_object_unlock(obj); 349 } 350 } 351 352 /* 353 * Initialize a freshly allocated object, returning a held object. 354 * 355 * Used only by vm_object_allocate() and zinitna(). 356 * 357 * No requirements. 358 */ 359 void 360 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) 361 { 362 int incr; 363 int n; 364 365 RB_INIT(&object->rb_memq); 366 LIST_INIT(&object->shadow_head); 367 lwkt_token_init(&object->token, "vmobj"); 368 369 object->type = type; 370 object->size = size; 371 object->ref_count = 1; 372 object->memattr = VM_MEMATTR_DEFAULT; 373 object->hold_count = 0; 374 object->flags = 0; 375 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP)) 376 vm_object_set_flag(object, OBJ_ONEMAPPING); 377 object->paging_in_progress = 0; 378 object->resident_page_count = 0; 379 object->agg_pv_list_count = 0; 380 object->shadow_count = 0; 381 /* cpu localization twist */ 382 object->pg_color = (int)(intptr_t)curthread; 383 if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1)) 384 incr = PQ_L2_SIZE / 3 + PQ_PRIME1; 385 else 386 incr = size; 387 next_index = (next_index + incr) & PQ_L2_MASK; 388 object->handle = NULL; 389 object->backing_object = NULL; 390 object->backing_object_offset = (vm_ooffset_t)0; 391 392 object->generation++; 393 object->swblock_count = 0; 394 RB_INIT(&object->swblock_root); 395 vm_object_lock_init(object); 396 pmap_object_init(object); 397 398 vm_object_hold(object); 399 400 n = VMOBJ_HASH(object); 401 atomic_add_long(&vm_object_count, 1); 402 lwkt_gettoken(&vmobj_tokens[n]); 403 TAILQ_INSERT_TAIL(&vm_object_lists[n], object, object_list); 404 lwkt_reltoken(&vmobj_tokens[n]); 405 } 406 407 /* 408 * Initialize the VM objects module. 409 * 410 * Called from the low level boot code only. 411 */ 412 void 413 vm_object_init(void) 414 { 415 int i; 416 417 for (i = 0; i < VMOBJ_HSIZE; ++i) { 418 TAILQ_INIT(&vm_object_lists[i]); 419 lwkt_token_init(&vmobj_tokens[i], "vmobjlst"); 420 } 421 422 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd), 423 &kernel_object); 424 vm_object_drop(&kernel_object); 425 426 obj_zone = &obj_zone_store; 427 zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object), 428 vm_objects_init, VM_OBJECTS_INIT); 429 } 430 431 void 432 vm_object_init2(void) 433 { 434 zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1); 435 } 436 437 /* 438 * Allocate and return a new object of the specified type and size. 439 * 440 * No requirements. 441 */ 442 vm_object_t 443 vm_object_allocate(objtype_t type, vm_pindex_t size) 444 { 445 vm_object_t result; 446 447 result = (vm_object_t) zalloc(obj_zone); 448 449 _vm_object_allocate(type, size, result); 450 vm_object_drop(result); 451 452 return (result); 453 } 454 455 /* 456 * This version returns a held object, allowing further atomic initialization 457 * of the object. 
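 *
 * A minimal sketch of the intended pattern (illustrative only; the type
 * and size arguments are just examples):
 *
 *	obj = vm_object_allocate_hold(OBJT_DEFAULT, OFF_TO_IDX(size));
 *	... finish initializing obj while it is still held ...
 *	vm_object_drop(obj);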
458 */ 459 vm_object_t 460 vm_object_allocate_hold(objtype_t type, vm_pindex_t size) 461 { 462 vm_object_t result; 463 464 result = (vm_object_t) zalloc(obj_zone); 465 466 _vm_object_allocate(type, size, result); 467 468 return (result); 469 } 470 471 /* 472 * Add an additional reference to a vm_object. The object must already be 473 * held. The original non-lock version is no longer supported. The object 474 * must NOT be chain locked by anyone at the time the reference is added. 475 * 476 * Referencing a chain-locked object can blow up the fairly sensitive 477 * ref_count and shadow_count tests in the deallocator. Most callers 478 * will call vm_object_chain_wait() prior to calling 479 * vm_object_reference_locked() to avoid the case. 480 * 481 * The object must be held, but may be held shared if desired (hence why 482 * we use an atomic op). 483 */ 484 void 485 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS) 486 { 487 KKASSERT(object != NULL); 488 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 489 KKASSERT((object->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) == 0); 490 atomic_add_int(&object->ref_count, 1); 491 if (object->type == OBJT_VNODE) { 492 vref(object->handle); 493 /* XXX what if the vnode is being destroyed? */ 494 } 495 #if defined(DEBUG_LOCKS) 496 debugvm_object_add(object, file, line, 1); 497 #endif 498 } 499 500 /* 501 * This version is only allowed for vnode objects. 502 */ 503 void 504 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS) 505 { 506 KKASSERT(object->type == OBJT_VNODE); 507 atomic_add_int(&object->ref_count, 1); 508 vref(object->handle); 509 #if defined(DEBUG_LOCKS) 510 debugvm_object_add(object, file, line, 1); 511 #endif 512 } 513 514 /* 515 * Object OBJ_CHAINLOCK lock handling. 516 * 517 * The caller can chain-lock backing objects recursively and then 518 * use vm_object_chain_release_all() to undo the whole chain. 519 * 520 * Chain locks are used to prevent collapses and are only applicable 521 * to OBJT_DEFAULT and OBJT_SWAP objects. Chain locking operations 522 * on other object types are ignored. This is also important because 523 * it allows e.g. the vnode underlying a memory mapping to take concurrent 524 * faults. 525 * 526 * The object must usually be held on entry, though intermediate 527 * objects need not be held on release. The object must be held exclusively, 528 * NOT shared. Note that the prefault path checks the shared state and 529 * avoids using the chain functions. 
530 */ 531 void 532 vm_object_chain_wait(vm_object_t object, int shared) 533 { 534 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 535 for (;;) { 536 uint32_t chainlk = object->chainlk; 537 538 cpu_ccfence(); 539 if (shared) { 540 if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) { 541 tsleep_interlock(object, 0); 542 if (atomic_cmpset_int(&object->chainlk, 543 chainlk, 544 chainlk | CHAINLK_WAIT)) { 545 tsleep(object, PINTERLOCKED, 546 "objchns", 0); 547 } 548 /* retry */ 549 } else { 550 break; 551 } 552 /* retry */ 553 } else { 554 if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) { 555 tsleep_interlock(object, 0); 556 if (atomic_cmpset_int(&object->chainlk, 557 chainlk, 558 chainlk | CHAINLK_WAIT)) 559 { 560 tsleep(object, PINTERLOCKED, 561 "objchnx", 0); 562 } 563 /* retry */ 564 } else { 565 if (atomic_cmpset_int(&object->chainlk, 566 chainlk, 567 chainlk & ~CHAINLK_WAIT)) 568 { 569 if (chainlk & CHAINLK_WAIT) 570 wakeup(object); 571 break; 572 } 573 /* retry */ 574 } 575 } 576 /* retry */ 577 } 578 } 579 580 void 581 vm_object_chain_acquire(vm_object_t object, int shared) 582 { 583 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) 584 return; 585 if (vm_shared_fault == 0) 586 shared = 0; 587 588 for (;;) { 589 uint32_t chainlk = object->chainlk; 590 591 cpu_ccfence(); 592 if (shared) { 593 if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) { 594 tsleep_interlock(object, 0); 595 if (atomic_cmpset_int(&object->chainlk, 596 chainlk, 597 chainlk | CHAINLK_WAIT)) { 598 tsleep(object, PINTERLOCKED, 599 "objchns", 0); 600 } 601 /* retry */ 602 } else if (atomic_cmpset_int(&object->chainlk, 603 chainlk, chainlk + 1)) { 604 break; 605 } 606 /* retry */ 607 } else { 608 if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) { 609 tsleep_interlock(object, 0); 610 if (atomic_cmpset_int(&object->chainlk, 611 chainlk, 612 chainlk | 613 CHAINLK_WAIT | 614 CHAINLK_EXCLREQ)) { 615 tsleep(object, PINTERLOCKED, 616 "objchnx", 0); 617 } 618 /* retry */ 619 } else { 620 if (atomic_cmpset_int(&object->chainlk, 621 chainlk, 622 (chainlk | CHAINLK_EXCL) & 623 ~(CHAINLK_EXCLREQ | 624 CHAINLK_WAIT))) { 625 if (chainlk & CHAINLK_WAIT) 626 wakeup(object); 627 break; 628 } 629 /* retry */ 630 } 631 } 632 /* retry */ 633 } 634 } 635 636 void 637 vm_object_chain_release(vm_object_t object) 638 { 639 /*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/ 640 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) 641 return; 642 KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL)); 643 for (;;) { 644 uint32_t chainlk = object->chainlk; 645 646 cpu_ccfence(); 647 if (chainlk & CHAINLK_MASK) { 648 if ((chainlk & CHAINLK_MASK) == 1 && 649 atomic_cmpset_int(&object->chainlk, 650 chainlk, 651 (chainlk - 1) & ~CHAINLK_WAIT)) { 652 if (chainlk & CHAINLK_WAIT) 653 wakeup(object); 654 break; 655 } 656 if ((chainlk & CHAINLK_MASK) > 1 && 657 atomic_cmpset_int(&object->chainlk, 658 chainlk, chainlk - 1)) { 659 break; 660 } 661 /* retry */ 662 } else { 663 KKASSERT(chainlk & CHAINLK_EXCL); 664 if (atomic_cmpset_int(&object->chainlk, 665 chainlk, 666 chainlk & ~(CHAINLK_EXCL | 667 CHAINLK_WAIT))) { 668 if (chainlk & CHAINLK_WAIT) 669 wakeup(object); 670 break; 671 } 672 } 673 } 674 } 675 676 /* 677 * Release the chain from first_object through and including stopobj. 678 * The caller is typically holding the first and last object locked 679 * (shared or exclusive) to prevent destruction races. 680 * 681 * We release stopobj first as an optimization as this object is most 682 * likely to be shared across multiple processes. 
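 *
 * Typical pairing (sketch; the object names are illustrative only):
 *
 *	vm_object_chain_acquire(first_object, 0);
 *	... chain-lock each backing object in turn ...
 *	vm_object_chain_release_all(first_object, stopobj);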
 */
void
vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
{
	vm_object_t backing_object;
	vm_object_t object;

	vm_object_chain_release(stopobj);
	object = first_object;

	while (object != stopobj) {
		KKASSERT(object);
		backing_object = object->backing_object;
		vm_object_chain_release(object);
		object = backing_object;
	}
}

/*
 * Dereference an object and its underlying vnode.  The object may be
 * held shared.  On return the object will remain held.
 *
 * This function may return a vnode in *vpp which the caller must release
 * after the caller drops its own lock.  If vpp is NULL, we assume that
 * the caller was holding an exclusive lock on the object and we vrele()
 * the vp ourselves.
 */
static void
VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
				   VMOBJDBARGS)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
		("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif
	for (;;) {
		int count = object->ref_count;
		cpu_ccfence();
		if (count == 1) {
			vm_object_upgrade(object);
			if (atomic_cmpset_int(&object->ref_count, count, 0)) {
				vclrflags(vp, VTEXT);
				break;
			}
		} else {
			if (atomic_cmpset_int(&object->ref_count,
					      count, count - 1)) {
				break;
			}
		}
		/* retry */
	}
#if defined(DEBUG_LOCKS)
	debugvm_object_add(object, file, line, -1);
#endif

	/*
	 * vrele or return the vp to vrele.  We can only safely vrele(vp)
	 * if the object was locked exclusively.  But there are two races
	 * here.
	 *
	 * We had to upgrade the object above to safely clear VTEXT
	 * but the alternative path where the shared lock is retained
	 * can STILL race to 0 in other paths and cause our own vrele()
	 * to terminate the vnode.  We can't allow that if the VM object
	 * is still locked shared.
	 */
	if (vpp)
		*vpp = vp;
	else
		vrele(vp);
}

/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 *
 * The caller does not have to hold the object locked but must have control
 * over the reference in question in order to guarantee that the object
 * does not get ripped out from under us.
 *
 * XXX Currently all deallocations require an exclusive lock.
 */
void
VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
{
	struct vnode *vp;
	int count;

	if (object == NULL)
		return;

	for (;;) {
		count = object->ref_count;
		cpu_ccfence();

		/*
		 * If decrementing the count enters into special handling
		 * territory (0, 1, or 2) we have to do it the hard way.
		 * Fortunately, though, objects with only a few refs like
		 * this are not likely to be heavily contended anyway.
		 *
		 * For vnode objects we only care about 1->0 transitions.
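		 *
		 * Illustrative fast path (a sketch, with an arbitrary
		 * example count): if ref_count is, say, 7 the decrement
		 * below reduces to
		 *
		 *	atomic_cmpset_int(&object->ref_count, 7, 6);
		 *
		 * (plus a vrele() for vnode objects) without acquiring a
		 * hold; only decrements that can land in the special
		 * territory take the held path.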
795 */ 796 if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) { 797 #if defined(DEBUG_LOCKS) 798 debugvm_object_add(object, file, line, 0); 799 #endif 800 vm_object_hold(object); 801 vm_object_deallocate_locked(object); 802 vm_object_drop(object); 803 break; 804 } 805 806 /* 807 * Try to decrement ref_count without acquiring a hold on 808 * the object. This is particularly important for the exec*() 809 * and exit*() code paths because the program binary may 810 * have a great deal of sharing and an exclusive lock will 811 * crowbar performance in those circumstances. 812 */ 813 if (object->type == OBJT_VNODE) { 814 vp = (struct vnode *)object->handle; 815 if (atomic_cmpset_int(&object->ref_count, 816 count, count - 1)) { 817 #if defined(DEBUG_LOCKS) 818 debugvm_object_add(object, file, line, -1); 819 #endif 820 821 vrele(vp); 822 break; 823 } 824 /* retry */ 825 } else { 826 if (atomic_cmpset_int(&object->ref_count, 827 count, count - 1)) { 828 #if defined(DEBUG_LOCKS) 829 debugvm_object_add(object, file, line, -1); 830 #endif 831 break; 832 } 833 /* retry */ 834 } 835 /* retry */ 836 } 837 } 838 839 void 840 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS) 841 { 842 struct vm_object_dealloc_list *dlist = NULL; 843 struct vm_object_dealloc_list *dtmp; 844 vm_object_t temp; 845 int must_drop = 0; 846 847 /* 848 * We may chain deallocate object, but additional objects may 849 * collect on the dlist which also have to be deallocated. We 850 * must avoid a recursion, vm_object chains can get deep. 851 */ 852 853 again: 854 while (object != NULL) { 855 /* 856 * vnode case, caller either locked the object exclusively 857 * or this is a recursion with must_drop != 0 and the vnode 858 * object will be locked shared. 859 * 860 * If locked shared we have to drop the object before we can 861 * call vrele() or risk a shared/exclusive livelock. 862 */ 863 if (object->type == OBJT_VNODE) { 864 ASSERT_LWKT_TOKEN_HELD(&object->token); 865 if (must_drop) { 866 struct vnode *tmp_vp; 867 868 vm_object_vndeallocate(object, &tmp_vp); 869 vm_object_drop(object); 870 must_drop = 0; 871 object = NULL; 872 vrele(tmp_vp); 873 } else { 874 vm_object_vndeallocate(object, NULL); 875 } 876 break; 877 } 878 ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token); 879 880 /* 881 * Normal case (object is locked exclusively) 882 */ 883 if (object->ref_count == 0) { 884 panic("vm_object_deallocate: object deallocated " 885 "too many times: %d", object->type); 886 } 887 if (object->ref_count > 2) { 888 atomic_add_int(&object->ref_count, -1); 889 #if defined(DEBUG_LOCKS) 890 debugvm_object_add(object, file, line, -1); 891 #endif 892 break; 893 } 894 895 /* 896 * Here on ref_count of one or two, which are special cases for 897 * objects. 898 * 899 * Nominal ref_count > 1 case if the second ref is not from 900 * a shadow. 901 * 902 * (ONEMAPPING only applies to DEFAULT AND SWAP objects) 903 */ 904 if (object->ref_count == 2 && object->shadow_count == 0) { 905 if (object->type == OBJT_DEFAULT || 906 object->type == OBJT_SWAP) { 907 vm_object_set_flag(object, OBJ_ONEMAPPING); 908 } 909 atomic_add_int(&object->ref_count, -1); 910 #if defined(DEBUG_LOCKS) 911 debugvm_object_add(object, file, line, -1); 912 #endif 913 break; 914 } 915 916 /* 917 * If the second ref is from a shadow we chain along it 918 * upwards if object's handle is exhausted. 
919 * 920 * We have to decrement object->ref_count before potentially 921 * collapsing the first shadow object or the collapse code 922 * will not be able to handle the degenerate case to remove 923 * object. However, if we do it too early the object can 924 * get ripped out from under us. 925 */ 926 if (object->ref_count == 2 && object->shadow_count == 1 && 927 object->handle == NULL && (object->type == OBJT_DEFAULT || 928 object->type == OBJT_SWAP)) { 929 temp = LIST_FIRST(&object->shadow_head); 930 KKASSERT(temp != NULL); 931 vm_object_hold(temp); 932 933 /* 934 * Wait for any paging to complete so the collapse 935 * doesn't (or isn't likely to) qcollapse. pip 936 * waiting must occur before we acquire the 937 * chainlock. 938 */ 939 while ( 940 temp->paging_in_progress || 941 object->paging_in_progress 942 ) { 943 vm_object_pip_wait(temp, "objde1"); 944 vm_object_pip_wait(object, "objde2"); 945 } 946 947 /* 948 * If the parent is locked we have to give up, as 949 * otherwise we would be acquiring locks in the 950 * wrong order and potentially deadlock. 951 */ 952 if (temp->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) { 953 vm_object_drop(temp); 954 goto skip; 955 } 956 vm_object_chain_acquire(temp, 0); 957 958 /* 959 * Recheck/retry after the hold and the paging 960 * wait, both of which can block us. 961 */ 962 if (object->ref_count != 2 || 963 object->shadow_count != 1 || 964 object->handle || 965 LIST_FIRST(&object->shadow_head) != temp || 966 (object->type != OBJT_DEFAULT && 967 object->type != OBJT_SWAP)) { 968 vm_object_chain_release(temp); 969 vm_object_drop(temp); 970 continue; 971 } 972 973 /* 974 * We can safely drop object's ref_count now. 975 */ 976 KKASSERT(object->ref_count == 2); 977 atomic_add_int(&object->ref_count, -1); 978 #if defined(DEBUG_LOCKS) 979 debugvm_object_add(object, file, line, -1); 980 #endif 981 982 /* 983 * If our single parent is not collapseable just 984 * decrement ref_count (2->1) and stop. 985 */ 986 if (temp->handle || (temp->type != OBJT_DEFAULT && 987 temp->type != OBJT_SWAP)) { 988 vm_object_chain_release(temp); 989 vm_object_drop(temp); 990 break; 991 } 992 993 /* 994 * At this point we have already dropped object's 995 * ref_count so it is possible for a race to 996 * deallocate obj out from under us. Any collapse 997 * will re-check the situation. We must not block 998 * until we are able to collapse. 999 * 1000 * Bump temp's ref_count to avoid an unwanted 1001 * degenerate recursion (can't call 1002 * vm_object_reference_locked() because it asserts 1003 * that CHAINLOCK is not set). 1004 */ 1005 atomic_add_int(&temp->ref_count, 1); 1006 KKASSERT(temp->ref_count > 1); 1007 1008 /* 1009 * Collapse temp, then deallocate the extra ref 1010 * formally. 1011 */ 1012 vm_object_collapse(temp, &dlist); 1013 vm_object_chain_release(temp); 1014 if (must_drop) { 1015 vm_object_lock_swap(); 1016 vm_object_drop(object); 1017 } 1018 object = temp; 1019 must_drop = 1; 1020 continue; 1021 } 1022 1023 /* 1024 * Drop the ref and handle termination on the 1->0 transition. 1025 * We may have blocked above so we have to recheck. 1026 */ 1027 skip: 1028 KKASSERT(object->ref_count != 0); 1029 if (object->ref_count >= 2) { 1030 atomic_add_int(&object->ref_count, -1); 1031 #if defined(DEBUG_LOCKS) 1032 debugvm_object_add(object, file, line, -1); 1033 #endif 1034 break; 1035 } 1036 KKASSERT(object->ref_count == 1); 1037 1038 /* 1039 * 1->0 transition. Chain through the backing_object. 1040 * Maintain the ref until we've located the backing object, 1041 * then re-check. 
		 */
		while ((temp = object->backing_object) != NULL) {
			if (temp->type == OBJT_VNODE)
				vm_object_hold_shared(temp);
			else
				vm_object_hold(temp);
			if (temp == object->backing_object)
				break;
			vm_object_drop(temp);
		}

		/*
		 * 1->0 transition verified, retry if ref_count is no longer
		 * 1.  Otherwise disconnect the backing_object (temp) and
		 * clean up.
		 */
		if (object->ref_count != 1) {
			vm_object_drop(temp);
			continue;
		}

		/*
		 * It shouldn't be possible for the object to be chain locked
		 * if we're removing the last ref on it.
		 *
		 * Removing object from temp's shadow list requires dropping
		 * temp, which we will do on loop.
		 *
		 * NOTE! vnodes do not use the shadow list, but still have
		 *	 the backing_object reference.
		 */
		KKASSERT((object->chainlk & (CHAINLK_EXCL|CHAINLK_MASK)) == 0);

		if (temp) {
			if (object->flags & OBJ_ONSHADOW) {
				LIST_REMOVE(object, shadow_list);
				temp->shadow_count--;
				temp->generation++;
				vm_object_clear_flag(object, OBJ_ONSHADOW);
			}
			object->backing_object = NULL;
		}

		atomic_add_int(&object->ref_count, -1);
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		if (must_drop && temp)
			vm_object_lock_swap();
		if (must_drop)
			vm_object_drop(object);
		object = temp;
		must_drop = 1;
	}

	if (must_drop && object)
		vm_object_drop(object);

	/*
	 * Additional tail recursion on dlist.  Avoid a recursion.  Objects
	 * on the dlist have a hold count but are not locked.
	 */
	if ((dtmp = dlist) != NULL) {
		dlist = dtmp->next;
		object = dtmp->object;
		kfree(dtmp, M_TEMP);

		vm_object_lock(object);	/* already held, add lock */
		must_drop = 1;		/* and we're responsible for it */
		goto again;
	}
}

/*
 * Destroy the specified object, freeing up related resources.
 *
 * The object must have zero references.
 *
 * The object must be held.  The caller is responsible for dropping the
 * object after terminate returns.  Terminate does NOT drop the object.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
	struct rb_vm_page_scan_info info;
	int n;

	/*
	 * Make sure no one uses us.  Once we set OBJ_DEAD we should be
	 * able to safely block.
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	KKASSERT((object->flags & OBJ_DEAD) == 0);
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * Wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm1");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate.  All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Clean pages and flush buffers.
		 *
		 * NOTE!  TMPFS buffer flushes do not typically flush the
		 *	  actual page to swap as this would be highly
		 *	  inefficient, and normal filesystems usually wrap
		 *	  page flushes with buffer cache buffers.
		 *
		 *	  To deal with this we have to call vinvalbuf() both
		 *	  before and after the vm_object_page_clean().
1163 */ 1164 vp = (struct vnode *) object->handle; 1165 vinvalbuf(vp, V_SAVE, 0, 0); 1166 vm_object_page_clean(object, 0, 0, OBJPC_SYNC); 1167 vinvalbuf(vp, V_SAVE, 0, 0); 1168 } 1169 1170 /* 1171 * Wait for any I/O to complete, after which there had better not 1172 * be any references left on the object. 1173 */ 1174 vm_object_pip_wait(object, "objtrm2"); 1175 1176 if (object->ref_count != 0) { 1177 panic("vm_object_terminate: object with references, " 1178 "ref_count=%d", object->ref_count); 1179 } 1180 1181 /* 1182 * Cleanup any shared pmaps associated with this object. 1183 */ 1184 pmap_object_free(object); 1185 1186 /* 1187 * Now free any remaining pages. For internal objects, this also 1188 * removes them from paging queues. Don't free wired pages, just 1189 * remove them from the object. 1190 */ 1191 info.count = 0; 1192 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL, 1193 vm_object_terminate_callback, &info); 1194 1195 /* 1196 * Let the pager know object is dead. 1197 */ 1198 vm_pager_deallocate(object); 1199 1200 /* 1201 * Wait for the object hold count to hit 1, clean out pages as 1202 * we go. vmobj_token interlocks any race conditions that might 1203 * pick the object up from the vm_object_list after we have cleared 1204 * rb_memq. 1205 */ 1206 for (;;) { 1207 if (RB_ROOT(&object->rb_memq) == NULL) 1208 break; 1209 kprintf("vm_object_terminate: Warning, object %p " 1210 "still has %d pages\n", 1211 object, object->resident_page_count); 1212 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL, 1213 vm_object_terminate_callback, &info); 1214 } 1215 1216 /* 1217 * There had better not be any pages left 1218 */ 1219 KKASSERT(object->resident_page_count == 0); 1220 1221 /* 1222 * Remove the object from the global object list. 1223 */ 1224 n = VMOBJ_HASH(object); 1225 lwkt_gettoken(&vmobj_tokens[n]); 1226 TAILQ_REMOVE(&vm_object_lists[n], object, object_list); 1227 lwkt_reltoken(&vmobj_tokens[n]); 1228 atomic_add_long(&vm_object_count, -1); 1229 1230 if (object->ref_count != 0) { 1231 panic("vm_object_terminate2: object with references, " 1232 "ref_count=%d", object->ref_count); 1233 } 1234 1235 /* 1236 * NOTE: The object hold_count is at least 1, so we cannot zfree() 1237 * the object here. See vm_object_drop(). 1238 */ 1239 } 1240 1241 /* 1242 * The caller must hold the object. 1243 */ 1244 static int 1245 vm_object_terminate_callback(vm_page_t p, void *data) 1246 { 1247 struct rb_vm_page_scan_info *info = data; 1248 vm_object_t object; 1249 1250 if ((++info->count & 63) == 0) 1251 lwkt_user_yield(); 1252 object = p->object; 1253 vm_page_busy_wait(p, TRUE, "vmpgtrm"); 1254 if (object != p->object) { 1255 kprintf("vm_object_terminate: Warning: Encountered " 1256 "busied page %p on queue %d\n", p, p->queue); 1257 vm_page_wakeup(p); 1258 } else if (p->wire_count == 0) { 1259 /* 1260 * NOTE: p->dirty and PG_NEED_COMMIT are ignored. 1261 */ 1262 vm_page_free(p); 1263 mycpu->gd_cnt.v_pfree++; 1264 } else { 1265 if (p->queue != PQ_NONE) 1266 kprintf("vm_object_terminate: Warning: Encountered " 1267 "wired page %p on queue %d\n", p, p->queue); 1268 vm_page_remove(p); 1269 vm_page_wakeup(p); 1270 } 1271 return(0); 1272 } 1273 1274 /* 1275 * Clean all dirty pages in the specified range of object. Leaves page 1276 * on whatever queue it is currently on. If NOSYNC is set then do not 1277 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC), 1278 * leaving the object dirty. 1279 * 1280 * When stuffing pages asynchronously, allow clustering. 
XXX we need a 1281 * synchronous clustering mode implementation. 1282 * 1283 * Odd semantics: if start == end, we clean everything. 1284 * 1285 * The object must be locked? XXX 1286 */ 1287 static int vm_object_page_clean_pass1(struct vm_page *p, void *data); 1288 static int vm_object_page_clean_pass2(struct vm_page *p, void *data); 1289 1290 void 1291 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 1292 int flags) 1293 { 1294 struct rb_vm_page_scan_info info; 1295 struct vnode *vp; 1296 int wholescan; 1297 int pagerflags; 1298 int generation; 1299 1300 vm_object_hold(object); 1301 if (object->type != OBJT_VNODE || 1302 (object->flags & OBJ_MIGHTBEDIRTY) == 0) { 1303 vm_object_drop(object); 1304 return; 1305 } 1306 1307 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? 1308 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; 1309 pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0; 1310 1311 vp = object->handle; 1312 1313 /* 1314 * Interlock other major object operations. This allows us to 1315 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY. 1316 */ 1317 vm_object_set_flag(object, OBJ_CLEANING); 1318 1319 /* 1320 * Handle 'entire object' case 1321 */ 1322 info.start_pindex = start; 1323 if (end == 0) { 1324 info.end_pindex = object->size - 1; 1325 } else { 1326 info.end_pindex = end - 1; 1327 } 1328 wholescan = (start == 0 && info.end_pindex == object->size - 1); 1329 info.limit = flags; 1330 info.pagerflags = pagerflags; 1331 info.object = object; 1332 info.count = 0; 1333 1334 /* 1335 * If cleaning the entire object do a pass to mark the pages read-only. 1336 * If everything worked out ok, clear OBJ_WRITEABLE and 1337 * OBJ_MIGHTBEDIRTY. 1338 */ 1339 if (wholescan) { 1340 info.error = 0; 1341 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 1342 vm_object_page_clean_pass1, &info); 1343 if (info.error == 0) { 1344 vm_object_clear_flag(object, 1345 OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 1346 if (object->type == OBJT_VNODE && 1347 (vp = (struct vnode *)object->handle) != NULL) { 1348 /* 1349 * Use new-style interface to clear VISDIRTY 1350 * because the vnode is not necessarily removed 1351 * from the syncer list(s) as often as it was 1352 * under the old interface, which can leave 1353 * the vnode on the syncer list after reclaim. 1354 */ 1355 vclrobjdirty(vp); 1356 } 1357 } 1358 } 1359 1360 /* 1361 * Do a pass to clean all the dirty pages we find. 1362 */ 1363 do { 1364 info.error = 0; 1365 generation = object->generation; 1366 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 1367 vm_object_page_clean_pass2, &info); 1368 } while (info.error || generation != object->generation); 1369 1370 vm_object_clear_flag(object, OBJ_CLEANING); 1371 vm_object_drop(object); 1372 } 1373 1374 /* 1375 * The caller must hold the object. 
 */
static
int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if ((++info->count & 63) == 0)
		lwkt_user_yield();
	vm_page_flag_set(p, PG_CLEANCHK);
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		info->error = 1;
	} else if (vm_page_busy_try(p, FALSE) == 0) {
		vm_page_protect(p, VM_PROT_READ);	/* must not block */
		vm_page_wakeup(p);
	} else {
		info->error = 1;
	}
	return(0);
}

/*
 * The caller must hold the object.
 */
static
int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int generation;

	/*
	 * Do not mess with pages that were inserted after we started
	 * the cleaning pass.
	 */
	if ((p->flags & PG_CLEANCHK) == 0)
		goto done;

	generation = info->object->generation;
	vm_page_busy_wait(p, TRUE, "vpcwai");
	if (p->object != info->object ||
	    info->object->generation != generation) {
		info->error = 1;
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * Before wasting time traversing the pmaps, check for trivial
	 * cases where the page cannot be dirty.
	 */
	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
		KKASSERT((p->dirty & p->valid) == 0 &&
			 (p->flags & PG_NEED_COMMIT) == 0);
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * Check whether the page is dirty or not.  The page has been set
	 * to be read-only so the check will not race a user dirtying the
	 * page.
	 */
	vm_page_test_dirty(p);
	if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were
	 * not cleared in this case (because pass1 will have returned an
	 * error), so we do not have to set them.
	 */
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		vm_page_wakeup(p);
		goto done;
	}

	/*
	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
	 * the pages that get successfully flushed.  Set info->error if
	 * we raced an object modification.
	 */
	vm_object_page_collect_flush(info->object, p, info->pagerflags);
	/* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
done:
	if ((++info->count & 63) == 0)
		lwkt_user_yield();
	return(0);
}

/*
 * Collect the specified page and nearby pages and flush them out.
 * The passed page is busied by the caller and we are responsible for
 * its disposition.
 *
 * The caller must hold the object.
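 *
 * Note (summarizing the code below): the cluster is built in a window
 * of at most BLIST_MAX_ALLOC pages aligned to a BLIST_MAX_ALLOC
 * boundary, with the passed page placed at
 *
 *	page_base = p->pindex % BLIST_MAX_ALLOC;
 *
 * and neighbors gathered downward (ib) and upward (is) until a page
 * that should not be flushed is encountered.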
1477 */ 1478 static void 1479 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags) 1480 { 1481 int error; 1482 int is; 1483 int ib; 1484 int i; 1485 int page_base; 1486 vm_pindex_t pi; 1487 vm_page_t ma[BLIST_MAX_ALLOC]; 1488 1489 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1490 1491 pi = p->pindex; 1492 page_base = pi % BLIST_MAX_ALLOC; 1493 ma[page_base] = p; 1494 ib = page_base - 1; 1495 is = page_base + 1; 1496 1497 while (ib >= 0) { 1498 vm_page_t tp; 1499 1500 tp = vm_page_lookup_busy_try(object, pi - page_base + ib, 1501 TRUE, &error); 1502 if (error) 1503 break; 1504 if (tp == NULL) 1505 break; 1506 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 1507 (tp->flags & PG_CLEANCHK) == 0) { 1508 vm_page_wakeup(tp); 1509 break; 1510 } 1511 if ((tp->queue - tp->pc) == PQ_CACHE) { 1512 vm_page_flag_clear(tp, PG_CLEANCHK); 1513 vm_page_wakeup(tp); 1514 break; 1515 } 1516 vm_page_test_dirty(tp); 1517 if ((tp->dirty & tp->valid) == 0 && 1518 (tp->flags & PG_NEED_COMMIT) == 0) { 1519 vm_page_flag_clear(tp, PG_CLEANCHK); 1520 vm_page_wakeup(tp); 1521 break; 1522 } 1523 ma[ib] = tp; 1524 --ib; 1525 } 1526 ++ib; /* fixup */ 1527 1528 while (is < BLIST_MAX_ALLOC && 1529 pi - page_base + is < object->size) { 1530 vm_page_t tp; 1531 1532 tp = vm_page_lookup_busy_try(object, pi - page_base + is, 1533 TRUE, &error); 1534 if (error) 1535 break; 1536 if (tp == NULL) 1537 break; 1538 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 1539 (tp->flags & PG_CLEANCHK) == 0) { 1540 vm_page_wakeup(tp); 1541 break; 1542 } 1543 if ((tp->queue - tp->pc) == PQ_CACHE) { 1544 vm_page_flag_clear(tp, PG_CLEANCHK); 1545 vm_page_wakeup(tp); 1546 break; 1547 } 1548 vm_page_test_dirty(tp); 1549 if ((tp->dirty & tp->valid) == 0 && 1550 (tp->flags & PG_NEED_COMMIT) == 0) { 1551 vm_page_flag_clear(tp, PG_CLEANCHK); 1552 vm_page_wakeup(tp); 1553 break; 1554 } 1555 ma[is] = tp; 1556 ++is; 1557 } 1558 1559 /* 1560 * All pages in the ma[] array are busied now 1561 */ 1562 for (i = ib; i < is; ++i) { 1563 vm_page_flag_clear(ma[i], PG_CLEANCHK); 1564 vm_page_hold(ma[i]); /* XXX need this any more? */ 1565 } 1566 vm_pageout_flush(&ma[ib], is - ib, pagerflags); 1567 for (i = ib; i < is; ++i) /* XXX need this any more? */ 1568 vm_page_unhold(ma[i]); 1569 } 1570 1571 /* 1572 * Same as vm_object_pmap_copy, except range checking really 1573 * works, and is meant for small sections of an object. 1574 * 1575 * This code protects resident pages by making them read-only 1576 * and is typically called on a fork or split when a page 1577 * is converted to copy-on-write. 1578 * 1579 * NOTE: If the page is already at VM_PROT_NONE, calling 1580 * vm_page_protect will have no effect. 1581 */ 1582 void 1583 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1584 { 1585 vm_pindex_t idx; 1586 vm_page_t p; 1587 1588 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 1589 return; 1590 1591 vm_object_hold(object); 1592 for (idx = start; idx < end; idx++) { 1593 p = vm_page_lookup(object, idx); 1594 if (p == NULL) 1595 continue; 1596 vm_page_protect(p, VM_PROT_READ); 1597 } 1598 vm_object_drop(object); 1599 } 1600 1601 /* 1602 * Removes all physical pages in the specified object range from all 1603 * physical maps. 1604 * 1605 * The object must *not* be locked. 
1606 */ 1607 1608 static int vm_object_pmap_remove_callback(vm_page_t p, void *data); 1609 1610 void 1611 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1612 { 1613 struct rb_vm_page_scan_info info; 1614 1615 if (object == NULL) 1616 return; 1617 info.start_pindex = start; 1618 info.end_pindex = end - 1; 1619 info.count = 0; 1620 1621 vm_object_hold(object); 1622 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 1623 vm_object_pmap_remove_callback, &info); 1624 if (start == 0 && end == object->size) 1625 vm_object_clear_flag(object, OBJ_WRITEABLE); 1626 vm_object_drop(object); 1627 } 1628 1629 /* 1630 * The caller must hold the object 1631 */ 1632 static int 1633 vm_object_pmap_remove_callback(vm_page_t p, void *data) 1634 { 1635 struct rb_vm_page_scan_info *info = data; 1636 1637 if ((++info->count & 63) == 0) 1638 lwkt_user_yield(); 1639 1640 vm_page_protect(p, VM_PROT_NONE); 1641 return(0); 1642 } 1643 1644 /* 1645 * Implements the madvise function at the object/page level. 1646 * 1647 * MADV_WILLNEED (any object) 1648 * 1649 * Activate the specified pages if they are resident. 1650 * 1651 * MADV_DONTNEED (any object) 1652 * 1653 * Deactivate the specified pages if they are resident. 1654 * 1655 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only) 1656 * 1657 * Deactivate and clean the specified pages if they are 1658 * resident. This permits the process to reuse the pages 1659 * without faulting or the kernel to reclaim the pages 1660 * without I/O. 1661 * 1662 * No requirements. 1663 */ 1664 void 1665 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise) 1666 { 1667 vm_pindex_t end, tpindex; 1668 vm_object_t tobject; 1669 vm_object_t xobj; 1670 vm_page_t m; 1671 int error; 1672 1673 if (object == NULL) 1674 return; 1675 1676 end = pindex + count; 1677 1678 vm_object_hold(object); 1679 tobject = object; 1680 1681 /* 1682 * Locate and adjust resident pages 1683 */ 1684 for (; pindex < end; pindex += 1) { 1685 relookup: 1686 if (tobject != object) 1687 vm_object_drop(tobject); 1688 tobject = object; 1689 tpindex = pindex; 1690 shadowlookup: 1691 /* 1692 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages 1693 * and those pages must be OBJ_ONEMAPPING. 1694 */ 1695 if (advise == MADV_FREE) { 1696 if ((tobject->type != OBJT_DEFAULT && 1697 tobject->type != OBJT_SWAP) || 1698 (tobject->flags & OBJ_ONEMAPPING) == 0) { 1699 continue; 1700 } 1701 } 1702 1703 m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error); 1704 1705 if (error) { 1706 vm_page_sleep_busy(m, TRUE, "madvpo"); 1707 goto relookup; 1708 } 1709 if (m == NULL) { 1710 /* 1711 * There may be swap even if there is no backing page 1712 */ 1713 if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 1714 swap_pager_freespace(tobject, tpindex, 1); 1715 1716 /* 1717 * next object 1718 */ 1719 while ((xobj = tobject->backing_object) != NULL) { 1720 KKASSERT(xobj != object); 1721 vm_object_hold(xobj); 1722 if (xobj == tobject->backing_object) 1723 break; 1724 vm_object_drop(xobj); 1725 } 1726 if (xobj == NULL) 1727 continue; 1728 tpindex += OFF_TO_IDX(tobject->backing_object_offset); 1729 if (tobject != object) { 1730 vm_object_lock_swap(); 1731 vm_object_drop(tobject); 1732 } 1733 tobject = xobj; 1734 goto shadowlookup; 1735 } 1736 1737 /* 1738 * If the page is not in a normal active state, we skip it. 1739 * If the page is not managed there are no page queues to 1740 * mess with. 
		 * Things can break if we mess with pages in any of the
		 * below states.
		 */
		if (m->wire_count ||
		    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			vm_page_wakeup(m);
			continue;
		}

		/*
		 * Theoretically once a page is known not to be busy, an
		 * interrupt cannot come along and rip it out from under us.
		 */
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
		vm_page_wakeup(m);
	}
	if (tobject != object)
		vm_object_drop(tobject);
	vm_object_drop(object);
}

/*
 * Create a new object which is backed by the specified existing object
 * range.  Replace the pointer and offset that was pointing at the existing
 * object with the pointer/offset for the new object.
 *
 * If addref is non-zero the returned object is given an additional reference.
 * This mechanic exists to avoid the situation where refs might be 1 and
 * race against a collapse when the caller intends to bump it.  So the
 * caller cannot add the ref after the fact.  Used when the caller is
 * duplicating a vm_map_entry.
 *
 * No other requirements.
 */
void
vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
		 int addref)
{
	vm_object_t source;
	vm_object_t result;
	int useshadowlist;

	source = *objectp;

	/*
	 * Don't create the new object if the old object isn't shared.
	 * We have to chain wait before adding the reference to avoid
	 * racing a collapse or deallocation.
	 *
	 * Clear OBJ_ONEMAPPING flag when shadowing.
	 *
	 * The caller owns a ref on source via *objectp which we are going
	 * to replace.  This ref is inherited by the backing_object
	 * assignment from nobject and does not need to be incremented here.
	 *
	 * However, we add a temporary extra reference to the original source
	 * prior to holding nobject in case we block, to avoid races where
	 * someone else might believe that the source can be collapsed.
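	 *
	 * Illustrative call sketch (the local names are hypothetical;
	 * the real callers live in the vm_map code): when duplicating a
	 * map entry the caller passes addref=1 so the replacement object
	 * is created with both references already accounted for:
	 *
	 *	vm_object_t obj = ...;		(current backing object)
	 *	vm_ooffset_t off = ...;
	 *
	 *	vm_object_shadow(&obj, &off, len, 1);
	 *	(obj/off now describe the new shadow object)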
	 */
	useshadowlist = 0;
	if (source) {
		if (source->type != OBJT_VNODE) {
			useshadowlist = 1;
			vm_object_hold(source);
			vm_object_chain_wait(source, 0);
			if (source->ref_count == 1 &&
			    source->handle == NULL &&
			    (source->type == OBJT_DEFAULT ||
			     source->type == OBJT_SWAP)) {
				if (addref) {
					vm_object_reference_locked(source);
					vm_object_clear_flag(source,
							     OBJ_ONEMAPPING);
				}
				vm_object_drop(source);
				return;
			}
			vm_object_reference_locked(source);
			vm_object_clear_flag(source, OBJ_ONEMAPPING);
		} else {
			vm_object_reference_quick(source);
			vm_object_clear_flag(source, OBJ_ONEMAPPING);
		}
	}

	/*
	 * Allocate a new object with the given length.  The new object
	 * is returned referenced but we may have to add another one.
	 * If we are adding a second reference we must clear OBJ_ONEMAPPING
	 * (typically because the caller is about to clone a vm_map_entry).
	 *
	 * The source object currently has an extra reference to prevent
	 * collapses into it while we mess with its shadow list, which
	 * we will remove later in this routine.
	 *
	 * The target object may require a second reference if asked for one
	 * by the caller.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);
	if (result == NULL)
		panic("vm_object_shadow: no object for shadowing");
	vm_object_hold(result);
	if (addref) {
		vm_object_reference_locked(result);
		vm_object_clear_flag(result, OBJ_ONEMAPPING);
	}

	/*
	 * The new object shadows the source object.  Chain wait before
	 * adjusting shadow_count or the shadow list to avoid races.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 *
	 * The backing_object reference to source requires adding a ref to
	 * source.  We simply inherit the ref from the original *objectp
	 * (which we are replacing) so no additional refs need to be added.
	 * (we must still clean up the extra ref we had to prevent collapse
	 * races).
	 *
	 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
	 */
	KKASSERT(result->backing_object == NULL);
	result->backing_object = source;
	if (source) {
		if (useshadowlist) {
			vm_object_chain_wait(source, 0);
			LIST_INSERT_HEAD(&source->shadow_head,
					 result, shadow_list);
			source->shadow_count++;
			source->generation++;
			vm_object_set_flag(result, OBJ_ONSHADOW);
		}
		/* cpu localization twist */
		result->pg_color = (int)(intptr_t)curthread;
	}

	/*
	 * Adjust the return storage.  Drop the ref on source before
	 * returning.
	 */
	result->backing_object_offset = *offset;
	vm_object_drop(result);
	*offset = 0;
	if (source) {
		if (useshadowlist) {
			vm_object_deallocate_locked(source);
			vm_object_drop(source);
		} else {
			vm_object_deallocate(source);
		}
	}

	/*
	 * Return the new things
	 */
	*objectp = result;
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int vm_object_backing_scan_callback(vm_page_t p, void *data);

/*
 * The caller must hold the object.
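 *
 * "op" is a mask of the OBSC_* flags above (brief summary; see the
 * scan callback below for the exact semantics):
 *
 *	OBSC_TEST_ALL_SHADOWED	only test whether the parent completely
 *				shadows the backing object
 *	OBSC_COLLAPSE_WAIT	migrate pages into the parent, sleeping
 *				on busy pages (the backing object is
 *				marked OBJ_DEAD first)
 *	OBSC_COLLAPSE_NOWAIT	like WAIT but skip busy, invalid, or
 *				wired pages instead of sleeping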
1937 */ 1938 static __inline int 1939 vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op) 1940 { 1941 struct rb_vm_page_scan_info info; 1942 int n; 1943 1944 vm_object_assert_held(object); 1945 vm_object_assert_held(backing_object); 1946 1947 KKASSERT(backing_object == object->backing_object); 1948 info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1949 1950 /* 1951 * Initial conditions 1952 */ 1953 if (op & OBSC_TEST_ALL_SHADOWED) { 1954 /* 1955 * We do not want to have to test for the existence of 1956 * swap pages in the backing object. XXX but with the 1957 * new swapper this would be pretty easy to do. 1958 * 1959 * XXX what about anonymous MAP_SHARED memory that hasn't 1960 * been ZFOD faulted yet? If we do not test for this, the 1961 * shadow test may succeed! XXX 1962 */ 1963 if (backing_object->type != OBJT_DEFAULT) 1964 return(0); 1965 } 1966 if (op & OBSC_COLLAPSE_WAIT) { 1967 KKASSERT((backing_object->flags & OBJ_DEAD) == 0); 1968 vm_object_set_flag(backing_object, OBJ_DEAD); 1969 1970 n = VMOBJ_HASH(backing_object); 1971 lwkt_gettoken(&vmobj_tokens[n]); 1972 TAILQ_REMOVE(&vm_object_lists[n], backing_object, object_list); 1973 lwkt_reltoken(&vmobj_tokens[n]); 1974 atomic_add_long(&vm_object_count, -1); 1975 } 1976 1977 /* 1978 * Our scan. We have to retry if a negative error code is returned, 1979 * otherwise 0 or 1 will be returned in info.error. 0 Indicates that 1980 * the scan had to be stopped because the parent does not completely 1981 * shadow the child. 1982 */ 1983 info.object = object; 1984 info.backing_object = backing_object; 1985 info.limit = op; 1986 do { 1987 info.error = 1; 1988 vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL, 1989 vm_object_backing_scan_callback, 1990 &info); 1991 } while (info.error < 0); 1992 1993 return(info.error); 1994 } 1995 1996 /* 1997 * The caller must hold the object. 1998 */ 1999 static int 2000 vm_object_backing_scan_callback(vm_page_t p, void *data) 2001 { 2002 struct rb_vm_page_scan_info *info = data; 2003 vm_object_t backing_object; 2004 vm_object_t object; 2005 vm_pindex_t pindex; 2006 vm_pindex_t new_pindex; 2007 vm_pindex_t backing_offset_index; 2008 int op; 2009 2010 pindex = p->pindex; 2011 new_pindex = pindex - info->backing_offset_index; 2012 op = info->limit; 2013 object = info->object; 2014 backing_object = info->backing_object; 2015 backing_offset_index = info->backing_offset_index; 2016 2017 if (op & OBSC_TEST_ALL_SHADOWED) { 2018 vm_page_t pp; 2019 2020 /* 2021 * Ignore pages outside the parent object's range 2022 * and outside the parent object's mapping of the 2023 * backing object. 2024 * 2025 * note that we do not busy the backing object's 2026 * page. 2027 */ 2028 if (pindex < backing_offset_index || 2029 new_pindex >= object->size 2030 ) { 2031 return(0); 2032 } 2033 2034 /* 2035 * See if the parent has the page or if the parent's 2036 * object pager has the page. If the parent has the 2037 * page but the page is not valid, the parent's 2038 * object pager must have the page. 2039 * 2040 * If this fails, the parent does not completely shadow 2041 * the object and we might as well give up now. 2042 */ 2043 pp = vm_page_lookup(object, new_pindex); 2044 if ((pp == NULL || pp->valid == 0) && 2045 !vm_pager_has_page(object, new_pindex) 2046 ) { 2047 info->error = 0; /* problemo */ 2048 return(-1); /* stop the scan */ 2049 } 2050 } 2051 2052 /* 2053 * Check for busy page. Note that we may have lost (p) when we 2054 * possibly blocked above. 
2055 */ 2056 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { 2057 vm_page_t pp; 2058 2059 if (vm_page_busy_try(p, TRUE)) { 2060 if (op & OBSC_COLLAPSE_NOWAIT) { 2061 return(0); 2062 } else { 2063 /* 2064 * If we slept, anything could have 2065 * happened. Ask that the scan be restarted. 2066 * 2067 * Since the object is marked dead, the 2068 * backing offset should not have changed. 2069 */ 2070 vm_page_sleep_busy(p, TRUE, "vmocol"); 2071 info->error = -1; 2072 return(-1); 2073 } 2074 } 2075 2076 /* 2077 * If (p) is no longer valid restart the scan. 2078 */ 2079 if (p->object != backing_object || p->pindex != pindex) { 2080 kprintf("vm_object_backing_scan: Warning: page " 2081 "%p ripped out from under us\n", p); 2082 vm_page_wakeup(p); 2083 info->error = -1; 2084 return(-1); 2085 } 2086 2087 if (op & OBSC_COLLAPSE_NOWAIT) { 2088 if (p->valid == 0 || 2089 p->wire_count || 2090 (p->flags & PG_NEED_COMMIT)) { 2091 vm_page_wakeup(p); 2092 return(0); 2093 } 2094 } else { 2095 /* XXX what if p->valid == 0 , hold_count, etc? */ 2096 } 2097 2098 KASSERT( 2099 p->object == backing_object, 2100 ("vm_object_qcollapse(): object mismatch") 2101 ); 2102 2103 /* 2104 * Destroy any associated swap 2105 */ 2106 if (backing_object->type == OBJT_SWAP) 2107 swap_pager_freespace(backing_object, p->pindex, 1); 2108 2109 if ( 2110 p->pindex < backing_offset_index || 2111 new_pindex >= object->size 2112 ) { 2113 /* 2114 * Page is out of the parent object's range, we 2115 * can simply destroy it. 2116 */ 2117 vm_page_protect(p, VM_PROT_NONE); 2118 vm_page_free(p); 2119 return(0); 2120 } 2121 2122 pp = vm_page_lookup(object, new_pindex); 2123 if (pp != NULL || vm_pager_has_page(object, new_pindex)) { 2124 /* 2125 * page already exists in parent OR swap exists 2126 * for this location in the parent. Destroy 2127 * the original page from the backing object. 2128 * 2129 * Leave the parent's page alone 2130 */ 2131 vm_page_protect(p, VM_PROT_NONE); 2132 vm_page_free(p); 2133 return(0); 2134 } 2135 2136 /* 2137 * Page does not exist in parent, rename the 2138 * page from the backing object to the main object. 2139 * 2140 * If the page was mapped to a process, it can remain 2141 * mapped through the rename. 2142 */ 2143 if ((p->queue - p->pc) == PQ_CACHE) 2144 vm_page_deactivate(p); 2145 2146 vm_page_rename(p, object, new_pindex); 2147 vm_page_wakeup(p); 2148 /* page automatically made dirty by rename */ 2149 } 2150 return(0); 2151 } 2152 2153 /* 2154 * This version of collapse allows the operation to occur earlier and 2155 * when paging_in_progress is true for an object... This is not a complete 2156 * operation, but should plug 99.9% of the rest of the leaks. 2157 * 2158 * The caller must hold the object and backing_object and both must be 2159 * chainlocked. 2160 * 2161 * (only called from vm_object_collapse) 2162 */ 2163 static void 2164 vm_object_qcollapse(vm_object_t object, vm_object_t backing_object) 2165 { 2166 if (backing_object->ref_count == 1) { 2167 atomic_add_int(&backing_object->ref_count, 2); 2168 #if defined(DEBUG_LOCKS) 2169 debugvm_object_add(backing_object, "qcollapse", 1, 2); 2170 #endif 2171 vm_object_backing_scan(object, backing_object, 2172 OBSC_COLLAPSE_NOWAIT); 2173 atomic_add_int(&backing_object->ref_count, -2); 2174 #if defined(DEBUG_LOCKS) 2175 debugvm_object_add(backing_object, "qcollapse", 2, -2); 2176 #endif 2177 } 2178 } 2179 2180 /* 2181 * Collapse an object with the object backing it. 
Pages in the backing 2182 * object are moved into the parent, and the backing object is deallocated. 2183 * Any conflict is resolved in favor of the parent's existing pages. 2184 * 2185 * object must be held and chain-locked on call. 2186 * 2187 * The caller must have an extra ref on object to prevent a race from 2188 * destroying it during the collapse. 2189 */ 2190 void 2191 vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) 2192 { 2193 struct vm_object_dealloc_list *dlist = NULL; 2194 vm_object_t backing_object; 2195 2196 /* 2197 * Only one thread is attempting a collapse at any given moment. 2198 * There are few restrictions for (object) that callers of this 2199 * function check so reentrancy is likely. 2200 */ 2201 KKASSERT(object != NULL); 2202 vm_object_assert_held(object); 2203 KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL)); 2204 2205 for (;;) { 2206 vm_object_t bbobj; 2207 int dodealloc; 2208 2209 /* 2210 * We can only collapse a DEFAULT/SWAP object with a 2211 * DEFAULT/SWAP object. 2212 */ 2213 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) { 2214 backing_object = NULL; 2215 break; 2216 } 2217 2218 backing_object = object->backing_object; 2219 if (backing_object == NULL) 2220 break; 2221 if (backing_object->type != OBJT_DEFAULT && 2222 backing_object->type != OBJT_SWAP) { 2223 backing_object = NULL; 2224 break; 2225 } 2226 2227 /* 2228 * Hold the backing_object and check for races 2229 */ 2230 vm_object_hold(backing_object); 2231 if (backing_object != object->backing_object || 2232 (backing_object->type != OBJT_DEFAULT && 2233 backing_object->type != OBJT_SWAP)) { 2234 vm_object_drop(backing_object); 2235 continue; 2236 } 2237 2238 /* 2239 * Chain-lock the backing object too because if we 2240 * successfully merge its pages into the top object we 2241 * will collapse backing_object->backing_object as the 2242 * new backing_object. Re-check that it is still our 2243 * backing object. 2244 */ 2245 vm_object_chain_acquire(backing_object, 0); 2246 if (backing_object != object->backing_object) { 2247 vm_object_chain_release(backing_object); 2248 vm_object_drop(backing_object); 2249 continue; 2250 } 2251 2252 /* 2253 * we check the backing object first, because it is most likely 2254 * not collapsable. 2255 */ 2256 if (backing_object->handle != NULL || 2257 (backing_object->type != OBJT_DEFAULT && 2258 backing_object->type != OBJT_SWAP) || 2259 (backing_object->flags & OBJ_DEAD) || 2260 object->handle != NULL || 2261 (object->type != OBJT_DEFAULT && 2262 object->type != OBJT_SWAP) || 2263 (object->flags & OBJ_DEAD)) { 2264 break; 2265 } 2266 2267 /* 2268 * If paging is in progress we can't do a normal collapse. 2269 */ 2270 if ( 2271 object->paging_in_progress != 0 || 2272 backing_object->paging_in_progress != 0 2273 ) { 2274 vm_object_qcollapse(object, backing_object); 2275 break; 2276 } 2277 2278 /* 2279 * We know that we can either collapse the backing object (if 2280 * the parent is the only reference to it) or (perhaps) have 2281 * the parent bypass the object if the parent happens to shadow 2282 * all the resident pages in the entire backing object. 2283 * 2284 * This is ignoring pager-backed pages such as swap pages. 2285 * vm_object_backing_scan fails the shadowing test in this 2286 * case. 2287 */ 2288 if (backing_object->ref_count == 1) { 2289 /* 2290 * If there is exactly one reference to the backing 2291 * object, we can collapse it into the parent. 
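			 *
			 * In rough before/after terms (using the local names
			 * in this function), the chain
			 *
			 *	object -> backing_object -> bbobj
			 *
			 * is reduced to
			 *
			 *	object -> bbobj
			 *
			 * with backing_object's pages merged into object and
			 * backing_object itself torn down below.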
2292 */ 2293 KKASSERT(object->backing_object == backing_object); 2294 vm_object_backing_scan(object, backing_object, 2295 OBSC_COLLAPSE_WAIT); 2296 2297 /* 2298 * Move the pager from backing_object to object. 2299 */ 2300 if (backing_object->type == OBJT_SWAP) { 2301 vm_object_pip_add(backing_object, 1); 2302 2303 /* 2304 * scrap the paging_offset junk and do a 2305 * discrete copy. This also removes major 2306 * assumptions about how the swap-pager 2307 * works from where it doesn't belong. The 2308 * new swapper is able to optimize the 2309 * destroy-source case. 2310 */ 2311 vm_object_pip_add(object, 1); 2312 swap_pager_copy(backing_object, object, 2313 OFF_TO_IDX(object->backing_object_offset), 2314 TRUE); 2315 vm_object_pip_wakeup(object); 2316 vm_object_pip_wakeup(backing_object); 2317 } 2318 2319 /* 2320 * Object now shadows whatever backing_object did. 2321 * Remove object from backing_object's shadow_list. 2322 * 2323 * Removing object from backing_objects shadow list 2324 * requires releasing object, which we will do below. 2325 */ 2326 KKASSERT(object->backing_object == backing_object); 2327 if (object->flags & OBJ_ONSHADOW) { 2328 LIST_REMOVE(object, shadow_list); 2329 backing_object->shadow_count--; 2330 backing_object->generation++; 2331 vm_object_clear_flag(object, OBJ_ONSHADOW); 2332 } 2333 2334 /* 2335 * backing_object->backing_object moves from within 2336 * backing_object to within object. 2337 * 2338 * OBJT_VNODE bbobj's should have empty shadow lists. 2339 */ 2340 while ((bbobj = backing_object->backing_object) != NULL) { 2341 if (bbobj->type == OBJT_VNODE) 2342 vm_object_hold_shared(bbobj); 2343 else 2344 vm_object_hold(bbobj); 2345 if (bbobj == backing_object->backing_object) 2346 break; 2347 vm_object_drop(bbobj); 2348 } 2349 2350 /* 2351 * We are removing backing_object from bbobj's 2352 * shadow list and adding object to bbobj's shadow 2353 * list, so the ref_count on bbobj is unchanged. 2354 */ 2355 if (bbobj) { 2356 if (backing_object->flags & OBJ_ONSHADOW) { 2357 /* not locked exclusively if vnode */ 2358 KKASSERT(bbobj->type != OBJT_VNODE); 2359 LIST_REMOVE(backing_object, 2360 shadow_list); 2361 bbobj->shadow_count--; 2362 bbobj->generation++; 2363 vm_object_clear_flag(backing_object, 2364 OBJ_ONSHADOW); 2365 } 2366 backing_object->backing_object = NULL; 2367 } 2368 object->backing_object = bbobj; 2369 if (bbobj) { 2370 if (bbobj->type != OBJT_VNODE) { 2371 LIST_INSERT_HEAD(&bbobj->shadow_head, 2372 object, shadow_list); 2373 bbobj->shadow_count++; 2374 bbobj->generation++; 2375 vm_object_set_flag(object, 2376 OBJ_ONSHADOW); 2377 } 2378 } 2379 2380 object->backing_object_offset += 2381 backing_object->backing_object_offset; 2382 2383 vm_object_drop(bbobj); 2384 2385 /* 2386 * Discard the old backing_object. Nothing should be 2387 * able to ref it, other than a vm_map_split(), 2388 * and vm_map_split() will stall on our chain lock. 2389 * And we control the parent so it shouldn't be 2390 * possible for it to go away either. 2391 * 2392 * Since the backing object has no pages, no pager 2393 * left, and no object references within it, all 2394 * that is necessary is to dispose of it. 2395 */ 2396 KASSERT(backing_object->ref_count == 1, 2397 ("backing_object %p was somehow " 2398 "re-referenced during collapse!", 2399 backing_object)); 2400 KASSERT(RB_EMPTY(&backing_object->rb_memq), 2401 ("backing_object %p somehow has left " 2402 "over pages during collapse!", 2403 backing_object)); 2404 2405 /* 2406 * The object can be destroyed. 
2407 * 2408 * XXX just fall through and dodealloc instead 2409 * of forcing destruction? 2410 */ 2411 atomic_add_int(&backing_object->ref_count, -1); 2412 #if defined(DEBUG_LOCKS) 2413 debugvm_object_add(backing_object, "collapse", 1, -1); 2414 #endif 2415 if ((backing_object->flags & OBJ_DEAD) == 0) 2416 vm_object_terminate(backing_object); 2417 object_collapses++; 2418 dodealloc = 0; 2419 } else { 2420 /* 2421 * If we do not entirely shadow the backing object, 2422 * there is nothing we can do so we give up. 2423 */ 2424 if (vm_object_backing_scan(object, backing_object, 2425 OBSC_TEST_ALL_SHADOWED) == 0) { 2426 break; 2427 } 2428 2429 /* 2430 * bbobj is backing_object->backing_object. Since 2431 * object completely shadows backing_object we can 2432 * bypass it and become backed by bbobj instead. 2433 * 2434 * The shadow list for vnode backing objects is not 2435 * used and a shared hold is allowed. 2436 */ 2437 while ((bbobj = backing_object->backing_object) != NULL) { 2438 if (bbobj->type == OBJT_VNODE) 2439 vm_object_hold_shared(bbobj); 2440 else 2441 vm_object_hold(bbobj); 2442 if (bbobj == backing_object->backing_object) 2443 break; 2444 vm_object_drop(bbobj); 2445 } 2446 2447 /* 2448 * Make object shadow bbobj instead of backing_object. 2449 * Remove object from backing_object's shadow list. 2450 * 2451 * Deallocating backing_object will not remove 2452 * it, since its reference count is at least 2. 2453 * 2454 * Removing object from backing_object's shadow 2455 * list requires releasing a ref, which we do 2456 * below by setting dodealloc to 1. 2457 */ 2458 KKASSERT(object->backing_object == backing_object); 2459 if (object->flags & OBJ_ONSHADOW) { 2460 LIST_REMOVE(object, shadow_list); 2461 backing_object->shadow_count--; 2462 backing_object->generation++; 2463 vm_object_clear_flag(object, OBJ_ONSHADOW); 2464 } 2465 2466 /* 2467 * Add a ref to bbobj, bbobj now shadows object. 2468 * 2469 * NOTE: backing_object->backing_object still points 2470 * to bbobj. That relationship remains intact 2471 * because backing_object has > 1 ref, so 2472 * someone else is pointing to it (hence why 2473 * we can't collapse it into object and can 2474 * only handle the all-shadowed bypass case). 2475 */ 2476 if (bbobj) { 2477 if (bbobj->type != OBJT_VNODE) { 2478 vm_object_chain_wait(bbobj, 0); 2479 vm_object_reference_locked(bbobj); 2480 LIST_INSERT_HEAD(&bbobj->shadow_head, 2481 object, shadow_list); 2482 bbobj->shadow_count++; 2483 bbobj->generation++; 2484 vm_object_set_flag(object, 2485 OBJ_ONSHADOW); 2486 } else { 2487 vm_object_reference_quick(bbobj); 2488 } 2489 object->backing_object_offset += 2490 backing_object->backing_object_offset; 2491 object->backing_object = bbobj; 2492 vm_object_drop(bbobj); 2493 } else { 2494 object->backing_object = NULL; 2495 } 2496 2497 /* 2498 * Drop the reference count on backing_object. To 2499 * handle ref_count races properly we can't assume 2500 * that the ref_count is still at least 2 so we 2501 * have to actually call vm_object_deallocate() 2502 * (after clearing the chainlock). 2503 */ 2504 object_bypasses++; 2505 dodealloc = 1; 2506 } 2507 2508 /* 2509 * Ok, we want to loop on the new object->bbobj association, 2510 * possibly collapsing it further. However if dodealloc is 2511 * non-zero we have to deallocate the backing_object which 2512 * itself can potentially undergo a collapse, creating a 2513 * recursion depth issue with the LWKT token subsystem. 
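		 * To avoid that recursion the code below does not deallocate
		 * backing_object directly; it queues the object on a
		 * vm_object_dealloc_list entry which is drained iteratively
		 * by vm_object_deallocate_list() once the collapse loop has
		 * finished.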
		 *
		 * In the case where we must deallocate the backing_object
		 * it is possible now that the backing_object has a single
		 * shadow count on some other object (not represented here
		 * as yet), since it no longer shadows us.  Thus when we
		 * call vm_object_deallocate() it may attempt to collapse
		 * itself into its remaining parent.
		 */
		if (dodealloc) {
			struct vm_object_dealloc_list *dtmp;

			vm_object_chain_release(backing_object);
			vm_object_unlock(backing_object);
			/* backing_object remains held */

			/*
			 * Auto-deallocation list for caller convenience.
			 */
			if (dlistp == NULL)
				dlistp = &dlist;

			dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK);
			dtmp->object = backing_object;
			dtmp->next = *dlistp;
			*dlistp = dtmp;
		} else {
			vm_object_chain_release(backing_object);
			vm_object_drop(backing_object);
		}
		/* backing_object = NULL; not needed */
		/* loop */
	}

	/*
	 * Clean up any left over backing_object
	 */
	if (backing_object) {
		vm_object_chain_release(backing_object);
		vm_object_drop(backing_object);
	}

	/*
	 * Clean up any auto-deallocation list.  This is a convenience
	 * for top-level callers so they don't have to pass &dlist.
	 * Do not clean up any caller-passed dlistp, the caller will
	 * do that.
	 */
	if (dlist)
		vm_object_deallocate_list(&dlist);

}

/*
 * vm_object_collapse() may collect additional objects in need of
 * deallocation.  This routine deallocates these objects.  The
 * deallocation itself can trigger additional collapses (which the
 * deallocate function takes care of).  This procedure is used to
 * reduce procedural recursion since these vm_object shadow chains
 * can become quite long.
 */
void
vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
{
	struct vm_object_dealloc_list *dlist;

	while ((dlist = *dlistp) != NULL) {
		*dlistp = dlist->next;
		vm_object_lock(dlist->object);
		vm_object_deallocate_locked(dlist->object);
		vm_object_drop(dlist->object);
		kfree(dlist, M_TEMP);
	}
}

/*
 * Removes all physical pages in the specified object range from the
 * object's list of pages.
 *
 * No requirements.
 */
static int vm_object_page_remove_callback(vm_page_t p, void *data);

void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		      boolean_t clean_only)
{
	struct rb_vm_page_scan_info info;
	int all;

	/*
	 * Degenerate cases and assertions
	 */
	vm_object_hold(object);
	if (object == NULL ||
	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
		vm_object_drop(object);
		return;
	}
	KASSERT(object->type != OBJT_PHYS,
		("attempt to remove pages from a physical object"));

	/*
	 * Indicate that paging is occurring on the object
	 */
	vm_object_pip_add(object, 1);

	/*
	 * Figure out the actual removal range and whether we are removing
	 * the entire contents of the object or not.  If removing the entire
	 * contents, be sure to get all pages, even those that might be
	 * beyond the end of the object.
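	 *
	 * For example (per the conversion just below), a caller that wants
	 * to strip the entire object passes start == 0 and end == 0; an end
	 * of 0 is treated as "no upper bound" and end_pindex becomes
	 * (vm_pindex_t)-1, otherwise the range covers [start, end - 1]
	 * inclusive.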
	 */
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = (vm_pindex_t)-1;
	else
		info.end_pindex = end - 1;
	info.limit = clean_only;
	all = (start == 0 && info.end_pindex >= object->size - 1);

	/*
	 * Loop until we are sure we have gotten them all.
	 */
	do {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_remove_callback, &info);
	} while (info.error);

	/*
	 * Remove any related swap if throwing away pages, or for
	 * non-swap objects (the swap is a clean copy in that case).
	 */
	if (object->type != OBJT_SWAP || clean_only == FALSE) {
		if (all)
			swap_pager_freespace_all(object);
		else
			swap_pager_freespace(object, info.start_pindex,
					info.end_pindex - info.start_pindex + 1);
	}

	/*
	 * Cleanup
	 */
	vm_object_pip_wakeup(object);
	vm_object_drop(object);
}

/*
 * The caller must hold the object
 */
static int
vm_object_page_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if ((++info->count & 63) == 0)
		lwkt_user_yield();

	if (vm_page_busy_try(p, TRUE)) {
		vm_page_sleep_busy(p, TRUE, "vmopar");
		info->error = 1;
		return(0);
	}

	/*
	 * Wired pages cannot be destroyed, but they can be invalidated
	 * and we do so if clean_only (limit) is not set.
	 *
	 * WARNING!  The page may be wired due to being part of a buffer
	 *	     cache buffer, and the buffer might be marked B_CACHE.
	 *	     This is fine as part of a truncation but VFSs must be
	 *	     sure to fix the buffer up when re-extending the file.
	 *
	 * NOTE!  PG_NEED_COMMIT is ignored.
	 */
	if (p->wire_count != 0) {
		vm_page_protect(p, VM_PROT_NONE);
		if (info->limit == 0)
			p->valid = 0;
		vm_page_wakeup(p);
		return(0);
	}

	/*
	 * limit is our clean_only flag.  If set and the page is dirty or
	 * requires a commit, do not free it.  If set and the page is being
	 * held by someone, do not free it.
	 */
	if (info->limit && p->valid) {
		vm_page_test_dirty(p);
		if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
			vm_page_wakeup(p);
			return(0);
		}
	}

	/*
	 * Destroy the page
	 */
	vm_page_protect(p, VM_PROT_NONE);
	vm_page_free(p);

	return(0);
}

/*
 * Coalesces two objects backing up adjoining regions of memory into a
 * single object.
 *
 * returns TRUE if objects were combined.
 *
 * NOTE: Only works at the moment if the second object is NULL -
 *	 if it's not, which object do we lock first?
 *
 * Parameters:
 *	prev_object	First object to coalesce
 *	prev_offset	Offset into prev_object
 *	next_object	Second object to coalesce
 *	next_offset	Offset into next_object
 *
 *	prev_size	Size of reference to prev_object
 *	next_size	Size of reference to next_object
 *
 * The caller does not need to hold (prev_object) but must have a stable
 * pointer to it (typically by holding the vm_map locked).
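 *
 * As a rough usage sketch (the calling code and the 'grow' variable are
 * hypothetical), a caller extending an existing mapping by 'grow' bytes
 * might attempt:
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *			       prev_size, grow) == FALSE)
 *		...fall back to a separate backing object...
 *
 * Both size arguments are byte counts; they are converted to page counts
 * internally with PAGE_SHIFT.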
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
		   vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);

	vm_object_hold(prev_object);

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_chain_acquire(prev_object, 0);
	vm_object_collapse(prev_object, NULL);

	/*
	 * Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		vm_object_chain_release(prev_object);
		vm_object_drop(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	vm_object_chain_release(prev_object);
	vm_object_drop(prev_object);
	return (TRUE);
}

/*
 * Make the object writable and flag it as being possibly dirty.
 *
 * The object might not be held (or might be held but held shared),
 * the related vnode is probably not held either.  Object and vnode are
 * stable by virtue of the vm_page busied by the caller preventing
 * destruction.
 *
 * If the related mount is flagged MNTK_THR_SYNC we need to call
 * vsetobjdirty().  Filesystems using this option usually shortcut
 * synchronization by only scanning the syncer list.
 */
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	/*vm_object_assert_held(object);*/
	/*
	 * Avoid contention in vm fault path by checking the state before
	 * issuing an atomic op on it.
	 */
	if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
	    (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			if (vp->v_mount &&
			    (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
				/*
				 * New style THR_SYNC places vnodes on the
				 * syncer list more deterministically.
				 */
				vsetobjdirty(vp);
			} else {
				/*
				 * Old style scan would not necessarily place
				 * a vnode on the syncer list when possibly
				 * modified via mmap.
2852 */ 2853 vsetflags(vp, VOBJDIRTY); 2854 } 2855 } 2856 } 2857 } 2858 2859 #include "opt_ddb.h" 2860 #ifdef DDB 2861 #include <sys/kernel.h> 2862 2863 #include <sys/cons.h> 2864 2865 #include <ddb/ddb.h> 2866 2867 static int _vm_object_in_map (vm_map_t map, vm_object_t object, 2868 vm_map_entry_t entry); 2869 static int vm_object_in_map (vm_object_t object); 2870 2871 /* 2872 * The caller must hold the object. 2873 */ 2874 static int 2875 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2876 { 2877 vm_map_t tmpm; 2878 vm_map_entry_t tmpe; 2879 vm_object_t obj, nobj; 2880 int entcount; 2881 2882 if (map == 0) 2883 return 0; 2884 if (entry == 0) { 2885 tmpe = map->header.next; 2886 entcount = map->nentries; 2887 while (entcount-- && (tmpe != &map->header)) { 2888 if( _vm_object_in_map(map, object, tmpe)) { 2889 return 1; 2890 } 2891 tmpe = tmpe->next; 2892 } 2893 return (0); 2894 } 2895 switch(entry->maptype) { 2896 case VM_MAPTYPE_SUBMAP: 2897 tmpm = entry->object.sub_map; 2898 tmpe = tmpm->header.next; 2899 entcount = tmpm->nentries; 2900 while (entcount-- && tmpe != &tmpm->header) { 2901 if( _vm_object_in_map(tmpm, object, tmpe)) { 2902 return 1; 2903 } 2904 tmpe = tmpe->next; 2905 } 2906 break; 2907 case VM_MAPTYPE_NORMAL: 2908 case VM_MAPTYPE_VPAGETABLE: 2909 obj = entry->object.vm_object; 2910 while (obj) { 2911 if (obj == object) { 2912 if (obj != entry->object.vm_object) 2913 vm_object_drop(obj); 2914 return 1; 2915 } 2916 while ((nobj = obj->backing_object) != NULL) { 2917 vm_object_hold(nobj); 2918 if (nobj == obj->backing_object) 2919 break; 2920 vm_object_drop(nobj); 2921 } 2922 if (obj != entry->object.vm_object) { 2923 if (nobj) 2924 vm_object_lock_swap(); 2925 vm_object_drop(obj); 2926 } 2927 obj = nobj; 2928 } 2929 break; 2930 default: 2931 break; 2932 } 2933 return 0; 2934 } 2935 2936 static int vm_object_in_map_callback(struct proc *p, void *data); 2937 2938 struct vm_object_in_map_info { 2939 vm_object_t object; 2940 int rv; 2941 }; 2942 2943 /* 2944 * Debugging only 2945 */ 2946 static int 2947 vm_object_in_map(vm_object_t object) 2948 { 2949 struct vm_object_in_map_info info; 2950 2951 info.rv = 0; 2952 info.object = object; 2953 2954 allproc_scan(vm_object_in_map_callback, &info); 2955 if (info.rv) 2956 return 1; 2957 if( _vm_object_in_map(&kernel_map, object, 0)) 2958 return 1; 2959 if( _vm_object_in_map(&pager_map, object, 0)) 2960 return 1; 2961 if( _vm_object_in_map(&buffer_map, object, 0)) 2962 return 1; 2963 return 0; 2964 } 2965 2966 /* 2967 * Debugging only 2968 */ 2969 static int 2970 vm_object_in_map_callback(struct proc *p, void *data) 2971 { 2972 struct vm_object_in_map_info *info = data; 2973 2974 if (p->p_vmspace) { 2975 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) { 2976 info->rv = 1; 2977 return -1; 2978 } 2979 } 2980 return (0); 2981 } 2982 2983 DB_SHOW_COMMAND(vmochk, vm_object_check) 2984 { 2985 vm_object_t object; 2986 int n; 2987 2988 /* 2989 * make sure that internal objs are in a map somewhere 2990 * and none have zero ref counts. 
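	 *
	 * (Typically invoked from the ddb prompt as "show vmochk".)  The
	 * scan below walks every vm_object_lists[] hash chain, skipping
	 * marker objects and any object that has a handle or is not a
	 * plain DEFAULT/SWAP object.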
2991 */ 2992 for (n = 0; n < VMOBJ_HSIZE; ++n) { 2993 for (object = TAILQ_FIRST(&vm_object_lists[n]); 2994 object != NULL; 2995 object = TAILQ_NEXT(object, object_list)) { 2996 if (object->type == OBJT_MARKER) 2997 continue; 2998 if (object->handle != NULL || 2999 (object->type != OBJT_DEFAULT && 3000 object->type != OBJT_SWAP)) { 3001 continue; 3002 } 3003 if (object->ref_count == 0) { 3004 db_printf("vmochk: internal obj has " 3005 "zero ref count: %ld\n", 3006 (long)object->size); 3007 } 3008 if (vm_object_in_map(object)) 3009 continue; 3010 db_printf("vmochk: internal obj is not in a map: " 3011 "ref: %d, size: %lu: 0x%lx, " 3012 "backing_object: %p\n", 3013 object->ref_count, (u_long)object->size, 3014 (u_long)object->size, 3015 (void *)object->backing_object); 3016 } 3017 } 3018 } 3019 3020 /* 3021 * Debugging only 3022 */ 3023 DB_SHOW_COMMAND(object, vm_object_print_static) 3024 { 3025 /* XXX convert args. */ 3026 vm_object_t object = (vm_object_t)addr; 3027 boolean_t full = have_addr; 3028 3029 vm_page_t p; 3030 3031 /* XXX count is an (unused) arg. Avoid shadowing it. */ 3032 #define count was_count 3033 3034 int count; 3035 3036 if (object == NULL) 3037 return; 3038 3039 db_iprintf( 3040 "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n", 3041 object, (int)object->type, (u_long)object->size, 3042 object->resident_page_count, object->ref_count, object->flags); 3043 /* 3044 * XXX no %qd in kernel. Truncate object->backing_object_offset. 3045 */ 3046 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n", 3047 object->shadow_count, 3048 object->backing_object ? object->backing_object->ref_count : 0, 3049 object->backing_object, (long)object->backing_object_offset); 3050 3051 if (!full) 3052 return; 3053 3054 db_indent += 2; 3055 count = 0; 3056 RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) { 3057 if (count == 0) 3058 db_iprintf("memory:="); 3059 else if (count == 6) { 3060 db_printf("\n"); 3061 db_iprintf(" ..."); 3062 count = 0; 3063 } else 3064 db_printf(","); 3065 count++; 3066 3067 db_printf("(off=0x%lx,page=0x%lx)", 3068 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p)); 3069 } 3070 if (count != 0) 3071 db_printf("\n"); 3072 db_indent -= 2; 3073 } 3074 3075 /* XXX. */ 3076 #undef count 3077 3078 /* 3079 * XXX need this non-static entry for calling from vm_map_print. 
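 * It simply forwards its arguments to vm_object_print_static() above.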
3080 * 3081 * Debugging only 3082 */ 3083 void 3084 vm_object_print(/* db_expr_t */ long addr, 3085 boolean_t have_addr, 3086 /* db_expr_t */ long count, 3087 char *modif) 3088 { 3089 vm_object_print_static(addr, have_addr, count, modif); 3090 } 3091 3092 /* 3093 * Debugging only 3094 */ 3095 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 3096 { 3097 vm_object_t object; 3098 int nl = 0; 3099 int c; 3100 int n; 3101 3102 for (n = 0; n < VMOBJ_HSIZE; ++n) { 3103 for (object = TAILQ_FIRST(&vm_object_lists[n]); 3104 object != NULL; 3105 object = TAILQ_NEXT(object, object_list)) { 3106 vm_pindex_t idx, fidx; 3107 vm_pindex_t osize; 3108 vm_paddr_t pa = -1, padiff; 3109 int rcount; 3110 vm_page_t m; 3111 3112 if (object->type == OBJT_MARKER) 3113 continue; 3114 db_printf("new object: %p\n", (void *)object); 3115 if ( nl > 18) { 3116 c = cngetc(); 3117 if (c != ' ') 3118 return; 3119 nl = 0; 3120 } 3121 nl++; 3122 rcount = 0; 3123 fidx = 0; 3124 osize = object->size; 3125 if (osize > 128) 3126 osize = 128; 3127 for (idx = 0; idx < osize; idx++) { 3128 m = vm_page_lookup(object, idx); 3129 if (m == NULL) { 3130 if (rcount) { 3131 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 3132 (long)fidx, rcount, (long)pa); 3133 if ( nl > 18) { 3134 c = cngetc(); 3135 if (c != ' ') 3136 return; 3137 nl = 0; 3138 } 3139 nl++; 3140 rcount = 0; 3141 } 3142 continue; 3143 } 3144 3145 if (rcount && 3146 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 3147 ++rcount; 3148 continue; 3149 } 3150 if (rcount) { 3151 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 3152 padiff >>= PAGE_SHIFT; 3153 padiff &= PQ_L2_MASK; 3154 if (padiff == 0) { 3155 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 3156 ++rcount; 3157 continue; 3158 } 3159 db_printf(" index(%ld)run(%d)pa(0x%lx)", 3160 (long)fidx, rcount, (long)pa); 3161 db_printf("pd(%ld)\n", (long)padiff); 3162 if ( nl > 18) { 3163 c = cngetc(); 3164 if (c != ' ') 3165 return; 3166 nl = 0; 3167 } 3168 nl++; 3169 } 3170 fidx = idx; 3171 pa = VM_PAGE_TO_PHYS(m); 3172 rcount = 1; 3173 } 3174 if (rcount) { 3175 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 3176 (long)fidx, rcount, (long)pa); 3177 if ( nl > 18) { 3178 c = cngetc(); 3179 if (c != ' ') 3180 return; 3181 nl = 0; 3182 } 3183 nl++; 3184 } 3185 } 3186 } 3187 } 3188 #endif /* DDB */ 3189