/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_object.c	7.2 (Berkeley) 04/20/91
 */

/*
 * Virtual memory object module.
 */

#include "param.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"

/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 */
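
/*
 * Note (illustrative, not part of the original source): the two object
 * pointers manipulated throughout this file form a simple chain.  An
 * object's "shadow" is the object that backs it (pages not resident here
 * are looked for there, displaced by shadow_offset), and "copy" points the
 * other way, at the most recent copy-object made from this object by
 * vm_object_copy().  vm_object_collapse() below exists to flatten shadow
 * chains that build up as copies are created and dropped.
 */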

struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

#define VM_OBJECT_HASH_COUNT	157

int	vm_cache_max = 100;	/* can patch if necessary */
queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long	object_collapses = 0;
long	object_bypasses  = 0;

/*
 * vm_object_init:
 *
 * Initialize the VM objects module.
 */
void vm_object_init()
{
        register int i;

        queue_init(&vm_object_cached_list);
        queue_init(&vm_object_list);
        vm_object_count = 0;
        simple_lock_init(&vm_cache_lock);
        simple_lock_init(&vm_object_list_lock);

        for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
                queue_init(&vm_object_hashtable[i]);

        kernel_object = &kernel_object_store;
        _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
                        kernel_object);

        kmem_object = &kmem_object_store;
        _vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}

/*
 * vm_object_allocate:
 *
 * Returns a new object with the given size.
 */

vm_object_t vm_object_allocate(size)
        vm_size_t size;
{
        register vm_object_t result;

        result = (vm_object_t)
                malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

        _vm_object_allocate(size, result);

        return(result);
}

_vm_object_allocate(size, object)
        vm_size_t size;
        register vm_object_t object;
{
        queue_init(&object->memq);
        vm_object_lock_init(object);
        object->ref_count = 1;
        object->resident_page_count = 0;
        object->size = size;
        object->can_persist = FALSE;
        object->paging_in_progress = 0;
        object->copy = NULL;

        /*
         * Object starts out read-write, with no pager.
         */

        object->pager = NULL;
        object->pager_ready = FALSE;
        object->internal = TRUE;	/* vm_allocate_with_pager will reset */
        object->paging_offset = 0;
        object->shadow = NULL;
        object->shadow_offset = (vm_offset_t) 0;

        simple_lock(&vm_object_list_lock);
        queue_enter(&vm_object_list, object, vm_object_t, object_list);
        vm_object_count++;
        simple_unlock(&vm_object_list_lock);
}

/*
 * vm_object_reference:
 *
 * Gets another reference to the given object.
 */
void vm_object_reference(object)
        register vm_object_t object;
{
        if (object == NULL)
                return;

        vm_object_lock(object);
        object->ref_count++;
        vm_object_unlock(object);
}

/*
 * vm_object_deallocate:
 *
 * Release a reference to the specified object,
 * gained either through a vm_object_allocate
 * or a vm_object_reference call.  When all references
 * are gone, storage associated with this object
 * may be relinquished.
 *
 * No object may be locked.
 */
void vm_object_deallocate(object)
        register vm_object_t object;
{
        vm_object_t temp;

        while (object != NULL) {

                /*
                 * The cache holds a reference (uncounted) to
                 * the object; we must lock it before removing
                 * the object.
                 */

                vm_object_cache_lock();

                /*
                 * Lose the reference
                 */
                vm_object_lock(object);
                if (--(object->ref_count) != 0) {

                        /*
                         * If there are still references, then
                         * we are done.
                         */
                        vm_object_unlock(object);
                        vm_object_cache_unlock();
                        return;
                }

                /*
                 * See if this object can persist.  If so, enter
                 * it in the cache, then deactivate all of its
                 * pages.
                 */

                if (object->can_persist) {

                        queue_enter(&vm_object_cached_list, object,
                                vm_object_t, cached_list);
                        vm_object_cached++;
                        vm_object_cache_unlock();

                        vm_object_deactivate_pages(object);
                        vm_object_unlock(object);

                        vm_object_cache_trim();
                        return;
                }

                /*
                 * Make sure no one can look us up now.
                 */
                vm_object_remove(object->pager);
                vm_object_cache_unlock();

                temp = object->shadow;
                vm_object_terminate(object);
                        /* unlocks and deallocates object */
                object = temp;
        }
}
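
/*
 * Illustrative sketch (not part of the original source): the usual
 * reference-count pairing for the routines above.  The object and the
 * caller are hypothetical; the point is only that vm_object_allocate()
 * and vm_object_reference() each yield a reference that must eventually
 * be released with vm_object_deallocate().
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(size);		(ref_count == 1)
 *	vm_object_reference(obj);		(ref_count == 2)
 *	...
 *	vm_object_deallocate(obj);		(ref_count back to 1)
 *	vm_object_deallocate(obj);		(last reference gone: the
 *						 object is cached if it can
 *						 persist, else terminated)
 */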

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 */
void vm_object_terminate(object)
        register vm_object_t object;
{
        register vm_page_t p;
        vm_object_t shadow_object;

        /*
         * Detach the object from its shadow if we are the shadow's
         * copy.
         */
        if ((shadow_object = object->shadow) != NULL) {
                vm_object_lock(shadow_object);
                if (shadow_object->copy == object)
                        shadow_object->copy = NULL;
#if 0
                else if (shadow_object->copy != NULL)
                        panic("vm_object_terminate: copy/shadow inconsistency");
#endif
                vm_object_unlock(shadow_object);
        }

        /*
         * Wait until the pageout daemon is through
         * with the object.
         */

        while (object->paging_in_progress != 0) {
                vm_object_sleep(object, object, FALSE);
                vm_object_lock(object);
        }

        /*
         * While the paging system is locked,
         * pull the object's pages off the active
         * and inactive queues.  This keeps the
         * pageout daemon from playing with them
         * during vm_pager_deallocate.
         *
         * We can't free the pages yet, because the
         * object's pager may have to write them out
         * before deallocating the paging space.
         */

        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                VM_PAGE_CHECK(p);

                vm_page_lock_queues();
                if (p->active) {
                        queue_remove(&vm_page_queue_active, p, vm_page_t,
                                pageq);
                        p->active = FALSE;
                        vm_page_active_count--;
                }

                if (p->inactive) {
                        queue_remove(&vm_page_queue_inactive, p, vm_page_t,
                                pageq);
                        p->inactive = FALSE;
                        vm_page_inactive_count--;
                }
                vm_page_unlock_queues();
                p = (vm_page_t) queue_next(&p->listq);
        }

        vm_object_unlock(object);

        if (object->paging_in_progress != 0)
                panic("vm_object_terminate: pageout in progress");

        /*
         * Clean and free the pages, as appropriate.
         * All references to the object are gone,
         * so we don't need to lock it.
         */

        if (!object->internal) {
                vm_object_lock(object);
                vm_object_page_clean(object, 0, 0);
                vm_object_unlock(object);
        }
        while (!queue_empty(&object->memq)) {
                p = (vm_page_t) queue_first(&object->memq);

                VM_PAGE_CHECK(p);

                vm_page_lock_queues();
                vm_page_free(p);
                vm_page_unlock_queues();
        }

        /*
         * Let the pager know object is dead.
         */

        if (object->pager != NULL)
                vm_pager_deallocate(object->pager);

        simple_lock(&vm_object_list_lock);
        queue_remove(&vm_object_list, object, vm_object_t, object_list);
        vm_object_count--;
        simple_unlock(&vm_object_list_lock);

        /*
         * Free the space for the object.
         */

        free((caddr_t)object, M_VMOBJ);
}

/*
 * vm_object_page_clean
 *
 * Clean all dirty pages in the specified range of object.
 * Leaves page on whatever queue it is currently on.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked.
 */
vm_object_page_clean(object, start, end)
        register vm_object_t object;
        register vm_offset_t start;
        register vm_offset_t end;
{
        register vm_page_t p;

        if (object->pager == NULL)
                return;

again:
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if (start == end ||
                    (p->offset >= start && p->offset < end)) {
                        if (p->clean && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
                                p->clean = FALSE;
                        pmap_remove_all(VM_PAGE_TO_PHYS(p));
                        if (!p->clean) {
                                p->busy = TRUE;
                                object->paging_in_progress++;
                                vm_object_unlock(object);
                                (void) vm_pager_put(object->pager, p, TRUE);
                                vm_object_lock(object);
                                object->paging_in_progress--;
                                p->busy = FALSE;
                                PAGE_WAKEUP(p);
                                goto again;
                        }
                }
                p = (vm_page_t) queue_next(&p->listq);
        }
}
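
/*
 * Illustrative sketch (not part of the original source): because of the
 * "start == end cleans everything" convention of vm_object_page_clean()
 * above, a hypothetical caller that wants to push out every dirty page
 * of a pager-backed object would simply do:
 *
 *	vm_object_lock(object);
 *	vm_object_page_clean(object, 0, 0);	(clean the whole object)
 *	vm_object_unlock(object);
 *
 * which is exactly the sequence vm_object_terminate() uses on external
 * objects before freeing their pages.
 */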

/*
 * vm_object_deactivate_pages
 *
 * Deactivate all pages in the specified object.  (Keep its pages
 * in memory even though it is no longer referenced.)
 *
 * The object must be locked.
 */
vm_object_deactivate_pages(object)
        register vm_object_t object;
{
        register vm_page_t p, next;

        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                next = (vm_page_t) queue_next(&p->listq);
                vm_page_lock_queues();
                vm_page_deactivate(p);
                vm_page_unlock_queues();
                p = next;
        }
}

/*
 * Trim the object cache to size.
 */
vm_object_cache_trim()
{
        register vm_object_t object;

        vm_object_cache_lock();
        while (vm_object_cached > vm_cache_max) {
                object = (vm_object_t) queue_first(&vm_object_cached_list);
                vm_object_cache_unlock();

                if (object != vm_object_lookup(object->pager))
                        panic("vm_object_cache_trim: I'm sooo confused.");

                pager_cache(object, FALSE);

                vm_object_cache_lock();
        }
        vm_object_cache_unlock();
}

/*
 * vm_object_shutdown()
 *
 * Shut down the object system.  Unfortunately, while we
 * may be trying to do this, init is happily waiting for
 * processes to exit, and therefore will be causing some objects
 * to be deallocated.  To handle this, we gain a fake reference
 * to all objects we release paging areas for.  This will prevent
 * a duplicate deallocation.  This routine is probably full of
 * race conditions!
 */

void vm_object_shutdown()
{
        register vm_object_t object;

        /*
         * Clean up the object cache *before* we screw up the reference
         * counts on all of the objects.
         */

        vm_object_cache_clear();

        printf("free paging spaces: ");

        /*
         * First we gain a reference to each object so that
         * no one else will deallocate them.
         */

        simple_lock(&vm_object_list_lock);
        object = (vm_object_t) queue_first(&vm_object_list);
        while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
                vm_object_reference(object);
                object = (vm_object_t) queue_next(&object->object_list);
        }
        simple_unlock(&vm_object_list_lock);

        /*
         * Now we deallocate all the paging areas.  We don't need
         * to lock anything because we've reduced to a single
         * processor while shutting down.  This also assumes that
         * no new objects are being created.
         */

        object = (vm_object_t) queue_first(&vm_object_list);
        while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
                if (object->pager != NULL)
                        vm_pager_deallocate(object->pager);
                object = (vm_object_t) queue_next(&object->object_list);
                printf(".");
        }
        printf("done.\n");
}

/*
 * vm_object_pmap_copy:
 *
 * Makes all physical pages in the specified
 * object range copy-on-write.  No writeable
 * references to these pages should remain.
 *
 * The object must *not* be locked.
 */
void vm_object_pmap_copy(object, start, end)
        register vm_object_t object;
        register vm_offset_t start;
        register vm_offset_t end;
{
        register vm_page_t p;

        if (object == NULL)
                return;

        vm_object_lock(object);
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if ((start <= p->offset) && (p->offset < end)) {
                        if (!p->copy_on_write) {
                                pmap_copy_on_write(VM_PAGE_TO_PHYS(p));
                                p->copy_on_write = TRUE;
                        }
                }
                p = (vm_page_t) queue_next(&p->listq);
        }
        vm_object_unlock(object);
}

/*
 * vm_object_pmap_remove:
 *
 * Removes all physical pages in the specified
 * object range from all physical maps.
 *
 * The object must *not* be locked.
 */
void vm_object_pmap_remove(object, start, end)
        register vm_object_t object;
        register vm_offset_t start;
        register vm_offset_t end;
{
        register vm_page_t p;

        if (object == NULL)
                return;

        vm_object_lock(object);
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if ((start <= p->offset) && (p->offset < end)) {
                        pmap_remove_all(VM_PAGE_TO_PHYS(p));
                }
                p = (vm_page_t) queue_next(&p->listq);
        }
        vm_object_unlock(object);
}

/*
 * vm_object_copy:
 *
 * Create a new object which is a copy of an existing
 * object, and mark all of the pages in the existing
 * object 'copy-on-write'.  The new object has one reference.
 * Returns the new object.
 *
 * May defer the copy until later if the object is not backed
 * by a non-default pager.
 */
void vm_object_copy(src_object, src_offset, size,
                    dst_object, dst_offset, src_needs_copy)
        register vm_object_t src_object;
        vm_offset_t src_offset;
        vm_size_t size;
        vm_object_t *dst_object;	/* OUT */
        vm_offset_t *dst_offset;	/* OUT */
        boolean_t *src_needs_copy;	/* OUT */
{
        register vm_object_t new_copy;
        register vm_object_t old_copy;
        vm_offset_t new_start, new_end;

        register vm_page_t p;

        if (src_object == NULL) {
                /*
                 * Nothing to copy
                 */
                *dst_object = NULL;
                *dst_offset = 0;
                *src_needs_copy = FALSE;
                return;
        }

        /*
         * If the object's pager is null_pager or the
         * default pager, we don't have to make a copy
         * of it.  Instead, we set the needs copy flag and
         * make a shadow later.
         */

        vm_object_lock(src_object);
        if (src_object->pager == NULL ||
            src_object->internal) {

                /*
                 * Make another reference to the object
                 */
                src_object->ref_count++;

                /*
                 * Mark all of the pages copy-on-write.
                 */
                for (p = (vm_page_t) queue_first(&src_object->memq);
                     !queue_end(&src_object->memq, (queue_entry_t)p);
                     p = (vm_page_t) queue_next(&p->listq)) {
                        if (src_offset <= p->offset &&
                            p->offset < src_offset + size)
                                p->copy_on_write = TRUE;
                }
                vm_object_unlock(src_object);

                *dst_object = src_object;
                *dst_offset = src_offset;

                /*
                 * Must make a shadow when write is desired
                 */
                *src_needs_copy = TRUE;
                return;
        }

        /*
         * Try to collapse the object before copying it.
         */
        vm_object_collapse(src_object);

        /*
         * If the object has a pager, the pager wants to
         * see all of the changes.  We need a copy-object
         * for the changed pages.
         *
         * If there is a copy-object, and it is empty,
         * no changes have been made to the object since the
         * copy-object was made.  We can use the same copy-
         * object.
         */

Retry1:
        old_copy = src_object->copy;
        if (old_copy != NULL) {
                /*
                 * Try to get the locks (out of order)
                 */
                if (!vm_object_lock_try(old_copy)) {
                        vm_object_unlock(src_object);

                        /* should spin a bit here... */
                        vm_object_lock(src_object);
                        goto Retry1;
                }

                if (old_copy->resident_page_count == 0 &&
                    old_copy->pager == NULL) {
                        /*
                         * Return another reference to
                         * the existing copy-object.
                         */
                        old_copy->ref_count++;
                        vm_object_unlock(old_copy);
                        vm_object_unlock(src_object);
                        *dst_object = old_copy;
                        *dst_offset = src_offset;
                        *src_needs_copy = FALSE;
                        return;
                }
                vm_object_unlock(old_copy);
        }
        vm_object_unlock(src_object);

        /*
         * If the object has a pager, the pager wants
         * to see all of the changes.  We must make
         * a copy-object and put the changed pages there.
         *
         * The copy-object is always made large enough to
         * completely shadow the original object, since
         * it may have several users who want to shadow
         * the original object at different points.
         */

        new_copy = vm_object_allocate(src_object->size);

Retry2:
        vm_object_lock(src_object);
        /*
         * Copy object may have changed while we were unlocked
         */
        old_copy = src_object->copy;
        if (old_copy != NULL) {
                /*
                 * Try to get the locks (out of order)
                 */
                if (!vm_object_lock_try(old_copy)) {
                        vm_object_unlock(src_object);
                        goto Retry2;
                }

                /*
                 * Consistency check
                 */
                if (old_copy->shadow != src_object ||
                    old_copy->shadow_offset != (vm_offset_t) 0)
                        panic("vm_object_copy: copy/shadow inconsistency");

                /*
                 * Make the old copy-object shadow the new one.
                 * It will receive no more pages from the original
                 * object.
                 */

                src_object->ref_count--;	/* remove ref. from old_copy */
                old_copy->shadow = new_copy;
                new_copy->ref_count++;		/* locking not needed - we
                                                   have the only pointer */
                vm_object_unlock(old_copy);	/* done with old_copy */
        }

        new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
        new_end   = (vm_offset_t) new_copy->size; /* for the whole object */

        /*
         * Point the new copy at the existing object.
         */

        new_copy->shadow = src_object;
        new_copy->shadow_offset = new_start;
        src_object->ref_count++;
        src_object->copy = new_copy;

        /*
         * Mark all the affected pages of the existing object
         * copy-on-write.
         */
        p = (vm_page_t) queue_first(&src_object->memq);
        while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
                if ((new_start <= p->offset) && (p->offset < new_end)) {
                        p->copy_on_write = TRUE;
                }
                p = (vm_page_t) queue_next(&p->listq);
        }

        vm_object_unlock(src_object);

        *dst_object = new_copy;
        *dst_offset = src_offset - new_start;
        *src_needs_copy = FALSE;
}
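
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller of vm_object_copy() that intends to write through the result
 * must honor the src_needs_copy indication ("must make a shadow when
 * write is desired") by pushing a shadow object in front of what was
 * returned, e.g.:
 *
 *	vm_object_t new_object;
 *	vm_offset_t new_offset;
 *	boolean_t   needs_copy;
 *
 *	vm_object_copy(old_object, old_offset, size,
 *		&new_object, &new_offset, &needs_copy);
 *	if (needs_copy)
 *		vm_object_shadow(&new_object, &new_offset, size);
 *
 * old_object, old_offset and size are placeholders; vm_object_shadow()
 * is defined below.
 */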

/*
 * vm_object_shadow:
 *
 * Create a new object which is backed by the
 * specified existing object range.  The source
 * object reference is deallocated.
 *
 * The new object and offset into that object
 * are returned in the source parameters.
 */

void vm_object_shadow(object, offset, length)
        vm_object_t *object;	/* IN/OUT */
        vm_offset_t *offset;	/* IN/OUT */
        vm_size_t length;
{
        register vm_object_t source;
        register vm_object_t result;

        source = *object;

        /*
         * Allocate a new object with the given length
         */

        if ((result = vm_object_allocate(length)) == NULL)
                panic("vm_object_shadow: no object for shadowing");

        /*
         * The new object shadows the source object, adding
         * a reference to it.  Our caller changes his reference
         * to point to the new object, removing a reference to
         * the source object.  Net result: no change of reference
         * count.
         */
        result->shadow = source;

        /*
         * Store the offset into the source object,
         * and fix up the offset into the new object.
         */

        result->shadow_offset = *offset;

        /*
         * Return the new things
         */

        *offset = 0;
        *object = result;
}

/*
 * Set the specified object's pager to the specified pager.
 */

void vm_object_setpager(object, pager, paging_offset,
                        read_only)
        vm_object_t object;
        vm_pager_t pager;
        vm_offset_t paging_offset;
        boolean_t read_only;
{
#ifdef lint
        read_only++;	/* No longer used */
#endif /* lint */

        vm_object_lock(object);			/* XXX ? */
        object->pager = pager;
        object->paging_offset = paging_offset;
        vm_object_unlock(object);		/* XXX ? */
}

/*
 * vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
        (((unsigned)pager)%VM_OBJECT_HASH_COUNT)

/*
 * vm_object_lookup looks in the object cache for an object with the
 * specified pager and paging id.
 */

vm_object_t vm_object_lookup(pager)
        vm_pager_t pager;
{
        register queue_t bucket;
        register vm_object_hash_entry_t entry;
        vm_object_t object;

        bucket = &vm_object_hashtable[vm_object_hash(pager)];

        vm_object_cache_lock();

        entry = (vm_object_hash_entry_t) queue_first(bucket);
        while (!queue_end(bucket, (queue_entry_t) entry)) {
                object = entry->object;
                if (object->pager == pager) {
                        vm_object_lock(object);
                        if (object->ref_count == 0) {
                                queue_remove(&vm_object_cached_list, object,
                                                vm_object_t, cached_list);
                                vm_object_cached--;
                        }
                        object->ref_count++;
                        vm_object_unlock(object);
                        vm_object_cache_unlock();
                        return(object);
                }
                entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
        }

        vm_object_cache_unlock();
        return(NULL);
}

/*
 * vm_object_enter enters the specified object/pager/id into
 * the hash table.
 */

void vm_object_enter(object, pager)
        vm_object_t object;
        vm_pager_t pager;
{
        register queue_t bucket;
        register vm_object_hash_entry_t entry;

        /*
         * We don't cache null objects, and we can't cache
         * objects with the null pager.
         */

        if (object == NULL)
                return;
        if (pager == NULL)
                return;

        bucket = &vm_object_hashtable[vm_object_hash(pager)];
        entry = (vm_object_hash_entry_t)
                malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
        entry->object = object;
        object->can_persist = TRUE;

        vm_object_cache_lock();
        queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
        vm_object_cache_unlock();
}
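
/*
 * Illustrative sketch (not part of the original source): the routines
 * above make up the "persistent object" path used for pager-backed
 * objects.  Once a (hypothetical) pager has been associated with an
 * object:
 *
 *	vm_object_enter(object, pager);		(object->can_persist set)
 *	...
 *	vm_object_deallocate(object);		(last reference: the object
 *						 goes onto the cached list
 *						 instead of being destroyed)
 *	...
 *	object = vm_object_lookup(pager);	(revives the cached object
 *						 with a fresh reference)
 *
 * vm_object_lookup() is also what takes a revived object off the cached
 * list, which is why vm_object_cache_clear() below insists on using it
 * rather than vm_object_reference().
 */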

/*
 * vm_object_remove:
 *
 * Remove the pager from the hash table.
 * Note:  This assumes that the object cache
 * is locked.  XXX this should be fixed
 * by reorganizing vm_object_deallocate.
 */
vm_object_remove(pager)
        register vm_pager_t pager;
{
        register queue_t bucket;
        register vm_object_hash_entry_t entry;
        register vm_object_t object;

        bucket = &vm_object_hashtable[vm_object_hash(pager)];

        entry = (vm_object_hash_entry_t) queue_first(bucket);
        while (!queue_end(bucket, (queue_entry_t) entry)) {
                object = entry->object;
                if (object->pager == pager) {
                        queue_remove(bucket, entry, vm_object_hash_entry_t,
                                        hash_links);
                        free((caddr_t)entry, M_VMOBJHASH);
                        break;
                }
                entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
        }
}

/*
 * vm_object_cache_clear removes all objects from the cache.
 */

void vm_object_cache_clear()
{
        register vm_object_t object;

        /*
         * Remove each object in the cache by scanning down the
         * list of cached objects.
         */
        vm_object_cache_lock();
        while (!queue_empty(&vm_object_cached_list)) {
                object = (vm_object_t) queue_first(&vm_object_cached_list);
                vm_object_cache_unlock();

                /*
                 * Note: it is important that we use vm_object_lookup
                 * to gain a reference, and not vm_object_reference, because
                 * the logic for removing an object from the cache lies in
                 * lookup.
                 */
                if (object != vm_object_lookup(object->pager))
                        panic("vm_object_cache_clear: I'm sooo confused.");
                pager_cache(object, FALSE);

                vm_object_cache_lock();
        }
        vm_object_cache_unlock();
}

boolean_t vm_object_collapse_allowed = TRUE;
/*
 * vm_object_collapse:
 *
 * Collapse an object with the object backing it.
 * Pages in the backing object are moved into the
 * parent, and the backing object is deallocated.
 *
 * Requires that the object be locked and the page
 * queues be unlocked.
 */
void vm_object_collapse(object)
        register vm_object_t object;
{
        register vm_object_t backing_object;
        register vm_offset_t backing_offset;
        register vm_size_t size;
        register vm_offset_t new_offset;
        register vm_page_t p, pp;

        if (!vm_object_collapse_allowed)
                return;

        while (TRUE) {
                /*
                 * Verify that the conditions are right for collapse:
                 *
                 * The object exists and no pages in it are currently
                 * being paged out (or have ever been paged out).
                 */
                if (object == NULL ||
                    object->paging_in_progress != 0 ||
                    object->pager != NULL)
                        return;

                /*
                 * There is a backing object, and
                 */

                if ((backing_object = object->shadow) == NULL)
                        return;

                vm_object_lock(backing_object);
                /*
                 * ...
                 * The backing object is not read_only,
                 * and no pages in the backing object are
                 * currently being paged out.
                 * The backing object is internal.
                 */

                if (!backing_object->internal ||
                    backing_object->paging_in_progress != 0) {
                        vm_object_unlock(backing_object);
                        return;
                }

                /*
                 * The backing object can't be a copy-object:
                 * the shadow_offset for the copy-object must stay
                 * as 0.  Furthermore (for the 'we have all the
                 * pages' case), if we bypass backing_object and
                 * just shadow the next object in the chain, old
                 * pages from that object would then have to be copied
                 * BOTH into the (former) backing_object and into the
                 * parent object.
                 */
                if (backing_object->shadow != NULL &&
                    backing_object->shadow->copy != NULL) {
                        vm_object_unlock(backing_object);
                        return;
                }

                /*
                 * We know that we can either collapse the backing
                 * object (if the parent is the only reference to
                 * it) or (perhaps) remove the parent's reference
                 * to it.
                 */

                backing_offset = object->shadow_offset;
                size = object->size;

                /*
                 * If there is exactly one reference to the backing
                 * object, we can collapse it into the parent.
                 */

                if (backing_object->ref_count == 1) {

                        /*
                         * We can collapse the backing object.
                         *
                         * Move all in-memory pages from backing_object
                         * to the parent.  Pages that have been paged out
                         * will be overwritten by any of the parent's
                         * pages that shadow them.
                         */

                        while (!queue_empty(&backing_object->memq)) {

                                p = (vm_page_t)
                                        queue_first(&backing_object->memq);

                                new_offset = (p->offset - backing_offset);

                                /*
                                 * If the parent has a page here, or if
                                 * this page falls outside the parent,
                                 * dispose of it.
                                 *
                                 * Otherwise, move it as planned.
                                 */

                                if (p->offset < backing_offset ||
                                    new_offset >= size) {
                                        vm_page_lock_queues();
                                        vm_page_free(p);
                                        vm_page_unlock_queues();
                                } else {
                                        pp = vm_page_lookup(object, new_offset);
                                        if (pp != NULL && !pp->fake) {
                                                vm_page_lock_queues();
                                                vm_page_free(p);
                                                vm_page_unlock_queues();
                                        } else {
                                                if (pp) {
                                                        /* may be someone waiting for it */
                                                        PAGE_WAKEUP(pp);
                                                        vm_page_lock_queues();
                                                        vm_page_free(pp);
                                                        vm_page_unlock_queues();
                                                }
                                                vm_page_rename(p, object, new_offset);
                                        }
                                }
                        }

                        /*
                         * Move the pager from backing_object to object.
                         *
                         * XXX We're only using part of the paging space
                         * for keeps now... we ought to discard the
                         * unused portion.
                         */

                        object->pager = backing_object->pager;
                        object->paging_offset += backing_offset;

                        backing_object->pager = NULL;

                        /*
                         * Object now shadows whatever backing_object did.
                         * Note that the reference to backing_object->shadow
                         * moves from within backing_object to within object.
                         */

                        object->shadow = backing_object->shadow;
                        object->shadow_offset += backing_object->shadow_offset;
                        if (object->shadow != NULL &&
                            object->shadow->copy != NULL) {
                                panic("vm_object_collapse: we collapsed a copy-object!");
                        }

                        /*
                         * Discard backing_object.
                         *
                         * Since the backing object has no pages, no
                         * pager left, and no object references within it,
                         * all that is necessary is to dispose of it.
                         */

                        vm_object_unlock(backing_object);

                        simple_lock(&vm_object_list_lock);
                        queue_remove(&vm_object_list, backing_object,
                                        vm_object_t, object_list);
                        vm_object_count--;
                        simple_unlock(&vm_object_list_lock);

                        free((caddr_t)backing_object, M_VMOBJ);

                        object_collapses++;
                } else {
                        /*
                         * If all of the pages in the backing object are
                         * shadowed by the parent object, the parent
                         * object no longer has to shadow the backing
                         * object; it can shadow the next one in the
                         * chain.
                         *
                         * The backing object must not be paged out - we'd
                         * have to check all of the paged-out pages, as
                         * well.
                         */

                        if (backing_object->pager != NULL) {
                                vm_object_unlock(backing_object);
                                return;
                        }

                        /*
                         * Should have a check for a 'small' number
                         * of pages here.
                         */

                        p = (vm_page_t) queue_first(&backing_object->memq);
                        while (!queue_end(&backing_object->memq,
                                          (queue_entry_t) p)) {

                                new_offset = (p->offset - backing_offset);

                                /*
                                 * If the parent has a page here, or if
                                 * this page falls outside the parent,
                                 * keep going.
                                 *
                                 * Otherwise, the backing_object must be
                                 * left in the chain.
                                 */

                                if (p->offset >= backing_offset &&
                                    new_offset <= size &&
                                    ((pp = vm_page_lookup(object, new_offset))
                                      == NULL ||
                                     pp->fake)) {
                                        /*
                                         * Page still needed.
                                         * Can't go any further.
                                         */
                                        vm_object_unlock(backing_object);
                                        return;
                                }
                                p = (vm_page_t) queue_next(&p->listq);
                        }

                        /*
                         * Make the parent shadow the next object
                         * in the chain.  Deallocating backing_object
                         * will not remove it, since its reference
                         * count is at least 2.
                         */

                        vm_object_reference(object->shadow = backing_object->shadow);
                        object->shadow_offset += backing_object->shadow_offset;

                        /*
                         * Drop the reference count on backing_object.
                         * Since its ref_count was at least 2, it
                         * will not vanish; so we don't need to call
                         * vm_object_deallocate.
                         */
                        backing_object->ref_count--;
                        vm_object_unlock(backing_object);

                        object_bypasses++;
                }

                /*
                 * Try again with this object's new backing object.
                 */
        }
}
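
/*
 * Illustrative example (not part of the original source) of the offset
 * arithmetic used in vm_object_collapse() above.  Suppose the parent
 * shadows its backing object at shadow_offset 0x2000 and has size 0x4000.
 * A backing-object page at offset 0x3000 maps to
 *
 *	new_offset = p->offset - backing_offset = 0x3000 - 0x2000 = 0x1000
 *
 * which lies inside the parent's range [0, 0x4000), so in the collapse
 * case the page is moved to the parent at offset 0x1000 (unless the
 * parent already holds a real page there).  A backing-object page at
 * offset 0x1000 lies below backing_offset, falls outside the parent,
 * and is simply freed.  The numbers here are hypothetical.
 */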

/*
 * vm_object_page_remove: [internal]
 *
 * Removes all physical pages in the specified
 * object range from the object's list of pages.
 *
 * The object must be locked.
 */
void vm_object_page_remove(object, start, end)
        register vm_object_t object;
        register vm_offset_t start;
        register vm_offset_t end;
{
        register vm_page_t p, next;

        if (object == NULL)
                return;

        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                next = (vm_page_t) queue_next(&p->listq);
                if ((start <= p->offset) && (p->offset < end)) {
                        pmap_remove_all(VM_PAGE_TO_PHYS(p));
                        vm_page_lock_queues();
                        vm_page_free(p);
                        vm_page_unlock_queues();
                }
                p = next;
        }
}

/*
 * Routine:	vm_object_coalesce
 * Function:	Coalesces two objects backing up adjoining
 *		regions of memory into a single object.
 *
 * Returns TRUE if objects were combined.
 *
 * NOTE:  Only works at the moment if the second object is NULL -
 *	  if it's not, which object do we lock first?
 *
 * Parameters:
 *	prev_object	First object to coalesce
 *	prev_offset	Offset into prev_object
 *	next_object	Second object to coalesce
 *	next_offset	Offset into next_object
 *
 *	prev_size	Size of reference to prev_object
 *	next_size	Size of reference to next_object
 *
 * Conditions:
 *	The object must *not* be locked.
 */
boolean_t vm_object_coalesce(prev_object, next_object,
                        prev_offset, next_offset,
                        prev_size, next_size)
        register vm_object_t prev_object;
        vm_object_t next_object;
        vm_offset_t prev_offset, next_offset;
        vm_size_t prev_size, next_size;
{
        vm_size_t newsize;

#ifdef lint
        next_offset++;
#endif /* lint */

        if (next_object != NULL) {
                return(FALSE);
        }

        if (prev_object == NULL) {
                return(TRUE);
        }

        vm_object_lock(prev_object);

        /*
         * Try to collapse the object first
         */
        vm_object_collapse(prev_object);

        /*
         * Can't coalesce if:
         * . more than one reference
         * . paged out
         * . shadows another object
         * . has a copy elsewhere
         * (any of which mean that the pages not mapped to
         * prev_entry may be in use anyway)
         */

        if (prev_object->ref_count > 1 ||
            prev_object->pager != NULL ||
            prev_object->shadow != NULL ||
            prev_object->copy != NULL) {
                vm_object_unlock(prev_object);
                return(FALSE);
        }

        /*
         * Remove any pages that may still be in the object from
         * a previous deallocation.
         */

        vm_object_page_remove(prev_object,
                        prev_offset + prev_size,
                        prev_offset + prev_size + next_size);

        /*
         * Extend the object if necessary.
         */
        newsize = prev_offset + prev_size + next_size;
        if (newsize > prev_object->size)
                prev_object->size = newsize;

        vm_object_unlock(prev_object);
        return(TRUE);
}
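
/*
 * Illustrative example (not part of the original source): coalescing is
 * what lets two back-to-back anonymous allocations share one object.  If
 * prev_object currently backs [prev_offset, prev_offset + prev_size) and
 * a new, object-less region of next_size bytes lands immediately after
 * it, a call such as
 *
 *	vm_object_coalesce(prev_object, NULL,
 *		prev_offset, 0, prev_size, next_size);
 *
 * returns TRUE (provided the checks in vm_object_coalesce() succeed) and
 * simply grows prev_object so that its size covers
 * prev_offset + prev_size + next_size.  The argument values shown are
 * placeholders for whatever the map code passes in.
 */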

/*
 * vm_object_print:	[ debug ]
 */
void vm_object_print(object, full)
        vm_object_t object;
        boolean_t full;
{
        register vm_page_t p;
        extern indent;

        register int count;

        if (object == NULL)
                return;

        iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
                (int) object, (int) object->size,
                object->resident_page_count, object->ref_count);
        printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
                (int) object->pager, (int) object->paging_offset,
                (int) object->shadow, (int) object->shadow_offset);
        printf("cache: next=0x%x, prev=0x%x\n",
                object->cached_list.next, object->cached_list.prev);

        if (!full)
                return;

        indent += 2;
        count = 0;
        p = (vm_page_t) queue_first(&object->memq);
        while (!queue_end(&object->memq, (queue_entry_t) p)) {
                if (count == 0)
                        iprintf("memory:=");
                else if (count == 6) {
                        printf("\n");
                        iprintf(" ...");
                        count = 0;
                } else
                        printf(",");
                count++;

                printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
                p = (vm_page_t) queue_next(&p->listq);
        }
        if (count != 0)
                printf("\n");
        indent -= 2;
}