145748Smckusick /* 245748Smckusick * Copyright (c) 1991 Regents of the University of California. 345748Smckusick * All rights reserved. 445748Smckusick * 545748Smckusick * This code is derived from software contributed to Berkeley by 645748Smckusick * The Mach Operating System project at Carnegie-Mellon University. 745748Smckusick * 848493Smckusick * %sccs.include.redist.c% 945748Smckusick * 10*60345Storek * @(#)vm_object.c 7.14 (Berkeley) 05/24/93 1148493Smckusick * 1248493Smckusick * 1348493Smckusick * Copyright (c) 1987, 1990 Carnegie-Mellon University. 1448493Smckusick * All rights reserved. 1548493Smckusick * 1648493Smckusick * Authors: Avadis Tevanian, Jr., Michael Wayne Young 1748493Smckusick * 1848493Smckusick * Permission to use, copy, modify and distribute this software and 1948493Smckusick * its documentation is hereby granted, provided that both the copyright 2048493Smckusick * notice and this permission notice appear in all copies of the 2148493Smckusick * software, derivative works or modified versions, and any portions 2248493Smckusick * thereof, and that both notices appear in supporting documentation. 2348493Smckusick * 2448493Smckusick * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 2548493Smckusick * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 2648493Smckusick * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 2748493Smckusick * 2848493Smckusick * Carnegie Mellon requests users of this software to return to 2948493Smckusick * 3048493Smckusick * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 3148493Smckusick * School of Computer Science 3248493Smckusick * Carnegie Mellon University 3348493Smckusick * Pittsburgh PA 15213-3890 3448493Smckusick * 3548493Smckusick * any improvements or extensions that they make and grant Carnegie the 3648493Smckusick * rights to redistribute these changes. 
3745748Smckusick */ 3845748Smckusick 3945748Smckusick /* 4045748Smckusick * Virtual memory object module. 4145748Smckusick */ 4245748Smckusick 4353354Sbostic #include <sys/param.h> 4453354Sbostic #include <sys/systm.h> 4553354Sbostic #include <sys/malloc.h> 4645748Smckusick 4753354Sbostic #include <vm/vm.h> 4853354Sbostic #include <vm/vm_page.h> 4948386Skarels 5045748Smckusick /* 5145748Smckusick * Virtual memory objects maintain the actual data 5245748Smckusick * associated with allocated virtual memory. A given 5345748Smckusick * page of memory exists within exactly one object. 5445748Smckusick * 5545748Smckusick * An object is only deallocated when all "references" 5645748Smckusick * are given up. Only one "reference" to a given 5745748Smckusick * region of an object should be writeable. 5845748Smckusick * 5945748Smckusick * Associated with each object is a list of all resident 6045748Smckusick * memory pages belonging to that object; this list is 6145748Smckusick * maintained by the "vm_page" module, and locked by the object's 6245748Smckusick * lock. 6345748Smckusick * 6445748Smckusick * Each object also records a "pager" routine which is 6545748Smckusick * used to retrieve (and store) pages to the proper backing 6645748Smckusick * storage. In addition, objects may be backed by other 6745748Smckusick * objects from which they were virtual-copied. 
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

/* Static storage for the two well-known, boot-time objects below. */
struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

#define	VM_OBJECT_HASH_COUNT	157

int	vm_cache_max = 100;	/* can patch if necessary */
queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

/* Counters maintained by the object collapse machinery. */
long	object_collapses = 0;
long	object_bypasses  = 0;

static void _vm_object_allocate __P((vm_size_t, vm_object_t));

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module: the cached-object and global
 *	object lists with their locks, the pager hash table, and the two
 *	statically allocated objects -- kernel_object (sized by the
 *	caller-supplied "size") and kmem_object (sized for the kernel
 *	malloc and mbuf arenas).
 *
 *	Must run once, before any other routine in this module is used.
 */
void vm_object_init(size)
	vm_size_t	size;
{
	register int	i;

	queue_init(&vm_object_cached_list);
	queue_init(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(size, kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
11945748Smckusick */ 12045748Smckusick 12145748Smckusick vm_object_t vm_object_allocate(size) 12245748Smckusick vm_size_t size; 12345748Smckusick { 12445748Smckusick register vm_object_t result; 12545748Smckusick 12645748Smckusick result = (vm_object_t) 12745748Smckusick malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK); 12845748Smckusick 12945748Smckusick _vm_object_allocate(size, result); 13045748Smckusick 13145748Smckusick return(result); 13245748Smckusick } 13345748Smckusick 13453354Sbostic static void 13545748Smckusick _vm_object_allocate(size, object) 13645748Smckusick vm_size_t size; 13745748Smckusick register vm_object_t object; 13845748Smckusick { 13945748Smckusick queue_init(&object->memq); 14045748Smckusick vm_object_lock_init(object); 14145748Smckusick object->ref_count = 1; 14245748Smckusick object->resident_page_count = 0; 14345748Smckusick object->size = size; 14450917Smckusick object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */ 14545748Smckusick object->paging_in_progress = 0; 14648386Skarels object->copy = NULL; 14745748Smckusick 14845748Smckusick /* 14945748Smckusick * Object starts out read-write, with no pager. 15045748Smckusick */ 15145748Smckusick 15248386Skarels object->pager = NULL; 15345748Smckusick object->paging_offset = 0; 15448386Skarels object->shadow = NULL; 15545748Smckusick object->shadow_offset = (vm_offset_t) 0; 15645748Smckusick 15745748Smckusick simple_lock(&vm_object_list_lock); 15845748Smckusick queue_enter(&vm_object_list, object, vm_object_t, object_list); 15945748Smckusick vm_object_count++; 16045748Smckusick simple_unlock(&vm_object_list_lock); 16145748Smckusick } 16245748Smckusick 16345748Smckusick /* 16445748Smckusick * vm_object_reference: 16545748Smckusick * 16645748Smckusick * Gets another reference to the given object. 
 */
void vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void vm_object_deallocate(object)
	register vm_object_t	object;
{
	vm_object_t	temp;

	/*
	 * Terminating an object can drop the last reference on its
	 * shadow as well, so walk down the shadow chain iteratively
	 * instead of recursing.
	 */
	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->flags & OBJ_CANPERSIST) {

			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			vm_object_deactivate_pages(object);
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}


/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;

	/*
	 * Detach the object from its shadow if we are the shadow's
	 * copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}

	/*
	 * Wait until the pageout daemon is through with the object.
	 * NOTE(review): the re-lock each iteration implies that
	 * vm_object_sleep releases the object lock while sleeping --
	 * verify against its definition.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 * If not an internal object clean all the pages, removing them
	 * from paging queues as we go.
	 */
	if ((object->flags & OBJ_INTERNAL) == 0) {
		vm_object_page_clean(object, 0, 0, TRUE);
		vm_object_unlock(object);
	}

	/*
	 * Now free the pages.
	 * For internal objects, this also removes them from paging queues.
	 */
	while (!queue_empty(&object->memq)) {
		p = (vm_page_t) queue_first(&object->memq);
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		vm_page_free(p);
		vm_page_unlock_queues();
	}
	/*
	 * NOTE(review): for non-internal objects the lock was already
	 * dropped right after vm_object_page_clean() above, which makes
	 * this second unlock for the same case look redundant (or the
	 * earlier one does) -- confirm against the simple-lock semantics
	 * in use before changing anything.
	 */
	if ((object->flags & OBJ_INTERNAL) == 0)
		vm_object_unlock(object);

	/*
	 * Let the pager know object is dead.
	 */
	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	/* Unhook from the global list of all objects. */
	simple_lock(&vm_object_list_lock);
	queue_remove(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 * Free the space for the object.
	 */
	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	If de_queue is TRUE, pages are removed from any paging queue
 *	they were on, otherwise they are left on whatever queue they
 *	were on before the cleaning operation began.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end, de_queue)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
	boolean_t		de_queue;
{
	register vm_page_t	p;
	int onqueue;

	if (object->pager == NULL)
		return;

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}
	/*
	 * Loop through the object page list cleaning as necessary.
	 */
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start == end ||
		    p->offset >= start && p->offset < end) {
			/*
			 * A page flagged clean may have been dirtied
			 * through its mappings; trust the hardware
			 * modify bit over PG_CLEAN.
			 */
			if ((p->flags & PG_CLEAN) &&
			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;
			/*
			 * Remove the page from any paging queue.
			 * This needs to be done if either we have been
			 * explicitly asked to do so or it is about to
			 * be cleaned (see comment below).
			 */
			if (de_queue || !(p->flags & PG_CLEAN)) {
				vm_page_lock_queues();
				if (p->flags & PG_ACTIVE) {
					queue_remove(&vm_page_queue_active,
						     p, vm_page_t, pageq);
					p->flags &= ~PG_ACTIVE;
					cnt.v_active_count--;
					onqueue = 1;
				} else if (p->flags & PG_INACTIVE) {
					queue_remove(&vm_page_queue_inactive,
						     p, vm_page_t, pageq);
					p->flags &= ~PG_INACTIVE;
					cnt.v_inactive_count--;
					onqueue = -1;
				} else
					onqueue = 0;
				vm_page_unlock_queues();
			}
			/*
			 * To ensure the state of the page doesn't change
			 * during the clean operation we do two things.
			 * First we set the busy bit and invalidate all
			 * mappings to ensure that thread accesses to the
			 * page block (in vm_fault).  Second, we remove
			 * the page from any paging queue to foil the
			 * pageout daemon (vm_pageout_scan).
			 */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if (!(p->flags & PG_CLEAN)) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				(void) vm_pager_put(object->pager, p, TRUE);
				vm_object_lock(object);
				object->paging_in_progress--;
				/*
				 * If the page was dequeued above only for
				 * the benefit of the clean, put it back on
				 * whichever queue it came from.
				 */
				if (!de_queue && onqueue) {
					vm_page_lock_queues();
					if (onqueue > 0)
						vm_page_activate(p);
					else
						vm_page_deactivate(p);
					vm_page_unlock_queues();
				}
				p->flags &= ~PG_BUSY;
				PAGE_WAKEUP(p);
				/*
				 * The object was unlocked around the pager
				 * call, so the page list may have changed:
				 * rescan from the top.
				 */
				goto again;
			}
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t	object;
{
	register vm_page_t	p, next;

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		/*
		 * Fetch the successor before deactivating -- presumably
		 * defensive, since vm_page_deactivate manipulates the
		 * paging queues (pageq), not the object's memq/listq.
		 */
		next = (vm_page_t) queue_next(&p->listq);
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
		p = next;
	}
}

/*
 *	Trim the object cache to size.
45645748Smckusick */ 45753354Sbostic void 45845748Smckusick vm_object_cache_trim() 45945748Smckusick { 46045748Smckusick register vm_object_t object; 46145748Smckusick 46245748Smckusick vm_object_cache_lock(); 46345748Smckusick while (vm_object_cached > vm_cache_max) { 46445748Smckusick object = (vm_object_t) queue_first(&vm_object_cached_list); 46545748Smckusick vm_object_cache_unlock(); 46645748Smckusick 46745748Smckusick if (object != vm_object_lookup(object->pager)) 46845748Smckusick panic("vm_object_deactivate: I'm sooo confused."); 46945748Smckusick 47045748Smckusick pager_cache(object, FALSE); 47145748Smckusick 47245748Smckusick vm_object_cache_lock(); 47345748Smckusick } 47445748Smckusick vm_object_cache_unlock(); 47545748Smckusick } 47645748Smckusick 47745748Smckusick 47845748Smckusick /* 47945748Smckusick * vm_object_shutdown() 48045748Smckusick * 48145748Smckusick * Shut down the object system. Unfortunately, while we 48245748Smckusick * may be trying to do this, init is happily waiting for 48345748Smckusick * processes to exit, and therefore will be causing some objects 48445748Smckusick * to be deallocated. To handle this, we gain a fake reference 48545748Smckusick * to all objects we release paging areas for. This will prevent 48645748Smckusick * a duplicate deallocation. This routine is probably full of 48745748Smckusick * race conditions! 48845748Smckusick */ 48945748Smckusick 49045748Smckusick void vm_object_shutdown() 49145748Smckusick { 49245748Smckusick register vm_object_t object; 49345748Smckusick 49445748Smckusick /* 49545748Smckusick * Clean up the object cache *before* we screw up the reference 49645748Smckusick * counts on all of the objects. 
49745748Smckusick */ 49845748Smckusick 49945748Smckusick vm_object_cache_clear(); 50045748Smckusick 50145748Smckusick printf("free paging spaces: "); 50245748Smckusick 50345748Smckusick /* 50445748Smckusick * First we gain a reference to each object so that 50545748Smckusick * no one else will deallocate them. 50645748Smckusick */ 50745748Smckusick 50845748Smckusick simple_lock(&vm_object_list_lock); 50945748Smckusick object = (vm_object_t) queue_first(&vm_object_list); 51045748Smckusick while (!queue_end(&vm_object_list, (queue_entry_t) object)) { 51145748Smckusick vm_object_reference(object); 51245748Smckusick object = (vm_object_t) queue_next(&object->object_list); 51345748Smckusick } 51445748Smckusick simple_unlock(&vm_object_list_lock); 51545748Smckusick 51645748Smckusick /* 51745748Smckusick * Now we deallocate all the paging areas. We don't need 51845748Smckusick * to lock anything because we've reduced to a single 51945748Smckusick * processor while shutting down. This also assumes that 52045748Smckusick * no new objects are being created. 52145748Smckusick */ 52245748Smckusick 52345748Smckusick object = (vm_object_t) queue_first(&vm_object_list); 52445748Smckusick while (!queue_end(&vm_object_list, (queue_entry_t) object)) { 52548386Skarels if (object->pager != NULL) 52645748Smckusick vm_pager_deallocate(object->pager); 52745748Smckusick object = (vm_object_t) queue_next(&object->object_list); 52845748Smckusick printf("."); 52945748Smckusick } 53045748Smckusick printf("done.\n"); 53145748Smckusick } 53245748Smckusick 53345748Smckusick /* 53445748Smckusick * vm_object_pmap_copy: 53545748Smckusick * 53645748Smckusick * Makes all physical pages in the specified 53745748Smckusick * object range copy-on-write. No writeable 53845748Smckusick * references to these pages should remain. 53945748Smckusick * 54045748Smckusick * The object must *not* be locked. 
54145748Smckusick */ 54245748Smckusick void vm_object_pmap_copy(object, start, end) 54345748Smckusick register vm_object_t object; 54445748Smckusick register vm_offset_t start; 54545748Smckusick register vm_offset_t end; 54645748Smckusick { 54745748Smckusick register vm_page_t p; 54845748Smckusick 54948386Skarels if (object == NULL) 55045748Smckusick return; 55145748Smckusick 55245748Smckusick vm_object_lock(object); 55345748Smckusick p = (vm_page_t) queue_first(&object->memq); 55445748Smckusick while (!queue_end(&object->memq, (queue_entry_t) p)) { 55545748Smckusick if ((start <= p->offset) && (p->offset < end)) { 55649292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ); 55756382Smckusick p->flags |= PG_COPYONWRITE; 55845748Smckusick } 55945748Smckusick p = (vm_page_t) queue_next(&p->listq); 56045748Smckusick } 56145748Smckusick vm_object_unlock(object); 56245748Smckusick } 56345748Smckusick 56445748Smckusick /* 56545748Smckusick * vm_object_pmap_remove: 56645748Smckusick * 56745748Smckusick * Removes all physical pages in the specified 56845748Smckusick * object range from all physical maps. 56945748Smckusick * 57045748Smckusick * The object must *not* be locked. 
57145748Smckusick */ 57245748Smckusick void vm_object_pmap_remove(object, start, end) 57345748Smckusick register vm_object_t object; 57445748Smckusick register vm_offset_t start; 57545748Smckusick register vm_offset_t end; 57645748Smckusick { 57745748Smckusick register vm_page_t p; 57845748Smckusick 57948386Skarels if (object == NULL) 58045748Smckusick return; 58145748Smckusick 58245748Smckusick vm_object_lock(object); 58345748Smckusick p = (vm_page_t) queue_first(&object->memq); 58445748Smckusick while (!queue_end(&object->memq, (queue_entry_t) p)) { 58549292Shibler if ((start <= p->offset) && (p->offset < end)) 58649292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 58745748Smckusick p = (vm_page_t) queue_next(&p->listq); 58845748Smckusick } 58945748Smckusick vm_object_unlock(object); 59045748Smckusick } 59145748Smckusick 59245748Smckusick /* 59345748Smckusick * vm_object_copy: 59445748Smckusick * 59545748Smckusick * Create a new object which is a copy of an existing 59645748Smckusick * object, and mark all of the pages in the existing 59745748Smckusick * object 'copy-on-write'. The new object has one reference. 59845748Smckusick * Returns the new object. 59945748Smckusick * 60045748Smckusick * May defer the copy until later if the object is not backed 60145748Smckusick * up by a non-default pager. 
 */
void vm_object_copy(src_object, src_offset, size,
		    dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 */

	vm_object_lock(src_object);
	if (src_object->pager == NULL ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		for (p = (vm_page_t) queue_first(&src_object->memq);
		     !queue_end(&src_object->memq, (queue_entry_t)p);
		     p = (vm_page_t) queue_next(&p->listq)) {
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		}
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	Try to collapse the object before copying it.
	 */
	vm_object_collapse(src_object);

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order).  On failure
		 *	we drop and retake src_object's lock to give the
		 *	other thread a chance to release old_copy.
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	p = (vm_page_t) queue_first(&src_object->memq);
	while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
80145748Smckusick */ 80245748Smckusick 80345748Smckusick void vm_object_shadow(object, offset, length) 80445748Smckusick vm_object_t *object; /* IN/OUT */ 80545748Smckusick vm_offset_t *offset; /* IN/OUT */ 80645748Smckusick vm_size_t length; 80745748Smckusick { 80845748Smckusick register vm_object_t source; 80945748Smckusick register vm_object_t result; 81045748Smckusick 81145748Smckusick source = *object; 81245748Smckusick 81345748Smckusick /* 81445748Smckusick * Allocate a new object with the given length 81545748Smckusick */ 81645748Smckusick 81748386Skarels if ((result = vm_object_allocate(length)) == NULL) 81845748Smckusick panic("vm_object_shadow: no object for shadowing"); 81945748Smckusick 82045748Smckusick /* 82145748Smckusick * The new object shadows the source object, adding 82245748Smckusick * a reference to it. Our caller changes his reference 82345748Smckusick * to point to the new object, removing a reference to 82445748Smckusick * the source object. Net result: no change of reference 82545748Smckusick * count. 82645748Smckusick */ 82745748Smckusick result->shadow = source; 82845748Smckusick 82945748Smckusick /* 83045748Smckusick * Store the offset into the source object, 83145748Smckusick * and fix up the offset into the new object. 83245748Smckusick */ 83345748Smckusick 83445748Smckusick result->shadow_offset = *offset; 83545748Smckusick 83645748Smckusick /* 83745748Smckusick * Return the new things 83845748Smckusick */ 83945748Smckusick 84045748Smckusick *offset = 0; 84145748Smckusick *object = result; 84245748Smckusick } 84345748Smckusick 84445748Smckusick /* 84545748Smckusick * Set the specified object's pager to the specified pager. 
84645748Smckusick */ 84745748Smckusick 84845748Smckusick void vm_object_setpager(object, pager, paging_offset, 84945748Smckusick read_only) 85045748Smckusick vm_object_t object; 85145748Smckusick vm_pager_t pager; 85245748Smckusick vm_offset_t paging_offset; 85345748Smckusick boolean_t read_only; 85445748Smckusick { 85545748Smckusick #ifdef lint 85645748Smckusick read_only++; /* No longer used */ 857*60345Storek #endif 85845748Smckusick 85945748Smckusick vm_object_lock(object); /* XXX ? */ 86045748Smckusick object->pager = pager; 86145748Smckusick object->paging_offset = paging_offset; 86245748Smckusick vm_object_unlock(object); /* XXX ? */ 86345748Smckusick } 86445748Smckusick 86545748Smckusick /* 86645748Smckusick * vm_object_hash hashes the pager/id pair. 86745748Smckusick */ 86845748Smckusick 86945748Smckusick #define vm_object_hash(pager) \ 87045748Smckusick (((unsigned)pager)%VM_OBJECT_HASH_COUNT) 87145748Smckusick 87245748Smckusick /* 87345748Smckusick * vm_object_lookup looks in the object cache for an object with the 87445748Smckusick * specified pager and paging id. 
87545748Smckusick */ 87645748Smckusick 87745748Smckusick vm_object_t vm_object_lookup(pager) 87845748Smckusick vm_pager_t pager; 87945748Smckusick { 88045748Smckusick register queue_t bucket; 88145748Smckusick register vm_object_hash_entry_t entry; 88245748Smckusick vm_object_t object; 88345748Smckusick 88445748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 88545748Smckusick 88645748Smckusick vm_object_cache_lock(); 88745748Smckusick 88845748Smckusick entry = (vm_object_hash_entry_t) queue_first(bucket); 88945748Smckusick while (!queue_end(bucket, (queue_entry_t) entry)) { 89045748Smckusick object = entry->object; 89145748Smckusick if (object->pager == pager) { 89245748Smckusick vm_object_lock(object); 89345748Smckusick if (object->ref_count == 0) { 89445748Smckusick queue_remove(&vm_object_cached_list, object, 89545748Smckusick vm_object_t, cached_list); 89645748Smckusick vm_object_cached--; 89745748Smckusick } 89845748Smckusick object->ref_count++; 89945748Smckusick vm_object_unlock(object); 90045748Smckusick vm_object_cache_unlock(); 90145748Smckusick return(object); 90245748Smckusick } 90345748Smckusick entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links); 90445748Smckusick } 90545748Smckusick 90645748Smckusick vm_object_cache_unlock(); 90748386Skarels return(NULL); 90845748Smckusick } 90945748Smckusick 91045748Smckusick /* 91145748Smckusick * vm_object_enter enters the specified object/pager/id into 91245748Smckusick * the hash table. 91345748Smckusick */ 91445748Smckusick 91545748Smckusick void vm_object_enter(object, pager) 91645748Smckusick vm_object_t object; 91745748Smckusick vm_pager_t pager; 91845748Smckusick { 91945748Smckusick register queue_t bucket; 92045748Smckusick register vm_object_hash_entry_t entry; 92145748Smckusick 92245748Smckusick /* 92345748Smckusick * We don't cache null objects, and we can't cache 92445748Smckusick * objects with the null pager. 
92545748Smckusick */ 92645748Smckusick 92748386Skarels if (object == NULL) 92845748Smckusick return; 92948386Skarels if (pager == NULL) 93045748Smckusick return; 93145748Smckusick 93245748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 93345748Smckusick entry = (vm_object_hash_entry_t) 93445748Smckusick malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK); 93545748Smckusick entry->object = object; 93650917Smckusick object->flags |= OBJ_CANPERSIST; 93745748Smckusick 93845748Smckusick vm_object_cache_lock(); 93945748Smckusick queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links); 94045748Smckusick vm_object_cache_unlock(); 94145748Smckusick } 94245748Smckusick 94345748Smckusick /* 94445748Smckusick * vm_object_remove: 94545748Smckusick * 94645748Smckusick * Remove the pager from the hash table. 94745748Smckusick * Note: This assumes that the object cache 94845748Smckusick * is locked. XXX this should be fixed 94945748Smckusick * by reorganizing vm_object_deallocate. 
95045748Smckusick */ 95153354Sbostic void 95245748Smckusick vm_object_remove(pager) 95345748Smckusick register vm_pager_t pager; 95445748Smckusick { 95545748Smckusick register queue_t bucket; 95645748Smckusick register vm_object_hash_entry_t entry; 95745748Smckusick register vm_object_t object; 95845748Smckusick 95945748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 96045748Smckusick 96145748Smckusick entry = (vm_object_hash_entry_t) queue_first(bucket); 96245748Smckusick while (!queue_end(bucket, (queue_entry_t) entry)) { 96345748Smckusick object = entry->object; 96445748Smckusick if (object->pager == pager) { 96545748Smckusick queue_remove(bucket, entry, vm_object_hash_entry_t, 96645748Smckusick hash_links); 96745748Smckusick free((caddr_t)entry, M_VMOBJHASH); 96845748Smckusick break; 96945748Smckusick } 97045748Smckusick entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links); 97145748Smckusick } 97245748Smckusick } 97345748Smckusick 97445748Smckusick /* 97545748Smckusick * vm_object_cache_clear removes all objects from the cache. 97645748Smckusick * 97745748Smckusick */ 97845748Smckusick 97945748Smckusick void vm_object_cache_clear() 98045748Smckusick { 98145748Smckusick register vm_object_t object; 98245748Smckusick 98345748Smckusick /* 98445748Smckusick * Remove each object in the cache by scanning down the 98545748Smckusick * list of cached objects. 98645748Smckusick */ 98745748Smckusick vm_object_cache_lock(); 98845748Smckusick while (!queue_empty(&vm_object_cached_list)) { 98945748Smckusick object = (vm_object_t) queue_first(&vm_object_cached_list); 99045748Smckusick vm_object_cache_unlock(); 99145748Smckusick 99245748Smckusick /* 99345748Smckusick * Note: it is important that we use vm_object_lookup 99445748Smckusick * to gain a reference, and not vm_object_reference, because 99545748Smckusick * the logic for removing an object from the cache lies in 99645748Smckusick * lookup. 
99745748Smckusick */ 99845748Smckusick if (object != vm_object_lookup(object->pager)) 99945748Smckusick panic("vm_object_cache_clear: I'm sooo confused."); 100045748Smckusick pager_cache(object, FALSE); 100145748Smckusick 100245748Smckusick vm_object_cache_lock(); 100345748Smckusick } 100445748Smckusick vm_object_cache_unlock(); 100545748Smckusick } 100645748Smckusick 100745748Smckusick boolean_t vm_object_collapse_allowed = TRUE; 100845748Smckusick /* 100945748Smckusick * vm_object_collapse: 101045748Smckusick * 101145748Smckusick * Collapse an object with the object backing it. 101245748Smckusick * Pages in the backing object are moved into the 101345748Smckusick * parent, and the backing object is deallocated. 101445748Smckusick * 101545748Smckusick * Requires that the object be locked and the page 101645748Smckusick * queues be unlocked. 101745748Smckusick * 101845748Smckusick */ 101945748Smckusick void vm_object_collapse(object) 102045748Smckusick register vm_object_t object; 102145748Smckusick 102245748Smckusick { 102345748Smckusick register vm_object_t backing_object; 102445748Smckusick register vm_offset_t backing_offset; 102545748Smckusick register vm_size_t size; 102645748Smckusick register vm_offset_t new_offset; 102745748Smckusick register vm_page_t p, pp; 102845748Smckusick 102945748Smckusick if (!vm_object_collapse_allowed) 103045748Smckusick return; 103145748Smckusick 103245748Smckusick while (TRUE) { 103345748Smckusick /* 103445748Smckusick * Verify that the conditions are right for collapse: 103545748Smckusick * 103645748Smckusick * The object exists and no pages in it are currently 103745748Smckusick * being paged out (or have ever been paged out). 
103845748Smckusick */ 103948386Skarels if (object == NULL || 104045748Smckusick object->paging_in_progress != 0 || 104148386Skarels object->pager != NULL) 104245748Smckusick return; 104345748Smckusick 104445748Smckusick /* 104545748Smckusick * There is a backing object, and 104645748Smckusick */ 104745748Smckusick 104848386Skarels if ((backing_object = object->shadow) == NULL) 104945748Smckusick return; 105045748Smckusick 105145748Smckusick vm_object_lock(backing_object); 105245748Smckusick /* 105345748Smckusick * ... 105445748Smckusick * The backing object is not read_only, 105545748Smckusick * and no pages in the backing object are 105645748Smckusick * currently being paged out. 105745748Smckusick * The backing object is internal. 105845748Smckusick */ 105945748Smckusick 106050917Smckusick if ((backing_object->flags & OBJ_INTERNAL) == 0 || 106145748Smckusick backing_object->paging_in_progress != 0) { 106245748Smckusick vm_object_unlock(backing_object); 106345748Smckusick return; 106445748Smckusick } 106545748Smckusick 106645748Smckusick /* 106745748Smckusick * The backing object can't be a copy-object: 106845748Smckusick * the shadow_offset for the copy-object must stay 106945748Smckusick * as 0. Furthermore (for the 'we have all the 107045748Smckusick * pages' case), if we bypass backing_object and 107145748Smckusick * just shadow the next object in the chain, old 107245748Smckusick * pages from that object would then have to be copied 107345748Smckusick * BOTH into the (former) backing_object and into the 107445748Smckusick * parent object. 
107545748Smckusick */ 107648386Skarels if (backing_object->shadow != NULL && 107748386Skarels backing_object->shadow->copy != NULL) { 107845748Smckusick vm_object_unlock(backing_object); 107945748Smckusick return; 108045748Smckusick } 108145748Smckusick 108245748Smckusick /* 108345748Smckusick * We know that we can either collapse the backing 108445748Smckusick * object (if the parent is the only reference to 108545748Smckusick * it) or (perhaps) remove the parent's reference 108645748Smckusick * to it. 108745748Smckusick */ 108845748Smckusick 108945748Smckusick backing_offset = object->shadow_offset; 109045748Smckusick size = object->size; 109145748Smckusick 109245748Smckusick /* 109345748Smckusick * If there is exactly one reference to the backing 109445748Smckusick * object, we can collapse it into the parent. 109545748Smckusick */ 109645748Smckusick 109745748Smckusick if (backing_object->ref_count == 1) { 109845748Smckusick 109945748Smckusick /* 110045748Smckusick * We can collapse the backing object. 110145748Smckusick * 110245748Smckusick * Move all in-memory pages from backing_object 110345748Smckusick * to the parent. Pages that have been paged out 110445748Smckusick * will be overwritten by any of the parent's 110545748Smckusick * pages that shadow them. 110645748Smckusick */ 110745748Smckusick 110845748Smckusick while (!queue_empty(&backing_object->memq)) { 110945748Smckusick 111045748Smckusick p = (vm_page_t) 111145748Smckusick queue_first(&backing_object->memq); 111245748Smckusick 111345748Smckusick new_offset = (p->offset - backing_offset); 111445748Smckusick 111545748Smckusick /* 111645748Smckusick * If the parent has a page here, or if 111745748Smckusick * this page falls outside the parent, 111845748Smckusick * dispose of it. 111945748Smckusick * 112045748Smckusick * Otherwise, move it as planned. 
112145748Smckusick */ 112245748Smckusick 112345748Smckusick if (p->offset < backing_offset || 112445748Smckusick new_offset >= size) { 112545748Smckusick vm_page_lock_queues(); 112645748Smckusick vm_page_free(p); 112745748Smckusick vm_page_unlock_queues(); 112845748Smckusick } else { 112945748Smckusick pp = vm_page_lookup(object, new_offset); 113056382Smckusick if (pp != NULL && !(pp->flags & PG_FAKE)) { 113145748Smckusick vm_page_lock_queues(); 113245748Smckusick vm_page_free(p); 113345748Smckusick vm_page_unlock_queues(); 113445748Smckusick } 113545748Smckusick else { 113645748Smckusick if (pp) { 113745748Smckusick /* may be someone waiting for it */ 113845748Smckusick PAGE_WAKEUP(pp); 113945748Smckusick vm_page_lock_queues(); 114045748Smckusick vm_page_free(pp); 114145748Smckusick vm_page_unlock_queues(); 114245748Smckusick } 114345748Smckusick vm_page_rename(p, object, new_offset); 114445748Smckusick } 114545748Smckusick } 114645748Smckusick } 114745748Smckusick 114845748Smckusick /* 114945748Smckusick * Move the pager from backing_object to object. 115045748Smckusick * 115145748Smckusick * XXX We're only using part of the paging space 115245748Smckusick * for keeps now... we ought to discard the 115345748Smckusick * unused portion. 115445748Smckusick */ 115545748Smckusick 115645748Smckusick object->pager = backing_object->pager; 115745748Smckusick object->paging_offset += backing_offset; 115845748Smckusick 115948386Skarels backing_object->pager = NULL; 116045748Smckusick 116145748Smckusick /* 116245748Smckusick * Object now shadows whatever backing_object did. 116345748Smckusick * Note that the reference to backing_object->shadow 116445748Smckusick * moves from within backing_object to within object. 
116545748Smckusick */ 116645748Smckusick 116745748Smckusick object->shadow = backing_object->shadow; 116845748Smckusick object->shadow_offset += backing_object->shadow_offset; 116948386Skarels if (object->shadow != NULL && 117048386Skarels object->shadow->copy != NULL) { 117145748Smckusick panic("vm_object_collapse: we collapsed a copy-object!"); 117245748Smckusick } 117345748Smckusick /* 117445748Smckusick * Discard backing_object. 117545748Smckusick * 117645748Smckusick * Since the backing object has no pages, no 117745748Smckusick * pager left, and no object references within it, 117845748Smckusick * all that is necessary is to dispose of it. 117945748Smckusick */ 118045748Smckusick 118145748Smckusick vm_object_unlock(backing_object); 118245748Smckusick 118345748Smckusick simple_lock(&vm_object_list_lock); 118445748Smckusick queue_remove(&vm_object_list, backing_object, 118545748Smckusick vm_object_t, object_list); 118645748Smckusick vm_object_count--; 118745748Smckusick simple_unlock(&vm_object_list_lock); 118845748Smckusick 118945748Smckusick free((caddr_t)backing_object, M_VMOBJ); 119045748Smckusick 119145748Smckusick object_collapses++; 119245748Smckusick } 119345748Smckusick else { 119445748Smckusick /* 119545748Smckusick * If all of the pages in the backing object are 119645748Smckusick * shadowed by the parent object, the parent 119745748Smckusick * object no longer has to shadow the backing 119845748Smckusick * object; it can shadow the next one in the 119945748Smckusick * chain. 120045748Smckusick * 120145748Smckusick * The backing object must not be paged out - we'd 120245748Smckusick * have to check all of the paged-out pages, as 120345748Smckusick * well. 
120445748Smckusick */ 120545748Smckusick 120648386Skarels if (backing_object->pager != NULL) { 120745748Smckusick vm_object_unlock(backing_object); 120845748Smckusick return; 120945748Smckusick } 121045748Smckusick 121145748Smckusick /* 121245748Smckusick * Should have a check for a 'small' number 121345748Smckusick * of pages here. 121445748Smckusick */ 121545748Smckusick 121645748Smckusick p = (vm_page_t) queue_first(&backing_object->memq); 121745748Smckusick while (!queue_end(&backing_object->memq, 121845748Smckusick (queue_entry_t) p)) { 121945748Smckusick 122045748Smckusick new_offset = (p->offset - backing_offset); 122145748Smckusick 122245748Smckusick /* 122345748Smckusick * If the parent has a page here, or if 122445748Smckusick * this page falls outside the parent, 122545748Smckusick * keep going. 122645748Smckusick * 122745748Smckusick * Otherwise, the backing_object must be 122845748Smckusick * left in the chain. 122945748Smckusick */ 123045748Smckusick 123145748Smckusick if (p->offset >= backing_offset && 123245748Smckusick new_offset <= size && 123345748Smckusick ((pp = vm_page_lookup(object, new_offset)) 123448386Skarels == NULL || 123556382Smckusick (pp->flags & PG_FAKE))) { 123645748Smckusick /* 123745748Smckusick * Page still needed. 123845748Smckusick * Can't go any further. 123945748Smckusick */ 124045748Smckusick vm_object_unlock(backing_object); 124145748Smckusick return; 124245748Smckusick } 124345748Smckusick p = (vm_page_t) queue_next(&p->listq); 124445748Smckusick } 124545748Smckusick 124645748Smckusick /* 124745748Smckusick * Make the parent shadow the next object 124845748Smckusick * in the chain. Deallocating backing_object 124945748Smckusick * will not remove it, since its reference 125045748Smckusick * count is at least 2. 
125145748Smckusick */ 125245748Smckusick 125345748Smckusick vm_object_reference(object->shadow = backing_object->shadow); 125445748Smckusick object->shadow_offset += backing_object->shadow_offset; 125545748Smckusick 125645748Smckusick /* Drop the reference count on backing_object. 125745748Smckusick * Since its ref_count was at least 2, it 125845748Smckusick * will not vanish; so we don't need to call 125945748Smckusick * vm_object_deallocate. 126045748Smckusick */ 126145748Smckusick backing_object->ref_count--; 126245748Smckusick vm_object_unlock(backing_object); 126345748Smckusick 126445748Smckusick object_bypasses ++; 126545748Smckusick 126645748Smckusick } 126745748Smckusick 126845748Smckusick /* 126945748Smckusick * Try again with this object's new backing object. 127045748Smckusick */ 127145748Smckusick } 127245748Smckusick } 127345748Smckusick 127445748Smckusick /* 127545748Smckusick * vm_object_page_remove: [internal] 127645748Smckusick * 127745748Smckusick * Removes all physical pages in the specified 127845748Smckusick * object range from the object's list of pages. 127945748Smckusick * 128045748Smckusick * The object must be locked. 
128145748Smckusick */ 128245748Smckusick void vm_object_page_remove(object, start, end) 128345748Smckusick register vm_object_t object; 128445748Smckusick register vm_offset_t start; 128545748Smckusick register vm_offset_t end; 128645748Smckusick { 128745748Smckusick register vm_page_t p, next; 128845748Smckusick 128948386Skarels if (object == NULL) 129045748Smckusick return; 129145748Smckusick 129245748Smckusick p = (vm_page_t) queue_first(&object->memq); 129345748Smckusick while (!queue_end(&object->memq, (queue_entry_t) p)) { 129445748Smckusick next = (vm_page_t) queue_next(&p->listq); 129545748Smckusick if ((start <= p->offset) && (p->offset < end)) { 129649292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 129745748Smckusick vm_page_lock_queues(); 129845748Smckusick vm_page_free(p); 129945748Smckusick vm_page_unlock_queues(); 130045748Smckusick } 130145748Smckusick p = next; 130245748Smckusick } 130345748Smckusick } 130445748Smckusick 130545748Smckusick /* 130645748Smckusick * Routine: vm_object_coalesce 130745748Smckusick * Function: Coalesces two objects backing up adjoining 130845748Smckusick * regions of memory into a single object. 130945748Smckusick * 131045748Smckusick * returns TRUE if objects were combined. 131145748Smckusick * 131245748Smckusick * NOTE: Only works at the moment if the second object is NULL - 131345748Smckusick * if it's not, which object do we lock first? 131445748Smckusick * 131545748Smckusick * Parameters: 131645748Smckusick * prev_object First object to coalesce 131745748Smckusick * prev_offset Offset into prev_object 131845748Smckusick * next_object Second object into coalesce 131945748Smckusick * next_offset Offset into next_object 132045748Smckusick * 132145748Smckusick * prev_size Size of reference to prev_object 132245748Smckusick * next_size Size of reference to next_object 132345748Smckusick * 132445748Smckusick * Conditions: 132545748Smckusick * The object must *not* be locked. 
132645748Smckusick */ 132745748Smckusick boolean_t vm_object_coalesce(prev_object, next_object, 132845748Smckusick prev_offset, next_offset, 132945748Smckusick prev_size, next_size) 133045748Smckusick 133145748Smckusick register vm_object_t prev_object; 133245748Smckusick vm_object_t next_object; 133345748Smckusick vm_offset_t prev_offset, next_offset; 133445748Smckusick vm_size_t prev_size, next_size; 133545748Smckusick { 133645748Smckusick vm_size_t newsize; 133745748Smckusick 133845748Smckusick #ifdef lint 133945748Smckusick next_offset++; 1340*60345Storek #endif 134145748Smckusick 134248386Skarels if (next_object != NULL) { 134345748Smckusick return(FALSE); 134445748Smckusick } 134545748Smckusick 134648386Skarels if (prev_object == NULL) { 134745748Smckusick return(TRUE); 134845748Smckusick } 134945748Smckusick 135045748Smckusick vm_object_lock(prev_object); 135145748Smckusick 135245748Smckusick /* 135345748Smckusick * Try to collapse the object first 135445748Smckusick */ 135545748Smckusick vm_object_collapse(prev_object); 135645748Smckusick 135745748Smckusick /* 135845748Smckusick * Can't coalesce if: 135945748Smckusick * . more than one reference 136045748Smckusick * . paged out 136145748Smckusick * . shadows another object 136245748Smckusick * . has a copy elsewhere 136345748Smckusick * (any of which mean that the pages not mapped to 136445748Smckusick * prev_entry may be in use anyway) 136545748Smckusick */ 136645748Smckusick 136745748Smckusick if (prev_object->ref_count > 1 || 136848386Skarels prev_object->pager != NULL || 136948386Skarels prev_object->shadow != NULL || 137048386Skarels prev_object->copy != NULL) { 137145748Smckusick vm_object_unlock(prev_object); 137245748Smckusick return(FALSE); 137345748Smckusick } 137445748Smckusick 137545748Smckusick /* 137645748Smckusick * Remove any pages that may still be in the object from 137745748Smckusick * a previous deallocation. 
137845748Smckusick */ 137945748Smckusick 138045748Smckusick vm_object_page_remove(prev_object, 138145748Smckusick prev_offset + prev_size, 138245748Smckusick prev_offset + prev_size + next_size); 138345748Smckusick 138445748Smckusick /* 138545748Smckusick * Extend the object if necessary. 138645748Smckusick */ 138745748Smckusick newsize = prev_offset + prev_size + next_size; 138845748Smckusick if (newsize > prev_object->size) 138945748Smckusick prev_object->size = newsize; 139045748Smckusick 139145748Smckusick vm_object_unlock(prev_object); 139245748Smckusick return(TRUE); 139345748Smckusick } 139445748Smckusick 139545748Smckusick /* 139645748Smckusick * vm_object_print: [ debug ] 139745748Smckusick */ 139845748Smckusick void vm_object_print(object, full) 139945748Smckusick vm_object_t object; 140045748Smckusick boolean_t full; 140145748Smckusick { 140245748Smckusick register vm_page_t p; 140345748Smckusick extern indent; 140445748Smckusick 140545748Smckusick register int count; 140645748Smckusick 140748386Skarels if (object == NULL) 140845748Smckusick return; 140945748Smckusick 141045748Smckusick iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", 141145748Smckusick (int) object, (int) object->size, 141245748Smckusick object->resident_page_count, object->ref_count); 141345748Smckusick printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n", 141445748Smckusick (int) object->pager, (int) object->paging_offset, 141545748Smckusick (int) object->shadow, (int) object->shadow_offset); 141645748Smckusick printf("cache: next=0x%x, prev=0x%x\n", 141756462Smckusick object->cached_list.qe_next, object->cached_list.qe_prev); 141845748Smckusick 141945748Smckusick if (!full) 142045748Smckusick return; 142145748Smckusick 142245748Smckusick indent += 2; 142345748Smckusick count = 0; 142445748Smckusick p = (vm_page_t) queue_first(&object->memq); 142545748Smckusick while (!queue_end(&object->memq, (queue_entry_t) p)) { 142645748Smckusick if (count == 0) 142745748Smckusick 
iprintf("memory:="); 142845748Smckusick else if (count == 6) { 142945748Smckusick printf("\n"); 143045748Smckusick iprintf(" ..."); 143145748Smckusick count = 0; 143245748Smckusick } else 143345748Smckusick printf(","); 143445748Smckusick count++; 143545748Smckusick 143645748Smckusick printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p)); 143745748Smckusick p = (vm_page_t) queue_next(&p->listq); 143845748Smckusick } 143945748Smckusick if (count != 0) 144045748Smckusick printf("\n"); 144145748Smckusick indent -= 2; 144245748Smckusick } 1443