145748Smckusick /* 263379Sbostic * Copyright (c) 1991, 1993 363379Sbostic * The Regents of the University of California. All rights reserved. 445748Smckusick * 545748Smckusick * This code is derived from software contributed to Berkeley by 645748Smckusick * The Mach Operating System project at Carnegie-Mellon University. 745748Smckusick * 848493Smckusick * %sccs.include.redist.c% 945748Smckusick * 10*65231Smckusick * @(#)vm_object.c 8.2 (Berkeley) 12/30/93 1148493Smckusick * 1248493Smckusick * 1348493Smckusick * Copyright (c) 1987, 1990 Carnegie-Mellon University. 1448493Smckusick * All rights reserved. 1548493Smckusick * 1648493Smckusick * Authors: Avadis Tevanian, Jr., Michael Wayne Young 1748493Smckusick * 1848493Smckusick * Permission to use, copy, modify and distribute this software and 1948493Smckusick * its documentation is hereby granted, provided that both the copyright 2048493Smckusick * notice and this permission notice appear in all copies of the 2148493Smckusick * software, derivative works or modified versions, and any portions 2248493Smckusick * thereof, and that both notices appear in supporting documentation. 2348493Smckusick * 2448493Smckusick * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 2548493Smckusick * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 2648493Smckusick * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 2748493Smckusick * 2848493Smckusick * Carnegie Mellon requests users of this software to return to 2948493Smckusick * 3048493Smckusick * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 3148493Smckusick * School of Computer Science 3248493Smckusick * Carnegie Mellon University 3348493Smckusick * Pittsburgh PA 15213-3890 3448493Smckusick * 3548493Smckusick * any improvements or extensions that they make and grant Carnegie the 3648493Smckusick * rights to redistribute these changes. 
3745748Smckusick */ 3845748Smckusick 3945748Smckusick /* 4045748Smckusick * Virtual memory object module. 4145748Smckusick */ 4245748Smckusick 4353354Sbostic #include <sys/param.h> 4453354Sbostic #include <sys/systm.h> 4553354Sbostic #include <sys/malloc.h> 4645748Smckusick 4753354Sbostic #include <vm/vm.h> 4853354Sbostic #include <vm/vm_page.h> 4948386Skarels 5045748Smckusick /* 5145748Smckusick * Virtual memory objects maintain the actual data 5245748Smckusick * associated with allocated virtual memory. A given 5345748Smckusick * page of memory exists within exactly one object. 5445748Smckusick * 5545748Smckusick * An object is only deallocated when all "references" 5645748Smckusick * are given up. Only one "reference" to a given 5745748Smckusick * region of an object should be writeable. 5845748Smckusick * 5945748Smckusick * Associated with each object is a list of all resident 6045748Smckusick * memory pages belonging to that object; this list is 6145748Smckusick * maintained by the "vm_page" module, and locked by the object's 6245748Smckusick * lock. 6345748Smckusick * 6445748Smckusick * Each object also records a "pager" routine which is 6545748Smckusick * used to retrieve (and store) pages to the proper backing 6645748Smckusick * storage. In addition, objects may be backed by other 6745748Smckusick * objects from which they were virtual-copied. 
6845748Smckusick * 6945748Smckusick * The only items within the object structure which are 7045748Smckusick * modified after time of creation are: 7145748Smckusick * reference count locked by object's lock 7245748Smckusick * pager routine locked by object's lock 7345748Smckusick * 7445748Smckusick */ 7545748Smckusick 7645748Smckusick struct vm_object kernel_object_store; 7745748Smckusick struct vm_object kmem_object_store; 7845748Smckusick 7945748Smckusick #define VM_OBJECT_HASH_COUNT 157 8045748Smckusick 81*65231Smckusick int vm_cache_max = 100; /* can patch if necessary */ 82*65231Smckusick struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT]; 8345748Smckusick 8445748Smckusick long object_collapses = 0; 8545748Smckusick long object_bypasses = 0; 8645748Smckusick 8753354Sbostic static void _vm_object_allocate __P((vm_size_t, vm_object_t)); 8853354Sbostic 8945748Smckusick /* 9045748Smckusick * vm_object_init: 9145748Smckusick * 9245748Smckusick * Initialize the VM objects module. 
9345748Smckusick */ 9451770Smarc void vm_object_init(size) 9551770Smarc vm_size_t size; 9645748Smckusick { 9745748Smckusick register int i; 9845748Smckusick 99*65231Smckusick TAILQ_INIT(&vm_object_cached_list); 100*65231Smckusick TAILQ_INIT(&vm_object_list); 10145748Smckusick vm_object_count = 0; 10245748Smckusick simple_lock_init(&vm_cache_lock); 10345748Smckusick simple_lock_init(&vm_object_list_lock); 10445748Smckusick 10545748Smckusick for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) 106*65231Smckusick TAILQ_INIT(&vm_object_hashtable[i]); 10745748Smckusick 10845748Smckusick kernel_object = &kernel_object_store; 10951770Smarc _vm_object_allocate(size, kernel_object); 11045748Smckusick 11145748Smckusick kmem_object = &kmem_object_store; 11245748Smckusick _vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object); 11345748Smckusick } 11445748Smckusick 11545748Smckusick /* 11645748Smckusick * vm_object_allocate: 11745748Smckusick * 11845748Smckusick * Returns a new object with the given size. 
11945748Smckusick */ 12045748Smckusick 12145748Smckusick vm_object_t vm_object_allocate(size) 12245748Smckusick vm_size_t size; 12345748Smckusick { 12445748Smckusick register vm_object_t result; 12545748Smckusick 12645748Smckusick result = (vm_object_t) 12745748Smckusick malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK); 12845748Smckusick 12945748Smckusick _vm_object_allocate(size, result); 13045748Smckusick 13145748Smckusick return(result); 13245748Smckusick } 13345748Smckusick 13453354Sbostic static void 13545748Smckusick _vm_object_allocate(size, object) 13645748Smckusick vm_size_t size; 13745748Smckusick register vm_object_t object; 13845748Smckusick { 139*65231Smckusick TAILQ_INIT(&object->memq); 14045748Smckusick vm_object_lock_init(object); 14145748Smckusick object->ref_count = 1; 14245748Smckusick object->resident_page_count = 0; 14345748Smckusick object->size = size; 14450917Smckusick object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */ 14545748Smckusick object->paging_in_progress = 0; 14648386Skarels object->copy = NULL; 14745748Smckusick 14845748Smckusick /* 14945748Smckusick * Object starts out read-write, with no pager. 15045748Smckusick */ 15145748Smckusick 15248386Skarels object->pager = NULL; 15345748Smckusick object->paging_offset = 0; 15448386Skarels object->shadow = NULL; 15545748Smckusick object->shadow_offset = (vm_offset_t) 0; 15645748Smckusick 15745748Smckusick simple_lock(&vm_object_list_lock); 158*65231Smckusick TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 15945748Smckusick vm_object_count++; 16045748Smckusick simple_unlock(&vm_object_list_lock); 16145748Smckusick } 16245748Smckusick 16345748Smckusick /* 16445748Smckusick * vm_object_reference: 16545748Smckusick * 16645748Smckusick * Gets another reference to the given object. 
16745748Smckusick */ 16845748Smckusick void vm_object_reference(object) 16945748Smckusick register vm_object_t object; 17045748Smckusick { 17148386Skarels if (object == NULL) 17245748Smckusick return; 17345748Smckusick 17445748Smckusick vm_object_lock(object); 17545748Smckusick object->ref_count++; 17645748Smckusick vm_object_unlock(object); 17745748Smckusick } 17845748Smckusick 17945748Smckusick /* 18045748Smckusick * vm_object_deallocate: 18145748Smckusick * 18245748Smckusick * Release a reference to the specified object, 18345748Smckusick * gained either through a vm_object_allocate 18445748Smckusick * or a vm_object_reference call. When all references 18545748Smckusick * are gone, storage associated with this object 18645748Smckusick * may be relinquished. 18745748Smckusick * 18845748Smckusick * No object may be locked. 18945748Smckusick */ 19045748Smckusick void vm_object_deallocate(object) 19145748Smckusick register vm_object_t object; 19245748Smckusick { 19345748Smckusick vm_object_t temp; 19445748Smckusick 19548386Skarels while (object != NULL) { 19645748Smckusick 19745748Smckusick /* 19845748Smckusick * The cache holds a reference (uncounted) to 19945748Smckusick * the object; we must lock it before removing 20045748Smckusick * the object. 20145748Smckusick */ 20245748Smckusick 20345748Smckusick vm_object_cache_lock(); 20445748Smckusick 20545748Smckusick /* 20645748Smckusick * Lose the reference 20745748Smckusick */ 20845748Smckusick vm_object_lock(object); 20945748Smckusick if (--(object->ref_count) != 0) { 21045748Smckusick 21145748Smckusick /* 21245748Smckusick * If there are still references, then 21345748Smckusick * we are done. 21445748Smckusick */ 21545748Smckusick vm_object_unlock(object); 21645748Smckusick vm_object_cache_unlock(); 21745748Smckusick return; 21845748Smckusick } 21945748Smckusick 22045748Smckusick /* 22145748Smckusick * See if this object can persist. 
If so, enter 22245748Smckusick * it in the cache, then deactivate all of its 22345748Smckusick * pages. 22445748Smckusick */ 22545748Smckusick 22650917Smckusick if (object->flags & OBJ_CANPERSIST) { 22745748Smckusick 228*65231Smckusick TAILQ_INSERT_TAIL(&vm_object_cached_list, object, 229*65231Smckusick cached_list); 23045748Smckusick vm_object_cached++; 23145748Smckusick vm_object_cache_unlock(); 23245748Smckusick 23345748Smckusick vm_object_deactivate_pages(object); 23445748Smckusick vm_object_unlock(object); 23545748Smckusick 23645748Smckusick vm_object_cache_trim(); 23745748Smckusick return; 23845748Smckusick } 23945748Smckusick 24045748Smckusick /* 24145748Smckusick * Make sure no one can look us up now. 24245748Smckusick */ 24345748Smckusick vm_object_remove(object->pager); 24445748Smckusick vm_object_cache_unlock(); 24545748Smckusick 24645748Smckusick temp = object->shadow; 24745748Smckusick vm_object_terminate(object); 24845748Smckusick /* unlocks and deallocates object */ 24945748Smckusick object = temp; 25045748Smckusick } 25145748Smckusick } 25245748Smckusick 25345748Smckusick 25445748Smckusick /* 25545748Smckusick * vm_object_terminate actually destroys the specified object, freeing 25645748Smckusick * up all previously used resources. 25745748Smckusick * 25845748Smckusick * The object must be locked. 25945748Smckusick */ 26045748Smckusick void vm_object_terminate(object) 26145748Smckusick register vm_object_t object; 26245748Smckusick { 26345748Smckusick register vm_page_t p; 26445748Smckusick vm_object_t shadow_object; 26545748Smckusick 26645748Smckusick /* 26745748Smckusick * Detach the object from its shadow if we are the shadow's 26845748Smckusick * copy. 
26945748Smckusick */ 27048386Skarels if ((shadow_object = object->shadow) != NULL) { 27145748Smckusick vm_object_lock(shadow_object); 27245748Smckusick if (shadow_object->copy == object) 27348386Skarels shadow_object->copy = NULL; 27445748Smckusick #if 0 27548386Skarels else if (shadow_object->copy != NULL) 27645748Smckusick panic("vm_object_terminate: copy/shadow inconsistency"); 27745748Smckusick #endif 27845748Smckusick vm_object_unlock(shadow_object); 27945748Smckusick } 28045748Smckusick 28145748Smckusick /* 28251771Smarc * Wait until the pageout daemon is through with the object. 28345748Smckusick */ 28451771Smarc while (object->paging_in_progress) { 28550856Smckusick vm_object_sleep((int)object, object, FALSE); 28645748Smckusick vm_object_lock(object); 28745748Smckusick } 28845748Smckusick 28945748Smckusick /* 29051771Smarc * If not an internal object clean all the pages, removing them 29151771Smarc * from paging queues as we go. 29245748Smckusick */ 29351771Smarc if ((object->flags & OBJ_INTERNAL) == 0) { 29451771Smarc vm_object_page_clean(object, 0, 0, TRUE); 29551771Smarc vm_object_unlock(object); 29645748Smckusick } 29745748Smckusick 29845748Smckusick /* 29951771Smarc * Now free the pages. 30051771Smarc * For internal objects, this also removes them from paging queues. 30145748Smckusick */ 302*65231Smckusick while ((p = object->memq.tqh_first) != NULL) { 30345748Smckusick VM_PAGE_CHECK(p); 30445748Smckusick vm_page_lock_queues(); 30545748Smckusick vm_page_free(p); 30645748Smckusick vm_page_unlock_queues(); 30745748Smckusick } 30851771Smarc if ((object->flags & OBJ_INTERNAL) == 0) 30951771Smarc vm_object_unlock(object); 31045748Smckusick 31145748Smckusick /* 31251771Smarc * Let the pager know object is dead. 
31345748Smckusick */ 31448386Skarels if (object->pager != NULL) 31545748Smckusick vm_pager_deallocate(object->pager); 31645748Smckusick 31745748Smckusick simple_lock(&vm_object_list_lock); 318*65231Smckusick TAILQ_REMOVE(&vm_object_list, object, object_list); 31945748Smckusick vm_object_count--; 32045748Smckusick simple_unlock(&vm_object_list_lock); 32145748Smckusick 32245748Smckusick /* 32351771Smarc * Free the space for the object. 32445748Smckusick */ 32545748Smckusick free((caddr_t)object, M_VMOBJ); 32645748Smckusick } 32745748Smckusick 32845748Smckusick /* 32945748Smckusick * vm_object_page_clean 33045748Smckusick * 33145748Smckusick * Clean all dirty pages in the specified range of object. 33252197Smarc * If de_queue is TRUE, pages are removed from any paging queue 33351771Smarc * they were on, otherwise they are left on whatever queue they 33451771Smarc * were on before the cleaning operation began. 33545748Smckusick * 33645748Smckusick * Odd semantics: if start == end, we clean everything. 33745748Smckusick * 33845748Smckusick * The object must be locked. 33945748Smckusick */ 34053354Sbostic void 34152197Smarc vm_object_page_clean(object, start, end, de_queue) 34245748Smckusick register vm_object_t object; 34345748Smckusick register vm_offset_t start; 34445748Smckusick register vm_offset_t end; 34552197Smarc boolean_t de_queue; 34645748Smckusick { 34745748Smckusick register vm_page_t p; 34851771Smarc int onqueue; 34945748Smckusick 35048386Skarels if (object->pager == NULL) 35145748Smckusick return; 35245748Smckusick 35345748Smckusick again: 35451771Smarc /* 35551771Smarc * Wait until the pageout daemon is through with the object. 35651771Smarc */ 35751771Smarc while (object->paging_in_progress) { 35851771Smarc vm_object_sleep((int)object, object, FALSE); 35951771Smarc vm_object_lock(object); 36051771Smarc } 36151771Smarc /* 36251771Smarc * Loop through the object page list cleaning as necessary. 
36351771Smarc */ 364*65231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 36545748Smckusick if (start == end || 36645748Smckusick p->offset >= start && p->offset < end) { 36756382Smckusick if ((p->flags & PG_CLEAN) && 36856382Smckusick pmap_is_modified(VM_PAGE_TO_PHYS(p))) 36956382Smckusick p->flags &= ~PG_CLEAN; 37051771Smarc /* 37151771Smarc * Remove the page from any paging queue. 37251771Smarc * This needs to be done if either we have been 37351771Smarc * explicitly asked to do so or it is about to 37451771Smarc * be cleaned (see comment below). 37551771Smarc */ 37656382Smckusick if (de_queue || !(p->flags & PG_CLEAN)) { 37751771Smarc vm_page_lock_queues(); 37856382Smckusick if (p->flags & PG_ACTIVE) { 379*65231Smckusick TAILQ_REMOVE(&vm_page_queue_active, 380*65231Smckusick p, pageq); 38156382Smckusick p->flags &= ~PG_ACTIVE; 38251771Smarc cnt.v_active_count--; 38351771Smarc onqueue = 1; 38456382Smckusick } else if (p->flags & PG_INACTIVE) { 385*65231Smckusick TAILQ_REMOVE(&vm_page_queue_inactive, 386*65231Smckusick p, pageq); 38756382Smckusick p->flags &= ~PG_INACTIVE; 38851771Smarc cnt.v_inactive_count--; 38951771Smarc onqueue = -1; 39051771Smarc } else 39151771Smarc onqueue = 0; 39251771Smarc vm_page_unlock_queues(); 39351771Smarc } 39451771Smarc /* 39551771Smarc * To ensure the state of the page doesn't change 39651771Smarc * during the clean operation we do two things. 39751771Smarc * First we set the busy bit and invalidate all 39851771Smarc * mappings to ensure that thread accesses to the 39951771Smarc * page block (in vm_fault). Second, we remove 40051771Smarc * the page from any paging queue to foil the 40151771Smarc * pageout daemon (vm_pageout_scan). 
40251771Smarc */ 40349292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 40456382Smckusick if (!(p->flags & PG_CLEAN)) { 40556382Smckusick p->flags |= PG_BUSY; 40645748Smckusick object->paging_in_progress++; 40745748Smckusick vm_object_unlock(object); 40845748Smckusick (void) vm_pager_put(object->pager, p, TRUE); 40945748Smckusick vm_object_lock(object); 41045748Smckusick object->paging_in_progress--; 41152197Smarc if (!de_queue && onqueue) { 41251771Smarc vm_page_lock_queues(); 41351771Smarc if (onqueue > 0) 41451771Smarc vm_page_activate(p); 41551771Smarc else 41651771Smarc vm_page_deactivate(p); 41751771Smarc vm_page_unlock_queues(); 41851771Smarc } 41956382Smckusick p->flags &= ~PG_BUSY; 42045748Smckusick PAGE_WAKEUP(p); 42145748Smckusick goto again; 42245748Smckusick } 42345748Smckusick } 42445748Smckusick } 42545748Smckusick } 42645748Smckusick 42745748Smckusick /* 42845748Smckusick * vm_object_deactivate_pages 42945748Smckusick * 43045748Smckusick * Deactivate all pages in the specified object. (Keep its pages 43145748Smckusick * in memory even though it is no longer referenced.) 43245748Smckusick * 43345748Smckusick * The object must be locked. 43445748Smckusick */ 43553354Sbostic void 43645748Smckusick vm_object_deactivate_pages(object) 43745748Smckusick register vm_object_t object; 43845748Smckusick { 43945748Smckusick register vm_page_t p, next; 44045748Smckusick 441*65231Smckusick for (p = object->memq.tqh_first; p != NULL; p = next) { 442*65231Smckusick next = p->listq.tqe_next; 44345748Smckusick vm_page_lock_queues(); 44445748Smckusick vm_page_deactivate(p); 44545748Smckusick vm_page_unlock_queues(); 44645748Smckusick } 44745748Smckusick } 44845748Smckusick 44945748Smckusick /* 45045748Smckusick * Trim the object cache to size. 
45145748Smckusick */ 45253354Sbostic void 45345748Smckusick vm_object_cache_trim() 45445748Smckusick { 45545748Smckusick register vm_object_t object; 45645748Smckusick 45745748Smckusick vm_object_cache_lock(); 45845748Smckusick while (vm_object_cached > vm_cache_max) { 459*65231Smckusick object = vm_object_cached_list.tqh_first; 46045748Smckusick vm_object_cache_unlock(); 46145748Smckusick 46245748Smckusick if (object != vm_object_lookup(object->pager)) 46345748Smckusick panic("vm_object_deactivate: I'm sooo confused."); 46445748Smckusick 46545748Smckusick pager_cache(object, FALSE); 46645748Smckusick 46745748Smckusick vm_object_cache_lock(); 46845748Smckusick } 46945748Smckusick vm_object_cache_unlock(); 47045748Smckusick } 47145748Smckusick 47245748Smckusick 47345748Smckusick /* 47445748Smckusick * vm_object_shutdown() 47545748Smckusick * 47645748Smckusick * Shut down the object system. Unfortunately, while we 47745748Smckusick * may be trying to do this, init is happily waiting for 47845748Smckusick * processes to exit, and therefore will be causing some objects 47945748Smckusick * to be deallocated. To handle this, we gain a fake reference 48045748Smckusick * to all objects we release paging areas for. This will prevent 48145748Smckusick * a duplicate deallocation. This routine is probably full of 48245748Smckusick * race conditions! 48345748Smckusick */ 48445748Smckusick 48545748Smckusick void vm_object_shutdown() 48645748Smckusick { 48745748Smckusick register vm_object_t object; 48845748Smckusick 48945748Smckusick /* 49045748Smckusick * Clean up the object cache *before* we screw up the reference 49145748Smckusick * counts on all of the objects. 49245748Smckusick */ 49345748Smckusick 49445748Smckusick vm_object_cache_clear(); 49545748Smckusick 49645748Smckusick printf("free paging spaces: "); 49745748Smckusick 49845748Smckusick /* 49945748Smckusick * First we gain a reference to each object so that 50045748Smckusick * no one else will deallocate them. 
50145748Smckusick */ 50245748Smckusick 50345748Smckusick simple_lock(&vm_object_list_lock); 504*65231Smckusick for (object = vm_object_list.tqh_first; 505*65231Smckusick object != NULL; 506*65231Smckusick object = object->object_list.tqe_next) 50745748Smckusick vm_object_reference(object); 50845748Smckusick simple_unlock(&vm_object_list_lock); 50945748Smckusick 51045748Smckusick /* 51145748Smckusick * Now we deallocate all the paging areas. We don't need 51245748Smckusick * to lock anything because we've reduced to a single 51345748Smckusick * processor while shutting down. This also assumes that 51445748Smckusick * no new objects are being created. 51545748Smckusick */ 51645748Smckusick 517*65231Smckusick for (object = vm_object_list.tqh_first; 518*65231Smckusick object != NULL; 519*65231Smckusick object = object->object_list.tqe_next) { 52048386Skarels if (object->pager != NULL) 52145748Smckusick vm_pager_deallocate(object->pager); 52245748Smckusick printf("."); 52345748Smckusick } 52445748Smckusick printf("done.\n"); 52545748Smckusick } 52645748Smckusick 52745748Smckusick /* 52845748Smckusick * vm_object_pmap_copy: 52945748Smckusick * 53045748Smckusick * Makes all physical pages in the specified 53145748Smckusick * object range copy-on-write. No writeable 53245748Smckusick * references to these pages should remain. 53345748Smckusick * 53445748Smckusick * The object must *not* be locked. 
53545748Smckusick */ 53645748Smckusick void vm_object_pmap_copy(object, start, end) 53745748Smckusick register vm_object_t object; 53845748Smckusick register vm_offset_t start; 53945748Smckusick register vm_offset_t end; 54045748Smckusick { 54145748Smckusick register vm_page_t p; 54245748Smckusick 54348386Skarels if (object == NULL) 54445748Smckusick return; 54545748Smckusick 54645748Smckusick vm_object_lock(object); 547*65231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 54845748Smckusick if ((start <= p->offset) && (p->offset < end)) { 54949292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ); 55056382Smckusick p->flags |= PG_COPYONWRITE; 55145748Smckusick } 55245748Smckusick } 55345748Smckusick vm_object_unlock(object); 55445748Smckusick } 55545748Smckusick 55645748Smckusick /* 55745748Smckusick * vm_object_pmap_remove: 55845748Smckusick * 55945748Smckusick * Removes all physical pages in the specified 56045748Smckusick * object range from all physical maps. 56145748Smckusick * 56245748Smckusick * The object must *not* be locked. 
56345748Smckusick */ 56445748Smckusick void vm_object_pmap_remove(object, start, end) 56545748Smckusick register vm_object_t object; 56645748Smckusick register vm_offset_t start; 56745748Smckusick register vm_offset_t end; 56845748Smckusick { 56945748Smckusick register vm_page_t p; 57045748Smckusick 57148386Skarels if (object == NULL) 57245748Smckusick return; 57345748Smckusick 57445748Smckusick vm_object_lock(object); 575*65231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) 57649292Shibler if ((start <= p->offset) && (p->offset < end)) 57749292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 57845748Smckusick vm_object_unlock(object); 57945748Smckusick } 58045748Smckusick 58145748Smckusick /* 58245748Smckusick * vm_object_copy: 58345748Smckusick * 58445748Smckusick * Create a new object which is a copy of an existing 58545748Smckusick * object, and mark all of the pages in the existing 58645748Smckusick * object 'copy-on-write'. The new object has one reference. 58745748Smckusick * Returns the new object. 58845748Smckusick * 58945748Smckusick * May defer the copy until later if the object is not backed 59045748Smckusick * up by a non-default pager. 
59145748Smckusick */ 59245748Smckusick void vm_object_copy(src_object, src_offset, size, 59345748Smckusick dst_object, dst_offset, src_needs_copy) 59445748Smckusick register vm_object_t src_object; 59545748Smckusick vm_offset_t src_offset; 59645748Smckusick vm_size_t size; 59745748Smckusick vm_object_t *dst_object; /* OUT */ 59845748Smckusick vm_offset_t *dst_offset; /* OUT */ 59945748Smckusick boolean_t *src_needs_copy; /* OUT */ 60045748Smckusick { 60145748Smckusick register vm_object_t new_copy; 60245748Smckusick register vm_object_t old_copy; 60345748Smckusick vm_offset_t new_start, new_end; 60445748Smckusick 60545748Smckusick register vm_page_t p; 60645748Smckusick 60748386Skarels if (src_object == NULL) { 60845748Smckusick /* 60945748Smckusick * Nothing to copy 61045748Smckusick */ 61148386Skarels *dst_object = NULL; 61245748Smckusick *dst_offset = 0; 61345748Smckusick *src_needs_copy = FALSE; 61445748Smckusick return; 61545748Smckusick } 61645748Smckusick 61745748Smckusick /* 61845748Smckusick * If the object's pager is null_pager or the 61945748Smckusick * default pager, we don't have to make a copy 62045748Smckusick * of it. Instead, we set the needs copy flag and 62145748Smckusick * make a shadow later. 62245748Smckusick */ 62345748Smckusick 62445748Smckusick vm_object_lock(src_object); 62548386Skarels if (src_object->pager == NULL || 62650917Smckusick (src_object->flags & OBJ_INTERNAL)) { 62745748Smckusick 62845748Smckusick /* 62945748Smckusick * Make another reference to the object 63045748Smckusick */ 63145748Smckusick src_object->ref_count++; 63245748Smckusick 63345748Smckusick /* 63445748Smckusick * Mark all of the pages copy-on-write. 
63545748Smckusick */ 636*65231Smckusick for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next) 63745748Smckusick if (src_offset <= p->offset && 63845748Smckusick p->offset < src_offset + size) 63956382Smckusick p->flags |= PG_COPYONWRITE; 64045748Smckusick vm_object_unlock(src_object); 64145748Smckusick 64245748Smckusick *dst_object = src_object; 64345748Smckusick *dst_offset = src_offset; 64445748Smckusick 64545748Smckusick /* 64645748Smckusick * Must make a shadow when write is desired 64745748Smckusick */ 64845748Smckusick *src_needs_copy = TRUE; 64945748Smckusick return; 65045748Smckusick } 65145748Smckusick 65245748Smckusick /* 65345748Smckusick * Try to collapse the object before copying it. 65445748Smckusick */ 65545748Smckusick vm_object_collapse(src_object); 65645748Smckusick 65745748Smckusick /* 65845748Smckusick * If the object has a pager, the pager wants to 65945748Smckusick * see all of the changes. We need a copy-object 66045748Smckusick * for the changed pages. 66145748Smckusick * 66245748Smckusick * If there is a copy-object, and it is empty, 66345748Smckusick * no changes have been made to the object since the 66445748Smckusick * copy-object was made. We can use the same copy- 66545748Smckusick * object. 66645748Smckusick */ 66745748Smckusick 66845748Smckusick Retry1: 66945748Smckusick old_copy = src_object->copy; 67048386Skarels if (old_copy != NULL) { 67145748Smckusick /* 67245748Smckusick * Try to get the locks (out of order) 67345748Smckusick */ 67445748Smckusick if (!vm_object_lock_try(old_copy)) { 67545748Smckusick vm_object_unlock(src_object); 67645748Smckusick 67745748Smckusick /* should spin a bit here... 
*/ 67845748Smckusick vm_object_lock(src_object); 67945748Smckusick goto Retry1; 68045748Smckusick } 68145748Smckusick 68245748Smckusick if (old_copy->resident_page_count == 0 && 68348386Skarels old_copy->pager == NULL) { 68445748Smckusick /* 68545748Smckusick * Return another reference to 68645748Smckusick * the existing copy-object. 68745748Smckusick */ 68845748Smckusick old_copy->ref_count++; 68945748Smckusick vm_object_unlock(old_copy); 69045748Smckusick vm_object_unlock(src_object); 69145748Smckusick *dst_object = old_copy; 69245748Smckusick *dst_offset = src_offset; 69345748Smckusick *src_needs_copy = FALSE; 69445748Smckusick return; 69545748Smckusick } 69645748Smckusick vm_object_unlock(old_copy); 69745748Smckusick } 69845748Smckusick vm_object_unlock(src_object); 69945748Smckusick 70045748Smckusick /* 70145748Smckusick * If the object has a pager, the pager wants 70245748Smckusick * to see all of the changes. We must make 70345748Smckusick * a copy-object and put the changed pages there. 70445748Smckusick * 70545748Smckusick * The copy-object is always made large enough to 70645748Smckusick * completely shadow the original object, since 70745748Smckusick * it may have several users who want to shadow 70845748Smckusick * the original object at different points. 
70945748Smckusick */ 71045748Smckusick 71145748Smckusick new_copy = vm_object_allocate(src_object->size); 71245748Smckusick 71345748Smckusick Retry2: 71445748Smckusick vm_object_lock(src_object); 71545748Smckusick /* 71645748Smckusick * Copy object may have changed while we were unlocked 71745748Smckusick */ 71845748Smckusick old_copy = src_object->copy; 71948386Skarels if (old_copy != NULL) { 72045748Smckusick /* 72145748Smckusick * Try to get the locks (out of order) 72245748Smckusick */ 72345748Smckusick if (!vm_object_lock_try(old_copy)) { 72445748Smckusick vm_object_unlock(src_object); 72545748Smckusick goto Retry2; 72645748Smckusick } 72745748Smckusick 72845748Smckusick /* 72945748Smckusick * Consistency check 73045748Smckusick */ 73145748Smckusick if (old_copy->shadow != src_object || 73245748Smckusick old_copy->shadow_offset != (vm_offset_t) 0) 73345748Smckusick panic("vm_object_copy: copy/shadow inconsistency"); 73445748Smckusick 73545748Smckusick /* 73645748Smckusick * Make the old copy-object shadow the new one. 73745748Smckusick * It will receive no more pages from the original 73845748Smckusick * object. 73945748Smckusick */ 74045748Smckusick 74145748Smckusick src_object->ref_count--; /* remove ref. from old_copy */ 74245748Smckusick old_copy->shadow = new_copy; 74345748Smckusick new_copy->ref_count++; /* locking not needed - we 74445748Smckusick have the only pointer */ 74545748Smckusick vm_object_unlock(old_copy); /* done with old_copy */ 74645748Smckusick } 74745748Smckusick 74845748Smckusick new_start = (vm_offset_t) 0; /* always shadow original at 0 */ 74945748Smckusick new_end = (vm_offset_t) new_copy->size; /* for the whole object */ 75045748Smckusick 75145748Smckusick /* 75245748Smckusick * Point the new copy at the existing object. 
75345748Smckusick */ 75445748Smckusick 75545748Smckusick new_copy->shadow = src_object; 75645748Smckusick new_copy->shadow_offset = new_start; 75745748Smckusick src_object->ref_count++; 75845748Smckusick src_object->copy = new_copy; 75945748Smckusick 76045748Smckusick /* 76145748Smckusick * Mark all the affected pages of the existing object 76245748Smckusick * copy-on-write. 76345748Smckusick */ 764*65231Smckusick for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) 76549292Shibler if ((new_start <= p->offset) && (p->offset < new_end)) 76656382Smckusick p->flags |= PG_COPYONWRITE; 76745748Smckusick 76845748Smckusick vm_object_unlock(src_object); 76945748Smckusick 77045748Smckusick *dst_object = new_copy; 77145748Smckusick *dst_offset = src_offset - new_start; 77245748Smckusick *src_needs_copy = FALSE; 77345748Smckusick } 77445748Smckusick 77545748Smckusick /* 77645748Smckusick * vm_object_shadow: 77745748Smckusick * 77845748Smckusick * Create a new object which is backed by the 77945748Smckusick * specified existing object range. The source 78045748Smckusick * object reference is deallocated. 78145748Smckusick * 78245748Smckusick * The new object and offset into that object 78345748Smckusick * are returned in the source parameters. 
78445748Smckusick */ 78545748Smckusick 78645748Smckusick void vm_object_shadow(object, offset, length) 78745748Smckusick vm_object_t *object; /* IN/OUT */ 78845748Smckusick vm_offset_t *offset; /* IN/OUT */ 78945748Smckusick vm_size_t length; 79045748Smckusick { 79145748Smckusick register vm_object_t source; 79245748Smckusick register vm_object_t result; 79345748Smckusick 79445748Smckusick source = *object; 79545748Smckusick 79645748Smckusick /* 79745748Smckusick * Allocate a new object with the given length 79845748Smckusick */ 79945748Smckusick 80048386Skarels if ((result = vm_object_allocate(length)) == NULL) 80145748Smckusick panic("vm_object_shadow: no object for shadowing"); 80245748Smckusick 80345748Smckusick /* 80445748Smckusick * The new object shadows the source object, adding 80545748Smckusick * a reference to it. Our caller changes his reference 80645748Smckusick * to point to the new object, removing a reference to 80745748Smckusick * the source object. Net result: no change of reference 80845748Smckusick * count. 80945748Smckusick */ 81045748Smckusick result->shadow = source; 81145748Smckusick 81245748Smckusick /* 81345748Smckusick * Store the offset into the source object, 81445748Smckusick * and fix up the offset into the new object. 81545748Smckusick */ 81645748Smckusick 81745748Smckusick result->shadow_offset = *offset; 81845748Smckusick 81945748Smckusick /* 82045748Smckusick * Return the new things 82145748Smckusick */ 82245748Smckusick 82345748Smckusick *offset = 0; 82445748Smckusick *object = result; 82545748Smckusick } 82645748Smckusick 82745748Smckusick /* 82845748Smckusick * Set the specified object's pager to the specified pager. 
82945748Smckusick */ 83045748Smckusick 83145748Smckusick void vm_object_setpager(object, pager, paging_offset, 83245748Smckusick read_only) 83345748Smckusick vm_object_t object; 83445748Smckusick vm_pager_t pager; 83545748Smckusick vm_offset_t paging_offset; 83645748Smckusick boolean_t read_only; 83745748Smckusick { 83845748Smckusick #ifdef lint 83945748Smckusick read_only++; /* No longer used */ 84060345Storek #endif 84145748Smckusick 84245748Smckusick vm_object_lock(object); /* XXX ? */ 84345748Smckusick object->pager = pager; 84445748Smckusick object->paging_offset = paging_offset; 84545748Smckusick vm_object_unlock(object); /* XXX ? */ 84645748Smckusick } 84745748Smckusick 84845748Smckusick /* 84945748Smckusick * vm_object_hash hashes the pager/id pair. 85045748Smckusick */ 85145748Smckusick 85245748Smckusick #define vm_object_hash(pager) \ 85345748Smckusick (((unsigned)pager)%VM_OBJECT_HASH_COUNT) 85445748Smckusick 85545748Smckusick /* 85645748Smckusick * vm_object_lookup looks in the object cache for an object with the 85745748Smckusick * specified pager and paging id. 
85845748Smckusick */ 85945748Smckusick 86045748Smckusick vm_object_t vm_object_lookup(pager) 86145748Smckusick vm_pager_t pager; 86245748Smckusick { 86345748Smckusick register vm_object_hash_entry_t entry; 86445748Smckusick vm_object_t object; 86545748Smckusick 86645748Smckusick vm_object_cache_lock(); 86745748Smckusick 868*65231Smckusick for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first; 869*65231Smckusick entry != NULL; 870*65231Smckusick entry = entry->hash_links.tqe_next) { 87145748Smckusick object = entry->object; 87245748Smckusick if (object->pager == pager) { 87345748Smckusick vm_object_lock(object); 87445748Smckusick if (object->ref_count == 0) { 875*65231Smckusick TAILQ_REMOVE(&vm_object_cached_list, object, 876*65231Smckusick cached_list); 87745748Smckusick vm_object_cached--; 87845748Smckusick } 87945748Smckusick object->ref_count++; 88045748Smckusick vm_object_unlock(object); 88145748Smckusick vm_object_cache_unlock(); 88245748Smckusick return(object); 88345748Smckusick } 88445748Smckusick } 88545748Smckusick 88645748Smckusick vm_object_cache_unlock(); 88748386Skarels return(NULL); 88845748Smckusick } 88945748Smckusick 89045748Smckusick /* 89145748Smckusick * vm_object_enter enters the specified object/pager/id into 89245748Smckusick * the hash table. 89345748Smckusick */ 89445748Smckusick 89545748Smckusick void vm_object_enter(object, pager) 89645748Smckusick vm_object_t object; 89745748Smckusick vm_pager_t pager; 89845748Smckusick { 899*65231Smckusick struct vm_object_hash_head *bucket; 90045748Smckusick register vm_object_hash_entry_t entry; 90145748Smckusick 90245748Smckusick /* 90345748Smckusick * We don't cache null objects, and we can't cache 90445748Smckusick * objects with the null pager. 
90545748Smckusick */ 90645748Smckusick 90748386Skarels if (object == NULL) 90845748Smckusick return; 90948386Skarels if (pager == NULL) 91045748Smckusick return; 91145748Smckusick 91245748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 91345748Smckusick entry = (vm_object_hash_entry_t) 91445748Smckusick malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK); 91545748Smckusick entry->object = object; 91650917Smckusick object->flags |= OBJ_CANPERSIST; 91745748Smckusick 91845748Smckusick vm_object_cache_lock(); 919*65231Smckusick TAILQ_INSERT_TAIL(bucket, entry, hash_links); 92045748Smckusick vm_object_cache_unlock(); 92145748Smckusick } 92245748Smckusick 92345748Smckusick /* 92445748Smckusick * vm_object_remove: 92545748Smckusick * 92645748Smckusick * Remove the pager from the hash table. 92745748Smckusick * Note: This assumes that the object cache 92845748Smckusick * is locked. XXX this should be fixed 92945748Smckusick * by reorganizing vm_object_deallocate. 93045748Smckusick */ 93153354Sbostic void 93245748Smckusick vm_object_remove(pager) 93345748Smckusick register vm_pager_t pager; 93445748Smckusick { 935*65231Smckusick struct vm_object_hash_head *bucket; 93645748Smckusick register vm_object_hash_entry_t entry; 93745748Smckusick register vm_object_t object; 93845748Smckusick 93945748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 94045748Smckusick 941*65231Smckusick for (entry = bucket->tqh_first; 942*65231Smckusick entry != NULL; 943*65231Smckusick entry = entry->hash_links.tqe_next) { 94445748Smckusick object = entry->object; 94545748Smckusick if (object->pager == pager) { 946*65231Smckusick TAILQ_REMOVE(bucket, entry, hash_links); 94745748Smckusick free((caddr_t)entry, M_VMOBJHASH); 94845748Smckusick break; 94945748Smckusick } 95045748Smckusick } 95145748Smckusick } 95245748Smckusick 95345748Smckusick /* 95445748Smckusick * vm_object_cache_clear removes all objects from the cache. 
 *
 */

/*
 * Flush every object out of the object cache.  For each cached object
 * we must drop the cache lock before calling vm_object_lookup() /
 * pager_cache(), since those take the cache lock themselves; the list
 * head is therefore re-read from the top on every iteration.
 */
void vm_object_cache_clear()
{
	register vm_object_t	object;

	/*
	 * Remove each object in the cache by scanning down the
	 * list of cached objects.
	 */
	vm_object_cache_lock();
	while ((object = vm_object_cached_list.tqh_first) != NULL) {
		vm_object_cache_unlock();

		/*
		 * Note: it is important that we use vm_object_lookup
		 * to gain a reference, and not vm_object_reference, because
		 * the logic for removing an object from the cache lies in
		 * lookup.
		 */
		/*
		 * NOTE(review): pager_cache(object, FALSE) presumably
		 * releases the reference just gained and prevents the
		 * object from re-entering the cache -- confirm against
		 * the pager_cache() implementation.
		 */
		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_clear: I'm sooo confused.");
		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/* Global switch: collapsing can be disabled for debugging. */
boolean_t	vm_object_collapse_allowed = TRUE;
/*
 * vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 */
void vm_object_collapse(object)
	register vm_object_t	object;

{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	if (!vm_object_collapse_allowed)
		return;

	/*
	 * Loop: each successful collapse or bypass leaves `object'
	 * with a new shadow, which may itself be collapsible.
	 */
	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently
		 * being paged out (or have ever been paged out).
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0 ||
		    object->pager != NULL)
			return;

		/*
		 * There is a backing object, and
		 */
		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 * ...
		 * The backing object is not read_only,
		 * and no pages in the backing object are
		 * currently being paged out.
		 * The backing object is internal.
		 */
		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * The backing object can't be a copy-object:
		 * the shadow_offset for the copy-object must stay
		 * as 0.  Furthermore (for the 'we have all the
		 * pages' case), if we bypass backing_object and
		 * just shadow the next object in the chain, old
		 * pages from that object would then have to be copied
		 * BOTH into the (former) backing_object and into the
		 * parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy != NULL) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * We know that we can either collapse the backing
		 * object (if the parent is the only reference to
		 * it) or (perhaps) remove the parent's reference
		 * to it.
		 */
		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 * If there is exactly one reference to the backing
		 * object, we can collapse it into the parent.
		 */
		if (backing_object->ref_count == 1) {

			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object
			 * to the parent.  Pages that have been paged out
			 * will be overwritten by any of the parent's
			 * pages that shadow them.
			 */
			while ((p = backing_object->memq.tqh_first) != NULL) {

				/* Offset of this page within the parent. */
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if
				 * this page falls outside the parent,
				 * dispose of it.
				 *
				 * Otherwise, move it as planned.
				 */
				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					/*
					 * A real (non-fake) parent page
					 * shadows p: drop p.  A fake page
					 * is a placeholder and is replaced
					 * by p below.
					 */
					if (pp != NULL && !(pp->flags & PG_FAKE)) {
						vm_page_lock_queues();
						vm_page_free(p);
						vm_page_unlock_queues();
					}
					else {
						if (pp) {
							/* may be someone waiting for it */
							PAGE_WAKEUP(pp);
							vm_page_lock_queues();
							vm_page_free(pp);
							vm_page_unlock_queues();
						}
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 *
			 * XXX We're only using part of the paging space
			 * for keeps now... we ought to discard the
			 * unused portion.
			 */
			object->pager = backing_object->pager;
			object->paging_offset += backing_offset;

			backing_object->pager = NULL;

			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->shadow
			 * moves from within backing_object to within object.
			 */
			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no
			 * pager left, and no object references within it,
			 * all that is necessary is to dispose of it.
			 */
			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent
			 * object no longer has to shadow the backing
			 * object; it can shadow the next one in the
			 * chain.
			 *
			 * The backing object must not be paged out - we'd
			 * have to check all of the paged-out pages, as
			 * well.
			 */
			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 * Should have a check for a 'small' number
			 * of pages here.
			 */
			for (p = backing_object->memq.tqh_first;
			     p != NULL;
			     p = p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if
				 * this page falls outside the parent,
				 * keep going.
				 *
				 * Otherwise, the backing_object must be
				 * left in the chain.
				 */
				/*
				 * NOTE(review): the collapse case above treats
				 * new_offset >= size as outside the parent, but
				 * this test uses `new_offset <= size', so a page
				 * at exactly offset `size' blocks the bypass.
				 * Conservative (refuses a legal bypass), not
				 * unsafe -- verify against later releases.
				 */
				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset))
				      == NULL ||
				     (pp->flags & PG_FAKE))) {
					/*
					 * Page still needed.
					 * Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 * Make the parent shadow the next object
			 * in the chain.  Deallocating backing_object
			 * will not remove it, since its reference
			 * count is at least 2.
			 */
			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/* Drop the reference count on backing_object.
			 * Since its ref_count was at least 2, it
			 * will not vanish; so we don't need to call
			 * vm_object_deallocate.
			 */
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses ++;

		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
125445748Smckusick */ 125545748Smckusick void vm_object_page_remove(object, start, end) 125645748Smckusick register vm_object_t object; 125745748Smckusick register vm_offset_t start; 125845748Smckusick register vm_offset_t end; 125945748Smckusick { 126045748Smckusick register vm_page_t p, next; 126145748Smckusick 126248386Skarels if (object == NULL) 126345748Smckusick return; 126445748Smckusick 1265*65231Smckusick for (p = object->memq.tqh_first; p != NULL; p = next) { 1266*65231Smckusick next = p->listq.tqe_next; 126745748Smckusick if ((start <= p->offset) && (p->offset < end)) { 126849292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 126945748Smckusick vm_page_lock_queues(); 127045748Smckusick vm_page_free(p); 127145748Smckusick vm_page_unlock_queues(); 127245748Smckusick } 127345748Smckusick } 127445748Smckusick } 127545748Smckusick 127645748Smckusick /* 127745748Smckusick * Routine: vm_object_coalesce 127845748Smckusick * Function: Coalesces two objects backing up adjoining 127945748Smckusick * regions of memory into a single object. 128045748Smckusick * 128145748Smckusick * returns TRUE if objects were combined. 128245748Smckusick * 128345748Smckusick * NOTE: Only works at the moment if the second object is NULL - 128445748Smckusick * if it's not, which object do we lock first? 128545748Smckusick * 128645748Smckusick * Parameters: 128745748Smckusick * prev_object First object to coalesce 128845748Smckusick * prev_offset Offset into prev_object 128945748Smckusick * next_object Second object into coalesce 129045748Smckusick * next_offset Offset into next_object 129145748Smckusick * 129245748Smckusick * prev_size Size of reference to prev_object 129345748Smckusick * next_size Size of reference to next_object 129445748Smckusick * 129545748Smckusick * Conditions: 129645748Smckusick * The object must *not* be locked. 
129745748Smckusick */ 129845748Smckusick boolean_t vm_object_coalesce(prev_object, next_object, 129945748Smckusick prev_offset, next_offset, 130045748Smckusick prev_size, next_size) 130145748Smckusick 130245748Smckusick register vm_object_t prev_object; 130345748Smckusick vm_object_t next_object; 130445748Smckusick vm_offset_t prev_offset, next_offset; 130545748Smckusick vm_size_t prev_size, next_size; 130645748Smckusick { 130745748Smckusick vm_size_t newsize; 130845748Smckusick 130945748Smckusick #ifdef lint 131045748Smckusick next_offset++; 131160345Storek #endif 131245748Smckusick 131348386Skarels if (next_object != NULL) { 131445748Smckusick return(FALSE); 131545748Smckusick } 131645748Smckusick 131748386Skarels if (prev_object == NULL) { 131845748Smckusick return(TRUE); 131945748Smckusick } 132045748Smckusick 132145748Smckusick vm_object_lock(prev_object); 132245748Smckusick 132345748Smckusick /* 132445748Smckusick * Try to collapse the object first 132545748Smckusick */ 132645748Smckusick vm_object_collapse(prev_object); 132745748Smckusick 132845748Smckusick /* 132945748Smckusick * Can't coalesce if: 133045748Smckusick * . more than one reference 133145748Smckusick * . paged out 133245748Smckusick * . shadows another object 133345748Smckusick * . has a copy elsewhere 133445748Smckusick * (any of which mean that the pages not mapped to 133545748Smckusick * prev_entry may be in use anyway) 133645748Smckusick */ 133745748Smckusick 133845748Smckusick if (prev_object->ref_count > 1 || 133948386Skarels prev_object->pager != NULL || 134048386Skarels prev_object->shadow != NULL || 134148386Skarels prev_object->copy != NULL) { 134245748Smckusick vm_object_unlock(prev_object); 134345748Smckusick return(FALSE); 134445748Smckusick } 134545748Smckusick 134645748Smckusick /* 134745748Smckusick * Remove any pages that may still be in the object from 134845748Smckusick * a previous deallocation. 
134945748Smckusick */ 135045748Smckusick 135145748Smckusick vm_object_page_remove(prev_object, 135245748Smckusick prev_offset + prev_size, 135345748Smckusick prev_offset + prev_size + next_size); 135445748Smckusick 135545748Smckusick /* 135645748Smckusick * Extend the object if necessary. 135745748Smckusick */ 135845748Smckusick newsize = prev_offset + prev_size + next_size; 135945748Smckusick if (newsize > prev_object->size) 136045748Smckusick prev_object->size = newsize; 136145748Smckusick 136245748Smckusick vm_object_unlock(prev_object); 136345748Smckusick return(TRUE); 136445748Smckusick } 136545748Smckusick 136645748Smckusick /* 136745748Smckusick * vm_object_print: [ debug ] 136845748Smckusick */ 136945748Smckusick void vm_object_print(object, full) 137045748Smckusick vm_object_t object; 137145748Smckusick boolean_t full; 137245748Smckusick { 137345748Smckusick register vm_page_t p; 137445748Smckusick extern indent; 137545748Smckusick 137645748Smckusick register int count; 137745748Smckusick 137848386Skarels if (object == NULL) 137945748Smckusick return; 138045748Smckusick 138145748Smckusick iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", 138245748Smckusick (int) object, (int) object->size, 138345748Smckusick object->resident_page_count, object->ref_count); 138445748Smckusick printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n", 138545748Smckusick (int) object->pager, (int) object->paging_offset, 138645748Smckusick (int) object->shadow, (int) object->shadow_offset); 138745748Smckusick printf("cache: next=0x%x, prev=0x%x\n", 1388*65231Smckusick object->cached_list.tqe_next, object->cached_list.tqe_prev); 138945748Smckusick 139045748Smckusick if (!full) 139145748Smckusick return; 139245748Smckusick 139345748Smckusick indent += 2; 139445748Smckusick count = 0; 1395*65231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 139645748Smckusick if (count == 0) 139745748Smckusick iprintf("memory:="); 139845748Smckusick else if (count == 6) { 
139945748Smckusick printf("\n"); 140045748Smckusick iprintf(" ..."); 140145748Smckusick count = 0; 140245748Smckusick } else 140345748Smckusick printf(","); 140445748Smckusick count++; 140545748Smckusick 140645748Smckusick printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p)); 140745748Smckusick } 140845748Smckusick if (count != 0) 140945748Smckusick printf("\n"); 141045748Smckusick indent -= 2; 141145748Smckusick } 1412