145748Smckusick /* 263379Sbostic * Copyright (c) 1991, 1993 363379Sbostic * The Regents of the University of California. All rights reserved. 445748Smckusick * 545748Smckusick * This code is derived from software contributed to Berkeley by 645748Smckusick * The Mach Operating System project at Carnegie-Mellon University. 745748Smckusick * 848493Smckusick * %sccs.include.redist.c% 945748Smckusick * 10*66439Shibler * @(#)vm_object.c 8.5 (Berkeley) 03/22/94 1148493Smckusick * 1248493Smckusick * 1348493Smckusick * Copyright (c) 1987, 1990 Carnegie-Mellon University. 1448493Smckusick * All rights reserved. 1548493Smckusick * 1648493Smckusick * Authors: Avadis Tevanian, Jr., Michael Wayne Young 1748493Smckusick * 1848493Smckusick * Permission to use, copy, modify and distribute this software and 1948493Smckusick * its documentation is hereby granted, provided that both the copyright 2048493Smckusick * notice and this permission notice appear in all copies of the 2148493Smckusick * software, derivative works or modified versions, and any portions 2248493Smckusick * thereof, and that both notices appear in supporting documentation. 2348493Smckusick * 2448493Smckusick * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 2548493Smckusick * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 2648493Smckusick * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 2748493Smckusick * 2848493Smckusick * Carnegie Mellon requests users of this software to return to 2948493Smckusick * 3048493Smckusick * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 3148493Smckusick * School of Computer Science 3248493Smckusick * Carnegie Mellon University 3348493Smckusick * Pittsburgh PA 15213-3890 3448493Smckusick * 3548493Smckusick * any improvements or extensions that they make and grant Carnegie the 3648493Smckusick * rights to redistribute these changes. 
3745748Smckusick */ 3845748Smckusick 3945748Smckusick /* 4045748Smckusick * Virtual memory object module. 4145748Smckusick */ 4245748Smckusick 4353354Sbostic #include <sys/param.h> 4453354Sbostic #include <sys/systm.h> 4553354Sbostic #include <sys/malloc.h> 4645748Smckusick 4753354Sbostic #include <vm/vm.h> 4853354Sbostic #include <vm/vm_page.h> 4948386Skarels 5045748Smckusick /* 5145748Smckusick * Virtual memory objects maintain the actual data 5245748Smckusick * associated with allocated virtual memory. A given 5345748Smckusick * page of memory exists within exactly one object. 5445748Smckusick * 5545748Smckusick * An object is only deallocated when all "references" 5645748Smckusick * are given up. Only one "reference" to a given 5745748Smckusick * region of an object should be writeable. 5845748Smckusick * 5945748Smckusick * Associated with each object is a list of all resident 6045748Smckusick * memory pages belonging to that object; this list is 6145748Smckusick * maintained by the "vm_page" module, and locked by the object's 6245748Smckusick * lock. 6345748Smckusick * 6445748Smckusick * Each object also records a "pager" routine which is 6545748Smckusick * used to retrieve (and store) pages to the proper backing 6645748Smckusick * storage. In addition, objects may be backed by other 6745748Smckusick * objects from which they were virtual-copied. 
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

/*
 * Backing storage for the kernel and kmem objects; these two objects
 * are set up at boot time by vm_object_init() rather than allocated
 * dynamically.
 */
struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

/* Number of buckets in the pager->object hash table. */
#define	VM_OBJECT_HASH_COUNT	157

int		vm_cache_max = 100;	/* can patch if necessary */
struct vm_object_hash_head	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

/*
 * Counters — names suggest they track events in the object collapse
 * machinery (vm_object_collapse); updated outside this view.
 */
long	object_collapses = 0;
long	object_bypasses = 0;

static void _vm_object_allocate __P((vm_size_t, vm_object_t));

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void vm_object_init(size)
	vm_size_t	size;
{
	register int	i;

	/* Empty the object cache and the list of all objects. */
	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	/* All pager->object hash chains start out empty. */
	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		TAILQ_INIT(&vm_object_hashtable[i]);

	/*
	 * The kernel and kmem objects live in statically allocated
	 * storage (kernel_object_store/kmem_object_store) — presumably
	 * because they are needed before dynamic allocation is usable.
	 * The kernel object covers "size" bytes; the kmem object covers
	 * the kernel-malloc and mbuf areas.
	 */
	kernel_object = &kernel_object_store;
	_vm_object_allocate(size, kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t vm_object_allocate(size)
	vm_size_t	size;
{
	register vm_object_t	result;

	/* M_WAITOK: kernel malloc waits for memory rather than failing. */
	result = (vm_object_t)
		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return(result);
}

/*
 *	_vm_object_allocate:
 *
 *	Initialize the fields of an object (possibly statically
 *	allocated, see vm_object_init) and place it on the global
 *	object list.  The new object has one reference.
 */
static void
_vm_object_allocate(size, object)
	vm_size_t		size;
	register vm_object_t	object;
{
	TAILQ_INIT(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	/* Statistics: account the object's pages as anticipated zero-fill. */
	cnt.v_nzfod += atop(size);
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void vm_object_deallocate(object)
	register vm_object_t	object;
{
	vm_object_t	temp;

	/*
	 * Walk down the shadow chain: terminating an object implicitly
	 * drops its reference to the object it shadows, so the loop
	 * continues with that object.
	 */
	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->flags & OBJ_CANPERSIST) {

			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			vm_object_deactivate_pages(object);
			vm_object_unlock(object);

			/* Keep the cache within vm_cache_max entries. */
			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}


/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 */
void vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;

	/*
	 * Detach the object from its shadow if we are the shadow's
	 * copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}

	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 * If not an internal object clean all the pages, removing them
	 * from paging queues as we go.
	 *
	 * XXX need to do something in the event of a cleaning error.
	 */
	if ((object->flags & OBJ_INTERNAL) == 0) {
		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
		vm_object_unlock(object);
	}

	/*
	 * Now free the pages.
	 * For internal objects, this also removes them from paging queues.
	 */
	while ((p = object->memq.tqh_first) != NULL) {
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		vm_page_free(p);
		cnt.v_pfree++;
		vm_page_unlock_queues();
	}
	/*
	 * NOTE(review): for non-internal objects the lock was already
	 * dropped before the free loop above, making this second unlock
	 * look redundant — harmless only if simple locks are no-ops on
	 * this configuration; confirm against vm_object_unlock().
	 */
	if ((object->flags & OBJ_INTERNAL) == 0)
		vm_object_unlock(object);

	/*
	 * Let the pager know object is dead.
	 */
	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 * Free the space for the object.
	 */
	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	If syncio is TRUE, page cleaning is done synchronously.
 *	If de_queue is TRUE, pages are removed from any paging queue
 *	they were on, otherwise they are left on whatever queue they
 *	were on before the cleaning operation began.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns TRUE if all was well, FALSE if there was a pager error
 *	somewhere.  We attempt to clean (and dequeue) all pages regardless
 *	of where an error occurs.
 */
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
	boolean_t		syncio;
	boolean_t		de_queue;
{
	register vm_page_t	p;
	/* Queue the current page came from: >0 active, <0 inactive, 0 none. */
	int onqueue;
	boolean_t noerror = TRUE;

	if (object == NULL)
		return (TRUE);

	/*
	 * If it is an internal object and there is no pager, attempt to
	 * allocate one.  Note that vm_object_collapse may relocate one
	 * from a collapsed object so we must recheck afterward.
	 */
	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
		vm_object_collapse(object);
		if (object->pager == NULL) {
			vm_pager_t pager;

			vm_object_unlock(object);
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL,
						  (vm_offset_t)0);
			if (pager)
				vm_object_setpager(object, pager, 0, FALSE);
			vm_object_lock(object);
		}
	}
	/* Without a pager there is nowhere to clean pages to. */
	if (object->pager == NULL)
		return (FALSE);

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}
	/*
	 * Loop through the object page list cleaning as necessary.
	 */
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		/* start == end means "the whole object" (see header comment) */
		if ((start == end || p->offset >= start && p->offset < end) &&
		    !(p->flags & PG_FICTITIOUS)) {
			/* A "clean" page modified via pmap is really dirty. */
			if ((p->flags & PG_CLEAN) &&
			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;
			/*
			 * Remove the page from any paging queue.
			 * This needs to be done if either we have been
			 * explicitly asked to do so or it is about to
			 * be cleaned (see comment below).
			 */
			if (de_queue || !(p->flags & PG_CLEAN)) {
				vm_page_lock_queues();
				if (p->flags & PG_ACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_active,
						     p, pageq);
					p->flags &= ~PG_ACTIVE;
					cnt.v_active_count--;
					onqueue = 1;
				} else if (p->flags & PG_INACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_inactive,
						     p, pageq);
					p->flags &= ~PG_INACTIVE;
					cnt.v_inactive_count--;
					onqueue = -1;
				} else
					onqueue = 0;
				vm_page_unlock_queues();
			}
			/*
			 * To ensure the state of the page doesn't change
			 * during the clean operation we do two things.
			 * First we set the busy bit and write-protect all
			 * mappings to ensure that write accesses to the
			 * page block (in vm_fault).  Second, we remove
			 * the page from any paging queue to foil the
			 * pageout daemon (vm_pageout_scan).
			 */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			if (!(p->flags & PG_CLEAN)) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				/*
				 * XXX if put fails we mark the page as
				 * clean to avoid an infinite loop.
				 * Will loose changes to the page.
				 */
				if (vm_pager_put(object->pager, p, syncio)) {
					printf("%s: pager_put error\n",
					       "vm_object_page_clean");
					p->flags |= PG_CLEAN;
					noerror = FALSE;
				}
				vm_object_lock(object);
				object->paging_in_progress--;
				/*
				 * Unless asked to dequeue, put the page
				 * back on whichever queue it came from.
				 * (onqueue was set above: a dirty page
				 * always took the dequeue branch.)
				 */
				if (!de_queue && onqueue) {
					vm_page_lock_queues();
					if (onqueue > 0)
						vm_page_activate(p);
					else
						vm_page_deactivate(p);
					vm_page_unlock_queues();
				}
				p->flags &= ~PG_BUSY;
				PAGE_WAKEUP(p);
				/*
				 * The object was unlocked around the put,
				 * so the page list may have changed:
				 * restart the scan from the beginning.
				 */
				goto again;
			}
		}
	}
	return (noerror);
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
47845748Smckusick */ 47953354Sbostic void 48045748Smckusick vm_object_deactivate_pages(object) 48145748Smckusick register vm_object_t object; 48245748Smckusick { 48345748Smckusick register vm_page_t p, next; 48445748Smckusick 48565231Smckusick for (p = object->memq.tqh_first; p != NULL; p = next) { 48665231Smckusick next = p->listq.tqe_next; 48745748Smckusick vm_page_lock_queues(); 48845748Smckusick vm_page_deactivate(p); 48945748Smckusick vm_page_unlock_queues(); 49045748Smckusick } 49145748Smckusick } 49245748Smckusick 49345748Smckusick /* 49445748Smckusick * Trim the object cache to size. 49545748Smckusick */ 49653354Sbostic void 49745748Smckusick vm_object_cache_trim() 49845748Smckusick { 49945748Smckusick register vm_object_t object; 50045748Smckusick 50145748Smckusick vm_object_cache_lock(); 50245748Smckusick while (vm_object_cached > vm_cache_max) { 50365231Smckusick object = vm_object_cached_list.tqh_first; 50445748Smckusick vm_object_cache_unlock(); 50545748Smckusick 50645748Smckusick if (object != vm_object_lookup(object->pager)) 50745748Smckusick panic("vm_object_deactivate: I'm sooo confused."); 50845748Smckusick 50945748Smckusick pager_cache(object, FALSE); 51045748Smckusick 51145748Smckusick vm_object_cache_lock(); 51245748Smckusick } 51345748Smckusick vm_object_cache_unlock(); 51445748Smckusick } 51545748Smckusick 51645748Smckusick /* 51745748Smckusick * vm_object_pmap_copy: 51845748Smckusick * 51945748Smckusick * Makes all physical pages in the specified 52045748Smckusick * object range copy-on-write. No writeable 52145748Smckusick * references to these pages should remain. 52245748Smckusick * 52345748Smckusick * The object must *not* be locked. 
52445748Smckusick */ 52545748Smckusick void vm_object_pmap_copy(object, start, end) 52645748Smckusick register vm_object_t object; 52745748Smckusick register vm_offset_t start; 52845748Smckusick register vm_offset_t end; 52945748Smckusick { 53045748Smckusick register vm_page_t p; 53145748Smckusick 53248386Skarels if (object == NULL) 53345748Smckusick return; 53445748Smckusick 53545748Smckusick vm_object_lock(object); 53665231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 53745748Smckusick if ((start <= p->offset) && (p->offset < end)) { 53849292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ); 53956382Smckusick p->flags |= PG_COPYONWRITE; 54045748Smckusick } 54145748Smckusick } 54245748Smckusick vm_object_unlock(object); 54345748Smckusick } 54445748Smckusick 54545748Smckusick /* 54645748Smckusick * vm_object_pmap_remove: 54745748Smckusick * 54845748Smckusick * Removes all physical pages in the specified 54945748Smckusick * object range from all physical maps. 55045748Smckusick * 55145748Smckusick * The object must *not* be locked. 
55245748Smckusick */ 55345748Smckusick void vm_object_pmap_remove(object, start, end) 55445748Smckusick register vm_object_t object; 55545748Smckusick register vm_offset_t start; 55645748Smckusick register vm_offset_t end; 55745748Smckusick { 55845748Smckusick register vm_page_t p; 55945748Smckusick 56048386Skarels if (object == NULL) 56145748Smckusick return; 56245748Smckusick 56345748Smckusick vm_object_lock(object); 56465231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) 56549292Shibler if ((start <= p->offset) && (p->offset < end)) 56649292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 56745748Smckusick vm_object_unlock(object); 56845748Smckusick } 56945748Smckusick 57045748Smckusick /* 57145748Smckusick * vm_object_copy: 57245748Smckusick * 57345748Smckusick * Create a new object which is a copy of an existing 57445748Smckusick * object, and mark all of the pages in the existing 57545748Smckusick * object 'copy-on-write'. The new object has one reference. 57645748Smckusick * Returns the new object. 57745748Smckusick * 57845748Smckusick * May defer the copy until later if the object is not backed 57945748Smckusick * up by a non-default pager. 
 */
void vm_object_copy(src_object, src_offset, size,
		    dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 */

	vm_object_lock(src_object);
	if (src_object->pager == NULL ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		vm_object_unlock(src_object);

		/* Deferred copy: caller shares the object for now. */
		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	Try to collapse the object before copying it.
	 */
	vm_object_collapse(src_object);

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
77345748Smckusick */ 77445748Smckusick 77545748Smckusick void vm_object_shadow(object, offset, length) 77645748Smckusick vm_object_t *object; /* IN/OUT */ 77745748Smckusick vm_offset_t *offset; /* IN/OUT */ 77845748Smckusick vm_size_t length; 77945748Smckusick { 78045748Smckusick register vm_object_t source; 78145748Smckusick register vm_object_t result; 78245748Smckusick 78345748Smckusick source = *object; 78445748Smckusick 78545748Smckusick /* 78645748Smckusick * Allocate a new object with the given length 78745748Smckusick */ 78845748Smckusick 78948386Skarels if ((result = vm_object_allocate(length)) == NULL) 79045748Smckusick panic("vm_object_shadow: no object for shadowing"); 79145748Smckusick 79245748Smckusick /* 79345748Smckusick * The new object shadows the source object, adding 79445748Smckusick * a reference to it. Our caller changes his reference 79545748Smckusick * to point to the new object, removing a reference to 79645748Smckusick * the source object. Net result: no change of reference 79745748Smckusick * count. 79845748Smckusick */ 79945748Smckusick result->shadow = source; 80045748Smckusick 80145748Smckusick /* 80245748Smckusick * Store the offset into the source object, 80345748Smckusick * and fix up the offset into the new object. 80445748Smckusick */ 80545748Smckusick 80645748Smckusick result->shadow_offset = *offset; 80745748Smckusick 80845748Smckusick /* 80945748Smckusick * Return the new things 81045748Smckusick */ 81145748Smckusick 81245748Smckusick *offset = 0; 81345748Smckusick *object = result; 81445748Smckusick } 81545748Smckusick 81645748Smckusick /* 81745748Smckusick * Set the specified object's pager to the specified pager. 
81845748Smckusick */ 81945748Smckusick 82045748Smckusick void vm_object_setpager(object, pager, paging_offset, 82145748Smckusick read_only) 82245748Smckusick vm_object_t object; 82345748Smckusick vm_pager_t pager; 82445748Smckusick vm_offset_t paging_offset; 82545748Smckusick boolean_t read_only; 82645748Smckusick { 82745748Smckusick #ifdef lint 82845748Smckusick read_only++; /* No longer used */ 82960345Storek #endif 83045748Smckusick 83145748Smckusick vm_object_lock(object); /* XXX ? */ 83245748Smckusick object->pager = pager; 83345748Smckusick object->paging_offset = paging_offset; 83445748Smckusick vm_object_unlock(object); /* XXX ? */ 83545748Smckusick } 83645748Smckusick 83745748Smckusick /* 83845748Smckusick * vm_object_hash hashes the pager/id pair. 83945748Smckusick */ 84045748Smckusick 84145748Smckusick #define vm_object_hash(pager) \ 84245748Smckusick (((unsigned)pager)%VM_OBJECT_HASH_COUNT) 84345748Smckusick 84445748Smckusick /* 84545748Smckusick * vm_object_lookup looks in the object cache for an object with the 84645748Smckusick * specified pager and paging id. 
 */

/*
 *	vm_object_lookup:
 *
 *	Search the pager hash table for an object backed by the given
 *	pager.  On a hit, take a new reference on the object and return
 *	it; if the object was sitting unreferenced on the cached-object
 *	list, pull it off that list first (the cache holds only
 *	zero-reference objects).  Returns NULL if no object is found.
 */
vm_object_t vm_object_lookup(pager)
	vm_pager_t	pager;
{
	register vm_object_hash_entry_t	entry;
	vm_object_t			object;

	vm_object_cache_lock();

	/* Walk the hash bucket selected by the pager pointer. */
	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
	     entry != NULL;
	     entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			/*
			 * ref_count == 0 means the object is resting on
			 * the cached-object list; reviving it requires
			 * removing it from that list.
			 */
			if (object->ref_count == 0) {
				TAILQ_REMOVE(&vm_object_cached_list, object,
					cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return(object);
		}
	}

	vm_object_cache_unlock();
	return(NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void vm_object_enter(object, pager)
	vm_object_t	object;
	vm_pager_t	pager;
{
	struct vm_object_hash_head	*bucket;
	register vm_object_hash_entry_t	entry;

	/*
	 *	We don't cache null objects, and we can't cache
	 *	objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	/*
	 * Allocate a fresh hash entry (may sleep: M_WAITOK) and link
	 * the object into the bucket for this pager.  Entering an
	 * object marks it as eligible to persist in the cache after
	 * its last reference is dropped.
	 */
	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->flags |= OBJ_CANPERSIST;

	vm_object_cache_lock();
	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t	pager;
{
	struct vm_object_hash_head	*bucket;
	register vm_object_hash_entry_t	entry;
	register vm_object_t		object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	/*
	 * Find the first entry in the bucket whose object is backed by
	 * this pager; unlink and free just that entry.
	 */
	for (entry = bucket->tqh_first;
	     entry != NULL;
	     entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			TAILQ_REMOVE(bucket, entry, hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
	}
}

/*
 *	vm_object_cache_clear removes all objects from the cache.
 *
 */

void vm_object_cache_clear()
{
	register vm_object_t	object;

	/*
	 *	Remove each object in the cache by scanning down the
	 *	list of cached objects.
	 *
	 *	The cache lock is dropped around each removal because
	 *	vm_object_lookup() and pager_cache() take it themselves;
	 *	the list head is re-read after reacquiring the lock.
	 */
	vm_object_cache_lock();
	while ((object = vm_object_cached_list.tqh_first) != NULL) {
		vm_object_cache_unlock();

		/*
		 * Note: it is important that we use vm_object_lookup
		 * to gain a reference, and not vm_object_reference, because
		 * the logic for removing an object from the cache lies in
		 * lookup.
		 */
		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_clear: I'm sooo confused.");
		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/* Global switch: set FALSE to disable object collapsing entirely. */
boolean_t	vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 */
void vm_object_collapse(object)
	register vm_object_t	object;

{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	if (!vm_object_collapse_allowed)
		return;

	/* Loop: after each collapse/bypass, retry with the new shadow. */
	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out (or have ever been paged out).
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0 ||
		    object->pager != NULL)
			return;

		/*
		 *	There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 *	...
		 *	The backing object is not read_only,
		 *	and no pages in the backing object are
		 *	currently being paged out.
		 *	The backing object is internal.
		 */

		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy != NULL) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */

			while ((p = backing_object->memq.tqh_first) != NULL) {
				/* Offset the page would occupy in the parent. */
				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					/*
					 * A real (non-fake) parent page
					 * shadows p: p is dead.  A fake
					 * placeholder page is discarded in
					 * favor of the backing page.
					 */
					if (pp != NULL && !(pp->flags & PG_FAKE)) {
						vm_page_lock_queues();
						vm_page_free(p);
						vm_page_unlock_queues();
					}
					else {
						if (pp) {
							/* may be someone waiting for it */
							PAGE_WAKEUP(pp);
							vm_page_lock_queues();
							vm_page_free(pp);
							vm_page_unlock_queues();
						}
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 *	Move the pager from backing_object to object.
			 *
			 *	XXX We're only using part of the paging space
			 *	for keeps now... we ought to discard the
			 *	unused portion.
			 */

			if (backing_object->pager) {
				object->pager = backing_object->pager;
				object->paging_offset = backing_offset +
					backing_object->paging_offset;
				backing_object->pager = NULL;
			}

			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */

			for (p = backing_object->memq.tqh_first;
			     p != NULL;
			     p = p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset < size &&
				    ((pp = vm_page_lookup(object, new_offset))
				      == NULL ||
				     (pp->flags & PG_FAKE))) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */

			object->shadow = backing_object->shadow;
			vm_object_reference(object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 *	Backing object might have had a copy pointer
			 *	to us.  If it did, clear it.
			 */
			if (backing_object->copy == object) {
				backing_object->copy = NULL;
			}

			/*	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses ++;

		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;

	if (object == NULL)
		return;

	/* next is fetched before p may be freed, so the walk stays valid. */
	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		if ((start <= p->offset) && (p->offset < end)) {
			/* Revoke all mappings before freeing the page. */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			vm_page_lock_queues();
			vm_page_free(p);
			vm_page_unlock_queues();
		}
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
128445748Smckusick * 128545748Smckusick * Parameters: 128645748Smckusick * prev_object First object to coalesce 128745748Smckusick * prev_offset Offset into prev_object 128845748Smckusick * next_object Second object into coalesce 128945748Smckusick * next_offset Offset into next_object 129045748Smckusick * 129145748Smckusick * prev_size Size of reference to prev_object 129245748Smckusick * next_size Size of reference to next_object 129345748Smckusick * 129445748Smckusick * Conditions: 129545748Smckusick * The object must *not* be locked. 129645748Smckusick */ 129745748Smckusick boolean_t vm_object_coalesce(prev_object, next_object, 129845748Smckusick prev_offset, next_offset, 129945748Smckusick prev_size, next_size) 130045748Smckusick 130145748Smckusick register vm_object_t prev_object; 130245748Smckusick vm_object_t next_object; 130345748Smckusick vm_offset_t prev_offset, next_offset; 130445748Smckusick vm_size_t prev_size, next_size; 130545748Smckusick { 130645748Smckusick vm_size_t newsize; 130745748Smckusick 130845748Smckusick #ifdef lint 130945748Smckusick next_offset++; 131060345Storek #endif 131145748Smckusick 131248386Skarels if (next_object != NULL) { 131345748Smckusick return(FALSE); 131445748Smckusick } 131545748Smckusick 131648386Skarels if (prev_object == NULL) { 131745748Smckusick return(TRUE); 131845748Smckusick } 131945748Smckusick 132045748Smckusick vm_object_lock(prev_object); 132145748Smckusick 132245748Smckusick /* 132345748Smckusick * Try to collapse the object first 132445748Smckusick */ 132545748Smckusick vm_object_collapse(prev_object); 132645748Smckusick 132745748Smckusick /* 132845748Smckusick * Can't coalesce if: 132945748Smckusick * . more than one reference 133045748Smckusick * . paged out 133145748Smckusick * . shadows another object 133245748Smckusick * . 
has a copy elsewhere 133345748Smckusick * (any of which mean that the pages not mapped to 133445748Smckusick * prev_entry may be in use anyway) 133545748Smckusick */ 133645748Smckusick 133745748Smckusick if (prev_object->ref_count > 1 || 133848386Skarels prev_object->pager != NULL || 133948386Skarels prev_object->shadow != NULL || 134048386Skarels prev_object->copy != NULL) { 134145748Smckusick vm_object_unlock(prev_object); 134245748Smckusick return(FALSE); 134345748Smckusick } 134445748Smckusick 134545748Smckusick /* 134645748Smckusick * Remove any pages that may still be in the object from 134745748Smckusick * a previous deallocation. 134845748Smckusick */ 134945748Smckusick 135045748Smckusick vm_object_page_remove(prev_object, 135145748Smckusick prev_offset + prev_size, 135245748Smckusick prev_offset + prev_size + next_size); 135345748Smckusick 135445748Smckusick /* 135545748Smckusick * Extend the object if necessary. 135645748Smckusick */ 135745748Smckusick newsize = prev_offset + prev_size + next_size; 135845748Smckusick if (newsize > prev_object->size) 135945748Smckusick prev_object->size = newsize; 136045748Smckusick 136145748Smckusick vm_object_unlock(prev_object); 136245748Smckusick return(TRUE); 136345748Smckusick } 136445748Smckusick 136545748Smckusick /* 136645748Smckusick * vm_object_print: [ debug ] 136745748Smckusick */ 136845748Smckusick void vm_object_print(object, full) 136945748Smckusick vm_object_t object; 137045748Smckusick boolean_t full; 137145748Smckusick { 137245748Smckusick register vm_page_t p; 137345748Smckusick extern indent; 137445748Smckusick 137545748Smckusick register int count; 137645748Smckusick 137748386Skarels if (object == NULL) 137845748Smckusick return; 137945748Smckusick 138045748Smckusick iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", 138145748Smckusick (int) object, (int) object->size, 138245748Smckusick object->resident_page_count, object->ref_count); 138345748Smckusick printf("pager=0x%x+0x%x, 
shadow=(0x%x)+0x%x\n", 138445748Smckusick (int) object->pager, (int) object->paging_offset, 138545748Smckusick (int) object->shadow, (int) object->shadow_offset); 138645748Smckusick printf("cache: next=0x%x, prev=0x%x\n", 138765231Smckusick object->cached_list.tqe_next, object->cached_list.tqe_prev); 138845748Smckusick 138945748Smckusick if (!full) 139045748Smckusick return; 139145748Smckusick 139245748Smckusick indent += 2; 139345748Smckusick count = 0; 139465231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 139545748Smckusick if (count == 0) 139645748Smckusick iprintf("memory:="); 139745748Smckusick else if (count == 6) { 139845748Smckusick printf("\n"); 139945748Smckusick iprintf(" ..."); 140045748Smckusick count = 0; 140145748Smckusick } else 140245748Smckusick printf(","); 140345748Smckusick count++; 140445748Smckusick 140545748Smckusick printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p)); 140645748Smckusick } 140745748Smckusick if (count != 0) 140845748Smckusick printf("\n"); 140945748Smckusick indent -= 2; 141045748Smckusick } 1411