145748Smckusick /* 263379Sbostic * Copyright (c) 1991, 1993 363379Sbostic * The Regents of the University of California. All rights reserved. 445748Smckusick * 545748Smckusick * This code is derived from software contributed to Berkeley by 645748Smckusick * The Mach Operating System project at Carnegie-Mellon University. 745748Smckusick * 848493Smckusick * %sccs.include.redist.c% 945748Smckusick * 10*68164Scgd * @(#)vm_object.c 8.6 (Berkeley) 01/09/95 1148493Smckusick * 1248493Smckusick * 1348493Smckusick * Copyright (c) 1987, 1990 Carnegie-Mellon University. 1448493Smckusick * All rights reserved. 1548493Smckusick * 1648493Smckusick * Authors: Avadis Tevanian, Jr., Michael Wayne Young 1748493Smckusick * 1848493Smckusick * Permission to use, copy, modify and distribute this software and 1948493Smckusick * its documentation is hereby granted, provided that both the copyright 2048493Smckusick * notice and this permission notice appear in all copies of the 2148493Smckusick * software, derivative works or modified versions, and any portions 2248493Smckusick * thereof, and that both notices appear in supporting documentation. 2348493Smckusick * 2448493Smckusick * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 2548493Smckusick * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 2648493Smckusick * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 2748493Smckusick * 2848493Smckusick * Carnegie Mellon requests users of this software to return to 2948493Smckusick * 3048493Smckusick * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 3148493Smckusick * School of Computer Science 3248493Smckusick * Carnegie Mellon University 3348493Smckusick * Pittsburgh PA 15213-3890 3448493Smckusick * 3548493Smckusick * any improvements or extensions that they make and grant Carnegie the 3648493Smckusick * rights to redistribute these changes. 
3745748Smckusick */ 3845748Smckusick 3945748Smckusick /* 4045748Smckusick * Virtual memory object module. 4145748Smckusick */ 4245748Smckusick 4353354Sbostic #include <sys/param.h> 4453354Sbostic #include <sys/systm.h> 4553354Sbostic #include <sys/malloc.h> 4645748Smckusick 4753354Sbostic #include <vm/vm.h> 4853354Sbostic #include <vm/vm_page.h> 4948386Skarels 5045748Smckusick /* 5145748Smckusick * Virtual memory objects maintain the actual data 5245748Smckusick * associated with allocated virtual memory. A given 5345748Smckusick * page of memory exists within exactly one object. 5445748Smckusick * 5545748Smckusick * An object is only deallocated when all "references" 5645748Smckusick * are given up. Only one "reference" to a given 5745748Smckusick * region of an object should be writeable. 5845748Smckusick * 5945748Smckusick * Associated with each object is a list of all resident 6045748Smckusick * memory pages belonging to that object; this list is 6145748Smckusick * maintained by the "vm_page" module, and locked by the object's 6245748Smckusick * lock. 6345748Smckusick * 6445748Smckusick * Each object also records a "pager" routine which is 6545748Smckusick * used to retrieve (and store) pages to the proper backing 6645748Smckusick * storage. In addition, objects may be backed by other 6745748Smckusick * objects from which they were virtual-copied. 
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

/* Static backing store for the two permanent kernel objects. */
struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

#define	VM_OBJECT_HASH_COUNT	157	/* buckets in the pager hash table */

int	vm_cache_max = 100;	/* can patch if necessary */
struct vm_object_hash_head	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

/* Statistics kept by the shadow-chain collapse code. */
long	object_collapses = 0;
long	object_bypasses = 0;

static void _vm_object_allocate __P((vm_size_t, vm_object_t));

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(size)
	vm_size_t	size;
{
	register int	i;

	/* Empty the cached-object and all-object lists and their locks. */
	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		TAILQ_INIT(&vm_object_hashtable[i]);

	/*
	 * The two permanent objects are initialized in place in their
	 * statically-allocated store; `size' covers the kernel object.
	 */
	kernel_object = &kernel_object_store;
	_vm_object_allocate(size, kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(size)
	vm_size_t	size;
{
	register vm_object_t	result;

	result = (vm_object_t)
		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return(result);
}

/*
 * Common initialization for both a freshly-malloc'd object and the
 * statically-allocated kernel objects: set all fields to their defaults
 * and enter the object on the global object list.
 */
static void
_vm_object_allocate(size, object)
	vm_size_t		size;
	register vm_object_t	object;
{
	TAILQ_INIT(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	cnt.v_nzfod += atop(size);	/* count pages as not-yet zero-filled */
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	register vm_object_t	object;
{
	vm_object_t	temp;

	/*
	 * Loop rather than recurse: when an object dies, the reference
	 * it held on its shadow must be dropped in turn.
	 */
	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->flags & OBJ_CANPERSIST) {

			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			vm_object_deactivate_pages(object);
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}


/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}

	/*
	 *	Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep(object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 *	If not an internal object clean all the pages, removing them
	 *	from paging queues as we go.
	 *
	 * XXX need to do something in the event of a cleaning error.
	 */
	if ((object->flags & OBJ_INTERNAL) == 0) {
		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
		vm_object_unlock(object);
	}

	/*
	 *	Now free the pages.
	 *	For internal objects, this also removes them from paging queues.
	 */
	while ((p = object->memq.tqh_first) != NULL) {
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		vm_page_free(p);
		cnt.v_pfree++;
		vm_page_unlock_queues();
	}
	/*
	 * NOTE(review): for non-internal objects the lock was already
	 * dropped after page cleaning above, so this looks like a second
	 * unlock of the same object -- presumably benign with the no-op
	 * uniprocessor simple locks, but worth confirming.
	 */
	if ((object->flags & OBJ_INTERNAL) == 0)
		vm_object_unlock(object);

	/*
	 *	Let the pager know object is dead.
	 */
	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Free the space for the object.
	 */
	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	If syncio is TRUE, page cleaning is done synchronously.
 *	If de_queue is TRUE, pages are removed from any paging queue
 *	they were on, otherwise they are left on whatever queue they
 *	were on before the cleaning operation began.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns TRUE if all was well, FALSE if there was a pager error
 *	somewhere.  We attempt to clean (and dequeue) all pages regardless
 *	of where an error occurs.
 */
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
	boolean_t		syncio;
	boolean_t		de_queue;
{
	register vm_page_t	p;
	int onqueue;
	boolean_t noerror = TRUE;

	if (object == NULL)
		return (TRUE);

	/*
	 * If it is an internal object and there is no pager, attempt to
	 * allocate one.  Note that vm_object_collapse may relocate one
	 * from a collapsed object so we must recheck afterward.
	 */
	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
		vm_object_collapse(object);
		if (object->pager == NULL) {
			vm_pager_t pager;

			vm_object_unlock(object);
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL,
						  (vm_offset_t)0);
			if (pager)
				vm_object_setpager(object, pager, 0, FALSE);
			vm_object_lock(object);
		}
	}
	if (object->pager == NULL)
		return (FALSE);

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep(object, object, FALSE);
		vm_object_lock(object);
	}
	/*
	 * Loop through the object page list cleaning as necessary.
	 * Restarted from `again' whenever a page is actually paged out,
	 * since the list may change while the object is unlocked.
	 */
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start == end || p->offset >= start && p->offset < end) &&
		    !(p->flags & PG_FICTITIOUS)) {
			/* Re-dirty the page if the pmap layer saw a write. */
			if ((p->flags & PG_CLEAN) &&
			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;
			/*
			 * Remove the page from any paging queue.
			 * This needs to be done if either we have been
			 * explicitly asked to do so or it is about to
			 * be cleaned (see comment below).
			 */
			if (de_queue || !(p->flags & PG_CLEAN)) {
				vm_page_lock_queues();
				if (p->flags & PG_ACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_active,
						     p, pageq);
					p->flags &= ~PG_ACTIVE;
					cnt.v_active_count--;
					onqueue = 1;
				} else if (p->flags & PG_INACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_inactive,
						     p, pageq);
					p->flags &= ~PG_INACTIVE;
					cnt.v_inactive_count--;
					onqueue = -1;
				} else
					onqueue = 0;
				vm_page_unlock_queues();
			}
			/*
			 * To ensure the state of the page doesn't change
			 * during the clean operation we do two things.
			 * First we set the busy bit and write-protect all
			 * mappings to ensure that write accesses to the
			 * page block (in vm_fault).  Second, we remove
			 * the page from any paging queue to foil the
			 * pageout daemon (vm_pageout_scan).
			 */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			if (!(p->flags & PG_CLEAN)) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				/*
				 * XXX if put fails we mark the page as
				 * clean to avoid an infinite loop.
				 * Will lose changes to the page.
				 */
				if (vm_pager_put(object->pager, p, syncio)) {
					printf("%s: pager_put error\n",
					       "vm_object_page_clean");
					p->flags |= PG_CLEAN;
					noerror = FALSE;
				}
				vm_object_lock(object);
				object->paging_in_progress--;
				/*
				 * Put the page back on whichever queue it
				 * came from, unless the caller asked for
				 * it to stay dequeued.
				 */
				if (!de_queue && onqueue) {
					vm_page_lock_queues();
					if (onqueue > 0)
						vm_page_activate(p);
					else
						vm_page_deactivate(p);
					vm_page_unlock_queues();
				}
				p->flags &= ~PG_BUSY;
				PAGE_WAKEUP(p);
				goto again;
			}
		}
	}
	return (noerror);
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t	object;
{
	register vm_page_t	p, next;

	/* `next' is fetched first: vm_page_deactivate may move p's links. */
	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t	object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = vm_object_cached_list.tqh_first;
		/*
		 * The cache lock is dropped so that vm_object_lookup can
		 * retake it; the lookup doubles as a sanity check that the
		 * head of the cached list is still hashed under its pager.
		 */
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_deactivate: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
52945748Smckusick */ 530*68164Scgd void 531*68164Scgd vm_object_pmap_copy(object, start, end) 53245748Smckusick register vm_object_t object; 53345748Smckusick register vm_offset_t start; 53445748Smckusick register vm_offset_t end; 53545748Smckusick { 53645748Smckusick register vm_page_t p; 53745748Smckusick 53848386Skarels if (object == NULL) 53945748Smckusick return; 54045748Smckusick 54145748Smckusick vm_object_lock(object); 54265231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 54345748Smckusick if ((start <= p->offset) && (p->offset < end)) { 54449292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ); 54556382Smckusick p->flags |= PG_COPYONWRITE; 54645748Smckusick } 54745748Smckusick } 54845748Smckusick vm_object_unlock(object); 54945748Smckusick } 55045748Smckusick 55145748Smckusick /* 55245748Smckusick * vm_object_pmap_remove: 55345748Smckusick * 55445748Smckusick * Removes all physical pages in the specified 55545748Smckusick * object range from all physical maps. 55645748Smckusick * 55745748Smckusick * The object must *not* be locked. 
55845748Smckusick */ 559*68164Scgd void 560*68164Scgd vm_object_pmap_remove(object, start, end) 56145748Smckusick register vm_object_t object; 56245748Smckusick register vm_offset_t start; 56345748Smckusick register vm_offset_t end; 56445748Smckusick { 56545748Smckusick register vm_page_t p; 56645748Smckusick 56748386Skarels if (object == NULL) 56845748Smckusick return; 56945748Smckusick 57045748Smckusick vm_object_lock(object); 57165231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) 57249292Shibler if ((start <= p->offset) && (p->offset < end)) 57349292Shibler pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); 57445748Smckusick vm_object_unlock(object); 57545748Smckusick } 57645748Smckusick 57745748Smckusick /* 57845748Smckusick * vm_object_copy: 57945748Smckusick * 58045748Smckusick * Create a new object which is a copy of an existing 58145748Smckusick * object, and mark all of the pages in the existing 58245748Smckusick * object 'copy-on-write'. The new object has one reference. 58345748Smckusick * Returns the new object. 58445748Smckusick * 58545748Smckusick * May defer the copy until later if the object is not backed 58645748Smckusick * up by a non-default pager. 
 */
void
vm_object_copy(src_object, src_offset, size,
	     dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 */

	vm_object_lock(src_object);
	if (src_object->pager == NULL ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	Try to collapse the object before copying it.
	 */
	vm_object_collapse(src_object);

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
78145748Smckusick */ 78245748Smckusick 783*68164Scgd void 784*68164Scgd vm_object_shadow(object, offset, length) 78545748Smckusick vm_object_t *object; /* IN/OUT */ 78645748Smckusick vm_offset_t *offset; /* IN/OUT */ 78745748Smckusick vm_size_t length; 78845748Smckusick { 78945748Smckusick register vm_object_t source; 79045748Smckusick register vm_object_t result; 79145748Smckusick 79245748Smckusick source = *object; 79345748Smckusick 79445748Smckusick /* 79545748Smckusick * Allocate a new object with the given length 79645748Smckusick */ 79745748Smckusick 79848386Skarels if ((result = vm_object_allocate(length)) == NULL) 79945748Smckusick panic("vm_object_shadow: no object for shadowing"); 80045748Smckusick 80145748Smckusick /* 80245748Smckusick * The new object shadows the source object, adding 80345748Smckusick * a reference to it. Our caller changes his reference 80445748Smckusick * to point to the new object, removing a reference to 80545748Smckusick * the source object. Net result: no change of reference 80645748Smckusick * count. 80745748Smckusick */ 80845748Smckusick result->shadow = source; 80945748Smckusick 81045748Smckusick /* 81145748Smckusick * Store the offset into the source object, 81245748Smckusick * and fix up the offset into the new object. 81345748Smckusick */ 81445748Smckusick 81545748Smckusick result->shadow_offset = *offset; 81645748Smckusick 81745748Smckusick /* 81845748Smckusick * Return the new things 81945748Smckusick */ 82045748Smckusick 82145748Smckusick *offset = 0; 82245748Smckusick *object = result; 82345748Smckusick } 82445748Smckusick 82545748Smckusick /* 82645748Smckusick * Set the specified object's pager to the specified pager. 
82745748Smckusick */ 82845748Smckusick 829*68164Scgd void 830*68164Scgd vm_object_setpager(object, pager, paging_offset, 83145748Smckusick read_only) 83245748Smckusick vm_object_t object; 83345748Smckusick vm_pager_t pager; 83445748Smckusick vm_offset_t paging_offset; 83545748Smckusick boolean_t read_only; 83645748Smckusick { 83745748Smckusick #ifdef lint 83845748Smckusick read_only++; /* No longer used */ 83960345Storek #endif 84045748Smckusick 84145748Smckusick vm_object_lock(object); /* XXX ? */ 84245748Smckusick object->pager = pager; 84345748Smckusick object->paging_offset = paging_offset; 84445748Smckusick vm_object_unlock(object); /* XXX ? */ 84545748Smckusick } 84645748Smckusick 84745748Smckusick /* 84845748Smckusick * vm_object_hash hashes the pager/id pair. 84945748Smckusick */ 85045748Smckusick 85145748Smckusick #define vm_object_hash(pager) \ 852*68164Scgd (((unsigned long)pager)%VM_OBJECT_HASH_COUNT) 85345748Smckusick 85445748Smckusick /* 85545748Smckusick * vm_object_lookup looks in the object cache for an object with the 85645748Smckusick * specified pager and paging id. 
85745748Smckusick */ 85845748Smckusick 859*68164Scgd vm_object_t 860*68164Scgd vm_object_lookup(pager) 86145748Smckusick vm_pager_t pager; 86245748Smckusick { 86345748Smckusick register vm_object_hash_entry_t entry; 86445748Smckusick vm_object_t object; 86545748Smckusick 86645748Smckusick vm_object_cache_lock(); 86745748Smckusick 86865231Smckusick for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first; 86965231Smckusick entry != NULL; 87065231Smckusick entry = entry->hash_links.tqe_next) { 87145748Smckusick object = entry->object; 87245748Smckusick if (object->pager == pager) { 87345748Smckusick vm_object_lock(object); 87445748Smckusick if (object->ref_count == 0) { 87565231Smckusick TAILQ_REMOVE(&vm_object_cached_list, object, 87665231Smckusick cached_list); 87745748Smckusick vm_object_cached--; 87845748Smckusick } 87945748Smckusick object->ref_count++; 88045748Smckusick vm_object_unlock(object); 88145748Smckusick vm_object_cache_unlock(); 88245748Smckusick return(object); 88345748Smckusick } 88445748Smckusick } 88545748Smckusick 88645748Smckusick vm_object_cache_unlock(); 88748386Skarels return(NULL); 88845748Smckusick } 88945748Smckusick 89045748Smckusick /* 89145748Smckusick * vm_object_enter enters the specified object/pager/id into 89245748Smckusick * the hash table. 89345748Smckusick */ 89445748Smckusick 895*68164Scgd void 896*68164Scgd vm_object_enter(object, pager) 89745748Smckusick vm_object_t object; 89845748Smckusick vm_pager_t pager; 89945748Smckusick { 90065231Smckusick struct vm_object_hash_head *bucket; 90145748Smckusick register vm_object_hash_entry_t entry; 90245748Smckusick 90345748Smckusick /* 90445748Smckusick * We don't cache null objects, and we can't cache 90545748Smckusick * objects with the null pager. 
90645748Smckusick */ 90745748Smckusick 90848386Skarels if (object == NULL) 90945748Smckusick return; 91048386Skarels if (pager == NULL) 91145748Smckusick return; 91245748Smckusick 91345748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 91445748Smckusick entry = (vm_object_hash_entry_t) 91545748Smckusick malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK); 91645748Smckusick entry->object = object; 91750917Smckusick object->flags |= OBJ_CANPERSIST; 91845748Smckusick 91945748Smckusick vm_object_cache_lock(); 92065231Smckusick TAILQ_INSERT_TAIL(bucket, entry, hash_links); 92145748Smckusick vm_object_cache_unlock(); 92245748Smckusick } 92345748Smckusick 92445748Smckusick /* 92545748Smckusick * vm_object_remove: 92645748Smckusick * 92745748Smckusick * Remove the pager from the hash table. 92845748Smckusick * Note: This assumes that the object cache 92945748Smckusick * is locked. XXX this should be fixed 93045748Smckusick * by reorganizing vm_object_deallocate. 93145748Smckusick */ 93253354Sbostic void 93345748Smckusick vm_object_remove(pager) 93445748Smckusick register vm_pager_t pager; 93545748Smckusick { 93665231Smckusick struct vm_object_hash_head *bucket; 93745748Smckusick register vm_object_hash_entry_t entry; 93845748Smckusick register vm_object_t object; 93945748Smckusick 94045748Smckusick bucket = &vm_object_hashtable[vm_object_hash(pager)]; 94145748Smckusick 94265231Smckusick for (entry = bucket->tqh_first; 94365231Smckusick entry != NULL; 94465231Smckusick entry = entry->hash_links.tqe_next) { 94545748Smckusick object = entry->object; 94645748Smckusick if (object->pager == pager) { 94765231Smckusick TAILQ_REMOVE(bucket, entry, hash_links); 94845748Smckusick free((caddr_t)entry, M_VMOBJHASH); 94945748Smckusick break; 95045748Smckusick } 95145748Smckusick } 95245748Smckusick } 95345748Smckusick 95445748Smckusick /* 95545748Smckusick * vm_object_cache_clear removes all objects from the cache. 
95645748Smckusick * 95745748Smckusick */ 958*68164Scgd void 959*68164Scgd vm_object_cache_clear() 96045748Smckusick { 96145748Smckusick register vm_object_t object; 96245748Smckusick 96345748Smckusick /* 96445748Smckusick * Remove each object in the cache by scanning down the 96545748Smckusick * list of cached objects. 96645748Smckusick */ 96745748Smckusick vm_object_cache_lock(); 96865231Smckusick while ((object = vm_object_cached_list.tqh_first) != NULL) { 96945748Smckusick vm_object_cache_unlock(); 97045748Smckusick 97145748Smckusick /* 97245748Smckusick * Note: it is important that we use vm_object_lookup 97345748Smckusick * to gain a reference, and not vm_object_reference, because 97445748Smckusick * the logic for removing an object from the cache lies in 97545748Smckusick * lookup. 97645748Smckusick */ 97745748Smckusick if (object != vm_object_lookup(object->pager)) 97845748Smckusick panic("vm_object_cache_clear: I'm sooo confused."); 97945748Smckusick pager_cache(object, FALSE); 98045748Smckusick 98145748Smckusick vm_object_cache_lock(); 98245748Smckusick } 98345748Smckusick vm_object_cache_unlock(); 98445748Smckusick } 98545748Smckusick 98645748Smckusick boolean_t vm_object_collapse_allowed = TRUE; 98745748Smckusick /* 98845748Smckusick * vm_object_collapse: 98945748Smckusick * 99045748Smckusick * Collapse an object with the object backing it. 99145748Smckusick * Pages in the backing object are moved into the 99245748Smckusick * parent, and the backing object is deallocated. 99345748Smckusick * 99445748Smckusick * Requires that the object be locked and the page 99545748Smckusick * queues be unlocked. 
 *
 */
void
vm_object_collapse(object)
	register vm_object_t	object;

{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	/* Global debug switch: collapsing can be disabled wholesale. */
	if (!vm_object_collapse_allowed)
		return;

	/*
	 * Each iteration either collapses or bypasses one level of the
	 * shadow chain and loops to try the next level, or returns when
	 * no further simplification is possible.
	 */
	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently
		 * being paged out (or have ever been paged out).
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0 ||
		    object->pager != NULL)
			return;

		/*
		 * There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 * ...
		 * The backing object is not read_only,
		 * and no pages in the backing object are
		 * currently being paged out.
		 * The backing object is internal.
		 */

		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * The backing object can't be a copy-object:
		 * the shadow_offset for the copy-object must stay
		 * as 0.  Furthermore (for the 'we have all the
		 * pages' case), if we bypass backing_object and
		 * just shadow the next object in the chain, old
		 * pages from that object would then have to be copied
		 * BOTH into the (former) backing_object and into the
		 * parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy != NULL) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * We know that we can either collapse the backing
		 * object (if the parent is the only reference to
		 * it) or (perhaps) remove the parent's reference
		 * to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 * If there is exactly one reference to the backing
		 * object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object
			 * to the parent.  Pages that have been paged out
			 * will be overwritten by any of the parent's
			 * pages that shadow them.
			 *
			 * NOTE: vm_page_free/vm_page_rename unlink the
			 * page from memq, so we always re-read the list
			 * head rather than iterating normally.
			 */

			while ((p = backing_object->memq.tqh_first) != NULL) {
				/* Offset of this page as seen from the parent. */
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if
				 * this page falls outside the parent,
				 * dispose of it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL && !(pp->flags & PG_FAKE)) {
						/* Parent already has real data here. */
						vm_page_lock_queues();
						vm_page_free(p);
						vm_page_unlock_queues();
					}
					else {
						if (pp) {
							/* may be someone waiting for it */
							PAGE_WAKEUP(pp);
							vm_page_lock_queues();
							vm_page_free(pp);
							vm_page_unlock_queues();
						}
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 *
			 * XXX We're only using part of the paging space
			 * for keeps now... we ought to discard the
			 * unused portion.
			 */

			if (backing_object->pager) {
				object->pager = backing_object->pager;
				object->paging_offset = backing_offset +
					backing_object->paging_offset;
				backing_object->pager = NULL;
			}

			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->shadow
			 * moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no
			 * pager left, and no object references within it,
			 * all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			/* The object structure came from the M_VMOBJ pool. */
			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent
			 * object no longer has to shadow the backing
			 * object; it can shadow the next one in the
			 * chain.
			 *
			 * The backing object must not be paged out - we'd
			 * have to check all of the paged-out pages, as
			 * well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 * Should have a check for a 'small' number
			 * of pages here.
			 */

			for (p = backing_object->memq.tqh_first;
			    p != NULL;
			    p = p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if
				 * this page falls outside the parent,
				 * keep going.
				 *
				 * Otherwise, the backing_object must be
				 * left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset < size &&
				    ((pp = vm_page_lookup(object, new_offset))
				      == NULL ||
				     (pp->flags & PG_FAKE))) {
					/*
					 * Page still needed.
					 * Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 * Make the parent shadow the next object
			 * in the chain.  Deallocating backing_object
			 * will not remove it, since its reference
			 * count is at least 2.
			 */

			object->shadow = backing_object->shadow;
			vm_object_reference(object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 * Backing object might have had a copy pointer
			 * to us.  If it did, clear it.
			 */
			if (backing_object->copy == object) {
				backing_object->copy = NULL;
			}

			/* Drop the reference count on backing_object.
			 * Since its ref_count was at least 2, it
			 * will not vanish; so we don't need to call
			 * vm_object_deallocate.
			 */
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses ++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range [start, end) from the object's
 *	list of pages, revoking all mappings first.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;

	if (object == NULL)
		return;

	/*
	 * Grab the successor before freeing p: vm_page_free unlinks
	 * the page from the object's memq.
	 */
	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		if ((start <= p->offset) && (p->offset < end)) {
			/* Remove all pmap-level mappings of the page. */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			vm_page_lock_queues();
			vm_page_free(p);
			vm_page_unlock_queues();
		}
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
129845748Smckusick * 129945748Smckusick * Parameters: 130045748Smckusick * prev_object First object to coalesce 130145748Smckusick * prev_offset Offset into prev_object 130245748Smckusick * next_object Second object into coalesce 130345748Smckusick * next_offset Offset into next_object 130445748Smckusick * 130545748Smckusick * prev_size Size of reference to prev_object 130645748Smckusick * next_size Size of reference to next_object 130745748Smckusick * 130845748Smckusick * Conditions: 130945748Smckusick * The object must *not* be locked. 131045748Smckusick */ 1311*68164Scgd boolean_t 1312*68164Scgd vm_object_coalesce(prev_object, next_object, 131345748Smckusick prev_offset, next_offset, 131445748Smckusick prev_size, next_size) 131545748Smckusick 131645748Smckusick register vm_object_t prev_object; 131745748Smckusick vm_object_t next_object; 131845748Smckusick vm_offset_t prev_offset, next_offset; 131945748Smckusick vm_size_t prev_size, next_size; 132045748Smckusick { 132145748Smckusick vm_size_t newsize; 132245748Smckusick 132345748Smckusick #ifdef lint 132445748Smckusick next_offset++; 132560345Storek #endif 132645748Smckusick 132748386Skarels if (next_object != NULL) { 132845748Smckusick return(FALSE); 132945748Smckusick } 133045748Smckusick 133148386Skarels if (prev_object == NULL) { 133245748Smckusick return(TRUE); 133345748Smckusick } 133445748Smckusick 133545748Smckusick vm_object_lock(prev_object); 133645748Smckusick 133745748Smckusick /* 133845748Smckusick * Try to collapse the object first 133945748Smckusick */ 134045748Smckusick vm_object_collapse(prev_object); 134145748Smckusick 134245748Smckusick /* 134345748Smckusick * Can't coalesce if: 134445748Smckusick * . more than one reference 134545748Smckusick * . paged out 134645748Smckusick * . shadows another object 134745748Smckusick * . 
has a copy elsewhere 134845748Smckusick * (any of which mean that the pages not mapped to 134945748Smckusick * prev_entry may be in use anyway) 135045748Smckusick */ 135145748Smckusick 135245748Smckusick if (prev_object->ref_count > 1 || 135348386Skarels prev_object->pager != NULL || 135448386Skarels prev_object->shadow != NULL || 135548386Skarels prev_object->copy != NULL) { 135645748Smckusick vm_object_unlock(prev_object); 135745748Smckusick return(FALSE); 135845748Smckusick } 135945748Smckusick 136045748Smckusick /* 136145748Smckusick * Remove any pages that may still be in the object from 136245748Smckusick * a previous deallocation. 136345748Smckusick */ 136445748Smckusick 136545748Smckusick vm_object_page_remove(prev_object, 136645748Smckusick prev_offset + prev_size, 136745748Smckusick prev_offset + prev_size + next_size); 136845748Smckusick 136945748Smckusick /* 137045748Smckusick * Extend the object if necessary. 137145748Smckusick */ 137245748Smckusick newsize = prev_offset + prev_size + next_size; 137345748Smckusick if (newsize > prev_object->size) 137445748Smckusick prev_object->size = newsize; 137545748Smckusick 137645748Smckusick vm_object_unlock(prev_object); 137745748Smckusick return(TRUE); 137845748Smckusick } 137945748Smckusick 138045748Smckusick /* 138145748Smckusick * vm_object_print: [ debug ] 138245748Smckusick */ 1383*68164Scgd void 1384*68164Scgd vm_object_print(object, full) 138545748Smckusick vm_object_t object; 138645748Smckusick boolean_t full; 138745748Smckusick { 138845748Smckusick register vm_page_t p; 138945748Smckusick extern indent; 139045748Smckusick 139145748Smckusick register int count; 139245748Smckusick 139348386Skarels if (object == NULL) 139445748Smckusick return; 139545748Smckusick 139645748Smckusick iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", 139745748Smckusick (int) object, (int) object->size, 139845748Smckusick object->resident_page_count, object->ref_count); 139945748Smckusick printf("pager=0x%x+0x%x, 
shadow=(0x%x)+0x%x\n", 140045748Smckusick (int) object->pager, (int) object->paging_offset, 140145748Smckusick (int) object->shadow, (int) object->shadow_offset); 140245748Smckusick printf("cache: next=0x%x, prev=0x%x\n", 140365231Smckusick object->cached_list.tqe_next, object->cached_list.tqe_prev); 140445748Smckusick 140545748Smckusick if (!full) 140645748Smckusick return; 140745748Smckusick 140845748Smckusick indent += 2; 140945748Smckusick count = 0; 141065231Smckusick for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { 141145748Smckusick if (count == 0) 141245748Smckusick iprintf("memory:="); 141345748Smckusick else if (count == 6) { 141445748Smckusick printf("\n"); 141545748Smckusick iprintf(" ..."); 141645748Smckusick count = 0; 141745748Smckusick } else 141845748Smckusick printf(","); 141945748Smckusick count++; 142045748Smckusick 142145748Smckusick printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p)); 142245748Smckusick } 142345748Smckusick if (count != 0) 142445748Smckusick printf("\n"); 142545748Smckusick indent -= 2; 142645748Smckusick } 1427