/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.14 (Berkeley) 10/01/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t	vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
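
/*
 * A worked example, with illustrative values only: if cnt.v_page_size
 * is 4096, page_mask becomes 0xfff and the loop above leaves
 * page_shift == 12, so an address splits into a page frame number
 * (pa >> page_shift) and an offset within the page (pa & page_mask).
 * A non-power-of-two size such as 3000 fails the
 * (page_mask & cnt.v_page_size) != 0 test and panics.
 */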

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
	vm_offset_t	*start;
	vm_offset_t	*end;
{
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	int			i;
	vm_offset_t		pa;
	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Calculate the number of hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(*end - *start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 */
	vm_page_buckets = (queue_t) pmap_bootstrap_alloc(vm_page_bucket_count
		* sizeof(struct queue_entry));
	bucket = vm_page_buckets;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Truncate the remainder of physical memory to our page size.
	 */

	*end = trunc_page(*end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	cnt.v_free_count = npages =
		(*end - *start)/(PAGE_SIZE + sizeof(struct vm_page));
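
	/*
	 * An arithmetic sketch with made-up numbers: managing 16MB
	 * (*end - *start == 0x1000000) with 4096-byte pages and a
	 * hypothetical 64-byte struct vm_page gives
	 * 0x1000000 / (4096 + 64) == 4032 usable pages; roughly the
	 * remaining 64 pages' worth of memory is consumed by the
	 * vm_page structures themselves, allocated below.
	 */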

	/*
	 *	Record the extent of physical memory that the
	 *	virtual memory system manages.
	 */

	first_page = *start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;


	/*
	 *	Allocate and clear the mem entry structures.
	 */

	m = vm_page_array = (vm_page_t)
		pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	pa = first_phys_addr;
	while (npages--) {
		m->flags = 0;
		m->object = NULL;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->flags |= PG_FICTITIOUS | PG_BUSY;
			cnt.v_free_count--;
		}
#else /* i386 */
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	/* from now on, pmap_bootstrap_alloc can't be used */
	vm_page_startup_initialized = TRUE;
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
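
/*
 * Because vm_page_bucket_count is a power of 2, masking with
 * vm_page_hash_mask is equivalent to taking the sum modulo the bucket
 * count, but cheaper.  As an illustration (hypothetical values): with
 * 8192 buckets the mask is 0x1fff, so an object pointer of 0x80102000
 * and an offset of 0x3000 (page 3) hash to bucket
 * (0x80102000 + 3) & 0x1fff == 3; offsets one page apart in the same
 * object land in adjacent buckets.
 */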

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}
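
/*
 * A minimal usage sketch (not part of this file; object locking and
 * error handling elided).  A pager that has just filled a page for
 * "object" at "offset" could make it findable, and later discard it:
 *
 *	vm_page_insert(mem, object, offset);
 *	...
 *	if ((mem = vm_page_lookup(object, offset)) != NULL)
 *		vm_page_remove(mem);
 *
 * Both calls assume the object and page are already locked, as the
 * comments above require.
 */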

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	VM_PAGE_INIT(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup((int)&vm_pages_needed);
	return (mem);
}
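
/*
 * Callers must be prepared for vm_page_alloc() to return NULL when the
 * free list is empty.  A typical caller (sketch only; locking elided)
 * sleeps until the pageout daemon replenishes the list, then retries:
 *
 *	while ((mem = vm_page_alloc(object, offset)) == NULL)
 *		wait for free pages, e.g. via the fault handler's
 *		wait-and-retry path;
 *
 * vm_fault(), for instance, unlocks its state, waits, and restarts the
 * whole fault rather than spinning here.
 */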

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->flags & PG_ACTIVE) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
}
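
/*
 * Wirings nest: only the 0->1 transition of wire_count removes the
 * page from the paging queues, and only the 1->0 transition puts it
 * back.  A pairing sketch (queues locked by the caller, as required
 * above; "device transfer" is a placeholder):
 *
 *	vm_page_lock_queues();
 *	vm_page_wire(mem);		pin the frame for a device transfer
 *	vm_page_unlock_queues();
 *	...
 *	vm_page_lock_queues();
 *	vm_page_unwire(mem);		page becomes pageable again
 *	vm_page_unlock_queues();
 */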

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->flags & PG_ACTIVE) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->flags &= ~PG_ACTIVE;
		m->flags |= PG_INACTIVE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
		if (m->flags & PG_CLEAN)
			m->flags &= ~PG_LAUNDRY;
		else
			m->flags |= PG_LAUNDRY;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->flags & PG_INACTIVE) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->flags |= PG_ACTIVE;
		cnt.v_active_count++;
	}
}
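
/*
 * Note the PG_CLEAN/PG_LAUNDRY bookkeeping in vm_page_deactivate():
 * if the pmap layer reports the page modified, PG_CLEAN is cleared
 * and PG_LAUNDRY is set so the pageout daemon knows the page must be
 * written out before its frame can be reused; a page that is still
 * clean can simply be reclaimed.
 */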

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	m->flags &= ~PG_CLEAN;
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	dest_m->flags &= ~PG_CLEAN;
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}