/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	8.2 (Berkeley) 12/30/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist	*vm_page_buckets;	/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

struct pglist	vm_page_queue_free;
struct pglist	vm_page_queue_active;
struct pglist	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t	vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
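
/*
 * Illustration (added for exposition; not part of the original source):
 * for the common case cnt.v_page_size == 4096, the code above yields
 * page_mask == 0xfff and page_shift == 12.  The test
 * (page_mask & cnt.v_page_size) is the usual (x & (x - 1)) check that
 * x is a power of two: a power of two shares no set bits with its
 * predecessor.
 */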

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
	vm_offset_t	*start;
	vm_offset_t	*end;
{
	register vm_page_t	m;
	register struct pglist	*bucket;
	vm_size_t		npages;
	int			i;
	vm_offset_t		pa;
	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;

	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);

	/*
	 *	Calculate the number of hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired
	 *		(a worked example follows this function).
	 */

	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(*end - *start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 */
	vm_page_buckets = (struct pglist *)
	    pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
	bucket = vm_page_buckets;

	for (i = vm_page_bucket_count; i--;) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Truncate the remainder of physical memory to our page size.
	 */

	*end = trunc_page(*end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	cnt.v_free_count = npages = (*end - *start + sizeof(struct vm_page))
		/ (PAGE_SIZE + sizeof(struct vm_page));

	/*
	 *	Record the extent of physical memory that the
	 *	virtual memory system manages.
	 */

	first_page = *start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;

	/*
	 *	Allocate and clear the mem entry structures.
	 */

	m = vm_page_array = (vm_page_t)
		pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	pa = first_phys_addr;
	while (npages--) {
		m->flags = 0;
		m->object = NULL;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->flags |= PG_FICTITIOUS | PG_BUSY;
			cnt.v_free_count--;
		}
#else /* i386 */
		TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	/* from now on, pmap_bootstrap_alloc can't be used */
	vm_page_startup_initialized = TRUE;
}
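
/*
 * Worked example (added for exposition; not part of the original
 * source): on a machine managing 8 MB of memory with 4 KB pages,
 * atop(*end - *start) is 2048, so the bucket-sizing loop above leaves
 * vm_page_bucket_count at 2048 and vm_page_hash_mask at 0x7ff.  The
 * npages computation in turn divides the managed range by the per-page
 * cost, PAGE_SIZE + sizeof(struct vm_page), so that the vm_page array
 * itself is charged against the memory it describes.
 */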

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
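
/*
 * Illustration (added for exposition; not part of the original source):
 * atop() converts the byte offset to a page index, so consecutive pages
 * of the same object hash to consecutive buckets, and the power-of-two
 * bucket count lets the modulus be taken with a single mask.  With
 * vm_page_hash_mask == 0x7ff, vm_page_hash(object, 3 * PAGE_SIZE) is
 * simply ((unsigned)object + 3) & 0x7ff.
 */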

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's page list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register struct pglist	*bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset hash
 *	table and the object's page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register struct pglist	*bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register struct pglist	*bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}
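
#ifdef notdef
/*
 * Usage sketch (added for exposition; hypothetical helper, not part of
 * the original source): a caller holds the object lock across the
 * lookup so the page cannot be removed underneath it, and takes the
 * page-queue lock before calling vm_page_wire() on the page it found.
 */
void vm_page_lookup_and_wire(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;

	vm_object_lock(object);
	mem = vm_page_lookup(object, offset);
	if (mem != NULL) {
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
	}
	vm_object_unlock(object);
}
#endif /* notdef */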

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (vm_page_queue_free.tqh_first == NULL) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	mem = vm_page_queue_free.tqh_first;
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	VM_PAGE_INIT(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup((int)&vm_pages_needed);
	return (mem);
}
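
#ifdef notdef
/*
 * Usage sketch (added for exposition; hypothetical helper, not part of
 * the original source): when the free list is empty, callers such as
 * the fault handler drop their locks, sleep via VM_WAIT until the
 * pageout daemon replenishes the free list, and retry.
 */
vm_page_t
vm_page_alloc_retry(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;

	while ((mem = vm_page_alloc(object, offset)) == NULL) {
		vm_object_unlock(object);	/* never sleep holding locks */
		VM_WAIT;
		vm_object_lock(object);
	}
	return(mem);
}
#endif /* notdef */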

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
}
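
/*
 * Note (added for exposition; not part of the original source):
 * wire_count counts wirings, so vm_page_wire() and vm_page_unwire()
 * must be called in matched pairs; only the 0 -> 1 and 1 -> 0
 * transitions move the page between the paging queues and the wired
 * pool.  Both routines assume the caller already holds the page-queue
 * lock (vm_page_lock_queues()).
 */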

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->flags & PG_ACTIVE) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags &= ~PG_ACTIVE;
		m->flags |= PG_INACTIVE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
		if (m->flags & PG_CLEAN)
			m->flags &= ~PG_LAUNDRY;
		else
			m->flags |= PG_LAUNDRY;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		cnt.v_active_count++;
	}
}
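
/*
 * Note (added for exposition; not part of the original source): the
 * PG_CLEAN/PG_LAUNDRY logic in vm_page_deactivate() is what feeds the
 * pageout daemon -- a page that is dirty when deactivated is marked
 * PG_LAUNDRY and must be written to backing store before it can be
 * reused, while a clean page can be reclaimed from the inactive queue
 * immediately.
 */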

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	m->flags &= ~PG_CLEAN;
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	dest_m->flags &= ~PG_CLEAN;
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
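
/*
 * Note (added for exposition; not part of the original source): both
 * routines clear PG_CLEAN before touching the frame because they write
 * physical memory through the pmap layer, bypassing any mapping on
 * which the hardware could record a modify bit; marking the page dirty
 * up front keeps the pageout daemon from discarding its contents
 * without writing them back.
 */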