/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.9 (Berkeley) 08/29/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t	vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
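
/*
 * A minimal sketch (not part of the original module) of how the
 * page_shift/page_mask values computed above support the usual
 * page-rounding arithmetic.  The names example_trunc_page and
 * example_round_page are hypothetical; the kernel provides equivalent
 * trunc_page/round_page macros elsewhere.
 */
#ifdef notdef
static vm_offset_t
example_trunc_page(addr)
	vm_offset_t	addr;
{
	/* clear the offset-within-page bits */
	return (addr & ~page_mask);
}

static vm_offset_t
example_round_page(addr)
	vm_offset_t	addr;
{
	/* advance past any partial page, then truncate */
	return ((addr + page_mask) & ~page_mask);
}
#endif /* notdef */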

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
	vm_offset_t	*start;
	vm_offset_t	*end;
{
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	int			i;
	vm_offset_t		pa;
	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;

	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Calculate the number of hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(*end - *start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 */
	vm_page_buckets = (queue_t) pmap_bootstrap_alloc(vm_page_bucket_count
		* sizeof(struct queue_entry));
	bucket = vm_page_buckets;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Truncate the remainder of physical memory to our page size.
	 */

	*end = trunc_page(*end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	cnt.v_free_count = npages =
		(*end - *start)/(PAGE_SIZE + sizeof(struct vm_page));

	/*
	 *	Record the extent of physical memory that the
	 *	virtual memory system manages.
	 */

	first_page = *start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;

	/*
	 *	Allocate and clear the mem entry structures.
	 */

	m = vm_page_array = (vm_page_t)
		pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = NULL;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->fictitious = 1;
			m->busy = TRUE;
			cnt.v_free_count--;
		}
#else /* i386 */
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	/* from now on, pmap_bootstrap_alloc can't be used */
	vm_page_startup_initialized = TRUE;
}
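
/*
 * Hedged sketch of the expected bootstrap ordering; the function below
 * is hypothetical and machine-dependent code actually drives this.
 * The point is the protocol: pmap_bootstrap_alloc() may only be used
 * before vm_page_startup() sets vm_page_startup_initialized.
 */
#ifdef notdef
static void
example_bootstrap(avail_start, avail_end)
	vm_offset_t	avail_start, avail_end;
{
	vm_set_page_size();		/* fix page_shift/page_mask first */
	vm_page_startup(&avail_start, &avail_end);
					/* consumes bootstrap memory and
					   updates the available range */
	/* from here on, pages must come from vm_page_alloc() */
}
#endif /* notdef */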

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

static void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}
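
/*
 * A hedged illustration (hypothetical helper, not in the original
 * source) of why vm_page_hash() requires vm_page_bucket_count to be a
 * power of two: masking with (count - 1) then yields the same bucket
 * index as the more expensive modulus, and always stays in range.
 */
#ifdef notdef
static int
example_hash_in_range(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	unsigned h = (unsigned)object + (unsigned)atop(offset);

	/* h & mask == h % count exactly when count is a power of two */
	return ((h & vm_page_hash_mask) == (h % vm_page_bucket_count));
}
#endif /* notdef */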

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}
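
/*
 * Hedged usage sketch (hypothetical helper, not in the original
 * source): checking whether an object/offset pair has a resident
 * page.  The caller is assumed to hold the object lock, as
 * vm_page_lookup() requires.
 */
#ifdef notdef
static boolean_t
example_page_resident(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	return (vm_page_lookup(object, offset) != NULL);
}
#endif /* notdef */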

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

void vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#ifdef DEBUG
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
		(mem)->pagerowned = FALSE; \
		(mem)->ptpage = FALSE; \
	}
#else
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}
#endif

	vm_page_init(mem, object, offset);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((cnt.v_free_count < cnt.v_free_min) ||
	    ((cnt.v_free_count < cnt.v_free_target) &&
	     (cnt.v_inactive_count < cnt.v_inactive_target)))
		thread_wakeup((int)&vm_pages_needed);
	return(mem);
}
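
/*
 * Hedged usage sketch (hypothetical helper, not in the original
 * source): allocating a zero-filled page for an object, with the
 * NULL return handled by waiting for the pageout daemon and retrying,
 * as callers of vm_page_alloc() conventionally do.  VM_WAIT is
 * assumed to come from vm_pageout.h (included above) and to block
 * until free pages appear.
 */
#ifdef notdef
static vm_page_t
example_alloc_zeroed(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	m;

	while ((m = vm_page_alloc(object, offset)) == NULL)
		VM_WAIT;		/* sleep until pages are freed */
	(void) vm_page_zero_fill(m);	/* pagein-style zero fill */
	return(m);
}
#endif /* notdef */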

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		cnt.v_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		cnt.v_inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			cnt.v_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			cnt.v_inactive_count--;
			mem->inactive = FALSE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		cnt.v_active_count++;
		mem->active = TRUE;
		cnt.v_wire_count--;
	}
}
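
/*
 * Hedged usage sketch (hypothetical helper, not in the original
 * source): wiring a page for the duration of an operation.  Both
 * routines require the page queues to be locked, so the queue lock
 * is taken around each call.
 */
#ifdef notdef
static void
example_wire_briefly(m)
	register vm_page_t	m;
{
	vm_page_lock_queues();
	vm_page_wire(m);		/* page leaves the paging queues */
	vm_page_unlock_queues();

	/* ... operate on the page while it cannot be paged out ... */

	vm_page_lock_queues();
	vm_page_unwire(m);		/* back onto the active queue */
	vm_page_unlock_queues();
}
#endif /* notdef */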

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		cnt.v_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		cnt.v_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
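
/*
 * Hedged sketch (hypothetical helper, not in the original source):
 * the usual pagein-style use of vm_page_copy(), filling a fresh page
 * in a destination object from an existing source page.  The caller
 * is assumed to hold the destination object lock, per vm_page_alloc().
 */
#ifdef notdef
static vm_page_t
example_copy_into(src_m, dst_object, dst_offset)
	vm_page_t	src_m;
	vm_object_t	dst_object;
	vm_offset_t	dst_offset;
{
	register vm_page_t	dst_m;

	if ((dst_m = vm_page_alloc(dst_object, dst_offset)) == NULL)
		return(NULL);		/* caller must wait and retry */
	vm_page_copy(src_m, dst_m);	/* pmap copies the physical page */
	return(dst_m);
}
#endif /* notdef */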