/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.4 (Berkeley) 05/07/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;

int	vm_page_free_count;
int	vm_page_active_count;
int	vm_page_inactive_count;
int	vm_page_wire_count;
int	vm_page_laundry_count;

int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;
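/*
 *	Note: vm_page_free_count is manipulated only with
 *	vm_page_queue_free_lock held, while the active/inactive counts
 *	and queues are protected by vm_page_queue_lock (taken via
 *	vm_page_lock_queues()).  The watermarks above are consulted
 *	without locking; see vm_page_alloc for why that is acceptable.
 */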
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void vm_set_page_size()
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
}
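/*
 *	For example, the default page_size of 4096 gives
 *	page_mask = 0xfff and page_shift = 12: an address is converted
 *	to a page number by shifting right 12 bits, and truncated to a
 *	page boundary by masking off the low 12 bits.
 */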
/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t vm_page_startup(start, end, vaddr)
	register vm_offset_t	start;
	vm_offset_t	end;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	vm_page_buckets = (queue_t) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(end - start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
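	/*
	 *	For example, managing 16 megabytes in 4096-byte pages
	 *	gives atop(end - start) = 4096, so the loop above settles
	 *	on 4096 buckets and a hash mask of 0xfff -- roughly one
	 *	bucket per physical page.
	 */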
	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(((queue_t)start) + vm_page_bucket_count);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	blkclr((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	round (or truncate) the addresses to our page size.
	 */

	end = trunc_page(end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	blkclr((caddr_t) mapped, (vaddr - mapped));
	mapped = vaddr;
	start = new_start;

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	vm_page_free_count = npages =
		(end - start)/(PAGE_SIZE + sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	m = vm_page_array = (vm_page_t) vaddr;
	first_page = start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;

	/*
	 *	Validate these addresses.
	 */

	new_start = start + (round_page(m + npages) - mapped);
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	/*
	 *	Clear all of the page structures
	 */
	blkclr((caddr_t)m, npages * sizeof(*m));

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = NULL;
		m->phys_addr = pa;
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
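/*
 *	For example, the bucket for a page is located with
 *
 *		bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 *
 *	as in vm_page_insert below.  Pages at consecutive offsets in the
 *	same object hash to consecutive buckets (modulo wraparound).
 */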
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's page list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset hash
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}
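/*
 *	When a page changes identity the two routines above are used as
 *	a pair; vm_page_rename below shows the canonical remove/insert
 *	sequence, performed with the page queues locked so that the
 *	pageout daemon cannot move the page in mid-flight.
 */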
/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}
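/*
 *	A typical caller checks for residence before allocating a fresh
 *	page, along the lines of (object locked):
 *
 *		if ((m = vm_page_lookup(object, offset)) == NULL)
 *			m = vm_page_alloc(object, offset);
 *
 *	where vm_page_alloc may itself return NULL if the free list is
 *	exhausted.
 */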
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

void vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#ifdef DEBUG
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
		(mem)->pagerowned = FALSE; \
		(mem)->ptpage = FALSE; \
	}
#else
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}
#endif

	vm_page_init(mem, object, offset);
}
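/*
 *	Note that the #define above shadows the function for the
 *	remainder of this file: the call at the bottom of vm_page_init
 *	expands the macro, as does the call in vm_page_alloc below.
 *	The function itself remains available to callers outside this
 *	file.
 */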
/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	vm_page_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup(&vm_pages_needed);
	return(mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		vm_page_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		vm_page_inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		vm_page_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			vm_page_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			vm_page_inactive_count--;
			mem->inactive = FALSE;
		}
		vm_page_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		vm_page_wire_count--;
	}
}
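/*
 *	Wirings nest: each vm_page_wire call must eventually be balanced
 *	by a vm_page_unwire.  Only the first wiring removes the page
 *	from the paging queues, and only the last unwiring returns it to
 *	the active queue.
 */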
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		vm_page_active_count--;
		vm_page_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		vm_page_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}