/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	7.11 (Berkeley) 05/04/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_vm_faults++;		/* needs lock XXX */
/*
 *	Recovery actions -- cleanup macros for the various
 *	failure and retry paths out of the fault handler.
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
	 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
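			/*
			 *	[Note: the second argument to PAGE_ASSERT_WAIT
			 *	below makes the wait interruptible only when we
			 *	are not changing wiring.]
			 */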
			if (m->busy) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			if (m->absent)
				panic("vm_fault: absent");

			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (fault_type & m->page_lock) {
#ifdef DOTHREADS
				int	wait_result;

				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->inactive) {
				queue_remove(&vm_page_queue_inactive, m,
						vm_page_t, pageq);
				m->inactive = FALSE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->active) {
				queue_remove(&vm_page_queue_active, m,
						vm_page_t, pageq);
				m->active = FALSE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->busy = TRUE;
			m->absent = FALSE;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
			|| (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */
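			/*
			 *	[If no free pages are available, the
			 *	allocation below returns NULL; we then
			 *	wait for free memory and retry the fault
			 *	from the top.]
			 */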

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if ((object->pager != NULL) &&
				(!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = vm_pager_get(object->pager, m, TRUE);
			if (rv == VM_PAGER_OK) {
				/*
				 *	Found the page.
				 *	Leave it busy while we play with it.
				 */
				vm_object_lock(object);

				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pageins++;
				m->fake = FALSE;
				m->clean = TRUE;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 *	Remove the bogus page (which does not
			 *	exist at this object/offset); before
			 *	doing so, we must get back our object
			 *	lock to preserve our invariant.
			 *
			 *	Also wake up any other thread that may want
			 *	to bring in this page.
			 *
			 *	If this is the top-level object, we must
			 *	leave the busy page to prevent another
			 *	thread from rushing past us, and inserting
			 *	the page in that object at the same time
			 *	that we are.
			 */

			vm_object_lock(object);
			/*
			 * Data outside the range of the pager; an error
			 */
			if (rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
				panic("vm_fault: free page"); /* XXX */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */
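		/*
		 *	[shadow_offset is the offset within the backing
		 *	object at which this object's data begins; the
		 *	page at `offset' here is thus found at
		 *	`offset + shadow_offset' in the shadow.]
		 */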

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->fake = FALSE;
			m->absent = FALSE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if (m->absent || m->active || m->inactive || !m->busy)
		panic("vm_fault: absent or active or inactive or not busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->fake = FALSE;
			first_m->absent = FALSE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= (~VM_PROT_WRITE);
			m->copy_on_write = TRUE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
    RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t copy_object = first_object->copy;
		vm_offset_t copy_offset;
		vm_page_t copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->copy_on_write = TRUE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
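				/*
				 *	[We failed to get copy_object's lock
				 *	without blocking; rather than sleep
				 *	on it, reacquire the object lock and
				 *	retry the copy check from the top.]
				 */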
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->busy) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
							copy_offset);
				if (copy_m == NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
							copy_object->pager,
							(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->fake = FALSE;
				copy_m->absent = FALSE;

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->clean = FALSE;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2: one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->copy_on_write = FALSE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->copy_on_write)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->copy_on_write = FALSE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.   We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
			prot & ~(m->page_lock), wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */
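	/*
	 *	[The fault type is VM_PROT_NONE with change_wiring TRUE:
	 *	the entry has already been marked wired, so vm_map_lookup
	 *	reports it as such, vm_fault substitutes the entry's own
	 *	protection for the fault type, and the resulting page is
	 *	wired rather than activated.]
	 */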

	for (va = start; va < end; va += PAGE_SIZE) {
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
	}
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{

	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif /* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_entry: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}

}